file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
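The table below is a fill-in-the-middle (FIM) code dataset: each row holds a file_name, a prefix, a suffix, the held-out middle, and a fim_type label (the four classes seen in the rows are conditional_block, random_line_split, identifier_body, and identifier_name). As a minimal sketch of how such rows could be consumed, the Python snippet below reassembles the original source as prefix + middle + suffix and tallies rows per fim_type. It assumes each row is available as a plain dict with exactly these keys (for example after loading the table with pandas or the datasets library, which is not shown here); the miniature rows are invented for illustration only and are not taken from the real data.

```python
from collections import Counter
from typing import Dict, Iterable, List


def reconstruct_source(row: Dict[str, str]) -> str:
    """Reassemble the original file content for one FIM row: prefix + middle + suffix."""
    return row["prefix"] + row["middle"] + row["suffix"]


def fim_type_counts(rows: Iterable[Dict[str, str]]) -> Counter:
    """Count how many rows fall into each fim_type class."""
    return Counter(row["fim_type"] for row in rows)


# Hypothetical miniature rows shaped like the table below (illustration only).
example_rows: List[Dict[str, str]] = [
    {
        "file_name": "raft.go",
        "prefix": "func min(x, y int) int {\n\t",
        "middle": "if x < y {\n\t\treturn x\n\t}\n",
        "suffix": "\treturn y\n}\n",
        "fim_type": "conditional_block",
    },
    {
        "file_name": "dashboard.py",
        "prefix": "def ",
        "middle": "main",
        "suffix": "():\n    pass\n",
        "fim_type": "identifier_name",
    },
]

for r in example_rows:
    print(r["file_name"], len(reconstruct_source(r)), "chars")
print(fim_type_counts(example_rows))
```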
raft.go | [prevLogIndex].Term
leaderCommit := rf.commitIndex
rf.mu.Unlock()
peerNum := len(rf.peers)
for i := 0; i < peerNum; i++ {
if i == rf.me {
continue
}
args := AppendEntriesArgs{term, rf.me, prevLogIndex, prevLogTerm, []Log{}, leaderCommit}
reply := AppendEntriesReply{}
go rf.sendAppendEntries(i, &args, &reply)
}
time.Sleep(HEARTBEAT)
}
}
func (rf *Raft) leader() {
// comes to power, initialize fields
rf.mu.Lock()
rf.state = LEADER
rf.votedFor = -1
peerNum := len(rf.peers)
for i := 0; i < peerNum; i++ {
rf.nextIndex[i] = len(rf.log) // leader last log index + 1
rf.matchIndex[i] = 0
}
rf.mu.Unlock()
go rf.startHeartBeat()
// leader work
for {
if rf.killed() {
return
}
rf.mu.Lock()
if rf.state == FOLLOWER {
rf.mu.Unlock()
go rf.follower()
return
}
// log replication
for server, index := range rf.nextIndex {
if len(rf.log)-1 >= index {
args := AppendEntriesArgs{
rf.currentTerm, rf.me, index - 1,
rf.log[index-1].Term, rf.log[index:],
rf.commitIndex,
}
reply := AppendEntriesReply{}
go rf.sendAppendEntries(server, &args, &reply)
}
}
// commit log entry if possible
for n := len(rf.log) - 1; n > rf.commitIndex; n-- {
if rf.log[n].Term != rf.currentTerm {
continue
}
// a majority of matchIndex[i] >= n
matchNum := 0
for _, index := range rf.matchIndex {
if index >= n {
matchNum++
}
}
if matchNum > len(rf.peers)/2 {
rf.commitIndex = n
rf.applyCond.Broadcast()
break
}
}
rf.mu.Unlock()
time.Sleep(10 * time.Millisecond)
}
}
func (rf *Raft) candidate() {
rf.mu.Lock()
rf.currentTerm++
rf.state = CANDIDATE
rf.votedFor = rf.me
rf.persist()
rf.voteCount = 1 // vote for itself
peerNum := len(rf.peers)
term := rf.currentTerm
me := rf.me
lastLogIndex := len(rf.log) - 1
lastLogTerm := rf.log[lastLogIndex].Term
rf.mu.Unlock()
rf.resetTimer()
go rf.startElectionTimer()
// send RequestVote to all peers
for i := 0; i < peerNum; i++ {
if i == me {
continue
}
args := RequestVoteArgs{term, me, lastLogIndex, lastLogTerm}
reply := RequestVoteReply{}
go rf.sendRequestVote(i, &args, &reply)
}
// a candidate remains in this state until one of three things happens
// a condition variable could be used here, but events a), b), and c)
// are triggered by different goroutines, which would increase complexity,
// so busy waiting is used instead
for {
if rf.killed() {
return
}
rf.mu.Lock()
if rf.voteCount > peerNum/2 {
// a) the candidate wins and becomes leader
rf.mu.Unlock()
go rf.leader()
break
}
if rf.state == FOLLOWER {
// b) another server establishes itself as leader
rf.mu.Unlock()
go rf.follower()
break
}
if rf.currentTerm > term {
// c) a certain peer has already started a new election
// at this moment, this peer is either running follower() or candidate()
rf.mu.Unlock()
break
}
rf.mu.Unlock()
time.Sleep(10 * time.Millisecond)
}
}
// follower's behaviors are mostly handled by RPC handlers
func (rf *Raft) follower() {
go rf.startElectionTimer()
}
// the election timeout goroutine periodically checks
// whether the time since the server last heard from the leader exceeds the timeout period.
// If so, it starts a new election and returns.
// Each time a server becomes a follower or starts an election, it starts this timer goroutine.
func (rf *Raft) startElectionTimer() {
for {
if rf.killed() {
return
}
rf.mu.Lock()
electionTimeout := rf.electionTimeout
lastHeard := rf.lastHeard
rf.mu.Unlock()
now := time.Now()
if now.After(lastHeard.Add(electionTimeout)) {
go rf.candidate()
return
}
time.Sleep(10 * time.Millisecond)
}
}
//
// save Raft's persistent state to stable storage,
// where it can later be retrieved after a crash and restart.
// see paper's Figure 2 for a description of what should be persistent.
// this function can only be called when `rf` holds the lock
func (rf *Raft) persist() {
// Your code here (2C).
// Example:
// w := new(bytes.Buffer)
// e := labgob.NewEncoder(w)
// e.Encode(rf.xxx)
// e.Encode(rf.yyy)
// data := w.Bytes()
// rf.persister.SaveRaftState(data)
buf := new(bytes.Buffer)
enc := labgob.NewEncoder(buf)
enc.Encode(rf.currentTerm)
enc.Encode(rf.votedFor)
enc.Encode(rf.log)
data := buf.Bytes()
rf.persister.SaveRaftState(data)
}
//
// restore previously persisted state.
//
func (rf *Raft) readPersist(data []byte) {
if data == nil || len(data) < 1 { // bootstrap without any state?
return
}
// Your code here (2C).
// Example:
// r := bytes.NewBuffer(data)
// d := labgob.NewDecoder(r)
// var xxx
// var yyy
// if d.Decode(&xxx) != nil ||
// d.Decode(&yyy) != nil {
// error...
// } else {
// rf.xxx = xxx
// rf.yyy = yyy
// }
buf := bytes.NewBuffer(data)
de := labgob.NewDecoder(buf)
var currentTerm int
var votedFor int
var log []Log
count := 0
for de.Decode(&currentTerm) != nil || de.Decode(&votedFor) != nil || de.Decode(&log) != nil |
rf.currentTerm = currentTerm
rf.votedFor = votedFor
rf.log = log
}
//
// example RequestVote RPC arguments structure.
// field names must start with capital letters!
//
type RequestVoteArgs struct {
// Your data here (2A, 2B).
Term int
CandidateID int
LastLogIndex int
LastLogTerm int
}
//
// example RequestVote RPC reply structure.
// field names must start with capital letters!
//
type RequestVoteReply struct {
// Your data here (2A).
Term int
VoteGranted bool
}
//
// example RequestVote RPC handler.
//
func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {
// Your code here (2A, 2B).
rf.mu.Lock()
if args.Term < rf.currentTerm {
reply.Term = rf.currentTerm
reply.VoteGranted = false
rf.mu.Unlock()
return
}
// follow the second rule in "Rules for Servers" in figure 2 before handling an incoming RPC
if args.Term > rf.currentTerm {
rf.currentTerm = args.Term
rf.state = FOLLOWER
rf.votedFor = -1
rf.persist()
}
reply.Term = rf.currentTerm
reply.VoteGranted = true
// deny vote if already voted
if rf.votedFor != -1 {
reply.VoteGranted = false
rf.mu.Unlock()
return
}
// deny vote if consistency check fails (candidate is less up-to-date)
lastLog := rf.log[len(rf.log)-1]
if args.LastLogTerm < lastLog.Term || (args.LastLogTerm == lastLog.Term && args.LastLogIndex < len(rf.log)-1) {
reply.VoteGranted = false
rf.mu.Unlock()
return
}
// now this peer must vote for the candidate
rf.votedFor = args.CandidateID
rf.mu.Unlock()
rf.resetTimer()
}
type | {
fmt.Fprintf(os.Stderr, "Peer #%d failed to decode state from persister, retrying...\n", rf.me)
count++
if count > 5 {
panic(fmt.Sprintf("Peer #%d failed to decode state from persister, abort", rf.me))
}
} | conditional_block |
raft.go | .log[prevLogIndex].Term
leaderCommit := rf.commitIndex
rf.mu.Unlock()
peerNum := len(rf.peers)
for i := 0; i < peerNum; i++ {
if i == rf.me {
continue
}
args := AppendEntriesArgs{term, rf.me, prevLogIndex, prevLogTerm, []Log{}, leaderCommit}
reply := AppendEntriesReply{}
go rf.sendAppendEntries(i, &args, &reply)
}
time.Sleep(HEARTBEAT)
}
}
func (rf *Raft) leader() {
// comes to power, initialize fields
rf.mu.Lock()
rf.state = LEADER
rf.votedFor = -1
peerNum := len(rf.peers)
for i := 0; i < peerNum; i++ {
rf.nextIndex[i] = len(rf.log) // leader last log index + 1
rf.matchIndex[i] = 0
}
rf.mu.Unlock()
go rf.startHeartBeat()
// leader work
for {
if rf.killed() {
return
}
rf.mu.Lock()
if rf.state == FOLLOWER {
rf.mu.Unlock()
go rf.follower()
return
}
// log replication
for server, index := range rf.nextIndex {
if len(rf.log)-1 >= index {
args := AppendEntriesArgs{
rf.currentTerm, rf.me, index - 1,
rf.log[index-1].Term, rf.log[index:],
rf.commitIndex,
}
reply := AppendEntriesReply{}
go rf.sendAppendEntries(server, &args, &reply)
}
}
// commit log entry if possible
for n := len(rf.log) - 1; n > rf.commitIndex; n-- {
if rf.log[n].Term != rf.currentTerm {
continue
}
// a majority of matchIndex[i] >= n
matchNum := 0
for _, index := range rf.matchIndex {
if index >= n {
matchNum++
}
}
if matchNum > len(rf.peers)/2 {
rf.commitIndex = n
rf.applyCond.Broadcast()
break
}
}
rf.mu.Unlock()
time.Sleep(10 * time.Millisecond)
}
}
func (rf *Raft) candidate() {
rf.mu.Lock()
rf.currentTerm++
rf.state = CANDIDATE
rf.votedFor = rf.me
rf.persist()
rf.voteCount = 1 // vote for itself
peerNum := len(rf.peers)
term := rf.currentTerm
me := rf.me
lastLogIndex := len(rf.log) - 1
lastLogTerm := rf.log[lastLogIndex].Term
rf.mu.Unlock()
rf.resetTimer()
go rf.startElectionTimer()
// send RequestVote to all peers
for i := 0; i < peerNum; i++ {
if i == me {
continue
}
args := RequestVoteArgs{term, me, lastLogIndex, lastLogTerm}
reply := RequestVoteReply{}
go rf.sendRequestVote(i, &args, &reply)
}
// a candidate remains in this state until one of three things happens
// a condition variable could be used here, but events a), b), and c)
// are triggered by different goroutines, which would increase complexity,
// so busy waiting is used instead
for {
if rf.killed() {
return
}
rf.mu.Lock()
if rf.voteCount > peerNum/2 {
// a) the candidate wins and becomes leader
rf.mu.Unlock()
go rf.leader()
break
}
if rf.state == FOLLOWER {
// b) another server establishes itself as leader
rf.mu.Unlock()
go rf.follower()
break
}
if rf.currentTerm > term {
// c) a certain peer has already started a new election
// at this moment, this peer is either running follower() or candidate()
rf.mu.Unlock()
break
}
rf.mu.Unlock()
time.Sleep(10 * time.Millisecond)
}
}
// follower's behaviors are mostly handled by RPC handlers
func (rf *Raft) follower() {
go rf.startElectionTimer()
}
// the election timeout goroutine periodically checks
// whether the time since the server last heard from the leader exceeds the timeout period.
// If so, it starts a new election and returns.
// Each time a server becomes a follower or starts an election, it starts this timer goroutine.
func (rf *Raft) startElectionTimer() {
for {
if rf.killed() {
return
}
rf.mu.Lock()
electionTimeout := rf.electionTimeout
lastHeard := rf.lastHeard
rf.mu.Unlock()
now := time.Now()
if now.After(lastHeard.Add(electionTimeout)) {
go rf.candidate()
return
}
time.Sleep(10 * time.Millisecond)
}
} | //
// save Raft's persistent state to stable storage,
// where it can later be retrieved after a crash and restart.
// see paper's Figure 2 for a description of what should be persistent.
// this function can only be called when `rf` holds the lock
func (rf *Raft) persist() {
// Your code here (2C).
// Example:
// w := new(bytes.Buffer)
// e := labgob.NewEncoder(w)
// e.Encode(rf.xxx)
// e.Encode(rf.yyy)
// data := w.Bytes()
// rf.persister.SaveRaftState(data)
buf := new(bytes.Buffer)
enc := labgob.NewEncoder(buf)
enc.Encode(rf.currentTerm)
enc.Encode(rf.votedFor)
enc.Encode(rf.log)
data := buf.Bytes()
rf.persister.SaveRaftState(data)
}
//
// restore previously persisted state.
//
func (rf *Raft) readPersist(data []byte) {
if data == nil || len(data) < 1 { // bootstrap without any state?
return
}
// Your code here (2C).
// Example:
// r := bytes.NewBuffer(data)
// d := labgob.NewDecoder(r)
// var xxx
// var yyy
// if d.Decode(&xxx) != nil ||
// d.Decode(&yyy) != nil {
// error...
// } else {
// rf.xxx = xxx
// rf.yyy = yyy
// }
buf := bytes.NewBuffer(data)
de := labgob.NewDecoder(buf)
var currentTerm int
var votedFor int
var log []Log
count := 0
for de.Decode(&currentTerm) != nil || de.Decode(&votedFor) != nil || de.Decode(&log) != nil {
fmt.Fprintf(os.Stderr, "Peer #%d failed to decode state from persister, retrying...\n", rf.me)
count++
if count > 5 {
panic(fmt.Sprintf("Peer #%d failed to decode state from persister, abort", rf.me))
}
}
rf.currentTerm = currentTerm
rf.votedFor = votedFor
rf.log = log
}
//
// example RequestVote RPC arguments structure.
// field names must start with capital letters!
//
type RequestVoteArgs struct {
// Your data here (2A, 2B).
Term int
CandidateID int
LastLogIndex int
LastLogTerm int
}
//
// example RequestVote RPC reply structure.
// field names must start with capital letters!
//
type RequestVoteReply struct {
// Your data here (2A).
Term int
VoteGranted bool
}
//
// example RequestVote RPC handler.
//
func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {
// Your code here (2A, 2B).
rf.mu.Lock()
if args.Term < rf.currentTerm {
reply.Term = rf.currentTerm
reply.VoteGranted = false
rf.mu.Unlock()
return
}
// follow the second rule in "Rules for Servers" in figure 2 before handling an incoming RPC
if args.Term > rf.currentTerm {
rf.currentTerm = args.Term
rf.state = FOLLOWER
rf.votedFor = -1
rf.persist()
}
reply.Term = rf.currentTerm
reply.VoteGranted = true
// deny vote if already voted
if rf.votedFor != -1 {
reply.VoteGranted = false
rf.mu.Unlock()
return
}
// deny vote if consistency check fails (candidate is less up-to-date)
lastLog := rf.log[len(rf.log)-1]
if args.LastLogTerm < lastLog.Term || (args.LastLogTerm == lastLog.Term && args.LastLogIndex < len(rf.log)-1) {
reply.VoteGranted = false
rf.mu.Unlock()
return
}
// now this peer must vote for the candidate
rf.votedFor = args.CandidateID
rf.mu.Unlock()
rf.resetTimer()
}
type Append | random_line_split |
|
raft.go | ft) readPersist(data []byte) {
if data == nil || len(data) < 1 { // bootstrap without any state?
return
}
// Your code here (2C).
// Example:
// r := bytes.NewBuffer(data)
// d := labgob.NewDecoder(r)
// var xxx
// var yyy
// if d.Decode(&xxx) != nil ||
// d.Decode(&yyy) != nil {
// error...
// } else {
// rf.xxx = xxx
// rf.yyy = yyy
// }
buf := bytes.NewBuffer(data)
de := labgob.NewDecoder(buf)
var currentTerm int
var votedFor int
var log []Log
count := 0
for de.Decode(&currentTerm) != nil || de.Decode(&votedFor) != nil || de.Decode(&log) != nil {
fmt.Fprintf(os.Stderr, "Peer #%d failed to decode state from persister, retrying...\n", rf.me)
count++
if count > 5 {
panic(fmt.Sprintf("Peer #%d failed to decode state from persister, abort", rf.me))
}
}
rf.currentTerm = currentTerm
rf.votedFor = votedFor
rf.log = log
}
//
// example RequestVote RPC arguments structure.
// field names must start with capital letters!
//
type RequestVoteArgs struct {
// Your data here (2A, 2B).
Term int
CandidateID int
LastLogIndex int
LastLogTerm int
}
//
// example RequestVote RPC reply structure.
// field names must start with capital letters!
//
type RequestVoteReply struct {
// Your data here (2A).
Term int
VoteGranted bool
}
//
// example RequestVote RPC handler.
//
func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {
// Your code here (2A, 2B).
rf.mu.Lock()
if args.Term < rf.currentTerm {
reply.Term = rf.currentTerm
reply.VoteGranted = false
rf.mu.Unlock()
return
}
// follow the second rule in "Rules for Servers" in figure 2 before handling an incoming RPC
if args.Term > rf.currentTerm {
rf.currentTerm = args.Term
rf.state = FOLLOWER
rf.votedFor = -1
rf.persist()
}
reply.Term = rf.currentTerm
reply.VoteGranted = true
// deny vote if already voted
if rf.votedFor != -1 {
reply.VoteGranted = false
rf.mu.Unlock()
return
}
// deny vote if consistency check fails (candidate is less up-to-date)
lastLog := rf.log[len(rf.log)-1]
if args.LastLogTerm < lastLog.Term || (args.LastLogTerm == lastLog.Term && args.LastLogIndex < len(rf.log)-1) {
reply.VoteGranted = false
rf.mu.Unlock()
return
}
// now this peer must vote for the candidate
rf.votedFor = args.CandidateID
rf.mu.Unlock()
rf.resetTimer()
}
type AppendEntriesArgs struct {
Term int
LeaderID int
PrevLogIndex int
PrevLogTerm int
Entries []Log
LeaderCommit int
}
type AppendEntriesReply struct {
Term int
Success bool
// for roll back optimization
ConflictTerm int
ConflictIndex int
LogLen int
}
func (rf *Raft) AppendEntries(args *AppendEntriesArgs, reply *AppendEntriesReply) {
rf.mu.Lock()
if args.Term < rf.currentTerm {
reply.Term = rf.currentTerm
reply.Success = false
rf.mu.Unlock()
return
}
// follow the second rule in "Rules for Servers" in figure 2 before handling an incoming RPC
if args.Term > rf.currentTerm {
rf.currentTerm = args.Term
rf.state = FOLLOWER
rf.votedFor = -1
rf.persist()
}
rf.mu.Unlock()
// now we must have rf.currentTerm == args.Term, which means receiving from leader and reset timer
rf.resetTimer()
rf.mu.Lock()
defer rf.mu.Unlock()
// consistency check
if len(rf.log)-1 < args.PrevLogIndex {
// case 3: follower's log is too short
reply.Term = rf.currentTerm
reply.Success = false
reply.ConflictIndex = 0
reply.ConflictTerm = 0
reply.LogLen = len(rf.log) - 1
return
}
if rf.log[args.PrevLogIndex].Term != args.PrevLogTerm {
// case 1, 2: conflict at entry PrevLogIndex
reply.Term = rf.currentTerm
reply.Success = false
reply.ConflictTerm = rf.log[args.PrevLogIndex].Term
for i := args.PrevLogIndex; i > 0; i-- {
if rf.log[i-1].Term != reply.ConflictTerm {
reply.ConflictIndex = i
break
}
}
return
}
// accept new entries
reply.Term = rf.currentTerm
reply.Success = true
// log replication
if len(args.Entries) == 0 {
return
}
conflictEntry := -1
for i := 0; i < len(args.Entries); i++ {
if len(rf.log)-1 < args.PrevLogIndex+i+1 || args.Entries[i].Term != rf.log[args.PrevLogIndex+i+1].Term {
// existing an entry conflicts with a new one, truncate the log
rf.log = rf.log[:args.PrevLogIndex+i+1]
conflictEntry = i
break
}
}
if conflictEntry != -1 {
// need to append new entries to the log
for i := conflictEntry; i < len(args.Entries); i++ {
rf.log = append(rf.log, args.Entries[i])
}
}
rf.persist() // log has changed
// advance commitIndex if possible
if args.LeaderCommit > rf.commitIndex {
// BUG? index of last new entry == args.PrevLogIndex+len(args.Entries), based on my understanding
rf.commitIndex = min(args.LeaderCommit, args.PrevLogIndex+len(args.Entries))
rf.applyCond.Broadcast()
}
}
// I just wonder why the `math` package does not provide this simple function
func min(x, y int) int {
if x < y {
return x
}
return y
}
//
// example code to send a RequestVote RPC to a server.
// server is the index of the target server in rf.peers[].
// expects RPC arguments in args.
// fills in *reply with RPC reply, so caller should
// pass &reply.
// the types of the args and reply passed to Call() must be
// the same as the types of the arguments declared in the
// handler function (including whether they are pointers).
//
// The labrpc package simulates a lossy network, in which servers
// may be unreachable, and in which requests and replies may be lost.
// Call() sends a request and waits for a reply. If a reply arrives
// within a timeout interval, Call() returns true; otherwise
// Call() returns false. Thus Call() may not return for a while.
// A false return can be caused by a dead server, a live server that
// can't be reached, a lost request, or a lost reply.
//
// Call() is guaranteed to return (perhaps after a delay) *except* if the
// handler function on the server side does not return. Thus there
// is no need to implement your own timeouts around Call().
//
// look at the comments in ../labrpc/labrpc.go for more details.
//
// if you're having trouble getting RPC to work, check that you've
// capitalized all field names in structs passed over RPC, and
// that the caller passes the address of the reply struct with &, not
// the struct itself.
//
func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) {
if ok := rf.peers[server].Call("Raft.RequestVote", args, reply); !ok {
return
}
rf.mu.Lock()
defer rf.mu.Unlock()
// drop old reply
if reply.Term != args.Term {
return
}
if reply.Term > rf.currentTerm {
rf.currentTerm = reply.Term
rf.persist()
rf.state = FOLLOWER
return
}
if reply.VoteGranted {
rf.voteCount++
}
}
func (rf *Raft) sendAppendEntries(server int, args *AppendEntriesArgs, reply *AppendEntriesReply) | {
if ok := rf.peers[server].Call("Raft.AppendEntries", args, reply); !ok {
return
}
rf.mu.Lock()
defer rf.mu.Unlock()
// drop old reply
if args.Term != reply.Term {
return
}
if reply.Term > rf.currentTerm {
rf.currentTerm = reply.Term
rf.persist()
rf.state = FOLLOWER
return
}
if reply.Success { | identifier_body |
|
raft.go | [prevLogIndex].Term
leaderCommit := rf.commitIndex
rf.mu.Unlock()
peerNum := len(rf.peers)
for i := 0; i < peerNum; i++ {
if i == rf.me {
continue
}
args := AppendEntriesArgs{term, rf.me, prevLogIndex, prevLogTerm, []Log{}, leaderCommit}
reply := AppendEntriesReply{}
go rf.sendAppendEntries(i, &args, &reply)
}
time.Sleep(HEARTBEAT)
}
}
func (rf *Raft) | () {
// comes to power, initialize fields
rf.mu.Lock()
rf.state = LEADER
rf.votedFor = -1
peerNum := len(rf.peers)
for i := 0; i < peerNum; i++ {
rf.nextIndex[i] = len(rf.log) // leader last log index + 1
rf.matchIndex[i] = 0
}
rf.mu.Unlock()
go rf.startHeartBeat()
// leader work
for {
if rf.killed() {
return
}
rf.mu.Lock()
if rf.state == FOLLOWER {
rf.mu.Unlock()
go rf.follower()
return
}
// log replication
for server, index := range rf.nextIndex {
if len(rf.log)-1 >= index {
args := AppendEntriesArgs{
rf.currentTerm, rf.me, index - 1,
rf.log[index-1].Term, rf.log[index:],
rf.commitIndex,
}
reply := AppendEntriesReply{}
go rf.sendAppendEntries(server, &args, &reply)
}
}
// commit log entry if possible
for n := len(rf.log) - 1; n > rf.commitIndex; n-- {
if rf.log[n].Term != rf.currentTerm {
continue
}
// a majority of matchIndex[i] >= n
matchNum := 0
for _, index := range rf.matchIndex {
if index >= n {
matchNum++
}
}
if matchNum > len(rf.peers)/2 {
rf.commitIndex = n
rf.applyCond.Broadcast()
break
}
}
rf.mu.Unlock()
time.Sleep(10 * time.Millisecond)
}
}
func (rf *Raft) candidate() {
rf.mu.Lock()
rf.currentTerm++
rf.state = CANDIDATE
rf.votedFor = rf.me
rf.persist()
rf.voteCount = 1 // vote for itself
peerNum := len(rf.peers)
term := rf.currentTerm
me := rf.me
lastLogIndex := len(rf.log) - 1
lastLogTerm := rf.log[lastLogIndex].Term
rf.mu.Unlock()
rf.resetTimer()
go rf.startElectionTimer()
// send RequestVote to all peers
for i := 0; i < peerNum; i++ {
if i == me {
continue
}
args := RequestVoteArgs{term, me, lastLogIndex, lastLogTerm}
reply := RequestVoteReply{}
go rf.sendRequestVote(i, &args, &reply)
}
// a candidate remains in this state until one of three things happens
// a condition variable could be used here, but events a), b), and c)
// are triggered by different goroutines, which would increase complexity,
// so busy waiting is used instead
for {
if rf.killed() {
return
}
rf.mu.Lock()
if rf.voteCount > peerNum/2 {
// a) the candidate wins and becomes leader
rf.mu.Unlock()
go rf.leader()
break
}
if rf.state == FOLLOWER {
// b) another server establishes itself as leader
rf.mu.Unlock()
go rf.follower()
break
}
if rf.currentTerm > term {
// c) a certain peer has already started a new election
// at this moment, this peer is either running follower() or candidate()
rf.mu.Unlock()
break
}
rf.mu.Unlock()
time.Sleep(10 * time.Millisecond)
}
}
// follower's behaviors are mostly handled by RPC handlers
func (rf *Raft) follower() {
go rf.startElectionTimer()
}
// the election timeout goroutine periodically checks
// whether the time since the server last heard from the leader exceeds the timeout period.
// If so, it starts a new election and returns.
// Each time a server becomes a follower or starts an election, it starts this timer goroutine.
func (rf *Raft) startElectionTimer() {
for {
if rf.killed() {
return
}
rf.mu.Lock()
electionTimeout := rf.electionTimeout
lastHeard := rf.lastHeard
rf.mu.Unlock()
now := time.Now()
if now.After(lastHeard.Add(electionTimeout)) {
go rf.candidate()
return
}
time.Sleep(10 * time.Millisecond)
}
}
//
// save Raft's persistent state to stable storage,
// where it can later be retrieved after a crash and restart.
// see paper's Figure 2 for a description of what should be persistent.
// this function can only be called when `rf` holds the lock
func (rf *Raft) persist() {
// Your code here (2C).
// Example:
// w := new(bytes.Buffer)
// e := labgob.NewEncoder(w)
// e.Encode(rf.xxx)
// e.Encode(rf.yyy)
// data := w.Bytes()
// rf.persister.SaveRaftState(data)
buf := new(bytes.Buffer)
enc := labgob.NewEncoder(buf)
enc.Encode(rf.currentTerm)
enc.Encode(rf.votedFor)
enc.Encode(rf.log)
data := buf.Bytes()
rf.persister.SaveRaftState(data)
}
//
// restore previously persisted state.
//
func (rf *Raft) readPersist(data []byte) {
if data == nil || len(data) < 1 { // bootstrap without any state?
return
}
// Your code here (2C).
// Example:
// r := bytes.NewBuffer(data)
// d := labgob.NewDecoder(r)
// var xxx
// var yyy
// if d.Decode(&xxx) != nil ||
// d.Decode(&yyy) != nil {
// error...
// } else {
// rf.xxx = xxx
// rf.yyy = yyy
// }
buf := bytes.NewBuffer(data)
de := labgob.NewDecoder(buf)
var currentTerm int
var votedFor int
var log []Log
count := 0
for de.Decode(&currentTerm) != nil || de.Decode(&votedFor) != nil || de.Decode(&log) != nil {
fmt.Fprintf(os.Stderr, "Peer #%d failed to decode state from persister, retrying...\n", rf.me)
count++
if count > 5 {
panic(fmt.Sprintf("Peer #%d failed to decode state from persister, abort", rf.me))
}
}
rf.currentTerm = currentTerm
rf.votedFor = votedFor
rf.log = log
}
//
// example RequestVote RPC arguments structure.
// field names must start with capital letters!
//
type RequestVoteArgs struct {
// Your data here (2A, 2B).
Term int
CandidateID int
LastLogIndex int
LastLogTerm int
}
//
// example RequestVote RPC reply structure.
// field names must start with capital letters!
//
type RequestVoteReply struct {
// Your data here (2A).
Term int
VoteGranted bool
}
//
// example RequestVote RPC handler.
//
func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {
// Your code here (2A, 2B).
rf.mu.Lock()
if args.Term < rf.currentTerm {
reply.Term = rf.currentTerm
reply.VoteGranted = false
rf.mu.Unlock()
return
}
// follow the second rule in "Rules for Servers" in figure 2 before handling an incoming RPC
if args.Term > rf.currentTerm {
rf.currentTerm = args.Term
rf.state = FOLLOWER
rf.votedFor = -1
rf.persist()
}
reply.Term = rf.currentTerm
reply.VoteGranted = true
// deny vote if already voted
if rf.votedFor != -1 {
reply.VoteGranted = false
rf.mu.Unlock()
return
}
// deny vote if consistency check fails (candidate is less up-to-date)
lastLog := rf.log[len(rf.log)-1]
if args.LastLogTerm < lastLog.Term || (args.LastLogTerm == lastLog.Term && args.LastLogIndex < len(rf.log)-1) {
reply.VoteGranted = false
rf.mu.Unlock()
return
}
// now this peer must vote for the candidate
rf.votedFor = args.CandidateID
rf.mu.Unlock()
rf.resetTimer()
}
type Append | leader | identifier_name |
dashboard.py | os.makedirs(ontology_dir, exist_ok=True)
# Launch the JVM using the robot JAR
py4j.java_gateway.launch_gateway(
jarpath='build/robot.jar', classpath='org.obolibrary.robot.PythonOperation', die_on_exit=True, port=25333)
# Activate gateway to JVM
gateway = JavaGateway()
robot_gateway = gateway.jvm.org.obolibrary.robot
# IOHelper for working with ontologies
io_helper = robot_gateway.IOHelper()
# Handle ontology file
big = namespace in BIG_ONTS
if not big:
# Load ontology as OWLOntology object
if not ontology_file:
ont_or_file = None
try:
ont_or_file = io_helper.loadOntology(ontology_file)
except Exception:
print('ERROR: Unable to load \'{0}\''.format(ontology_file), flush=True)
ont_or_file = None
# Get the Version IRI
version_iri = dash_utils.get_version_iri(ont_or_file)
else:
# Just provide path to file
ont_or_file = ontology_file
# Get the version IRI by text parsing
version_iri = dash_utils.get_big_version_iri(ont_or_file)
# Get the registry data
yaml_data = yaml.load(registry, Loader=yaml.SafeLoader)
yaml_data = yaml_data['ontologies']
data = dash_utils.get_data(namespace, yaml_data)
# Map of all ontologies to their domains
domain_map = dash_utils.get_domains(yaml_data)
# Map of RO labels to RO IRIs
ro_props = fp_007.get_ro_properties(ro_file)
if 'is_obsolete' in data and data['is_obsolete'] == 'true':
# do not run on obsolete ontologies
print('{0} is obsolete and will not be checked...'.format(namespace), flush=True)
sys.exit(0)
# ---------------------------- #
# RUN CHECKS
# ---------------------------- #
print('-----------------\nChecking ' + namespace, flush=True)
# Get the report based on if it's big or not
report = None
good_format = None
if big:
if namespace != 'gaz':
# Report currently takes TOO LONG for GAZ
print('Running ROBOT report on {0}...'.format(namespace), flush=True)
report_obj = report_utils.BigReport(robot_gateway, namespace, ont_or_file)
report = report_obj.get_report()
good_format = report_obj.get_good_format()
else:
if ont_or_file:
# Ontology is not None
print('Running ROBOT report on {0}...'.format(namespace), flush=True)
report = report_utils.run_report(robot_gateway, io_helper, ont_or_file)
# Execute the numbered checks
check_map = {}
try:
if big:
check_map[1] = fp_001.big_is_open(ont_or_file, data, license_schema)
else:
check_map[1] = fp_001.is_open(ont_or_file, data, license_schema)
except Exception as e:
check_map[1] = 'INFO|unable to run check 1'
print('ERROR: unable to run check 1 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
if big:
check_map[2] = fp_002.big_is_common_format(good_format)
else:
check_map[2] = fp_002.is_common_format(ont_or_file)
except Exception as e:
check_map[2] = 'INFO|unable to run check 2'
print('ERROR: unable to run check 2 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
if big:
check_map[3] = fp_003.big_has_valid_uris(namespace, ont_or_file, ontology_dir)
else:
check_map[3] = fp_003.has_valid_uris(robot_gateway, namespace, ont_or_file, ontology_dir)
except Exception as e:
check_map[3] = 'INFO|unable to run check 3'
print('ERROR: unable to run check 3 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
if big:
check_map[4] = fp_004.big_has_versioning(ont_or_file)
else:
check_map[4] = fp_004.has_versioning(ont_or_file)
except Exception as e:
check_map[4] = 'INFO|unable to run check 4'
print('ERROR: unable to run check 4 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[5] = fp_005.has_scope(data, domain_map)
except Exception as e:
check_map[5] = 'INFO|unable to run check 5'
print('ERROR: unable to run check 5 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[6] = fp_006.has_valid_definitions(report)
except Exception as e:
check_map[6] = 'INFO|unable to run check 6'
print('ERROR: unable to run check 6 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
if big:
check_map[7] = fp_007.big_has_valid_relations(namespace, ont_or_file, ro_props, ontology_dir)
else:
check_map[7] = fp_007.has_valid_relations(namespace, ont_or_file, ro_props, ontology_dir)
except Exception as e:
check_map[7] = 'INFO|unable to run check 7'
print('ERROR: unable to run check 7 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[8] = fp_008.has_documentation(data)
except Exception as e:
check_map[8] = 'INFO|unable to run check 8'
print('ERROR: unable to run check 8 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[9] = fp_009.has_users(data)
except Exception as e:
check_map[9] = 'INFO|unable to run check 9'
print('ERROR: unable to run check 9 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[11] = fp_011.has_contact(data, contact_schema)
except Exception as e:
check_map[11] = 'INFO|unable to run check 11'
print('ERROR: unable to run check 11 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[12] = fp_012.has_valid_labels(report)
except Exception as e:
check_map[12] = 'INFO|unable to run check 12'
print('ERROR: unable to run check 12 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
if big:
check_map[16] = fp_016.big_is_maintained(ont_or_file)
else:
check_map[16] = fp_016.is_maintained(ont_or_file)
except Exception as e:
check_map[16] = 'INFO|unable to run check 16'
print('ERROR: unable to run check 16 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
# finally, add the report results to the dashboard and save the report
try:
check_map['report'] = report_utils.process_report(robot_gateway, report, ontology_dir)
except Exception as e:
check_map['report'] = 'INFO|unable to save report'
print('ERROR: unable to save ROBOT report for {0}\nCAUSE:\n{1 | parser = ArgumentParser(description='Create dashboard files')
parser.add_argument('ontology', type=str, help='Input ontology file')
parser.add_argument('registry', type=FileType('r'), help='Registry YAML file')
parser.add_argument('license', type=FileType('r'), help='License JSON schema')
parser.add_argument('contact', type=FileType('r'), help='Contact JSON schema')
parser.add_argument('relations', type=FileType('r'), help='Table containing RO IRIs and labels')
parser.add_argument('outdir', type=str, help='Output directory')
args = parser.parse_args()
owl = os.path.basename(args.ontology)
namespace = os.path.splitext(owl)[0]
ontology_file = args.ontology
registry = args.registry
license_schema = json.load(args.license)
contact_schema = json.load(args.contact)
ro_file = args.relations
# Create the build directory for this ontology
ontology_dir = args.outdir | identifier_body |
|
dashboard.py | if 'is_obsolete' in data and data['is_obsolete'] == 'true':
# do not run on obsolete ontologies
print('{0} is obsolete and will not be checked...'.format(namespace), flush=True)
sys.exit(0)
# ---------------------------- #
# RUN CHECKS
# ---------------------------- #
print('-----------------\nChecking ' + namespace, flush=True)
# Get the report based on if it's big or not
report = None
good_format = None
if big:
if namespace != 'gaz':
# Report currently takes TOO LONG for GAZ
print('Running ROBOT report on {0}...'.format(namespace), flush=True)
report_obj = report_utils.BigReport(robot_gateway, namespace, ont_or_file)
report = report_obj.get_report()
good_format = report_obj.get_good_format()
else:
if ont_or_file:
# Ontology is not None
print('Running ROBOT report on {0}...'.format(namespace), flush=True)
report = report_utils.run_report(robot_gateway, io_helper, ont_or_file)
# Execute the numbered checks
check_map = {}
try:
if big:
check_map[1] = fp_001.big_is_open(ont_or_file, data, license_schema)
else:
check_map[1] = fp_001.is_open(ont_or_file, data, license_schema)
except Exception as e:
check_map[1] = 'INFO|unable to run check 1'
print('ERROR: unable to run check 1 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
if big:
check_map[2] = fp_002.big_is_common_format(good_format)
else:
check_map[2] = fp_002.is_common_format(ont_or_file)
except Exception as e:
check_map[2] = 'INFO|unable to run check 2'
print('ERROR: unable to run check 2 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
if big:
check_map[3] = fp_003.big_has_valid_uris(namespace, ont_or_file, ontology_dir)
else:
check_map[3] = fp_003.has_valid_uris(robot_gateway, namespace, ont_or_file, ontology_dir)
except Exception as e:
check_map[3] = 'INFO|unable to run check 3'
print('ERROR: unable to run check 3 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
if big:
check_map[4] = fp_004.big_has_versioning(ont_or_file)
else:
check_map[4] = fp_004.has_versioning(ont_or_file)
except Exception as e:
check_map[4] = 'INFO|unable to run check 4'
print('ERROR: unable to run check 4 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[5] = fp_005.has_scope(data, domain_map)
except Exception as e:
check_map[5] = 'INFO|unable to run check 5'
print('ERROR: unable to run check 5 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[6] = fp_006.has_valid_definitions(report)
except Exception as e:
check_map[6] = 'INFO|unable to run check 6'
print('ERROR: unable to run check 6 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
if big:
check_map[7] = fp_007.big_has_valid_relations(namespace, ont_or_file, ro_props, ontology_dir)
else:
check_map[7] = fp_007.has_valid_relations(namespace, ont_or_file, ro_props, ontology_dir)
except Exception as e:
check_map[7] = 'INFO|unable to run check 7'
print('ERROR: unable to run check 7 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[8] = fp_008.has_documentation(data)
except Exception as e:
check_map[8] = 'INFO|unable to run check 8'
print('ERROR: unable to run check 8 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[9] = fp_009.has_users(data)
except Exception as e:
check_map[9] = 'INFO|unable to run check 9'
print('ERROR: unable to run check 9 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[11] = fp_011.has_contact(data, contact_schema)
except Exception as e:
check_map[11] = 'INFO|unable to run check 11'
print('ERROR: unable to run check 11 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[12] = fp_012.has_valid_labels(report)
except Exception as e:
check_map[12] = 'INFO|unable to run check 12'
print('ERROR: unable to run check 12 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
if big:
check_map[16] = fp_016.big_is_maintained(ont_or_file)
else:
check_map[16] = fp_016.is_maintained(ont_or_file)
except Exception as e:
check_map[16] = 'INFO|unable to run check 16'
print('ERROR: unable to run check 16 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
# finally, add the report results to the dashboard and save the report
try:
check_map['report'] = report_utils.process_report(robot_gateway, report, ontology_dir)
except Exception as e:
check_map['report'] = 'INFO|unable to save report'
print('ERROR: unable to save ROBOT report for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
# ---------------------------- #
# SAVE RESULTS
# ---------------------------- #
# Parse results
err = 0
warn = 0
info = 0
all_checks = {}
for check, result in check_map.items():
if result is None or 'status' not in result:
print('Missing result for check {0}'.format(check), flush=True)
continue
status = result['status']
if status == 'ERROR':
err += 1
elif status == 'WARN':
warn += 1
elif status == 'INFO':
info += 1
elif status != 'PASS':
print('Unknown status "{0}" for check {1}'.format(status, check), flush=True)
continue
key = check
if check in PRINCIPLE_MAP:
key = PRINCIPLE_MAP[check]
elif check == 'report':
key = 'ROBOT Report'
all_checks[key] = result
# Summary status
if err > 0:
summary = 'ERROR'
summary_comment = '{0} errors'.format(err)
elif warn > 0:
summary = 'WARN'
summary_comment = '{0} warnings'.format(warn)
elif info > 0:
summary = 'INFO'
summary_comment = '{0} info messages'.format(info)
else:
summary = 'PASS'
summary_comment = ''
date = datetime.datetime.today()
save_data = {'namespace': namespace, 'version': version_iri, 'date': date.strftime('%Y-%m-%d'),
'summary': {'status': summary, 'comment': summary_comment}, 'results': all_checks}
# Save to YAML file
outfile = os.path.join(ontology_dir, 'dashboard.yml')
print('Saving results to {0}'.format(outfile))
with open(outfile, 'w+') as f:
yaml.dump(save_data, f)
sys.exit(0)
BIG_ONTS = ['bto', 'chebi', 'dron', 'gaz', 'ncbitaxon', 'ncit', 'pr', 'uberon']
OBO = 'http://purl.obolibrary.org/obo'
PRINCIPLE_MAP = {
1: 'FP1 Open',
2: 'FP2 Common Format',
3: 'FP3 URIs',
4: 'FP4 Versioning', | 5: 'FP5 Scope',
6: 'FP6 Textual Definitions',
7: 'FP7 Relations',
8: 'FP8 Documented',
9: 'FP9 Plurality of Users', | random_line_split |
|
dashboard.py | Helper for working with ontologies
io_helper = robot_gateway.IOHelper()
# Handle ontology file
big = namespace in BIG_ONTS
if not big:
# Load ontology as OWLOntology object
if not ontology_file:
ont_or_file = None
try:
ont_or_file = io_helper.loadOntology(ontology_file)
except Exception:
print('ERROR: Unable to load \'{0}\''.format(ontology_file), flush=True)
ont_or_file = None
# Get the Version IRI
version_iri = dash_utils.get_version_iri(ont_or_file)
else:
# Just provide path to file
ont_or_file = ontology_file
# Get the version IRI by text parsing
version_iri = dash_utils.get_big_version_iri(ont_or_file)
# Get the registry data
yaml_data = yaml.load(registry, Loader=yaml.SafeLoader)
yaml_data = yaml_data['ontologies']
data = dash_utils.get_data(namespace, yaml_data)
# Map of all ontologies to their domains
domain_map = dash_utils.get_domains(yaml_data)
# Map of RO labels to RO IRIs
ro_props = fp_007.get_ro_properties(ro_file)
if 'is_obsolete' in data and data['is_obsolete'] == 'true':
# do not run on obsolete ontologies
print('{0} is obsolete and will not be checked...'.format(namespace), flush=True)
sys.exit(0)
# ---------------------------- #
# RUN CHECKS
# ---------------------------- #
print('-----------------\nChecking ' + namespace, flush=True)
# Get the report based on if it's big or not
report = None
good_format = None
if big:
if namespace != 'gaz':
# Report currently takes TOO LONG for GAZ
print('Running ROBOT report on {0}...'.format(namespace), flush=True)
report_obj = report_utils.BigReport(robot_gateway, namespace, ont_or_file)
report = report_obj.get_report()
good_format = report_obj.get_good_format()
else:
if ont_or_file:
# Ontology is not None
print('Running ROBOT report on {0}...'.format(namespace), flush=True)
report = report_utils.run_report(robot_gateway, io_helper, ont_or_file)
# Execute the numbered checks
check_map = {}
try:
if big:
check_map[1] = fp_001.big_is_open(ont_or_file, data, license_schema)
else:
check_map[1] = fp_001.is_open(ont_or_file, data, license_schema)
except Exception as e:
check_map[1] = 'INFO|unable to run check 1'
print('ERROR: unable to run check 1 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
if big:
check_map[2] = fp_002.big_is_common_format(good_format)
else:
check_map[2] = fp_002.is_common_format(ont_or_file)
except Exception as e:
check_map[2] = 'INFO|unable to run check 2'
print('ERROR: unable to run check 2 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
if big:
check_map[3] = fp_003.big_has_valid_uris(namespace, ont_or_file, ontology_dir)
else:
check_map[3] = fp_003.has_valid_uris(robot_gateway, namespace, ont_or_file, ontology_dir)
except Exception as e:
check_map[3] = 'INFO|unable to run check 3'
print('ERROR: unable to run check 3 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
if big:
check_map[4] = fp_004.big_has_versioning(ont_or_file)
else:
check_map[4] = fp_004.has_versioning(ont_or_file)
except Exception as e:
check_map[4] = 'INFO|unable to run check 4'
print('ERROR: unable to run check 4 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[5] = fp_005.has_scope(data, domain_map)
except Exception as e:
check_map[5] = 'INFO|unable to run check 5'
print('ERROR: unable to run check 5 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[6] = fp_006.has_valid_definitions(report)
except Exception as e:
check_map[6] = 'INFO|unable to run check 6'
print('ERROR: unable to run check 6 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
if big:
check_map[7] = fp_007.big_has_valid_relations(namespace, ont_or_file, ro_props, ontology_dir)
else:
check_map[7] = fp_007.has_valid_relations(namespace, ont_or_file, ro_props, ontology_dir)
except Exception as e:
check_map[7] = 'INFO|unable to run check 7'
print('ERROR: unable to run check 7 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[8] = fp_008.has_documentation(data)
except Exception as e:
check_map[8] = 'INFO|unable to run check 8'
print('ERROR: unable to run check 8 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[9] = fp_009.has_users(data)
except Exception as e:
check_map[9] = 'INFO|unable to run check 9'
print('ERROR: unable to run check 9 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[11] = fp_011.has_contact(data, contact_schema)
except Exception as e:
check_map[11] = 'INFO|unable to run check 11'
print('ERROR: unable to run check 11 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[12] = fp_012.has_valid_labels(report)
except Exception as e:
check_map[12] = 'INFO|unable to run check 12'
print('ERROR: unable to run check 12 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
if big:
check_map[16] = fp_016.big_is_maintained(ont_or_file)
else:
check_map[16] = fp_016.is_maintained(ont_or_file)
except Exception as e:
check_map[16] = 'INFO|unable to run check 16'
print('ERROR: unable to run check 16 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
# finally, add the report results to the dashboard and save the report
try:
check_map['report'] = report_utils.process_report(robot_gateway, report, ontology_dir)
except Exception as e:
check_map['report'] = 'INFO|unable to save report'
print('ERROR: unable to save ROBOT report for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
# ---------------------------- #
# SAVE RESULTS
# ---------------------------- #
# Parse results
err = 0
warn = 0
info = 0
all_checks = {}
for check, result in check_map.items():
if result is None or 'status' not in result:
print('Missing result for check {0}'.format(check), flush=True)
continue
status = result['status']
if status == 'ERROR':
err += 1
elif status == 'WARN':
warn += 1
elif status == 'INFO':
info += 1
elif status != 'PASS':
print('Unknown status "{0}" for check {1}'.format(status, check), flush=True)
continue
key = check
if check in PRINCIPLE_MAP:
key = PRINCIPLE_MAP[check]
elif check == 'report':
key = 'ROBOT Report'
all_checks[key] = result
# Summary status
if err > 0:
summary = 'ERROR'
summary_comment = '{0} errors'.format(err)
elif warn > 0:
summary = 'WARN'
summary_comment = '{0} warnings'.format(warn)
elif info > 0:
| summary = 'INFO'
summary_comment = '{0} info messages'.format(info) | conditional_block |
|
dashboard.py | ():
# ---------------------------- #
# PREPARE INPUT
# ---------------------------- #
# parse input args
parser = ArgumentParser(description='Create dashboard files')
parser.add_argument('ontology', type=str, help='Input ontology file')
parser.add_argument('registry', type=FileType('r'), help='Registry YAML file')
parser.add_argument('license', type=FileType('r'), help='License JSON schema')
parser.add_argument('contact', type=FileType('r'), help='Contact JSON schema')
parser.add_argument('relations', type=FileType('r'), help='Table containing RO IRIs and labels')
parser.add_argument('outdir', type=str, help='Output directory')
args = parser.parse_args()
owl = os.path.basename(args.ontology)
namespace = os.path.splitext(owl)[0]
ontology_file = args.ontology
registry = args.registry
license_schema = json.load(args.license)
contact_schema = json.load(args.contact)
ro_file = args.relations
# Create the build directory for this ontology
ontology_dir = args.outdir
os.makedirs(ontology_dir, exist_ok=True)
# Launch the JVM using the robot JAR
py4j.java_gateway.launch_gateway(
jarpath='build/robot.jar', classpath='org.obolibrary.robot.PythonOperation', die_on_exit=True, port=25333)
# Activate gateway to JVM
gateway = JavaGateway()
robot_gateway = gateway.jvm.org.obolibrary.robot
# IOHelper for working with ontologies
io_helper = robot_gateway.IOHelper()
# Handle ontology file
big = namespace in BIG_ONTS
if not big:
# Load ontology as OWLOntology object
if not ontology_file:
ont_or_file = None
try:
ont_or_file = io_helper.loadOntology(ontology_file)
except Exception:
print('ERROR: Unable to load \'{0}\''.format(ontology_file), flush=True)
ont_or_file = None
# Get the Version IRI
version_iri = dash_utils.get_version_iri(ont_or_file)
else:
# Just provide path to file
ont_or_file = ontology_file
# Get the version IRI by text parsing
version_iri = dash_utils.get_big_version_iri(ont_or_file)
# Get the registry data
yaml_data = yaml.load(registry, Loader=yaml.SafeLoader)
yaml_data = yaml_data['ontologies']
data = dash_utils.get_data(namespace, yaml_data)
# Map of all ontologies to their domains
domain_map = dash_utils.get_domains(yaml_data)
# Map of RO labels to RO IRIs
ro_props = fp_007.get_ro_properties(ro_file)
if 'is_obsolete' in data and data['is_obsolete'] == 'true':
# do not run on obsolete ontologies
print('{0} is obsolete and will not be checked...'.format(namespace), flush=True)
sys.exit(0)
# ---------------------------- #
# RUN CHECKS
# ---------------------------- #
print('-----------------\nChecking ' + namespace, flush=True)
# Get the report based on if it's big or not
report = None
good_format = None
if big:
if namespace != 'gaz':
# Report currently takes TOO LONG for GAZ
print('Running ROBOT report on {0}...'.format(namespace), flush=True)
report_obj = report_utils.BigReport(robot_gateway, namespace, ont_or_file)
report = report_obj.get_report()
good_format = report_obj.get_good_format()
else:
if ont_or_file:
# Ontology is not None
print('Running ROBOT report on {0}...'.format(namespace), flush=True)
report = report_utils.run_report(robot_gateway, io_helper, ont_or_file)
# Execute the numbered checks
check_map = {}
try:
if big:
check_map[1] = fp_001.big_is_open(ont_or_file, data, license_schema)
else:
check_map[1] = fp_001.is_open(ont_or_file, data, license_schema)
except Exception as e:
check_map[1] = 'INFO|unable to run check 1'
print('ERROR: unable to run check 1 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
if big:
check_map[2] = fp_002.big_is_common_format(good_format)
else:
check_map[2] = fp_002.is_common_format(ont_or_file)
except Exception as e:
check_map[2] = 'INFO|unable to run check 2'
print('ERROR: unable to run check 2 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
if big:
check_map[3] = fp_003.big_has_valid_uris(namespace, ont_or_file, ontology_dir)
else:
check_map[3] = fp_003.has_valid_uris(robot_gateway, namespace, ont_or_file, ontology_dir)
except Exception as e:
check_map[3] = 'INFO|unable to run check 3'
print('ERROR: unable to run check 3 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
if big:
check_map[4] = fp_004.big_has_versioning(ont_or_file)
else:
check_map[4] = fp_004.has_versioning(ont_or_file)
except Exception as e:
check_map[4] = 'INFO|unable to run check 4'
print('ERROR: unable to run check 4 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[5] = fp_005.has_scope(data, domain_map)
except Exception as e:
check_map[5] = 'INFO|unable to run check 5'
print('ERROR: unable to run check 5 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[6] = fp_006.has_valid_definitions(report)
except Exception as e:
check_map[6] = 'INFO|unable to run check 6'
print('ERROR: unable to run check 6 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
if big:
check_map[7] = fp_007.big_has_valid_relations(namespace, ont_or_file, ro_props, ontology_dir)
else:
check_map[7] = fp_007.has_valid_relations(namespace, ont_or_file, ro_props, ontology_dir)
except Exception as e:
check_map[7] = 'INFO|unable to run check 7'
print('ERROR: unable to run check 7 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[8] = fp_008.has_documentation(data)
except Exception as e:
check_map[8] = 'INFO|unable to run check 8'
print('ERROR: unable to run check 8 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[9] = fp_009.has_users(data)
except Exception as e:
check_map[9] = 'INFO|unable to run check 9'
print('ERROR: unable to run check 9 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[11] = fp_011.has_contact(data, contact_schema)
except Exception as e:
check_map[11] = 'INFO|unable to run check 11'
print('ERROR: unable to run check 11 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
check_map[12] = fp_012.has_valid_labels(report)
except Exception as e:
check_map[12] = 'INFO|unable to run check 12'
print('ERROR: unable to run check 12 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
try:
if big:
check_map[16] = fp_016.big_is_maintained(ont_or_file)
else:
check_map[16] = fp_016.is_maintained(ont_or_file)
except Exception as e:
check_map[16] = 'INFO|unable to run check 16'
print('ERROR: unable to run check 16 for {0}\nCAUSE:\n{1}'.format(namespace, str(e)), flush=True)
# finally, add the report results to the dashboard and save the report
try:
check_map['report'] = report_utils.process_report(robot_gateway, report, ontology_dir)
except Exception as e:
check_map['report'] = 'INFO| | run | identifier_name |
|
bench.rs | )]
pub struct TestBodySummary {
pub name: String,
pub summary: Summary,
}
/// The outcome of a test
#[derive(Clone)]
struct TestResult {
name: String,
grand_summary: Summary,
bodies_summary: Vec<TestBodySummary>,
}
/// The outcome of a test, without the name of the test
pub struct AnonymousTestResult {
pub grand_summary: Summary,
pub bodies_summary: Vec<TestBodySummary>,
}
impl Default for AnonymousTestResult {
fn default() -> Self {
Self {
grand_summary: Summary::new(&[0.0]),
bodies_summary: vec![],
}
}
}
impl From<TestResult> for AnonymousTestResult {
fn from(test_result: TestResult) -> Self {
AnonymousTestResult {
grand_summary: test_result.grand_summary,
bodies_summary: test_result.bodies_summary,
}
}
}
/// Environment for a single test
#[derive(Clone)]
struct TestBodiesBench {
precision: Precision,
ctx: TestCtx,
bodies: Vec<unsafe extern "C" fn(TestCtx)>,
}
#[derive(Default, Debug, Clone)]
pub struct Sample<T>(Vec<T>);
impl<T> Sample<T> {
pub fn empty() -> Self {
Sample(vec![])
}
}
pub trait Runnable<Ret> {
fn setup(&mut self) {}
fn teardown(&mut self) {}
fn bodies<'t>(&'t mut self) -> Vec<Box<dyn Fn(usize) -> Ret + 't>>;
}
impl TestBodiesBench {
#[inline]
fn body(&self, body_id: usize) {
unsafe { (self.bodies[body_id])(self.ctx) }
}
}
impl Runnable<()> for TestBodiesBench {
fn bodies<'t>(&'t mut self) -> Vec<Box<dyn Fn(usize) -> () + 't>> {
let mut fns: Vec<Box<dyn Fn(usize) -> () + 't>> = vec![];
for _ in 0..self.bodies.len() {
let this = self.clone();
fns.push(Box::new(move |body_id| this.body(body_id)))
}
fns
}
}
pub struct AdaptiveRunner {
round_size: usize,
min_sample_size: usize,
min_run_time_ms: u64,
max_run_time_ms: u64,
precision: Precision,
}
#[derive(Clone)]
pub struct RunnerResult {
pub summaries: Vec<Summary>,
pub grand_summary: Summary,
}
impl AdaptiveRunner {
pub fn new(
initial_round_size: usize,
min_sample_size: usize,
min_run_time_ms: u64,
max_run_time_ms: u64,
precision: &Precision,
) -> Self {
AdaptiveRunner {
round_size: initial_round_size,
min_sample_size,
min_run_time_ms,
max_run_time_ms,
precision: precision.clone(),
}
}
pub fn bench<Target, Ret>(&self, target: &mut Target) -> RunnerResult
where
Target: Runnable<Ret>,
{
let mut sample_for_all_bodies: Sample<Elapsed> = Sample::empty();
let mut samples: Vec<Sample<Elapsed>> = vec![];
let bodies = target.bodies();
samples.resize(bodies.len(), Sample::empty());
let mut round_size = self.round_size;
let ts_bench_start = self.precision.now();
let mut sample_id = 0;
loop {
let mut elapsed_vec: Vec<Elapsed> = vec![];
elapsed_vec.resize(bodies.len(), Elapsed::new());
for _ in 0..round_size {
for (body_id, body) in bodies.iter().enumerate() {
let ts_start = self.precision.now();
body(body_id);
let ts_end = self.precision.now();
elapsed_vec[body_id] += ts_end - ts_start;
}
}
let mut elapsed_for_all_bodies = Elapsed::new();
for (body_id, elapsed) in elapsed_vec.into_iter().enumerate() {
samples[body_id]
.0
.push(Elapsed::from_ticks(elapsed.ticks() / round_size as u64));
elapsed_for_all_bodies += elapsed;
}
sample_for_all_bodies.0.push(Elapsed::from_ticks(
elapsed_for_all_bodies.ticks() / round_size as u64,
));
let elapsed_total = (self.precision.now() - ts_bench_start).as_millis(&self.precision);
if elapsed_total < self.min_run_time_ms {
round_size = round_size.saturating_add(round_size);
continue;
}
if elapsed_total > self.max_run_time_ms {
break;
}
sample_id += 1;
if sample_id >= self.min_sample_size {
break;
}
}
let summaries: Vec<_> = samples
.into_iter()
.map(|sample| {
Summary::new(
sample
.0
.into_iter()
.map(|elapsed| elapsed.as_ns(&self.precision) as f64)
.collect::<Vec<f64>>()
.as_slice(),
)
})
.collect();
let grand_summary = Summary::new(
sample_for_all_bodies
.0
.into_iter()
.map(|elapsed| elapsed.as_ns(&self.precision) as f64)
.collect::<Vec<f64>>()
.as_slice(),
);
RunnerResult {
summaries,
grand_summary,
}
}
}
/// Run an individual test
fn run_test(
config: &Config,
precision: &Precision,
global_ctx: TestCtx,
test: &Test,
) -> Result<TestResult, BenchError> {
let mut ctx: TestCtx = ptr::null_mut();
if let Some(setup) = (*test).setup_fn {
unsafe { setup(global_ctx, &mut ctx) }
}
let bench_runner = AdaptiveRunner::new(
config
.initial_round_size
.unwrap_or(DEFAULT_INITIAL_ROUND_SIZE),
config.min_sample_size.unwrap_or(DEFAULT_MIN_SAMPLE_SIZE),
config.min_run_time_ms.unwrap_or(DEFAULT_MIN_RUN_TIME_MS),
config.max_run_time_ms.unwrap_or(DEFAULT_MAX_RUN_TIME_MS),
precision,
);
let mut test_bodies_bench = TestBodiesBench {
precision: precision.clone(),
ctx,
bodies: (*test)
.bodies
.clone()
.iter()
.map(|body| body.body_fn)
.collect(),
};
let bench_result = bench_runner.bench(&mut test_bodies_bench);
let mut bodies_summary = vec![];
for (body_id, body) in (*test).bodies.iter().enumerate() {
let test_body_summary = TestBodySummary {
name: body.name.clone(),
summary: bench_result.summaries[body_id].clone(),
};
bodies_summary.push(test_body_summary);
}
unsafe { (*test).teardown_fn.map(|teardown_fn| teardown_fn(ctx)) };
let grand_summary = bench_result.grand_summary;
let name = test.name.clone();
Ok(TestResult {
name,
grand_summary,
bodies_summary,
})
}
/// Run a sequence of tests
fn run_tests(
config: &Config,
global_ctx: TestCtx,
tests: Vec<Test>,
) -> Result<Vec<TestResult>, BenchError> {
let mut test_results: Vec<TestResult> = vec![];
let precision = Precision::new(precision::Config::default())?;
for test in tests {
eprintln!(" - {}", test.name);
let test_result = run_test(config, &precision, global_ctx, &test)?;
test_results.push(test_result);
}
Ok(test_results)
}
#[cfg(unix)]
fn load_library<P: AsRef<OsStr>>(
library_path: P,
rtld_lazy: bool,
rtld_global: bool,
) -> Result<Library, BenchError> {
let mut flags = 0;
if rtld_lazy {
flags |= RTLD_LAZY;
} else {
flags |= RTLD_NOW;
}
if rtld_global {
flags |= RTLD_GLOBAL;
} else {
flags |= RTLD_LOCAL;
}
let library = libloading::os::unix::Library::open(Some(library_path), flags)?.into();
Ok(library)
}
#[cfg(not(unix))]
fn load_library<P: AsRef<OsStr>>(
library_path: P,
_rtld_lazy: bool,
_rtld_global: bool,
) -> Result<Library, BenchError> {
Ok(Library::new(library_path)?)
}
/// Bench all functions contained in a shared library
fn bench_library(config: &Config, library_path: &Path) -> Result<Vec<TestResult>, BenchError> {
let tests_symbols = symbols::extract_tests_symbols(library_path)?;
let library = load_library(library_path, false, true)?;
let tests_runner: Symbol<'_, &TestsConfig> =
unsafe { library.get(TEST_LIBRARIES_TABLE_SYMBOL) }.map_err(BenchError::from)?;
if tests_runner.version != TEST_ABI_VERSION {
return Err(BenchError::ABIError("Incompatible ABI version"));
}
let tests = symbols::resolve(&tests_symbols, &library);
let mut global_ctx: TestCtx = ptr::null_mut();
if let Some(global_setup) = tests_runner.global_setup | {
unsafe { global_setup(&mut global_ctx) }
} | conditional_block |
|
bench.rs | unsafe extern "C" fn(*mut TestCtx)>,
global_teardown: Option<unsafe extern "C" fn(TestCtx)>,
version: u64,
}
/// A named test body function
#[derive(Clone, Debug)]
pub struct TestBody {
pub name: String,
pub body_fn: TestBodyFn,
}
/// An individual test, with function pointers for each step
#[derive(Clone, Debug)]
pub struct Test {
pub name: String,
pub setup_fn: Option<TestSetupFn>,
pub bodies: Vec<TestBody>,
pub teardown_fn: Option<TestTeardownFn>,
}
/// Measurements for a "body" of a test
#[derive(Clone)]
pub struct TestBodySummary {
pub name: String,
pub summary: Summary,
}
/// The outcome of a test
#[derive(Clone)]
struct TestResult {
name: String,
grand_summary: Summary,
bodies_summary: Vec<TestBodySummary>,
}
/// The outcome of a test, without the name of the test
pub struct | {
pub grand_summary: Summary,
pub bodies_summary: Vec<TestBodySummary>,
}
impl Default for AnonymousTestResult {
fn default() -> Self {
Self {
grand_summary: Summary::new(&[0.0]),
bodies_summary: vec![],
}
}
}
impl From<TestResult> for AnonymousTestResult {
fn from(test_result: TestResult) -> Self {
AnonymousTestResult {
grand_summary: test_result.grand_summary,
bodies_summary: test_result.bodies_summary,
}
}
}
/// Environment for a single test
#[derive(Clone)]
struct TestBodiesBench {
precision: Precision,
ctx: TestCtx,
bodies: Vec<unsafe extern "C" fn(TestCtx)>,
}
#[derive(Default, Debug, Clone)]
pub struct Sample<T>(Vec<T>);
impl<T> Sample<T> {
pub fn empty() -> Self {
Sample(vec![])
}
}
pub trait Runnable<Ret> {
fn setup(&mut self) {}
fn teardown(&mut self) {}
fn bodies<'t>(&'t mut self) -> Vec<Box<dyn Fn(usize) -> Ret + 't>>;
}
impl TestBodiesBench {
#[inline]
fn body(&self, body_id: usize) {
unsafe { (self.bodies[body_id])(self.ctx) }
}
}
impl Runnable<()> for TestBodiesBench {
fn bodies<'t>(&'t mut self) -> Vec<Box<dyn Fn(usize) -> () + 't>> {
let mut fns: Vec<Box<dyn Fn(usize) -> () + 't>> = vec![];
for _ in 0..self.bodies.len() {
let this = self.clone();
fns.push(Box::new(move |body_id| this.body(body_id)))
}
fns
}
}
pub struct AdaptiveRunner {
round_size: usize,
min_sample_size: usize,
min_run_time_ms: u64,
max_run_time_ms: u64,
precision: Precision,
}
#[derive(Clone)]
pub struct RunnerResult {
pub summaries: Vec<Summary>,
pub grand_summary: Summary,
}
impl AdaptiveRunner {
pub fn new(
initial_round_size: usize,
min_sample_size: usize,
min_run_time_ms: u64,
max_run_time_ms: u64,
precision: &Precision,
) -> Self {
AdaptiveRunner {
round_size: initial_round_size,
min_sample_size,
min_run_time_ms,
max_run_time_ms,
precision: precision.clone(),
}
}
pub fn bench<Target, Ret>(&self, target: &mut Target) -> RunnerResult
where
Target: Runnable<Ret>,
{
let mut sample_for_all_bodies: Sample<Elapsed> = Sample::empty();
let mut samples: Vec<Sample<Elapsed>> = vec![];
let bodies = target.bodies();
samples.resize(bodies.len(), Sample::empty());
let mut round_size = self.round_size;
let ts_bench_start = self.precision.now();
let mut sample_id = 0;
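// Measurement loop: each pass runs every body round_size times and records
// the per-iteration average as one sample.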
loop {
let mut elapsed_vec: Vec<Elapsed> = vec![];
elapsed_vec.resize(bodies.len(), Elapsed::new());
for _ in 0..round_size {
for (body_id, body) in bodies.iter().enumerate() {
let ts_start = self.precision.now();
body(body_id);
let ts_end = self.precision.now();
elapsed_vec[body_id] += ts_end - ts_start;
}
}
let mut elapsed_for_all_bodies = Elapsed::new();
for (body_id, elapsed) in elapsed_vec.into_iter().enumerate() {
samples[body_id]
.0
.push(Elapsed::from_ticks(elapsed.ticks() / round_size as u64));
elapsed_for_all_bodies += elapsed;
}
sample_for_all_bodies.0.push(Elapsed::from_ticks(
elapsed_for_all_bodies.ticks() / round_size as u64,
));
let elapsed_total = (self.precision.now() - ts_bench_start).as_millis(&self.precision);
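// Adaptive sizing: below min_run_time_ms, double the round size and keep
// sampling; stop once max_run_time_ms is exceeded or min_sample_size samples
// have been collected.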
if elapsed_total < self.min_run_time_ms {
round_size = round_size.saturating_add(round_size);
continue;
}
if elapsed_total > self.max_run_time_ms {
break;
}
sample_id += 1;
if sample_id >= self.min_sample_size {
break;
}
}
let summaries: Vec<_> = samples
.into_iter()
.map(|sample| {
Summary::new(
sample
.0
.into_iter()
.map(|elapsed| elapsed.as_ns(&self.precision) as f64)
.collect::<Vec<f64>>()
.as_slice(),
)
})
.collect();
let grand_summary = Summary::new(
sample_for_all_bodies
.0
.into_iter()
.map(|elapsed| elapsed.as_ns(&self.precision) as f64)
.collect::<Vec<f64>>()
.as_slice(),
);
RunnerResult {
summaries,
grand_summary,
}
}
}
/// Run an individual test
fn run_test(
config: &Config,
precision: &Precision,
global_ctx: TestCtx,
test: &Test,
) -> Result<TestResult, BenchError> {
let mut ctx: TestCtx = ptr::null_mut();
if let Some(setup) = (*test).setup_fn {
unsafe { setup(global_ctx, &mut ctx) }
}
let bench_runner = AdaptiveRunner::new(
config
.initial_round_size
.unwrap_or(DEFAULT_INITIAL_ROUND_SIZE),
config.min_sample_size.unwrap_or(DEFAULT_MIN_SAMPLE_SIZE),
config.min_run_time_ms.unwrap_or(DEFAULT_MIN_RUN_TIME_MS),
config.max_run_time_ms.unwrap_or(DEFAULT_MAX_RUN_TIME_MS),
precision,
);
let mut test_bodies_bench = TestBodiesBench {
precision: precision.clone(),
ctx,
bodies: (*test)
.bodies
.clone()
.iter()
.map(|body| body.body_fn)
.collect(),
};
let bench_result = bench_runner.bench(&mut test_bodies_bench);
let mut bodies_summary = vec![];
for (body_id, body) in (*test).bodies.iter().enumerate() {
let test_body_summary = TestBodySummary {
name: body.name.clone(),
summary: bench_result.summaries[body_id].clone(),
};
bodies_summary.push(test_body_summary);
}
unsafe { (*test).teardown_fn.map(|teardown_fn| teardown_fn(ctx)) };
let grand_summary = bench_result.grand_summary;
let name = test.name.clone();
Ok(TestResult {
name,
grand_summary,
bodies_summary,
})
}
/// Run a sequence of tests
fn run_tests(
config: &Config,
global_ctx: TestCtx,
tests: Vec<Test>,
) -> Result<Vec<TestResult>, BenchError> {
let mut test_results: Vec<TestResult> = vec![];
let precision = Precision::new(precision::Config::default())?;
for test in tests {
eprintln!(" - {}", test.name);
let test_result = run_test(config, &precision, global_ctx, &test)?;
test_results.push(test_result);
}
Ok(test_results)
}
#[cfg(unix)]
fn load_library<P: AsRef<OsStr>>(
library_path: P,
rtld_lazy: bool,
rtld_global: bool,
) -> Result<Library, BenchError> {
let mut flags = 0;
if rtld_lazy {
flags |= RTLD_LAZY;
} else {
flags |= RTLD_NOW;
}
if rtld_global {
flags |= RTLD_GLOBAL;
} else {
flags |= RTLD_LOCAL;
}
let library = libloading::os::unix::Library::open(Some(library_path), flags)?.into();
Ok(library)
}
#[cfg(not(unix))]
fn load_library<P: AsRef<OsStr>>(
library_path: P,
_rtld_lazy: bool,
_rtld_global: bool,
) -> Result<Library, BenchError> {
Ok(Library::new(library_path)?)
}
/// Bench all functions contained in a shared library
fn bench_library(config: &Config, library_path: &Path) -> Result<Vec<TestResult>, BenchError | AnonymousTestResult | identifier_name |
bench.rs | setup(&mut self) {}
fn teardown(&mut self) {}
fn bodies<'t>(&'t mut self) -> Vec<Box<dyn Fn(usize) -> Ret + 't>>;
}
impl TestBodiesBench {
#[inline]
fn body(&self, body_id: usize) {
unsafe { (self.bodies[body_id])(self.ctx) }
}
}
impl Runnable<()> for TestBodiesBench {
fn bodies<'t>(&'t mut self) -> Vec<Box<dyn Fn(usize) -> () + 't>> {
let mut fns: Vec<Box<dyn Fn(usize) -> () + 't>> = vec![];
for _ in 0..self.bodies.len() {
let this = self.clone();
fns.push(Box::new(move |body_id| this.body(body_id)))
}
fns
}
}
pub struct AdaptiveRunner {
round_size: usize,
min_sample_size: usize,
min_run_time_ms: u64,
max_run_time_ms: u64,
precision: Precision,
}
#[derive(Clone)]
pub struct RunnerResult {
pub summaries: Vec<Summary>,
pub grand_summary: Summary,
}
impl AdaptiveRunner {
pub fn new(
initial_round_size: usize,
min_sample_size: usize,
min_run_time_ms: u64,
max_run_time_ms: u64,
precision: &Precision,
) -> Self {
AdaptiveRunner {
round_size: initial_round_size,
min_sample_size,
min_run_time_ms,
max_run_time_ms,
precision: precision.clone(),
}
}
pub fn bench<Target, Ret>(&self, target: &mut Target) -> RunnerResult
where
Target: Runnable<Ret>,
{
let mut sample_for_all_bodies: Sample<Elapsed> = Sample::empty();
let mut samples: Vec<Sample<Elapsed>> = vec![];
let bodies = target.bodies();
samples.resize(bodies.len(), Sample::empty());
let mut round_size = self.round_size;
let ts_bench_start = self.precision.now();
let mut sample_id = 0;
loop {
let mut elapsed_vec: Vec<Elapsed> = vec![];
elapsed_vec.resize(bodies.len(), Elapsed::new());
for _ in 0..round_size {
for (body_id, body) in bodies.iter().enumerate() {
let ts_start = self.precision.now();
body(body_id);
let ts_end = self.precision.now();
elapsed_vec[body_id] += ts_end - ts_start;
}
}
let mut elapsed_for_all_bodies = Elapsed::new();
for (body_id, elapsed) in elapsed_vec.into_iter().enumerate() {
samples[body_id]
.0
.push(Elapsed::from_ticks(elapsed.ticks() / round_size as u64));
elapsed_for_all_bodies += elapsed;
}
sample_for_all_bodies.0.push(Elapsed::from_ticks(
elapsed_for_all_bodies.ticks() / round_size as u64,
));
let elapsed_total = (self.precision.now() - ts_bench_start).as_millis(&self.precision);
if elapsed_total < self.min_run_time_ms {
round_size = round_size.saturating_add(round_size);
continue;
}
if elapsed_total > self.max_run_time_ms {
break;
}
sample_id += 1;
if sample_id >= self.min_sample_size {
break;
}
}
let summaries: Vec<_> = samples
.into_iter()
.map(|sample| {
Summary::new(
sample
.0
.into_iter()
.map(|elapsed| elapsed.as_ns(&self.precision) as f64)
.collect::<Vec<f64>>()
.as_slice(),
)
})
.collect();
let grand_summary = Summary::new(
sample_for_all_bodies
.0
.into_iter()
.map(|elapsed| elapsed.as_ns(&self.precision) as f64)
.collect::<Vec<f64>>()
.as_slice(),
);
RunnerResult {
summaries,
grand_summary,
}
}
}
/// Run an individual test
fn run_test(
config: &Config,
precision: &Precision,
global_ctx: TestCtx,
test: &Test,
) -> Result<TestResult, BenchError> {
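// Per-test lifecycle: an optional setup_fn builds the test context, the
// adaptive runner measures every body, then teardown_fn releases the context.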
let mut ctx: TestCtx = ptr::null_mut();
if let Some(setup) = (*test).setup_fn {
unsafe { setup(global_ctx, &mut ctx) }
}
let bench_runner = AdaptiveRunner::new(
config
.initial_round_size
.unwrap_or(DEFAULT_INITIAL_ROUND_SIZE),
config.min_sample_size.unwrap_or(DEFAULT_MIN_SAMPLE_SIZE),
config.min_run_time_ms.unwrap_or(DEFAULT_MIN_RUN_TIME_MS),
config.max_run_time_ms.unwrap_or(DEFAULT_MAX_RUN_TIME_MS),
precision,
);
let mut test_bodies_bench = TestBodiesBench {
precision: precision.clone(),
ctx,
bodies: (*test)
.bodies
.clone()
.iter()
.map(|body| body.body_fn)
.collect(),
};
let bench_result = bench_runner.bench(&mut test_bodies_bench);
let mut bodies_summary = vec![];
for (body_id, body) in (*test).bodies.iter().enumerate() {
let test_body_summary = TestBodySummary {
name: body.name.clone(),
summary: bench_result.summaries[body_id].clone(),
};
bodies_summary.push(test_body_summary);
}
unsafe { (*test).teardown_fn.map(|teardown_fn| teardown_fn(ctx)) };
let grand_summary = bench_result.grand_summary;
let name = test.name.clone();
Ok(TestResult {
name,
grand_summary,
bodies_summary,
})
}
/// Run a sequence of tests
fn run_tests(
config: &Config,
global_ctx: TestCtx,
tests: Vec<Test>,
) -> Result<Vec<TestResult>, BenchError> {
let mut test_results: Vec<TestResult> = vec![];
let precision = Precision::new(precision::Config::default())?;
for test in tests {
eprintln!(" - {}", test.name);
let test_result = run_test(config, &precision, global_ctx, &test)?;
test_results.push(test_result);
}
Ok(test_results)
}
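// On Unix the library is opened with explicit RTLD_* flags so the caller can
// choose lazy vs. immediate binding and local vs. global symbol visibility.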
#[cfg(unix)]
fn load_library<P: AsRef<OsStr>>(
library_path: P,
rtld_lazy: bool,
rtld_global: bool,
) -> Result<Library, BenchError> {
let mut flags = 0;
if rtld_lazy {
flags |= RTLD_LAZY;
} else {
flags |= RTLD_NOW;
}
if rtld_global {
flags |= RTLD_GLOBAL;
} else {
flags |= RTLD_LOCAL;
}
let library = libloading::os::unix::Library::open(Some(library_path), flags)?.into();
Ok(library)
}
#[cfg(not(unix))]
fn load_library<P: AsRef<OsStr>>(
library_path: P,
_rtld_lazy: bool,
_rtld_global: bool,
) -> Result<Library, BenchError> {
Ok(Library::new(library_path)?)
}
/// Bench all functions contained in a shared library
fn bench_library(config: &Config, library_path: &Path) -> Result<Vec<TestResult>, BenchError> {
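// Extract the exported test symbols, then check the ABI version embedded in
// the library before resolving and running anything.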
let tests_symbols = symbols::extract_tests_symbols(library_path)?;
let library = load_library(library_path, false, true)?;
let tests_runner: Symbol<'_, &TestsConfig> =
unsafe { library.get(TEST_LIBRARIES_TABLE_SYMBOL) }.map_err(BenchError::from)?;
if tests_runner.version != TEST_ABI_VERSION {
return Err(BenchError::ABIError("Incompatible ABI version"));
}
let tests = symbols::resolve(&tests_symbols, &library);
let mut global_ctx: TestCtx = ptr::null_mut();
if let Some(global_setup) = tests_runner.global_setup {
unsafe { global_setup(&mut global_ctx) }
}
let test_results = run_tests(config, global_ctx, tests)?;
if let Some(global_teardown) = tests_runner.global_teardown {
unsafe { global_teardown(global_ctx) }
}
Ok(test_results)
}
/// Run an optional guard command
/// Returns `false` on success (return code = `0`), `true` on failure
fn disabled_due_to_guard(guard: &[String]) -> bool {
match Command::new(&guard[0]).args(&guard[1..]).status() {
Err(e) => {
eprintln!(
"Cannot run the [{}] guard script: [{}]",
&guard[0],
e.to_string()
);
true
}
Ok(status) => !status.success(),
}
}
/// Entry point to run benchmarks according to a given configuration
pub fn bench(config: &Config) -> Result<(), BenchError> {
let mut test_suites_results: HashMap<String, HashMap<String, AnonymousTestResult>> =
HashMap::new();
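// A suite may declare a guard command; if the guard fails to run or exits
// non-zero, the whole suite is skipped.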
for test_suite in &config.test_suites {
if let Some(guard) = &test_suite.guard {
if !guard.is_empty() && disabled_due_to_guard(guard) {
continue;
}
}
eprintln!("{}:", test_suite.name); | let library_path = &test_suite.library_path; | random_line_split |
|
bench.rs | <unsafe extern "C" fn(*mut TestCtx)>,
global_teardown: Option<unsafe extern "C" fn(TestCtx)>,
version: u64,
}
/// A named test body function
#[derive(Clone, Debug)]
pub struct TestBody {
pub name: String,
pub body_fn: TestBodyFn,
}
/// An individual test, with function pointers for each step
#[derive(Clone, Debug)]
pub struct Test {
pub name: String,
pub setup_fn: Option<TestSetupFn>,
pub bodies: Vec<TestBody>,
pub teardown_fn: Option<TestTeardownFn>,
}
/// Measurements for a "body" of a test
#[derive(Clone)]
pub struct TestBodySummary {
pub name: String,
pub summary: Summary,
}
/// The outcome of a test
#[derive(Clone)]
struct TestResult {
name: String,
grand_summary: Summary,
bodies_summary: Vec<TestBodySummary>,
}
/// The outcome of a test, without the name of the test
pub struct AnonymousTestResult {
pub grand_summary: Summary,
pub bodies_summary: Vec<TestBodySummary>,
}
impl Default for AnonymousTestResult {
fn default() -> Self {
Self {
grand_summary: Summary::new(&[0.0]),
bodies_summary: vec![],
}
}
}
impl From<TestResult> for AnonymousTestResult {
fn from(test_result: TestResult) -> Self {
AnonymousTestResult {
grand_summary: test_result.grand_summary,
bodies_summary: test_result.bodies_summary,
}
}
}
/// Environment for a single test
#[derive(Clone)]
struct TestBodiesBench {
precision: Precision,
ctx: TestCtx,
bodies: Vec<unsafe extern "C" fn(TestCtx)>,
}
#[derive(Default, Debug, Clone)]
pub struct Sample<T>(Vec<T>);
impl<T> Sample<T> {
pub fn empty() -> Self {
Sample(vec![])
}
}
pub trait Runnable<Ret> {
fn setup(&mut self) {}
fn teardown(&mut self) {}
fn bodies<'t>(&'t mut self) -> Vec<Box<dyn Fn(usize) -> Ret + 't>>;
}
impl TestBodiesBench {
#[inline]
fn body(&self, body_id: usize) {
unsafe { (self.bodies[body_id])(self.ctx) }
}
}
impl Runnable<()> for TestBodiesBench {
fn bodies<'t>(&'t mut self) -> Vec<Box<dyn Fn(usize) -> () + 't>> {
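// Wrap each registered extern "C" body in a boxed closure; every closure
// clones the harness so it can be invoked by index without borrowing self.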
let mut fns: Vec<Box<dyn Fn(usize) -> () + 't>> = vec![];
for _ in 0..self.bodies.len() {
let this = self.clone();
fns.push(Box::new(move |body_id| this.body(body_id)))
}
fns
}
}
pub struct AdaptiveRunner {
round_size: usize,
min_sample_size: usize,
min_run_time_ms: u64,
max_run_time_ms: u64,
precision: Precision,
}
#[derive(Clone)]
pub struct RunnerResult {
pub summaries: Vec<Summary>,
pub grand_summary: Summary,
}
impl AdaptiveRunner {
pub fn new(
initial_round_size: usize,
min_sample_size: usize,
min_run_time_ms: u64,
max_run_time_ms: u64,
precision: &Precision,
) -> Self {
AdaptiveRunner {
round_size: initial_round_size,
min_sample_size,
min_run_time_ms,
max_run_time_ms,
precision: precision.clone(),
}
}
pub fn bench<Target, Ret>(&self, target: &mut Target) -> RunnerResult
where
Target: Runnable<Ret>,
{
let mut sample_for_all_bodies: Sample<Elapsed> = Sample::empty();
let mut samples: Vec<Sample<Elapsed>> = vec![];
let bodies = target.bodies();
samples.resize(bodies.len(), Sample::empty());
let mut round_size = self.round_size;
let ts_bench_start = self.precision.now();
let mut sample_id = 0;
loop {
let mut elapsed_vec: Vec<Elapsed> = vec![];
elapsed_vec.resize(bodies.len(), Elapsed::new());
for _ in 0..round_size {
for (body_id, body) in bodies.iter().enumerate() {
let ts_start = self.precision.now();
body(body_id);
let ts_end = self.precision.now();
elapsed_vec[body_id] += ts_end - ts_start;
}
}
let mut elapsed_for_all_bodies = Elapsed::new();
for (body_id, elapsed) in elapsed_vec.into_iter().enumerate() {
samples[body_id]
.0
.push(Elapsed::from_ticks(elapsed.ticks() / round_size as u64));
elapsed_for_all_bodies += elapsed;
}
sample_for_all_bodies.0.push(Elapsed::from_ticks(
elapsed_for_all_bodies.ticks() / round_size as u64,
));
let elapsed_total = (self.precision.now() - ts_bench_start).as_millis(&self.precision);
if elapsed_total < self.min_run_time_ms {
round_size = round_size.saturating_add(round_size);
continue;
}
if elapsed_total > self.max_run_time_ms {
break;
}
sample_id += 1;
if sample_id >= self.min_sample_size {
break;
}
}
let summaries: Vec<_> = samples
.into_iter()
.map(|sample| {
Summary::new(
sample
.0
.into_iter()
.map(|elapsed| elapsed.as_ns(&self.precision) as f64)
.collect::<Vec<f64>>()
.as_slice(),
)
})
.collect();
let grand_summary = Summary::new(
sample_for_all_bodies
.0
.into_iter()
.map(|elapsed| elapsed.as_ns(&self.precision) as f64)
.collect::<Vec<f64>>()
.as_slice(),
);
RunnerResult {
summaries,
grand_summary,
}
}
}
/// Run an individual test
fn run_test(
config: &Config,
precision: &Precision,
global_ctx: TestCtx,
test: &Test,
) -> Result<TestResult, BenchError> | .clone()
.iter()
.map(|body| body.body_fn)
.collect(),
};
let bench_result = bench_runner.bench(&mut test_bodies_bench);
let mut bodies_summary = vec![];
for (body_id, body) in (*test).bodies.iter().enumerate() {
let test_body_summary = TestBodySummary {
name: body.name.clone(),
summary: bench_result.summaries[body_id].clone(),
};
bodies_summary.push(test_body_summary);
}
unsafe { (*test).teardown_fn.map(|teardown_fn| teardown_fn(ctx)) };
let grand_summary = bench_result.grand_summary;
let name = test.name.clone();
Ok(TestResult {
name,
grand_summary,
bodies_summary,
})
}
/// Run a sequence of tests
fn run_tests(
config: &Config,
global_ctx: TestCtx,
tests: Vec<Test>,
) -> Result<Vec<TestResult>, BenchError> {
let mut test_results: Vec<TestResult> = vec![];
let precision = Precision::new(precision::Config::default())?;
for test in tests {
eprintln!(" - {}", test.name);
let test_result = run_test(config, &precision, global_ctx, &test)?;
test_results.push(test_result);
}
Ok(test_results)
}
#[cfg(unix)]
fn load_library<P: AsRef<OsStr>>(
library_path: P,
rtld_lazy: bool,
rtld_global: bool,
) -> Result<Library, BenchError> {
let mut flags = 0;
if rtld_lazy {
flags |= RTLD_LAZY;
} else {
flags |= RTLD_NOW;
}
if rtld_global {
flags |= RTLD_GLOBAL;
} else {
flags |= RTLD_LOCAL;
}
let library = libloading::os::unix::Library::open(Some(library_path), flags)?.into();
Ok(library)
}
#[cfg(not(unix))]
fn load_library<P: AsRef<OsStr>>(
library_path: P,
_rtld_lazy: bool,
_rtld_global: bool,
) -> Result<Library, BenchError> {
Ok(Library::new(library_path)?)
}
/// Bench all functions contained in a shared library
fn bench_library(config: &Config, library_path: &Path) -> Result<Vec<TestResult>, BenchError | {
let mut ctx: TestCtx = ptr::null_mut();
if let Some(setup) = (*test).setup_fn {
unsafe { setup(global_ctx, &mut ctx) }
}
let bench_runner = AdaptiveRunner::new(
config
.initial_round_size
.unwrap_or(DEFAULT_INITIAL_ROUND_SIZE),
config.min_sample_size.unwrap_or(DEFAULT_MIN_SAMPLE_SIZE),
config.min_run_time_ms.unwrap_or(DEFAULT_MIN_RUN_TIME_MS),
config.max_run_time_ms.unwrap_or(DEFAULT_MAX_RUN_TIME_MS),
precision,
);
let mut test_bodies_bench = TestBodiesBench {
precision: precision.clone(),
ctx,
bodies: (*test)
.bodies | identifier_body |
opmapi.rs | 25 = 0x8,
OPM_PROTECTION_STANDARD_EN300294_625I = 0x10,
OPM_PROTECTION_STANDARD_CEA805A_TYPEA_525P = 0x20,
OPM_PROTECTION_STANDARD_CEA805A_TYPEA_750P = 0x40,
OPM_PROTECTION_STANDARD_CEA805A_TYPEA_1125I = 0x80,
OPM_PROTECTION_STANDARD_CEA805A_TYPEB_525P = 0x100,
OPM_PROTECTION_STANDARD_CEA805A_TYPEB_750P = 0x200,
OPM_PROTECTION_STANDARD_CEA805A_TYPEB_1125I = 0x400,
OPM_PROTECTION_STANDARD_ARIBTRB15_525I = 0x800,
OPM_PROTECTION_STANDARD_ARIBTRB15_525P = 0x1000,
OPM_PROTECTION_STANDARD_ARIBTRB15_750P = 0x2000,
OPM_PROTECTION_STANDARD_ARIBTRB15_1125I = 0x4000,
}}
ENUM!{enum OPM_IMAGE_ASPECT_RATIO_EN300294 {
OPM_ASPECT_RATIO_EN300294_FULL_FORMAT_4_BY_3 = 0,
OPM_ASPECT_RATIO_EN300294_BOX_14_BY_9_CENTER = 1,
OPM_ASPECT_RATIO_EN300294_BOX_14_BY_9_TOP = 2,
OPM_ASPECT_RATIO_EN300294_BOX_16_BY_9_CENTER = 3,
OPM_ASPECT_RATIO_EN300294_BOX_16_BY_9_TOP = 4,
OPM_ASPECT_RATIO_EN300294_BOX_GT_16_BY_9_CENTER = 5,
OPM_ASPECT_RATIO_EN300294_FULL_FORMAT_4_BY_3_PROTECTED_CENTER = 6,
OPM_ASPECT_RATIO_EN300294_FULL_FORMAT_16_BY_9_ANAMORPHIC = 7,
OPM_ASPECT_RATIO_FORCE_ULONG = 0x7fffffff,
}}
STRUCT!{#[repr(packed)] struct OPM_RANDOM_NUMBER {
abRandomNumber: [BYTE; 16],
}}
STRUCT!{#[repr(packed)] struct OPM_OMAC {
abOMAC: [BYTE; 16],
}}
STRUCT!{#[repr(packed)] struct OPM_ENCRYPTED_INITIALIZATION_PARAMETERS {
abEncryptedInitializationParameters: [BYTE; 256],
}}
STRUCT!{#[repr(packed)] struct OPM_GET_INFO_PARAMETERS {
omac: OPM_OMAC,
rnRandomNumber: OPM_RANDOM_NUMBER,
guidInformation: GUID,
ulSequenceNumber: ULONG,
cbParametersSize: ULONG,
abParameters: [BYTE; 4056],
}}
STRUCT!{#[repr(packed)] struct OPM_COPP_COMPATIBLE_GET_INFO_PARAMETERS {
rnRandomNumber: OPM_RANDOM_NUMBER,
guidInformation: GUID,
ulSequenceNumber: ULONG,
cbParametersSize: ULONG,
abParameters: [BYTE; 4056],
}}
STRUCT!{#[repr(packed)] struct OPM_HDCP_KEY_SELECTION_VECTOR {
abKeySelectionVector: [BYTE; 5],
}}
STRUCT!{#[repr(packed)] struct OPM_CONNECTED_HDCP_DEVICE_INFORMATION {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
ulHDCPFlags: ULONG,
ksvB: OPM_HDCP_KEY_SELECTION_VECTOR,
Reserved: [BYTE; 11],
Reserved2: [BYTE; 16],
Reserved3: [BYTE; 16],
}}
STRUCT!{#[repr(packed)] struct OPM_REQUESTED_INFORMATION {
omac: OPM_OMAC,
cbRequestedInformationSize: ULONG,
abRequestedInformation: [BYTE; 4076],
}}
STRUCT!{#[repr(packed)] struct OPM_STANDARD_INFORMATION {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
ulInformation: ULONG,
ulReserved: ULONG,
ulReserved2: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_ACTUAL_OUTPUT_FORMAT {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
ulDisplayWidth: ULONG,
ulDisplayHeight: ULONG,
dsfSampleInterleaveFormat: DXVA2_SampleFormat,
d3dFormat: D3DFORMAT,
ulFrequencyNumerator: ULONG,
ulFrequencyDenominator: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_ACP_AND_CGMSA_SIGNALING {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
ulAvailableTVProtectionStandards: ULONG,
ulActiveTVProtectionStandard: ULONG,
ulReserved: ULONG,
ulAspectRatioValidMask1: ULONG,
ulAspectRatioData1: ULONG,
ulAspectRatioValidMask2: ULONG,
ulAspectRatioData2: ULONG,
ulAspectRatioValidMask3: ULONG,
ulAspectRatioData3: ULONG,
ulReserved2: [ULONG; 4],
ulReserved3: [ULONG; 4],
}}
STRUCT!{#[repr(packed)] struct OPM_OUTPUT_ID_DATA {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
OutputId: UINT64,
}}
STRUCT!{#[repr(packed)] struct OPM_CONFIGURE_PARAMETERS {
omac: OPM_OMAC,
guidSetting: GUID,
ulSequenceNumber: ULONG,
cbParametersSize: ULONG,
abParameters: [BYTE; 4056],
}}
STRUCT!{#[repr(packed)] struct OPM_SET_PROTECTION_LEVEL_PARAMETERS {
ulProtectionType: ULONG,
ulProtectionLevel: ULONG,
Reserved: ULONG,
Reserved2: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_SET_ACP_AND_CGMSA_SIGNALING_PARAMETERS {
ulNewTVProtectionStandard: ULONG,
ulAspectRatioChangeMask1: ULONG,
ulAspectRatioData1: ULONG,
ulAspectRatioChangeMask2: ULONG,
ulAspectRatioData2: ULONG,
ulAspectRatioChangeMask3: ULONG,
ulAspectRatioData3: ULONG,
ulReserved: [ULONG; 4],
ulReserved2: [ULONG; 4],
ulReserved3: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_SET_HDCP_SRM_PARAMETERS {
ulSRMVersion: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_GET_CODEC_INFO_PARAMETERS {
cbVerifier: DWORD,
Verifier: [BYTE; 4052],
}}
STRUCT!{#[repr(packed)] struct OPM_GET_CODEC_INFO_INFORMATION {
rnRandomNumber: OPM_RANDOM_NUMBER,
Merit: DWORD,
}}
DEFINE_GUID!{IID_IOPMVideoOutput,
0x0a15159d, 0x41c7, 0x4456, 0x93, 0xe1, 0x28, 0x4c, 0xd6, 0x1d, 0x4e, 0x8d}
RIDL!{#[uuid(0x0a15159d, 0x41c7, 0x4456, 0x93, 0xe1, 0x28, 0x4c, 0xd6, 0x1d, 0x4e, 0x8d)]
interface IOPMVideoOutput(IOPMVideoOutputVtbl): IUnknown(IUnknownVtbl) {
fn StartInitialization(
prnRandomNumber: *mut OPM_RANDOM_NUMBER,
ppbCertificate: *mut *mut BYTE,
pulCertificateLength: *mut ULONG,
) -> HRESULT,
fn FinishInitialization(
pParameters: *const OPM_ENCRYPTED_INITIALIZATION_PARAMETERS,
) -> HRESULT,
fn GetInformation(
pParameters: *const OPM_GET_INFO_PARAMETERS,
pRequestedInformation: *mut OPM_REQUESTED_INFORMATION,
) -> HRESULT,
fn COPPCompatibleGetInformation(
pParameters: *const OPM_COPP_COMPATIBLE_GET_INFO_PARAMETERS,
pRequestedInformation: *mut OPM_REQUESTED_INFORMATION,
) -> HRESULT,
fn Configure(
pParameters: *const OPM_CONFIGURE_PARAMETERS,
ulAdditionalParametersSize: ULONG,
pbAdditionalParameters: *const BYTE,
) -> HRESULT,
}}
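// Helpers that unpack the bus type and implementation-modifier fields packed
// into a single ULONG.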
#[inline]
pub fn GetBusType(ulBusTypeAndImplementation: ULONG) -> ULONG {
ulBusTypeAndImplementation & OPM_BUS_TYPE_MASK
}
#[inline]
pub fn GetBusImplementation(ulBusTypeAndImplementation: ULONG) -> ULONG {
(ulBusTypeAndImplementation & OPM_BUS_IMPLEMENTATION_MODIFIER_MASK) >> 16
}
#[inline]
pub fn IsNonStandardBusImplementation(ulBusTypeAndImplementation: ULONG) -> ULONG | {
ulBusTypeAndImplementation & OPM_BUS_IMPLEMENTATION_MODIFIER_NON_STANDARD
} | identifier_body |
|
opmapi.rs | _525I = 0x1,
OPM_PROTECTION_STANDARD_IEC61880_2_525I = 0x2,
OPM_PROTECTION_STANDARD_IEC62375_625P = 0x4,
OPM_PROTECTION_STANDARD_EIA608B_525 = 0x8,
OPM_PROTECTION_STANDARD_EN300294_625I = 0x10,
OPM_PROTECTION_STANDARD_CEA805A_TYPEA_525P = 0x20,
OPM_PROTECTION_STANDARD_CEA805A_TYPEA_750P = 0x40,
OPM_PROTECTION_STANDARD_CEA805A_TYPEA_1125I = 0x80,
OPM_PROTECTION_STANDARD_CEA805A_TYPEB_525P = 0x100,
OPM_PROTECTION_STANDARD_CEA805A_TYPEB_750P = 0x200,
OPM_PROTECTION_STANDARD_CEA805A_TYPEB_1125I = 0x400,
OPM_PROTECTION_STANDARD_ARIBTRB15_525I = 0x800,
OPM_PROTECTION_STANDARD_ARIBTRB15_525P = 0x1000,
OPM_PROTECTION_STANDARD_ARIBTRB15_750P = 0x2000,
OPM_PROTECTION_STANDARD_ARIBTRB15_1125I = 0x4000,
}}
ENUM!{enum OPM_IMAGE_ASPECT_RATIO_EN300294 {
OPM_ASPECT_RATIO_EN300294_FULL_FORMAT_4_BY_3 = 0,
OPM_ASPECT_RATIO_EN300294_BOX_14_BY_9_CENTER = 1,
OPM_ASPECT_RATIO_EN300294_BOX_14_BY_9_TOP = 2,
OPM_ASPECT_RATIO_EN300294_BOX_16_BY_9_CENTER = 3,
OPM_ASPECT_RATIO_EN300294_BOX_16_BY_9_TOP = 4,
OPM_ASPECT_RATIO_EN300294_BOX_GT_16_BY_9_CENTER = 5,
OPM_ASPECT_RATIO_EN300294_FULL_FORMAT_4_BY_3_PROTECTED_CENTER = 6,
OPM_ASPECT_RATIO_EN300294_FULL_FORMAT_16_BY_9_ANAMORPHIC = 7,
OPM_ASPECT_RATIO_FORCE_ULONG = 0x7fffffff,
}}
STRUCT!{#[repr(packed)] struct OPM_RANDOM_NUMBER {
abRandomNumber: [BYTE; 16],
}}
STRUCT!{#[repr(packed)] struct OPM_OMAC {
abOMAC: [BYTE; 16],
}}
STRUCT!{#[repr(packed)] struct OPM_ENCRYPTED_INITIALIZATION_PARAMETERS {
abEncryptedInitializationParameters: [BYTE; 256],
}}
STRUCT!{#[repr(packed)] struct OPM_GET_INFO_PARAMETERS {
omac: OPM_OMAC,
rnRandomNumber: OPM_RANDOM_NUMBER,
guidInformation: GUID,
ulSequenceNumber: ULONG,
cbParametersSize: ULONG,
abParameters: [BYTE; 4056],
}}
STRUCT!{#[repr(packed)] struct OPM_COPP_COMPATIBLE_GET_INFO_PARAMETERS {
rnRandomNumber: OPM_RANDOM_NUMBER,
guidInformation: GUID,
ulSequenceNumber: ULONG,
cbParametersSize: ULONG,
abParameters: [BYTE; 4056],
}}
STRUCT!{#[repr(packed)] struct OPM_HDCP_KEY_SELECTION_VECTOR {
abKeySelectionVector: [BYTE; 5],
}}
STRUCT!{#[repr(packed)] struct OPM_CONNECTED_HDCP_DEVICE_INFORMATION {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
ulHDCPFlags: ULONG,
ksvB: OPM_HDCP_KEY_SELECTION_VECTOR,
Reserved: [BYTE; 11],
Reserved2: [BYTE; 16],
Reserved3: [BYTE; 16],
}}
STRUCT!{#[repr(packed)] struct OPM_REQUESTED_INFORMATION {
omac: OPM_OMAC,
cbRequestedInformationSize: ULONG,
abRequestedInformation: [BYTE; 4076],
}}
STRUCT!{#[repr(packed)] struct OPM_STANDARD_INFORMATION {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
ulInformation: ULONG,
ulReserved: ULONG,
ulReserved2: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_ACTUAL_OUTPUT_FORMAT {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
ulDisplayWidth: ULONG,
ulDisplayHeight: ULONG,
dsfSampleInterleaveFormat: DXVA2_SampleFormat,
d3dFormat: D3DFORMAT,
ulFrequencyNumerator: ULONG,
ulFrequencyDenominator: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_ACP_AND_CGMSA_SIGNALING {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
ulAvailableTVProtectionStandards: ULONG,
ulActiveTVProtectionStandard: ULONG,
ulReserved: ULONG,
ulAspectRatioValidMask1: ULONG,
ulAspectRatioData1: ULONG,
ulAspectRatioValidMask2: ULONG,
ulAspectRatioData2: ULONG,
ulAspectRatioValidMask3: ULONG,
ulAspectRatioData3: ULONG,
ulReserved2: [ULONG; 4],
ulReserved3: [ULONG; 4],
}}
STRUCT!{#[repr(packed)] struct OPM_OUTPUT_ID_DATA {
rnRandomNumber: OPM_RANDOM_NUMBER,
ulStatusFlags: ULONG,
OutputId: UINT64,
}}
STRUCT!{#[repr(packed)] struct OPM_CONFIGURE_PARAMETERS {
omac: OPM_OMAC,
guidSetting: GUID,
ulSequenceNumber: ULONG,
cbParametersSize: ULONG,
abParameters: [BYTE; 4056],
}}
STRUCT!{#[repr(packed)] struct OPM_SET_PROTECTION_LEVEL_PARAMETERS {
ulProtectionType: ULONG,
ulProtectionLevel: ULONG,
Reserved: ULONG,
Reserved2: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_SET_ACP_AND_CGMSA_SIGNALING_PARAMETERS {
ulNewTVProtectionStandard: ULONG,
ulAspectRatioChangeMask1: ULONG,
ulAspectRatioData1: ULONG,
ulAspectRatioChangeMask2: ULONG,
ulAspectRatioData2: ULONG,
ulAspectRatioChangeMask3: ULONG,
ulAspectRatioData3: ULONG,
ulReserved: [ULONG; 4],
ulReserved2: [ULONG; 4],
ulReserved3: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_SET_HDCP_SRM_PARAMETERS {
ulSRMVersion: ULONG,
}}
STRUCT!{#[repr(packed)] struct OPM_GET_CODEC_INFO_PARAMETERS {
cbVerifier: DWORD,
Verifier: [BYTE; 4052],
}}
STRUCT!{#[repr(packed)] struct OPM_GET_CODEC_INFO_INFORMATION {
rnRandomNumber: OPM_RANDOM_NUMBER,
Merit: DWORD,
}}
DEFINE_GUID!{IID_IOPMVideoOutput,
0x0a15159d, 0x41c7, 0x4456, 0x93, 0xe1, 0x28, 0x4c, 0xd6, 0x1d, 0x4e, 0x8d}
RIDL!{#[uuid(0x0a15159d, 0x41c7, 0x4456, 0x93, 0xe1, 0x28, 0x4c, 0xd6, 0x1d, 0x4e, 0x8d)]
interface IOPMVideoOutput(IOPMVideoOutputVtbl): IUnknown(IUnknownVtbl) {
fn StartInitialization(
prnRandomNumber: *mut OPM_RANDOM_NUMBER,
ppbCertificate: *mut *mut BYTE,
pulCertificateLength: *mut ULONG,
) -> HRESULT,
fn FinishInitialization(
pParameters: *const OPM_ENCRYPTED_INITIALIZATION_PARAMETERS,
) -> HRESULT,
fn GetInformation(
pParameters: *const OPM_GET_INFO_PARAMETERS,
pRequestedInformation: *mut OPM_REQUESTED_INFORMATION,
) -> HRESULT,
fn COPPCompatibleGetInformation(
pParameters: *const OPM_COPP_COMPATIBLE_GET_INFO_PARAMETERS,
pRequestedInformation: *mut OPM_REQUESTED_INFORMATION,
) -> HRESULT,
fn Configure(
pParameters: *const OPM_CONFIGURE_PARAMETERS,
ulAdditionalParametersSize: ULONG,
pbAdditionalParameters: *const BYTE,
) -> HRESULT,
}}
#[inline]
pub fn GetBusType(ulBusTypeAndImplementation: ULONG) -> ULONG {
ulBusTypeAndImplementation & OPM_BUS_TYPE_MASK
}
#[inline]
pub fn | GetBusImplementation | identifier_name |
|
opmapi.rs | 8b5ef5d1, 0xc30d, 0x44ff, 0x84, 0xa5, 0xea, 0x71, 0xdc, 0xe7, 0x8f, 0x13}
DEFINE_GUID!{OPM_SET_PROTECTION_LEVEL_ACCORDING_TO_CSS_DVD,
0x39ce333e, 0x4cc0, 0x44ae, 0xbf, 0xcc, 0xda, 0x50, 0xb5, 0xf8, 0x2e, 0x72}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0001 {
OPM_OMAC_SIZE = 16,
OPM_128_BIT_RANDOM_NUMBER_SIZE = 16,
OPM_ENCRYPTED_INITIALIZATION_PARAMETERS_SIZE = 256,
OPM_CONFIGURE_SETTING_DATA_SIZE = 4056,
OPM_GET_INFORMATION_PARAMETERS_SIZE = 4056,
OPM_REQUESTED_INFORMATION_SIZE = 4076,
OPM_HDCP_KEY_SELECTION_VECTOR_SIZE = 5,
OPM_PROTECTION_TYPE_SIZE = 4,
OPM_BUS_TYPE_MASK = 0xffff,
OPM_BUS_IMPLEMENTATION_MODIFIER_MASK = 0x7fff,
}}
ENUM!{enum OPM_VIDEO_OUTPUT_SEMANTICS {
OPM_VOS_COPP_SEMANTICS = 0,
OPM_VOS_OPM_SEMANTICS = 1,
OPM_VOS_OPM_INDIRECT_DISPLAY = 2,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0002 {
OPM_HDCP_FLAG_NONE = 0,
OPM_HDCP_FLAG_REPEATER = 0x1,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0003 {
OPM_STATUS_NORMAL = 0,
OPM_STATUS_LINK_LOST = 0x1,
OPM_STATUS_RENEGOTIATION_REQUIRED = 0x2,
OPM_STATUS_TAMPERING_DETECTED = 0x4,
OPM_STATUS_REVOKED_HDCP_DEVICE_ATTACHED = 0x8,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0004 {
OPM_CONNECTOR_TYPE_OTHER = -1i32 as u32,
OPM_CONNECTOR_TYPE_VGA = 0,
OPM_CONNECTOR_TYPE_SVIDEO = 1,
OPM_CONNECTOR_TYPE_COMPOSITE_VIDEO = 2,
OPM_CONNECTOR_TYPE_COMPONENT_VIDEO = 3,
OPM_CONNECTOR_TYPE_DVI = 4,
OPM_CONNECTOR_TYPE_HDMI = 5,
OPM_CONNECTOR_TYPE_LVDS = 6,
OPM_CONNECTOR_TYPE_D_JPN = 8,
OPM_CONNECTOR_TYPE_SDI = 9,
OPM_CONNECTOR_TYPE_DISPLAYPORT_EXTERNAL = 10,
OPM_CONNECTOR_TYPE_DISPLAYPORT_EMBEDDED = 11,
OPM_CONNECTOR_TYPE_UDI_EXTERNAL = 12,
OPM_CONNECTOR_TYPE_UDI_EMBEDDED = 13,
OPM_CONNECTOR_TYPE_RESERVED = 14,
OPM_CONNECTOR_TYPE_MIRACAST = 15,
OPM_CONNECTOR_TYPE_TRANSPORT_AGNOSTIC_DIGITAL_MODE_A = 16,
OPM_CONNECTOR_TYPE_TRANSPORT_AGNOSTIC_DIGITAL_MODE_B = 17,
OPM_COPP_COMPATIBLE_CONNECTOR_TYPE_INTERNAL = 0x80000000,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0005 {
OPM_DVI_CHARACTERISTIC_1_0 = 1,
OPM_DVI_CHARACTERISTIC_1_1_OR_ABOVE = 2,
}}
ENUM!{enum OPM_OUTPUT_HARDWARE_PROTECTION {
OPM_OUTPUT_HARDWARE_PROTECTION_NOT_SUPPORTED = 0,
OPM_OUTPUT_HARDWARE_PROTECTION_SUPPORTED = 0x1,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0006 {
OPM_BUS_TYPE_OTHER = 0,
OPM_BUS_TYPE_PCI = 0x1,
OPM_BUS_TYPE_PCIX = 0x2,
OPM_BUS_TYPE_PCIEXPRESS = 0x3,
OPM_BUS_TYPE_AGP = 0x4,
OPM_BUS_IMPLEMENTATION_MODIFIER_INSIDE_OF_CHIPSET = 0x10000,
OPM_BUS_IMPLEMENTATION_MODIFIER_TRACKS_ON_MOTHER_BOARD_TO_CHIP = 0x20000,
OPM_BUS_IMPLEMENTATION_MODIFIER_TRACKS_ON_MOTHER_BOARD_TO_SOCKET = 0x30000,
OPM_BUS_IMPLEMENTATION_MODIFIER_DAUGHTER_BOARD_CONNECTOR = 0x40000,
OPM_BUS_IMPLEMENTATION_MODIFIER_DAUGHTER_BOARD_CONNECTOR_INSIDE_OF_NUAE = 0x50000,
OPM_BUS_IMPLEMENTATION_MODIFIER_NON_STANDARD = 0x80000000,
OPM_COPP_COMPATIBLE_BUS_TYPE_INTEGRATED = 0x80000000,
}}
ENUM!{enum OPM_DPCP_PROTECTION_LEVEL {
OPM_DPCP_OFF = 0,
OPM_DPCP_ON = 1,
OPM_DPCP_FORCE_ULONG = 0x7fffffff,
}}
ENUM!{enum OPM_HDCP_PROTECTION_LEVEL {
OPM_HDCP_OFF = 0,
OPM_HDCP_ON = 1,
OPM_HDCP_FORCE_ULONG = 0x7fffffff,
}}
ENUM!{enum OPM_TYPE_ENFORCEMENT_HDCP_PROTECTION_LEVEL {
OPM_TYPE_ENFORCEMENT_HDCP_OFF = OPM_HDCP_OFF,
OPM_TYPE_ENFORCEMENT_HDCP_ON_WITH_NO_TYPE_RESTRICTION = OPM_HDCP_ON,
OPM_TYPE_ENFORCEMENT_HDCP_ON_WITH_TYPE1_RESTRICTION = OPM_HDCP_ON + 1,
OPM_TYPE_ENFORCEMENT_HDCP_FORCE_ULONG = 0x7fffffff,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0007 {
OPM_CGMSA_OFF = 0,
OPM_CGMSA_COPY_FREELY = 0x1,
OPM_CGMSA_COPY_NO_MORE = 0x2,
OPM_CGMSA_COPY_ONE_GENERATION = 0x3,
OPM_CGMSA_COPY_NEVER = 0x4,
OPM_CGMSA_REDISTRIBUTION_CONTROL_REQUIRED = 0x8,
}}
ENUM!{enum OPM_ACP_PROTECTION_LEVEL {
OPM_ACP_OFF = 0,
OPM_ACP_LEVEL_ONE = 1,
OPM_ACP_LEVEL_TWO = 2,
OPM_ACP_LEVEL_THREE = 3,
OPM_ACP_FORCE_ULONG = 0x7fffffff,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0008 {
OPM_PROTECTION_TYPE_OTHER = 0x80000000,
OPM_PROTECTION_TYPE_NONE = 0,
OPM_PROTECTION_TYPE_COPP_COMPATIBLE_HDCP = 0x1,
OPM_PROTECTION_TYPE_ACP = 0x2,
OPM_PROTECTION_TYPE_CGMSA = 0x4,
OPM_PROTECTION_TYPE_HDCP = 0x8,
OPM_PROTECTION_TYPE_DPCP = 0x10,
OPM_PROTECTION_TYPE_TYPE_ENFORCEMENT_HDCP = 0x20,
}}
ENUM!{enum __MIDL___MIDL_itf_opmapi_0000_0000_0009 {
OPM_PROTECTION_STANDARD_OTHER = 0x80000000,
OPM_PROTECTION_STANDARD_NONE = 0,
OPM_PROTECTION_STANDARD_IEC61880_525I = 0x1,
OPM_PROTECTION_STANDARD_IEC61880_2_525I = 0x2,
OPM_PROTECTION_STANDARD_IEC62375_625P = 0x4,
OPM_PROTECTION_STANDARD_EIA608B_525 = 0x8,
OPM_PROTECTION_STANDARD_EN300294_625I = 0x10, | OPM_PROTECTION_STANDARD_CEA805A_TYPEA_525P = 0x20,
OPM_PROTECTION_STANDARD_CEA805A_TYPEA_750P = 0x40, | random_line_split |
|
oauth.rs | state: String,
}
impl OAuthClient {
pub fn new() -> OAuthClient {
build_oauth_client()
}
pub fn get_access_token(&self) -> String {
if let Some(token) = self.oauth_token.as_ref() {
return token.access_token.clone();
}
"".to_string()
}
}
fn build_oauth_client() -> OAuthClient {
let client_state = generate_random_string(10);
let client_id = "7tMofTv8Ip3-Ig".to_string();
let authorization_link = format!( "https://www.reddit.com/api/v1/authorize?client_id={}&response_type=code&state={}&redirect_uri=http%3A%2F%2F127.0.0.1:8000&duration=permanent&scope=identity",client_id,client_state);
OAuthClient {
client_id,
client_state,
oauth_token: None,
authorization_link,
auth_state: OAuthState::IDLE,
error_state: "Initialized".to_string(),
code: "".to_string(),
}
}
pub fn curl_site(subreddit: &str, amount: usize, before: &str, after: &str) -> String {
let mut limit = amount;
if limit == 0 {
limit = 1;
}
let user_agent_header = "User-Agent: RVP/0.1 by Gitrog_Frog";
let mut easy = Easy::new();
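// Build the public JSON listing URL; limit, after and before drive paging.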
let reddit_base_url = format!(
"https://www.reddit.com{}/.json?limit={}&after={}&before={}",
subreddit, limit, after, before
);
easy.url(&reddit_base_url).unwrap();
easy.useragent(user_agent_header).unwrap();
let mut return_data: Vec<String> = Vec::new();
let mut html: String = String::new();
{
let mut transfer = easy.transfer();
transfer
.write_function(|data| {
html = String::from_utf8(Vec::from(data)).unwrap();
return_data.push(html.clone());
Ok(data.len())
})
.unwrap();
transfer.perform().unwrap();
};
return_data.join("")
}
pub fn request_site(token: String, url: String) {
println!("token:{} url:{}", token, url);
let mut list = List::new();
let data_header = format!("Authorization: bearer {}", token);
println!("data header: {}", data_header);
list.append(&data_header.to_string()).unwrap();
let user_agent_header = "User-Agent: RVP/0.1 by Gitrog_Frog";
let mut easy = Easy::new();
easy.url(&url).unwrap();
easy.http_headers(list).unwrap();
easy.useragent(user_agent_header).unwrap();
let mut html: String = String::new();
{
let mut transfer = easy.transfer();
transfer
.write_function(|data| {
html = String::from_utf8(Vec::from(data)).unwrap();
Ok(data.len())
})
.unwrap();
transfer.perform().unwrap();
};
}
pub fn authorize_user(wait_time: usize) -> OAuthClient {
println!("Logging in...");
let mut oauth_client = build_oauth_client();
if does_access_token_exist() {
println!("Client already authorized");
use std::fs;
let access_token_serialized: String = fs::read_to_string("./access_token.rvp").unwrap();
let access_token: OAuthToken = serde_json::from_str(&access_token_serialized).unwrap();
oauth_client = OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: Some(access_token),
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::AUTHORIZED,
error_state: "".to_string(),
code: "".to_string(),
}
} else {
oauth_client = authorize_client(oauth_client, wait_time);
println!("Done!");
}
oauth_client
}
fn authorize_client(oauth_client: OAuthClient, wait_time: usize) -> OAuthClient {
if !webbrowser::open(&oauth_client.authorization_link).is_ok() {
println!("Could not open web browser");
}
let final_response =
Response::from_string("Authentication complete. You may close this window.");
let (tx_authentication, rx) = mpsc::channel();
let tx_countdown = mpsc::Sender::clone(&tx_authentication);
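// Listener thread: a tiny HTTP server catches the OAuth redirect on
// 127.0.0.1:8000 and forwards the returned code/state over the channel.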
thread::spawn(move || {
let server = Server::http("127.0.0.1:8000").unwrap();
for request in server.incoming_requests() {
let request_url = request.url().to_string().clone();
let parameter_string: Vec<&str> = request_url.split("/?").collect();
if parameter_string.len() <= 1 {
continue;
};
let parameters: Vec<&str> = parameter_string[1].split('&').collect();
// Expect state and code parameters
if parameters.len() != 2 {
let auth_box = AuthBox {
has_error: true,
error_msg: "Unexpected response from reddit api".to_string(),
code: "".to_string(),
state: "".to_string(),
};
let _ = tx_authentication.send(auth_box);
} else {
let state: Vec<&str> = parameters[0].split('=').collect();
let code: Vec<&str> = parameters[1].split('=').collect();
let auth_box = AuthBox {
has_error: false,
error_msg: "".to_string(),
code: code[1].to_string(),
state: state[1].to_string(),
};
tx_authentication.send(auth_box).unwrap();
}
}
drop(server);
});
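// Countdown thread: aborts authentication if the user does not authorize
// within wait_time seconds.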
thread::spawn(move || {
for _passed_seconds in 0..wait_time {
thread::sleep(Duration::from_secs(1));
}
let auth_box = AuthBox {
has_error: true,
error_msg: "Reached timeout. User did not authorize usage of RPV in time".to_string(),
code: "".to_string(),
state: "".to_string(),
};
println!("Timeout during authentication");
tx_countdown.send(auth_box).unwrap();
});
//print!("{}[2J", 27 as char);
let auth_box = rx.recv().unwrap();
println!("Now waiting for access token.");
let data_field_string = format!(
"grant_type=authorization_code&code={}&redirect_uri=http://127.0.0.1:8000",
auth_box.code
);
println!("Datafield: {}", data_field_string);
let mut data_field = data_field_string.as_bytes();
let mut list = List::new();
let data_header = "Authorization: Basic N3RNb2ZUdjhJcDMtSWc6";
list.append(data_header).unwrap();
let user_agent_header = "User-Agent: RVP/0.1 by Gitrog_Frog";
let mut easy = Easy::new();
easy.url("https://www.reddit.com/api/v1/access_token")
.unwrap();
easy.http_headers(list).unwrap();
easy.post(true).unwrap();
easy.useragent(user_agent_header).unwrap();
easy.post_field_size(data_field.len() as u64).unwrap();
let mut html: String = String::new();
{
let mut transfer = easy.transfer();
transfer
.read_function(|buf| Ok(data_field.read(buf).unwrap_or(0)))
.unwrap();
transfer
.write_function(|data| {
html = String::from_utf8(Vec::from(data)).unwrap();
Ok(data.len())
})
.unwrap();
transfer.perform().unwrap();
};
let oauth_token: OAuthToken = serde_json::from_str(&html).unwrap();
// Handle authentication response
if !auth_box.has_error {
if auth_box.state == oauth_client.client_state {
save_token(&oauth_token);
OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: Some(oauth_token),
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::AUTHORIZED,
error_state: "".to_string(),
code: auth_box.code,
}
} else {
OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: oauth_client.oauth_token,
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::ERROR,
error_state: "Return code is not the same. There is some tampering happening."
.to_string(),
code: auth_box.code,
}
}
} else {
println!("Error: {}", auth_box.error_msg);
OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: oauth_client.oauth_token,
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::ERROR,
error_state: auth_box.error_msg,
code: oauth_client.code,
}
}
}
fn does_access_token_exist() -> bool {
use std::path::Path;
Path::new("./access_token.rvp").exists()
}
fn save_token(token: &OAuthToken) {
let serialized_token = serde_json::to_string(&token).unwrap();
use std::fs;
use std::fs::File;
use std::io::prelude::*;
if does_access_token_exist() | {
fs::remove_file("access_token.rvp").expect("Could not remove file");
} | conditional_block |
|
oauth.rs | _in: usize,
scope: String,
refresh_token: String,
}
#[derive(Debug)]
pub struct | {
client_id: String,
client_state: String,
authorization_link: String,
auth_state: OAuthState,
oauth_token: Option<OAuthToken>,
pub error_state: String,
pub code: String,
}
struct AuthBox {
has_error: bool,
error_msg: String,
code: String,
state: String,
}
impl OAuthClient {
pub fn new() -> OAuthClient {
build_oauth_client()
}
pub fn get_access_token(&self) -> String {
if let Some(token) = self.oauth_token.as_ref() {
return token.access_token.clone();
}
"".to_string()
}
}
fn build_oauth_client() -> OAuthClient {
let client_state = generate_random_string(10);
let client_id = "7tMofTv8Ip3-Ig".to_string();
let authorization_link = format!( "https://www.reddit.com/api/v1/authorize?client_id={}&response_type=code&state={}&redirect_uri=http%3A%2F%2F127.0.0.1:8000&duration=permanent&scope=identity",client_id,client_state);
OAuthClient {
client_id,
client_state,
oauth_token: None,
authorization_link,
auth_state: OAuthState::IDLE,
error_state: "Initialized".to_string(),
code: "".to_string(),
}
}
pub fn curl_site(subreddit: &str, amount: usize, before: &str, after: &str) -> String {
let mut limit = amount;
if limit == 0 {
limit = 1;
}
let user_agent_header = "User-Agent: RVP/0.1 by Gitrog_Frog";
let mut easy = Easy::new();
let reddit_base_url = format!(
"https://www.reddit.com{}/.json?limit={}&after={}&before={}",
subreddit, limit, after, before
);
easy.url(&reddit_base_url).unwrap();
easy.useragent(user_agent_header).unwrap();
let mut return_data: Vec<String> = Vec::new();
let mut html: String = String::new();
{
let mut transfer = easy.transfer();
transfer
.write_function(|data| {
html = String::from_utf8(Vec::from(data)).unwrap();
return_data.push(html.clone());
Ok(data.len())
})
.unwrap();
transfer.perform().unwrap();
};
return_data.join("")
}
pub fn request_site(token: String, url: String) {
println!("token:{} url:{}", token, url);
let mut list = List::new();
let data_header = format!("Authorization: bearer {}", token);
println!("data header: {}", data_header);
list.append(&data_header.to_string()).unwrap();
let user_agent_header = "User-Agent: RVP/0.1 by Gitrog_Frog";
let mut easy = Easy::new();
easy.url(&url).unwrap();
easy.http_headers(list).unwrap();
easy.useragent(user_agent_header).unwrap();
let mut html: String = String::new();
{
let mut transfer = easy.transfer();
transfer
.write_function(|data| {
html = String::from_utf8(Vec::from(data)).unwrap();
Ok(data.len())
})
.unwrap();
transfer.perform().unwrap();
};
}
pub fn authorize_user(wait_time: usize) -> OAuthClient {
println!("Logging in...");
let mut oauth_client = build_oauth_client();
if does_access_token_exist() {
println!("Client already authorized");
use std::fs;
let access_token_serialized: String = fs::read_to_string("./access_token.rvp").unwrap();
let access_token: OAuthToken = serde_json::from_str(&access_token_serialized).unwrap();
oauth_client = OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: Some(access_token),
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::AUTHORIZED,
error_state: "".to_string(),
code: "".to_string(),
}
} else {
oauth_client = authorize_client(oauth_client, wait_time);
println!("Done!");
}
oauth_client
}
fn authorize_client(oauth_client: OAuthClient, wait_time: usize) -> OAuthClient {
if !webbrowser::open(&oauth_client.authorization_link).is_ok() {
println!("Could not open web browser");
}
let final_response =
Response::from_string("Authentication complete. You may close this window.");
let (tx_authentication, rx) = mpsc::channel();
let tx_countdown = mpsc::Sender::clone(&tx_authentication);
thread::spawn(move || {
let server = Server::http("127.0.0.1:8000").unwrap();
for request in server.incoming_requests() {
let request_url = request.url().to_string().clone();
let parameter_string: Vec<&str> = request_url.split("/?").collect();
if parameter_string.len() <= 1 {
continue;
};
let parameters: Vec<&str> = parameter_string[1].split('&').collect();
// Expect state and code parameters
if parameters.len() != 2 {
let auth_box = AuthBox {
has_error: true,
error_msg: "Unexpected response from reddit api".to_string(),
code: "".to_string(),
state: "".to_string(),
};
let _ = tx_authentication.send(auth_box);
} else {
let state: Vec<&str> = parameters[0].split('=').collect();
let code: Vec<&str> = parameters[1].split('=').collect();
let auth_box = AuthBox {
has_error: false,
error_msg: "".to_string(),
code: code[1].to_string(),
state: state[1].to_string(),
};
tx_authentication.send(auth_box).unwrap();
}
}
drop(server);
});
thread::spawn(move || {
for _passed_seconds in 0..wait_time {
thread::sleep(Duration::from_secs(1));
}
let auth_box = AuthBox {
has_error: true,
error_msg: "Reached timeout. User did not authorize usage of RPV in time".to_string(),
code: "".to_string(),
state: "".to_string(),
};
println!("Timeout during authentication");
tx_countdown.send(auth_box).unwrap();
});
//print!("{}[2J", 27 as char);
let auth_box = rx.recv().unwrap();
println!("Now waiting for access token.");
let data_field_string = format!(
"grant_type=authorization_code&code={}&redirect_uri=http://127.0.0.1:8000",
auth_box.code
);
println!("Datafield: {}", data_field_string);
let mut data_field = data_field_string.as_bytes();
let mut list = List::new();
let data_header = "Authorization: Basic N3RNb2ZUdjhJcDMtSWc6";
list.append(data_header).unwrap();
let user_agent_header = "User-Agent: RVP/0.1 by Gitrog_Frog";
let mut easy = Easy::new();
easy.url("https://www.reddit.com/api/v1/access_token")
.unwrap();
easy.http_headers(list).unwrap();
easy.post(true).unwrap();
easy.useragent(user_agent_header).unwrap();
easy.post_field_size(data_field.len() as u64).unwrap();
let mut html: String = String::new();
{
let mut transfer = easy.transfer();
transfer
.read_function(|buf| Ok(data_field.read(buf).unwrap_or(0)))
.unwrap();
transfer
.write_function(|data| {
html = String::from_utf8(Vec::from(data)).unwrap();
Ok(data.len())
})
.unwrap();
transfer.perform().unwrap();
};
let oauth_token: OAuthToken = serde_json::from_str(&html).unwrap();
// Handle authentication response
if !auth_box.has_error {
if auth_box.state == oauth_client.client_state {
save_token(&oauth_token);
OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: Some(oauth_token),
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::AUTHORIZED,
error_state: "".to_string(),
code: auth_box.code,
}
} else {
OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: oauth_client.oauth_token,
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::ERROR,
error_state: "Return code is not the same. There is some tampering happening."
.to_string(),
code: auth_box.code,
}
}
} else {
println!("Error: {}", auth_box.error_msg);
OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: oauth_client.oauth_token,
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::ERROR,
error_state: auth_box.error_msg,
code: oauth_client.code,
}
}
}
fn does_access_token_exist() -> bool {
use | OAuthClient | identifier_name |
oauth.rs | expires_in: usize,
scope: String,
refresh_token: String,
}
#[derive(Debug)]
pub struct OAuthClient {
client_id: String,
client_state: String,
authorization_link: String,
auth_state: OAuthState,
oauth_token: Option<OAuthToken>,
pub error_state: String,
pub code: String,
}
struct AuthBox {
has_error: bool,
error_msg: String,
code: String,
state: String,
}
impl OAuthClient {
pub fn new() -> OAuthClient {
build_oauth_client()
}
pub fn get_access_token(&self) -> String {
if let Some(token) = self.oauth_token.as_ref() {
return token.access_token.clone();
}
"".to_string()
}
}
fn build_oauth_client() -> OAuthClient {
let client_state = generate_random_string(10);
let client_id = "7tMofTv8Ip3-Ig".to_string();
let authorization_link = format!( "https://www.reddit.com/api/v1/authorize?client_id={}&response_type=code&state={}&redirect_uri=http%3A%2F%2F127.0.0.1:8000&duration=permanent&scope=identity",client_id,client_state);
OAuthClient {
client_id,
client_state,
oauth_token: None,
authorization_link,
auth_state: OAuthState::IDLE,
error_state: "Initialized".to_string(),
code: "".to_string(),
}
}
pub fn curl_site(subreddit: &str, amount: usize, before: &str, after: &str) -> String {
let mut limit = amount;
if limit == 0 {
limit = 1;
}
let user_agent_header = "User-Agent: RVP/0.1 by Gitrog_Frog";
let mut easy = Easy::new();
let reddit_base_url = format!(
"https://www.reddit.com{}/.json?limit={}&after={}&before={}",
subreddit, limit, after, before
);
easy.url(&reddit_base_url).unwrap();
easy.useragent(user_agent_header).unwrap();
let mut return_data: Vec<String> = Vec::new();
let mut html: String = String::new();
{
let mut transfer = easy.transfer();
transfer
.write_function(|data| {
html = String::from_utf8(Vec::from(data)).unwrap();
return_data.push(html.clone());
Ok(data.len())
})
.unwrap();
transfer.perform().unwrap();
};
return_data.join("")
}
pub fn request_site(token: String, url: String) {
println!("token:{} url:{}", token, url);
let mut list = List::new();
let data_header = format!("Authorization: bearer {}", token);
println!("data header: {}", data_header);
list.append(&data_header.to_string()).unwrap();
let user_agent_header = "User-Agent: RVP/0.1 by Gitrog_Frog";
let mut easy = Easy::new();
easy.url(&url).unwrap();
easy.http_headers(list).unwrap();
easy.useragent(user_agent_header).unwrap();
let mut html: String = String::new();
{
let mut transfer = easy.transfer();
transfer
.write_function(|data| {
html = String::from_utf8(Vec::from(data)).unwrap();
Ok(data.len())
})
.unwrap();
transfer.perform().unwrap();
};
}
pub fn authorize_user(wait_time: usize) -> OAuthClient {
println!("Logging in...");
let mut oauth_client = build_oauth_client();
if does_access_token_exist() {
println!("Client already authorized");
use std::fs;
let access_token_serialized: String = fs::read_to_string("./access_token.rvp").unwrap();
let access_token: OAuthToken = serde_json::from_str(&access_token_serialized).unwrap();
oauth_client = OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: Some(access_token),
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::AUTHORIZED,
error_state: "".to_string(),
code: "".to_string(),
}
} else {
oauth_client = authorize_client(oauth_client, wait_time);
println!("Done!");
}
oauth_client
}
fn authorize_client(oauth_client: OAuthClient, wait_time: usize) -> OAuthClient {
if !webbrowser::open(&oauth_client.authorization_link).is_ok() {
println!("Could not open web browser");
}
let final_response =
Response::from_string("Authentication complete. You may close this window.");
let (tx_authentication, rx) = mpsc::channel();
let tx_countdown = mpsc::Sender::clone(&tx_authentication);
thread::spawn(move || {
let server = Server::http("127.0.0.1:8000").unwrap();
for request in server.incoming_requests() {
let request_url = request.url().to_string().clone();
let parameter_string: Vec<&str> = request_url.split("/?").collect();
if parameter_string.len() <= 1 {
continue;
};
let parameters: Vec<&str> = parameter_string[1].split('&').collect();
// Expect state and code parameters
if parameters.len() != 2 {
let auth_box = AuthBox {
has_error: true,
error_msg: "Unexpected response from reddit api".to_string(),
code: "".to_string(),
state: "".to_string(),
};
tx_authentication.send(auth_box);
} else {
let state: Vec<&str> = parameters[0].split('=').collect();
let code: Vec<&str> = parameters[1].split('=').collect();
let auth_box = AuthBox {
has_error: false,
error_msg: "".to_string(),
code: code[1].to_string(),
state: state[1].to_string(),
};
tx_authentication.send(auth_box).unwrap();
}
}
drop(server);
});
thread::spawn(move || { | }
let auth_box = AuthBox {
has_error: true,
error_msg: "Reached timeout. User did not authorize usage of RPV in time".to_string(),
code: "".to_string(),
state: "".to_string(),
};
println!("Timeout during authentication");
tx_countdown.send(auth_box).unwrap();
});
//print!("{}[2J", 27 as char);
let auth_box = rx.recv().unwrap();
println!("Now waiting for access token.");
let data_field_string = format!(
"grant_type=authorization_code&code={}&redirect_uri=http://127.0.0.1:8000",
auth_box.code
);
println!("Datafield: {}", data_field_string);
let mut data_field = data_field_string.as_bytes();
let mut list = List::new();
let data_header = "Authorization: Basic N3RNb2ZUdjhJcDMtSWc6";
list.append(data_header).unwrap();
let user_agent_header = "User-Agent: RVP/0.1 by Gitrog_Frog";
let mut easy = Easy::new();
easy.url("https://www.reddit.com/api/v1/access_token")
.unwrap();
easy.http_headers(list).unwrap();
easy.post(true).unwrap();
easy.useragent(user_agent_header).unwrap();
easy.post_field_size(data_field.len() as u64).unwrap();
let mut html: String = String::new();
{
let mut transfer = easy.transfer();
transfer
.read_function(|buf| Ok(data_field.read(buf).unwrap_or(0)))
.unwrap();
transfer
.write_function(|data| {
html = String::from_utf8(Vec::from(data)).unwrap();
Ok(data.len())
})
.unwrap();
transfer.perform().unwrap();
};
let oauth_token: OAuthToken = serde_json::from_str(&html).unwrap();
// Handle authentication response
if !auth_box.has_error {
if auth_box.state == oauth_client.client_state {
save_token(&oauth_token);
OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: Some(oauth_token),
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::AUTHORIZED,
error_state: "".to_string(),
code: auth_box.code,
}
} else {
OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: oauth_client.oauth_token,
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::ERROR,
error_state: "Return code is not the same. There is some tampering happening."
.to_string(),
code: auth_box.code,
}
}
} else {
println!("Error: {}", auth_box.error_msg);
OAuthClient {
client_id: oauth_client.client_id,
client_state: oauth_client.client_state,
oauth_token: oauth_client.oauth_token,
authorization_link: oauth_client.authorization_link,
auth_state: OAuthState::ERROR,
error_state: auth_box.error_msg,
code: oauth_client.code,
}
}
}
fn does_access_token_exist() -> bool {
use | for passed_seconds in 0..wait_time {
thread::sleep(Duration::from_secs(1)); | random_line_split |
make_index_file.py | (self):
return Path('run{:06d}-{:06d}'.format(self.obs_group[0], self.obs_group[1]))
@property
def _obs_folder(self):
return Path('run{:06d}'.format(self.obs_id))
def folder(self, step=None):
"""Create folder for a given step.
"""
if step is None:
return self._obs_group_folder / self._obs_folder
else:
return Path(step) / self._obs_group_folder / self._obs_folder
def hap_filename(self, filetype):
"""Name of FITS file generated by HAP"""
if filetype == 'events':
return self.folder('events') / 'run_{:07d}_{}_eventlist.fits'.format(self.obs_id, self.hap_config)
# return self.folder('events') / 'events_{:06d}.fits.gz'.format(self.obs_id)
elif filetype == 'aeff':
return self.folder('irfs') / 'aeff_{:06d}.fits.gz'.format(self.obs_id)
elif filetype == 'edisp':
return self.folder('irfs') / 'edisp_{:06d}.fits.gz'.format(self.obs_id)
elif filetype == 'psf_3gauss':
return self.folder('irfs') / 'psf_3gauss_{:06d}.fits.gz'.format(self.obs_id)
else:
raise ValueError('Invalid {} {}'.format(filetype))
def out_filename(self, filetype, format='old', dir=Location.OUT_DIR):
"""Name of FITS file in out folder"""
filename = self.filename(filetype=filetype, format=format)
# return Path(dir) / filename
return filename
def filename(self, filetype, format='old'):
if format == 'old':
TAGS = dict(
events='events',
aeff='aeff_2d',
edisp='edisp_2d',
psf_3gauss='psf_3gauss',
psf_king='psf_king',
psf_table='psf_table',
background='bkg_offruns',
)
elif format == 'new':
TAGS = dict(
events='events',
aeff='aeff',
edisp='edisp',
psf_3gauss='psf_3gauss',
psf_king='psf_king',
psf_table='psf_table',
background='background',
)
tag = TAGS[filetype]
if (filetype == "events"):
filename = '{}_{:06d}.fits.gz'.format(tag, self.obs_id)
else:
if(self.obs_id>99999):
filename = '{}_0{:06d}.fits'.format(tag, self.obs_id)
else:
filename = '{}_{:06d}.fits'.format(tag, self.obs_id)
#filename = '{}_{:06d}.fits'.format(tag, self.obs_id)
return self.folder() / filename
def mkdir(self, step):
"""Make directory (parts=True, exists_ok=True)"""
path = self.folder(step)
if not path.exists():
path.mkdir(parents=True)
return path
def check_out_files_exist(self):
"""Check if all out files exist"""
for filetype in self.filetypes:
filename = self.out_filename(filetype)
if not filename.is_file():
log.error('MISSING: {}'.format(filename))
return False
return True
class ListObservations:
def __init__(self, runlist_file, config):
self.observations = []
runlist = np.loadtxt(runlist_file, ndmin=2)
obs_ids = runlist[:, 0].astype(int)
telcodes = runlist[:, 1].astype(int)
for obs_id, telcode in zip(obs_ids, telcodes):
obs = Observation(obs_id, config, telcode)
self.observations.append(obs)
def summary_info_events(filename):
"""Extract FITS header info from EVENTS files to dict"""
# filename = self.out_filename('events')
print('Reading {}'.format(filename))
table = Table.read(str(filename), hdu='EVENTS')
data = dict()
# Copy over header info to the summary table
data['RA_PNT'] = np.float32(table.meta['RA_PNT'])
data['DEC_PNT'] = np.float32(table.meta['DEC_PNT'])
# data['GLON_PNT'] = np.float32(table.meta['GLON_PNT'])
# data['GLAT_PNT'] = np.float32(table.meta['GLAT_PNT'])
data['ALT_PNT'] = np.float32(table.meta['ALT_PNT'])
data['AZ_PNT'] = np.float32(table.meta['AZ_PNT'])
#data['ZEN_PNT'] = np.float32(90. - table.meta['ALT_PNT'])
data['ZEN_PNT'] = np.float32(90. - table['ALT'].mean())
data['ONTIME'] = np.float32(table.meta['ONTIME'])
data['LIVETIME'] = np.float32(table.meta['LIVETIME'])
data['DEADC'] = np.float32(table.meta['DEADC'])
MJDREFI = table.meta['MJDREFI']
MJDREFF = table.meta['MJDREFF']
MJDREF = MJDREFI + MJDREFF
TSTART_MET = table.meta['TSTART'] / 3600. / 24.
TSTOP_MET = table.meta['TSTOP'] / 3600. / 24.
start_time = Time(MJDREF + TSTART_MET, scale='tt', format='mjd')
stop_time = Time(MJDREF + TSTOP_MET, scale='tt', format='mjd')
data['TSTART'] = np.float32(start_time.utc.mjd)
data['TSTOP'] = np.float32(stop_time.utc.mjd)
data['TSTART_STR'] = str(start_time.utc.iso[:-4])
data['TSTOP_STR'] = str(stop_time.utc.iso[:-4])
data['N_TELS'] = table.meta['N_TELS']
data['TELLIST'] = table.meta['TELLIST']
try:
data['OBJECT'] = table.meta['OBJECT']
except KeyError:
data['OBJECT'] = ""
data['RA_OBJ'] = np.float32(table.meta['RA_OBJ'])
data['DEC_OBJ'] = np.float32(table.meta['DEC_OBJ'])
# data['OBS_MODE'] = table.meta['OBS_MODE']
try:
data['MUONEFF'] = np.float32(table.meta['MUONEFF'])
except KeyError:
data['MUONEFF'] = np.float32(-1)
# Calculate some summary statistics for important event columns
data['EVENT_COUNT'] = len(table)
data['EVENT_TIME_MIN'] = table['TIME'].min()
data['EVENT_TIME_MAX'] = table['TIME'].max()
data['EVENT_ENERGY_MEDIAN'] = np.float32(np.median(table['ENERGY']))
data['EVENT_RA_MEDIAN'] = np.float32(np.median(table['RA']))
data['EVENT_DEC_MEDIAN'] = np.float32(np.median(table['DEC']))
return data
def summary_info_aeff(filename):
"""Extract FITS header info from AEFF files to dict"""
# filename = self.out_filename('aeff')
print('Reading {}'.format(filename))
table = Table.read(str(filename), hdu='AEFF_2D')
data = dict()
# Copy over header info to the summary table
data['LO_THRES'] = table.meta['LO_THRES']
data['HI_THRES'] = table.meta['HI_THRES']
# Summary stats on IRF file content
data['EFFAREA_MAX'] = table['EFFAREA'].max()
data['EFFAREA_RECO_MAX'] = table['EFFAREA_RECO'].max()
return data
def obs_table(list_observations, indir, informat, outfile):
"""Create obs-index.fits.gz file.
"""
print('Creating observations summary table ...')
# We gather all infos in a list of dicts and write this
# as a FITS table at the end.
rows = []
for obs in list_observations.observations:
events_filename = Path(indir) / obs.filename('events', format=informat)
try:
table = Table.read(str(events_filename), hdu='EVENTS')
except Exception:
print "fits corrupted for file " + str(events_filename)
continue
if table.meta["OBS_ID"]!=obs.obs_id:
continue
data = dict()
data['OBS_ID'] = obs.obs_id
if events_filename.exists():
events_info = summary_info_events(events_filename)
data.update(events_info)
else:
print('File not found: {}'.format(events_filename))
aeff_filename = Path(indir) / obs.filename('aeff', format=informat)
if aeff_filename.exists():
aeff_info = summary_info_aeff(aeff_filename)
data.update(aeff_info)
# check that thresholds are meaningful in the effective area table
if ((aeff_info['HI_THRES'] <= a | _obs_group_folder | identifier_name |
|
make_index_file.py |
# Background modeling
BG_MODEL_DIR = OUT_DIR / 'background'
BG_MODEL_GROUPING = BG_MODEL_DIR / 'background_grouping.ecsv'
BG_MODEL_OFF_RUNS = BG_MODEL_DIR / 'background_runs.ecsv'
BG_MODEL_OFF_RUNS_GROUPED = BG_MODEL_DIR / 'background_runs_grouped.ecsv'
class Observation:
"""Helper functions to compute file and folder names.
"""
# filetypes = ['events', 'aeff', 'edisp', 'psf_3gauss']
filetypes = ['events']
def __init__(self, obs_id, hap_config=None, telpattern=None):
self.obs_id = obs_id
self.hap_config = hap_config
self.telpattern = telpattern
@property
def obs_group(self):
obs_id_min = self.obs_id - (self.obs_id % 200)
obs_id_max = obs_id_min + 199
return obs_id_min, obs_id_max
@property
def _obs_group_folder(self):
return Path('run{:06d}-{:06d}'.format(self.obs_group[0], self.obs_group[1]))
@property
def _obs_folder(self):
return Path('run{:06d}'.format(self.obs_id))
def folder(self, step=None):
"""Create folder for a given step.
"""
if step is None:
return self._obs_group_folder / self._obs_folder
else:
return Path(step) / self._obs_group_folder / self._obs_folder
def hap_filename(self, filetype):
"""Name of FITS file generated by HAP"""
if filetype == 'events':
return self.folder('events') / 'run_{:07d}_{}_eventlist.fits'.format(self.obs_id, self.hap_config)
# return self.folder('events') / 'events_{:06d}.fits.gz'.format(self.obs_id)
elif filetype == 'aeff':
return self.folder('irfs') / 'aeff_{:06d}.fits.gz'.format(self.obs_id)
elif filetype == 'edisp':
return self.folder('irfs') / 'edisp_{:06d}.fits.gz'.format(self.obs_id)
elif filetype == 'psf_3gauss':
return self.folder('irfs') / 'psf_3gauss_{:06d}.fits.gz'.format(self.obs_id)
else:
raise ValueError('Invalid {} {}'.format(filetype))
def out_filename(self, filetype, format='old', dir=Location.OUT_DIR):
"""Name of FITS file in out folder"""
filename = self.filename(filetype=filetype, format=format)
# return Path(dir) / filename
return filename
def filename(self, filetype, format='old'):
if format == 'old':
TAGS = dict(
events='events',
aeff='aeff_2d',
edisp='edisp_2d',
psf_3gauss='psf_3gauss',
psf_king='psf_king',
psf_table='psf_table',
background='bkg_offruns',
)
elif format == 'new':
TAGS = dict(
events='events',
aeff='aeff',
edisp='edisp',
psf_3gauss='psf_3gauss',
psf_king='psf_king',
psf_table='psf_table',
background='background',
)
tag = TAGS[filetype]
if (filetype == "events"):
filename = '{}_{:06d}.fits.gz'.format(tag, self.obs_id)
else:
if(self.obs_id>99999):
filename = '{}_0{:06d}.fits'.format(tag, self.obs_id)
else:
filename = '{}_{:06d}.fits'.format(tag, self.obs_id)
#filename = '{}_{:06d}.fits'.format(tag, self.obs_id)
return self.folder() / filename
def mkdir(self, step):
"""Make directory (parts=True, exists_ok=True)"""
path = self.folder(step)
if not path.exists():
path.mkdir(parents=True)
return path
def check_out_files_exist(self):
"""Check if all out files exist"""
for filetype in self.filetypes:
filename = self.out_filename(filetype)
if not filename.is_file():
log.error('MISSING: {}'.format(filename))
return False
return True
class ListObservations:
def __init__(self, runlist_file, config):
self.observations = []
runlist = np.loadtxt(runlist_file, ndmin=2)
obs_ids = runlist[:, 0].astype(int)
telcodes = runlist[:, 1].astype(int)
for obs_id, telcode in zip(obs_ids, telcodes):
obs = Observation(obs_id, config, telcode)
self.observations.append(obs)
def summary_info_events(filename):
"""Extract FITS header info from EVENTS files to dict"""
# filename = self.out_filename('events')
print('Reading {}'.format(filename))
table = Table.read(str(filename), hdu='EVENTS')
data = dict()
# Copy over header info to the summary table
data['RA_PNT'] = np.float32(table.meta['RA_PNT'])
data['DEC_PNT'] = np.float32(table.meta['DEC_PNT'])
# data['GLON_PNT'] = np.float32(table.meta['GLON_PNT'])
# data['GLAT_PNT'] = np.float32(table.meta['GLAT_PNT'])
data['ALT_PNT'] = np.float32(table.meta['ALT_PNT'])
data['AZ_PNT'] = np.float32(table.meta['AZ_PNT'])
#data['ZEN_PNT'] = np.float32(90. - table.meta['ALT_PNT'])
data['ZEN_PNT'] = np.float32(90. - table['ALT'].mean())
data['ONTIME'] = np.float32(table.meta['ONTIME'])
data['LIVETIME'] = np.float32(table.meta['LIVETIME'])
data['DEADC'] = np.float32(table.meta['DEADC'])
MJDREFI = table.meta['MJDREFI']
MJDREFF = table.meta['MJDREFF']
MJDREF = MJDREFI + MJDREFF
TSTART_MET = table.meta['TSTART'] / 3600. / 24.
TSTOP_MET = table.meta['TSTOP'] / 3600. / 24.
start_time = Time(MJDREF + TSTART_MET, scale='tt', format='mjd')
stop_time = Time(MJDREF + TSTOP_MET, scale='tt', format='mjd')
data['TSTART'] = np.float32(start_time.utc.mjd)
data['TSTOP'] = np.float32(stop_time.utc.mjd)
data['TSTART_STR'] = str(start_time.utc.iso[:-4])
data['TSTOP_STR'] = str(stop_time.utc.iso[:-4])
data['N_TELS'] = table.meta['N_TELS']
data['TELLIST'] = table.meta['TELLIST']
try:
data['OBJECT'] = table.meta['OBJECT']
except KeyError:
data['OBJECT'] = ""
data['RA_OBJ'] = np.float32(table.meta['RA_OBJ'])
data['DEC_OBJ'] = np.float32(table.meta['DEC_OBJ'])
# data['OBS_MODE'] = table.meta['OBS_MODE']
try:
data['MUONEFF'] = np.float32(table.meta['MUONEFF'])
except KeyError:
data['MUONEFF'] = np.float32(-1)
# Calculate some summary statistics for important event columns
data['EVENT_COUNT'] = len(table)
data['EVENT_TIME_MIN'] = table['TIME'].min()
data['EVENT_TIME_MAX'] = table['TIME'].max()
data['EVENT_ENERGY_MEDIAN'] = np.float32(np.median(table['ENERGY']))
data['EVENT_RA_MEDIAN'] = np.float32(np.median(table['RA']))
data['EVENT_DEC_MEDIAN'] = np.float32(np.median(table['DEC']))
return data
def summary_info_aeff(filename):
"""Extract FITS header info from AEFF files to dict"""
# filename = self.out_filename('aeff')
print('Reading {}'.format(filename))
table = Table.read(str(filename), hdu='AEFF_2D')
data = dict()
# Copy over header info to the summary table
data['LO_THRES'] = table.meta['LO_THRES']
data['HI_THRES'] = table.meta['HI_THRES']
# Summary stats on IRF file content
data['EFFAREA_MAX'] = table['EFFAREA'].max()
data['EFFAREA_RECO_MAX'] = table['EFFAREA_RECO'].max()
return data
def obs_table(list_observations, indir, informat, outfile):
"""Create obs-index.fits.gz file.
"""
print('Creating observations summary table ...')
# We gather all infos in a list of dicts and write this
| random_line_split |
||
make_index_file.py |
elif filetype == 'edisp':
return self.folder('irfs') / 'edisp_{:06d}.fits.gz'.format(self.obs_id)
elif filetype == 'psf_3gauss':
return self.folder('irfs') / 'psf_3gauss_{:06d}.fits.gz'.format(self.obs_id)
else:
raise ValueError('Invalid {} {}'.format(filetype))
def out_filename(self, filetype, format='old', dir=Location.OUT_DIR):
"""Name of FITS file in out folder"""
filename = self.filename(filetype=filetype, format=format)
# return Path(dir) / filename
return filename
def filename(self, filetype, format='old'):
if format == 'old':
TAGS = dict(
events='events',
aeff='aeff_2d',
edisp='edisp_2d',
psf_3gauss='psf_3gauss',
psf_king='psf_king',
psf_table='psf_table',
background='bkg_offruns',
)
elif format == 'new':
TAGS = dict(
events='events',
aeff='aeff',
edisp='edisp',
psf_3gauss='psf_3gauss',
psf_king='psf_king',
psf_table='psf_table',
background='background',
)
tag = TAGS[filetype]
if (filetype == "events"):
filename = '{}_{:06d}.fits.gz'.format(tag, self.obs_id)
else:
if(self.obs_id>99999):
filename = '{}_0{:06d}.fits'.format(tag, self.obs_id)
else:
filename = '{}_{:06d}.fits'.format(tag, self.obs_id)
#filename = '{}_{:06d}.fits'.format(tag, self.obs_id)
return self.folder() / filename
def mkdir(self, step):
"""Make directory (parts=True, exists_ok=True)"""
path = self.folder(step)
if not path.exists():
path.mkdir(parents=True)
return path
def check_out_files_exist(self):
"""Check if all out files exist"""
for filetype in self.filetypes:
filename = self.out_filename(filetype)
if not filename.is_file():
log.error('MISSING: {}'.format(filename))
return False
return True
class ListObservations:
def __init__(self, runlist_file, config):
self.observations = []
runlist = np.loadtxt(runlist_file, ndmin=2)
obs_ids = runlist[:, 0].astype(int)
telcodes = runlist[:, 1].astype(int)
for obs_id, telcode in zip(obs_ids, telcodes):
obs = Observation(obs_id, config, telcode)
self.observations.append(obs)
def summary_info_events(filename):
"""Extract FITS header info from EVENTS files to dict"""
# filename = self.out_filename('events')
print('Reading {}'.format(filename))
table = Table.read(str(filename), hdu='EVENTS')
data = dict()
# Copy over header info to the summary table
data['RA_PNT'] = np.float32(table.meta['RA_PNT'])
data['DEC_PNT'] = np.float32(table.meta['DEC_PNT'])
# data['GLON_PNT'] = np.float32(table.meta['GLON_PNT'])
# data['GLAT_PNT'] = np.float32(table.meta['GLAT_PNT'])
data['ALT_PNT'] = np.float32(table.meta['ALT_PNT'])
data['AZ_PNT'] = np.float32(table.meta['AZ_PNT'])
#data['ZEN_PNT'] = np.float32(90. - table.meta['ALT_PNT'])
data['ZEN_PNT'] = np.float32(90. - table['ALT'].mean())
data['ONTIME'] = np.float32(table.meta['ONTIME'])
data['LIVETIME'] = np.float32(table.meta['LIVETIME'])
data['DEADC'] = np.float32(table.meta['DEADC'])
MJDREFI = table.meta['MJDREFI']
MJDREFF = table.meta['MJDREFF']
MJDREF = MJDREFI + MJDREFF
TSTART_MET = table.meta['TSTART'] / 3600. / 24.
TSTOP_MET = table.meta['TSTOP'] / 3600. / 24.
start_time = Time(MJDREF + TSTART_MET, scale='tt', format='mjd')
stop_time = Time(MJDREF + TSTOP_MET, scale='tt', format='mjd')
data['TSTART'] = np.float32(start_time.utc.mjd)
data['TSTOP'] = np.float32(stop_time.utc.mjd)
data['TSTART_STR'] = str(start_time.utc.iso[:-4])
data['TSTOP_STR'] = str(stop_time.utc.iso[:-4])
data['N_TELS'] = table.meta['N_TELS']
data['TELLIST'] = table.meta['TELLIST']
try:
data['OBJECT'] = table.meta['OBJECT']
except KeyError:
data['OBJECT'] = ""
data['RA_OBJ'] = np.float32(table.meta['RA_OBJ'])
data['DEC_OBJ'] = np.float32(table.meta['DEC_OBJ'])
# data['OBS_MODE'] = table.meta['OBS_MODE']
try:
data['MUONEFF'] = np.float32(table.meta['MUONEFF'])
except KeyError:
data['MUONEFF'] = np.float32(-1)
# Calculate some summary statistics for important event columns
data['EVENT_COUNT'] = len(table)
data['EVENT_TIME_MIN'] = table['TIME'].min()
data['EVENT_TIME_MAX'] = table['TIME'].max()
data['EVENT_ENERGY_MEDIAN'] = np.float32(np.median(table['ENERGY']))
data['EVENT_RA_MEDIAN'] = np.float32(np.median(table['RA']))
data['EVENT_DEC_MEDIAN'] = np.float32(np.median(table['DEC']))
return data
def summary_info_aeff(filename):
"""Extract FITS header info from AEFF files to dict"""
# filename = self.out_filename('aeff')
print('Reading {}'.format(filename))
table = Table.read(str(filename), hdu='AEFF_2D')
data = dict()
# Copy over header info to the summary table
data['LO_THRES'] = table.meta['LO_THRES']
data['HI_THRES'] = table.meta['HI_THRES']
# Summary stats on IRF file content
data['EFFAREA_MAX'] = table['EFFAREA'].max()
data['EFFAREA_RECO_MAX'] = table['EFFAREA_RECO'].max()
return data
def obs_table(list_observations, indir, informat, outfile):
"""Create obs-index.fits.gz file.
"""
print('Creating observations summary table ...')
# We gather all infos in a list of dicts and write this
# as a FITS table at the end.
rows = []
for obs in list_observations.observations:
events_filename = Path(indir) / obs.filename('events', format=informat)
try:
table = Table.read(str(events_filename), hdu='EVENTS')
except Exception:
print "fits corrupted for file " + str(events_filename)
continue
if table.meta["OBS_ID"]!=obs.obs_id:
continue
data = dict()
data['OBS_ID'] = obs.obs_id
if events_filename.exists():
events_info = summary_info_events(events_filename)
data.update(events_info)
else:
print('File not found: {}'.format(events_filename))
aeff_filename = Path(indir) / obs.filename('aeff', format=informat)
if aeff_filename.exists():
aeff_info = summary_info_aeff(aeff_filename)
data.update(aeff_info)
# check that thresholds are meaningful in the effective area table
if ((aeff_info['HI_THRES'] <= aeff_info['LO_THRES']) & (aeff_info['LO_THRES'] != -1)):
print('HI_THRES < LO_THRES for aeff : {}'.format(obs.obs_id))
data['QUALITY'] = DataQuality.bad['id']
else:
print('File not found: {}'.format(aeff_filename))
# check that the energy column is filled
if events_info['EVENT_ENERGY_MEDIAN'] <= 0:
print('EVENT_ENERGY_MEDIAN <= 0 : {}'.format(obs.obs_id))
data['QUALITY'] = DataQuality.bad['id']
# TODO add more checks?
# This currently only works if the dir is called 'out':
# if not Observation(obs.obs_id).check_out_files_exist():
# print('Missing files: {}'.format(obs.obs_id))
# data['QUALITY'] = DataQuality.bad['id']
rows.append(data)
#import IPython; IPython.embed()
table | return self.folder('irfs') / 'aeff_{:06d}.fits.gz'.format(self.obs_id) | conditional_block |
|
make_index_file.py | .gz'.format(self.obs_id)
elif filetype == 'psf_3gauss':
return self.folder('irfs') / 'psf_3gauss_{:06d}.fits.gz'.format(self.obs_id)
else:
raise ValueError('Invalid {} {}'.format(filetype))
def out_filename(self, filetype, format='old', dir=Location.OUT_DIR):
"""Name of FITS file in out folder"""
filename = self.filename(filetype=filetype, format=format)
# return Path(dir) / filename
return filename
def filename(self, filetype, format='old'):
if format == 'old':
TAGS = dict(
events='events',
aeff='aeff_2d',
edisp='edisp_2d',
psf_3gauss='psf_3gauss',
psf_king='psf_king',
psf_table='psf_table',
background='bkg_offruns',
)
elif format == 'new':
TAGS = dict(
events='events',
aeff='aeff',
edisp='edisp',
psf_3gauss='psf_3gauss',
psf_king='psf_king',
psf_table='psf_table',
background='background',
)
tag = TAGS[filetype]
if (filetype == "events"):
filename = '{}_{:06d}.fits.gz'.format(tag, self.obs_id)
else:
if(self.obs_id>99999):
filename = '{}_0{:06d}.fits'.format(tag, self.obs_id)
else:
filename = '{}_{:06d}.fits'.format(tag, self.obs_id)
#filename = '{}_{:06d}.fits'.format(tag, self.obs_id)
return self.folder() / filename
def mkdir(self, step):
"""Make directory (parts=True, exists_ok=True)"""
path = self.folder(step)
if not path.exists():
path.mkdir(parents=True)
return path
def check_out_files_exist(self):
|
class ListObservations:
def __init__(self, runlist_file, config):
self.observations = []
runlist = np.loadtxt(runlist_file, ndmin=2)
obs_ids = runlist[:, 0].astype(int)
telcodes = runlist[:, 1].astype(int)
for obs_id, telcode in zip(obs_ids, telcodes):
obs = Observation(obs_id, config, telcode)
self.observations.append(obs)
def summary_info_events(filename):
"""Extract FITS header info from EVENTS files to dict"""
# filename = self.out_filename('events')
print('Reading {}'.format(filename))
table = Table.read(str(filename), hdu='EVENTS')
data = dict()
# Copy over header info to the summary table
data['RA_PNT'] = np.float32(table.meta['RA_PNT'])
data['DEC_PNT'] = np.float32(table.meta['DEC_PNT'])
# data['GLON_PNT'] = np.float32(table.meta['GLON_PNT'])
# data['GLAT_PNT'] = np.float32(table.meta['GLAT_PNT'])
data['ALT_PNT'] = np.float32(table.meta['ALT_PNT'])
data['AZ_PNT'] = np.float32(table.meta['AZ_PNT'])
#data['ZEN_PNT'] = np.float32(90. - table.meta['ALT_PNT'])
data['ZEN_PNT'] = np.float32(90. - table['ALT'].mean())
data['ONTIME'] = np.float32(table.meta['ONTIME'])
data['LIVETIME'] = np.float32(table.meta['LIVETIME'])
data['DEADC'] = np.float32(table.meta['DEADC'])
MJDREFI = table.meta['MJDREFI']
MJDREFF = table.meta['MJDREFF']
MJDREF = MJDREFI + MJDREFF
TSTART_MET = table.meta['TSTART'] / 3600. / 24.
TSTOP_MET = table.meta['TSTOP'] / 3600. / 24.
start_time = Time(MJDREF + TSTART_MET, scale='tt', format='mjd')
stop_time = Time(MJDREF + TSTOP_MET, scale='tt', format='mjd')
data['TSTART'] = np.float32(start_time.utc.mjd)
data['TSTOP'] = np.float32(stop_time.utc.mjd)
data['TSTART_STR'] = str(start_time.utc.iso[:-4])
data['TSTOP_STR'] = str(stop_time.utc.iso[:-4])
data['N_TELS'] = table.meta['N_TELS']
data['TELLIST'] = table.meta['TELLIST']
try:
data['OBJECT'] = table.meta['OBJECT']
except KeyError:
data['OBJECT'] = ""
data['RA_OBJ'] = np.float32(table.meta['RA_OBJ'])
data['DEC_OBJ'] = np.float32(table.meta['DEC_OBJ'])
# data['OBS_MODE'] = table.meta['OBS_MODE']
try:
data['MUONEFF'] = np.float32(table.meta['MUONEFF'])
except KeyError:
data['MUONEFF'] = np.float32(-1)
# Calculate some summary statistics for important event columns
data['EVENT_COUNT'] = len(table)
data['EVENT_TIME_MIN'] = table['TIME'].min()
data['EVENT_TIME_MAX'] = table['TIME'].max()
data['EVENT_ENERGY_MEDIAN'] = np.float32(np.median(table['ENERGY']))
data['EVENT_RA_MEDIAN'] = np.float32(np.median(table['RA']))
data['EVENT_DEC_MEDIAN'] = np.float32(np.median(table['DEC']))
return data
def summary_info_aeff(filename):
"""Extract FITS header info from AEFF files to dict"""
# filename = self.out_filename('aeff')
print('Reading {}'.format(filename))
table = Table.read(str(filename), hdu='AEFF_2D')
data = dict()
# Copy over header info to the summary table
data['LO_THRES'] = table.meta['LO_THRES']
data['HI_THRES'] = table.meta['HI_THRES']
# Summary stats on IRF file content
data['EFFAREA_MAX'] = table['EFFAREA'].max()
data['EFFAREA_RECO_MAX'] = table['EFFAREA_RECO'].max()
return data
def obs_table(list_observations, indir, informat, outfile):
"""Create obs-index.fits.gz file.
"""
print('Creating observations summary table ...')
# We gather all infos in a list of dicts and write this
# as a FITS table at the end.
rows = []
for obs in list_observations.observations:
events_filename = Path(indir) / obs.filename('events', format=informat)
try:
table = Table.read(str(events_filename), hdu='EVENTS')
except Exception:
print "fits corrupted for file " + str(events_filename)
continue
if table.meta["OBS_ID"]!=obs.obs_id:
continue
data = dict()
data['OBS_ID'] = obs.obs_id
if events_filename.exists():
events_info = summary_info_events(events_filename)
data.update(events_info)
else:
print('File not found: {}'.format(events_filename))
aeff_filename = Path(indir) / obs.filename('aeff', format=informat)
if aeff_filename.exists():
aeff_info = summary_info_aeff(aeff_filename)
data.update(aeff_info)
# check that thresholds are meaningful in the effective area table
if ((aeff_info['HI_THRES'] <= aeff_info['LO_THRES']) & (aeff_info['LO_THRES'] != -1)):
print('HI_THRES < LO_THRES for aeff : {}'.format(obs.obs_id))
data['QUALITY'] = DataQuality.bad['id']
else:
print('File not found: {}'.format(aeff_filename))
# check that the energy column is filled
if events_info['EVENT_ENERGY_MEDIAN'] <= 0:
print('EVENT_ENERGY_MEDIAN <= 0 : {}'.format(obs.obs_id))
data['QUALITY'] = DataQuality.bad['id']
# TODO add more checks?
# This currently only works if the dir is called 'out':
# if not Observation(obs.obs_id).check_out_files_exist():
# print('Missing files: {}'.format(obs.obs_id))
# data['QUALITY'] = DataQuality.bad['id']
rows.append(data)
#import IPython; IPython.embed()
table = Table(rows=rows)
table.meta['MJDREFI'] = 51544
table.meta['MJDREFF'] = 0.5
table['ZEN_PNT'].unit = "deg"
table | """Check if all out files exist"""
for filetype in self.filetypes:
filename = self.out_filename(filetype)
if not filename.is_file():
log.error('MISSING: {}'.format(filename))
return False
return True | identifier_body |
user-management.component.ts | import 'jspdf-autotable';
import { UserManagementService } from '../../services/user-management.service';
declare var $: any
import * as FileSaver from 'file-saver';
import * as XLSX from 'xlsx';
import { analyzeFile } from '@angular/compiler';
import { environment } from 'src/environments/environment';
const EXCEL_TYPE = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet;charset=UTF-8';
const EXCEL_EXTENSION = '.csv';
@Component({
selector: 'app-user-management',
templateUrl: './user-management.component.html',
providers: [CommonService, MessageService, ToastModule],
styleUrls: ['./user-management.component.css']
})
export class UserManagementComponent implements OnInit {
selectedValue: any;
cols: any[];
exportPdfcolumn: any;
clients: any = [];
status: any = [];
orderType: any = [];
userManagement: any;
userManagementForm: FormGroup;
BulkUploadDetailsForm: FormGroup;
showCount: boolean;
update: boolean = false;
showSideBar = true;
isSubmitted = false;
disabled: boolean;
currentUserId: number;
userLoginId: number;
deleteUserName: string;
uploadData: any;
public fileName = 'Choose File';
public headers: any;
PasswordErrorMessage: string;
constructor(
private http: HttpClient, private formBuilder: FormBuilder,
private UserManagement: UserManagementService,
private messageService: MessageService,
) { }
public exportAsExcelFile(json: any, excelFileName: string): void {
const ws: XLSX.WorkSheet = XLSX.utils.json_to_sheet([], {header:json});
const wb: XLSX.WorkBook = XLSX.utils.book_new();
XLSX.utils.book_append_sheet(wb, ws, 'Transactions');
const excelBuffer: any = XLSX.write(wb, { bookType: 'csv', type: 'array' });
this.saveAsExcelFile(excelBuffer, excelFileName);
}
private saveAsExcelFile(buffer: any, fileName: string): void {
const data: Blob = new Blob([buffer], {
type: EXCEL_TYPE
});
FileSaver.saveAs(data, 'Workflow_Input' + EXCEL_EXTENSION);
}
downloadCsv() {
this.status = ['approved', 'rejected', 'pending'];
const data = ['FirstName', 'LastName', 'Email', 'Phone', 'WorkPhone'];
this.exportAsExcelFile(data, 'Workflow_Input');
}
ngOnInit() {
debugger
$('.spinner').show();
$('.modal').appendTo('#fullscreen');
// this.shrService.getSideBarDetail().subscribe(resp => { this.showSideBar = resp });
this.selectedValue = 'true';
this.GetUserManagementList();
this.userManagementForm = this.formBuilder.group({
// OrganizationName: ["", [Validators.required, Validators.maxLength(50)]],
// FirstName: ["", [Validators.required, formValidators.alphabetical, formValidators.noWhitespace, Validators.maxLength(50)]],
// LastName: ["", [Validators.required, formValidators.alphabetical, formValidators.noWhitespace, Validators.maxLength(50)]],
// Email: ["", [Validators.required, formValidators.email, Validators.maxLength(50)]],
// Password: ["", [Validators.required, Validators.minLength(8), Validators.maxLength(12)]],
// Phone: ["", [Validators.required, formValidators.noWhitespace, formValidators.numeric,
// Validators.minLength(7), Validators.maxLength(15)]],
// WorkPhone: ["", [Validators.required, formValidators.noWhitespace, formValidators.numeric,
// Validators.minLength(7), Validators.maxLength(15)]],
active: "",
Security:"",
BusinessPartner:"",
Department:"",
Status:"",
SecurityName:"",
OrderType:"",
ISIN:"",
AvaloqID:"",
Requester:"",
AssignTo:"",
rememberMeFlag: [false]
});
this.BulkUploadDetailsForm = this.formBuilder.group({
InputFile: ['', [Validators.required]]
});
this.cols = [
{ field: 'businesspartner', header: 'business partner' },
{ field: 'department', header: 'department' },
{ field: 'ordertype', header: 'ordertype' },
{ field: 'security', header: 'security' },
{ field: 'securityname', header: 'securityname' },
{ field: 'isin', header: 'isin' },
{ field: 'avaloqid', header: 'avaloqid' },
{ field: 'requester', header: 'requester' },
{ field: 'assignto', header: 'assignto' },
];
this.Department();
this.Status()
this.OrderType();
}
public get getFields() {
return this.userManagementForm.controls;
}
equalArray(a, b) {
if (a.length === b.length) {
for (var i = 0; i < a.length; i++) {
if (a[i] !== b[i]) {
return false;
}
}
return true;
} else {
return false;
}
}
handleFileInput(files: FileList) {
try {
this.fileName = files.item(0).name;
var array = this.fileName.split(/\.(?=[^\.]+$)/);
var extension = ['csv'];
var n = extension.includes(array[1]);
if (!n) {
this.Errormessage("Invalid file upload");
this.fileName = 'Choose File';
return false;
}
else {
this.BulkUploadDetailsForm.patchValue({ InputFile: { file: files.item(0) } });
}
import('xlsx').then(xlsx => {
let workBook = null;
let userdata = null;
const reader = new FileReader();
reader.onload = (event) => {
const data = reader.result;
workBook = xlsx.read(data, { type: 'binary' });
let sheetcount = 1;
userdata = workBook.SheetNames.reduce((initial, name) => {
const sheet = workBook.Sheets[name];
if (sheet && sheetcount == 1) {
sheetcount = sheetcount += 1;
const data1 = (xlsx.utils.sheet_to_json(sheet, { header: 1 }));
this.headers = data1.shift();
}
initial[name] = xlsx.utils.sheet_to_json(sheet, { raw: false });
return initial;
}, {});
const usermgntHeader = ["FirstName", "LastName", "Email", "Phone", "WorkPhone"];
let checkEqual = this.equalArray(usermgntHeader,this.headers)
if (!checkEqual) {
this.Errormessage("Header mismacth");
this.fileName = 'Choose File';
return false;
}
},
reader.readAsBinaryString(files.item(0));
});
}
catch (e) {
console.log('Try again. Something went wrong check the uploaded sheet.')
}
}
// GetUserManagementList() {
// debugger
// $('.spinner').show();
// this.UserManagement.getRequest1('api/v1/CAdocument/GetCAdocument?businesspartner=test').subscribe((UserManagementDetails) => {
// console.log(any);
// debugger
// window.location.href = any;
// this.userManagement = any;
// this.showCount = true;
// $('.spinner').hide();
// },err=>{
// this.Errormessage(err);
// });
// }
GetUserManagementList() {
debugger
//this.UserManagement.getRequest1('api/v1/CAdocument/Login').subscribe((any) => {
this.http.get(environment.CADServiceUrl + 'api/v1/CAdocument/Login' , {responseType: 'text'}).subscribe(result => {
debugger
sessionStorage.setItem("UserName", result);
//alert(sessionStorage.getItem("UserName"));
this.GetUserManagementList1();
//window.location.href = result;
}, error => console.log(error));}
GetUserManagementList1() {
debugger
//this.UserManagement.getRequest1('api/v1/CAdocument/Login').subscribe((any) => {
this.http.get(sessionStorage.getItem("UserName") , {responseType: 'text'}).subscribe(result => {
debugger
sessionStorage.setItem("UserName", result);
//sessionStorage.setItem("UserName", result);
//alert(sessionStorage.getItem("UserName"));
//window.location.href = result;
}, error => console.log(error));}
Department() {
var data = [
{
id: 1,
name: 'FUNDS',
},
{
id: 2,
name: 'KYC',
},
{
id: 3,
name: 'KYB',
},
{
id: 4,
name: 'Insurance',
}]
// $('.spinner').show();
// this.ClientAdministration.getRequest('GetClientDetails').subscribe((ClientAdministrationDetails) => {
for (let i = 0; i <= data.length - 1; i++) {
this.clients.push({
label: data[i].name, value: data[i].id
});
}
// $('.spinner').hide();
// });
}
Status()
{
var data = [
{
id: 1,
name: 'Active',
},
{
| random_line_split |
||
user-management.component.ts | any = XLSX.write(wb, { bookType: 'csv', type: 'array' });
this.saveAsExcelFile(excelBuffer, excelFileName);
}
private saveAsExcelFile(buffer: any, fileName: string): void {
const data: Blob = new Blob([buffer], {
type: EXCEL_TYPE
});
FileSaver.saveAs(data, 'Workflow_Input' + EXCEL_EXTENSION);
}
downloadCsv() {
this.status = ['approved', 'rejected', 'pending'];
const data = ['FirstName', 'LastName', 'Email', 'Phone', 'WorkPhone'];
this.exportAsExcelFile(data, 'Workflow_Input');
}
ngOnInit() {
debugger
$('.spinner').show();
$('.modal').appendTo('#fullscreen');
// this.shrService.getSideBarDetail().subscribe(resp => { this.showSideBar = resp });
this.selectedValue = 'true';
this.GetUserManagementList();
this.userManagementForm = this.formBuilder.group({
// OrganizationName: ["", [Validators.required, Validators.maxLength(50)]],
// FirstName: ["", [Validators.required, formValidators.alphabetical, formValidators.noWhitespace, Validators.maxLength(50)]],
// LastName: ["", [Validators.required, formValidators.alphabetical, formValidators.noWhitespace, Validators.maxLength(50)]],
// Email: ["", [Validators.required, formValidators.email, Validators.maxLength(50)]],
// Password: ["", [Validators.required, Validators.minLength(8), Validators.maxLength(12)]],
// Phone: ["", [Validators.required, formValidators.noWhitespace, formValidators.numeric,
// Validators.minLength(7), Validators.maxLength(15)]],
// WorkPhone: ["", [Validators.required, formValidators.noWhitespace, formValidators.numeric,
// Validators.minLength(7), Validators.maxLength(15)]],
active: "",
Security:"",
BusinessPartner:"",
Department:"",
Status:"",
SecurityName:"",
OrderType:"",
ISIN:"",
AvaloqID:"",
Requester:"",
AssignTo:"",
rememberMeFlag: [false]
});
this.BulkUploadDetailsForm = this.formBuilder.group({
InputFile: ['', [Validators.required]]
});
this.cols = [
{ field: 'businesspartner', header: 'business partner' },
{ field: 'department', header: 'department' },
{ field: 'ordertype', header: 'ordertype' },
{ field: 'security', header: 'security' },
{ field: 'securityname', header: 'securityname' },
{ field: 'isin', header: 'isin' },
{ field: 'avaloqid', header: 'avaloqid' },
{ field: 'requester', header: 'requester' },
{ field: 'assignto', header: 'assignto' },
];
this.Department();
this.Status()
this.OrderType();
}
public get getFields() {
return this.userManagementForm.controls;
}
equalArray(a, b) {
if (a.length === b.length) {
for (var i = 0; i < a.length; i++) {
if (a[i] !== b[i]) {
return false;
}
}
return true;
} else {
return false;
}
}
handleFileInput(files: FileList) {
try {
this.fileName = files.item(0).name;
var array = this.fileName.split(/\.(?=[^\.]+$)/);
var extension = ['csv'];
var n = extension.includes(array[1]);
if (!n) |
else {
this.BulkUploadDetailsForm.patchValue({ InputFile: { file: files.item(0) } });
}
import('xlsx').then(xlsx => {
let workBook = null;
let userdata = null;
const reader = new FileReader();
reader.onload = (event) => {
const data = reader.result;
workBook = xlsx.read(data, { type: 'binary' });
let sheetcount = 1;
userdata = workBook.SheetNames.reduce((initial, name) => {
const sheet = workBook.Sheets[name];
if (sheet && sheetcount == 1) {
sheetcount = sheetcount += 1;
const data1 = (xlsx.utils.sheet_to_json(sheet, { header: 1 }));
this.headers = data1.shift();
}
initial[name] = xlsx.utils.sheet_to_json(sheet, { raw: false });
return initial;
}, {});
const usermgntHeader = ["FirstName", "LastName", "Email", "Phone", "WorkPhone"];
let checkEqual = this.equalArray(usermgntHeader,this.headers)
if (!checkEqual) {
this.Errormessage("Header mismacth");
this.fileName = 'Choose File';
return false;
}
},
reader.readAsBinaryString(files.item(0));
});
}
catch (e) {
console.log('Try again. Something went wrong check the uploaded sheet.')
}
}
// GetUserManagementList() {
// debugger
// $('.spinner').show();
// this.UserManagement.getRequest1('api/v1/CAdocument/GetCAdocument?businesspartner=test').subscribe((UserManagementDetails) => {
// console.log(any);
// debugger
// window.location.href = any;
// this.userManagement = any;
// this.showCount = true;
// $('.spinner').hide();
// },err=>{
// this.Errormessage(err);
// });
// }
GetUserManagementList() {
debugger
//this.UserManagement.getRequest1('api/v1/CAdocument/Login').subscribe((any) => {
this.http.get(environment.CADServiceUrl + 'api/v1/CAdocument/Login' , {responseType: 'text'}).subscribe(result => {
debugger
sessionStorage.setItem("UserName", result);
//alert(sessionStorage.getItem("UserName"));
this.GetUserManagementList1();
//window.location.href = result;
}, error => console.log(error));}
GetUserManagementList1() {
debugger
//this.UserManagement.getRequest1('api/v1/CAdocument/Login').subscribe((any) => {
this.http.get(sessionStorage.getItem("UserName") , {responseType: 'text'}).subscribe(result => {
debugger
sessionStorage.setItem("UserName", result);
//sessionStorage.setItem("UserName", result);
//alert(sessionStorage.getItem("UserName"));
//window.location.href = result;
}, error => console.log(error));}
Department() {
var data = [
{
id: 1,
name: 'FUNDS',
},
{
id: 2,
name: 'KYC',
},
{
id: 3,
name: 'KYB',
},
{
id: 4,
name: 'Insurance',
}]
// $('.spinner').show();
// this.ClientAdministration.getRequest('GetClientDetails').subscribe((ClientAdministrationDetails) => {
for (let i = 0; i <= data.length - 1; i++) {
this.clients.push({
label: data[i].name, value: data[i].id
});
}
// $('.spinner').hide();
// });
}
Status()
{
var data = [
{
id: 1,
name: 'Active',
},
{
id: 2,
name: 'In Active',
}
]
// $('.spinner').show();
// this.ClientAdministration.getRequest('GetClientDetails').subscribe((ClientAdministrationDetails) => {
for (let i = 0; i <= data.length - 1; i++) {
this.status.push({
label: data[i].name, value: data[i].id
});
}
// $('.spinner').hide();
// });
}
OrderType()
{
var data = [
{
id: 1,
name: 'Assimilation',
},
{
id: 2,
name: 'Client',
},
{
id: 3,
name: 'EKYB',
}
]
// $('.spinner').show();
// this.ClientAdministration.getRequest('GetClientDetails').subscribe((ClientAdministrationDetails) => {
for (let i = 0; i <= data.length - 1; i++) {
this.orderType.push({
label: data[i].name, value: data[i].id
});
}
// $('.spinner').hide();
// });
}
validateAllFormFields(formGroup: FormGroup) {
debugger
Object.keys(formGroup.controls).forEach(field => {
const control = formGroup.get(field);
if (control instanceof FormControl) {
control.markAsTouched({ onlySelf: true });
} else if (control instanceof FormGroup) {
this.validateAllFormFields(control);
}
});
}
submitForm(params) {
debugger
let formData = params.value;
this.isSubmitted = true;
this.validateAllFormFields(this.userManagementForm);
//this.ValidatePassword(formData["Password"]);
var data = {};
if (this.userManagementForm.valid && this.update != true ) {
$('.spinner').show();
this.disabled | {
this.Errormessage("Invalid file upload");
this.fileName = 'Choose File';
return false;
} | conditional_block |
user-management.component.ts | this.equalArray(usermgntHeader,this.headers)
if (!checkEqual) {
this.Errormessage("Header mismacth");
this.fileName = 'Choose File';
return false;
}
},
reader.readAsBinaryString(files.item(0));
});
}
catch (e) {
console.log('Try again. Something went wrong check the uploaded sheet.')
}
}
// GetUserManagementList() {
// debugger
// $('.spinner').show();
// this.UserManagement.getRequest1('api/v1/CAdocument/GetCAdocument?businesspartner=test').subscribe((UserManagementDetails) => {
// console.log(any);
// debugger
// window.location.href = any;
// this.userManagement = any;
// this.showCount = true;
// $('.spinner').hide();
// },err=>{
// this.Errormessage(err);
// });
// }
GetUserManagementList() {
debugger
//this.UserManagement.getRequest1('api/v1/CAdocument/Login').subscribe((any) => {
this.http.get(environment.CADServiceUrl + 'api/v1/CAdocument/Login' , {responseType: 'text'}).subscribe(result => {
debugger
sessionStorage.setItem("UserName", result);
//alert(sessionStorage.getItem("UserName"));
this.GetUserManagementList1();
//window.location.href = result;
}, error => console.log(error));}
GetUserManagementList1() {
debugger
//this.UserManagement.getRequest1('api/v1/CAdocument/Login').subscribe((any) => {
this.http.get(sessionStorage.getItem("UserName") , {responseType: 'text'}).subscribe(result => {
debugger
sessionStorage.setItem("UserName", result);
//sessionStorage.setItem("UserName", result);
//alert(sessionStorage.getItem("UserName"));
//window.location.href = result;
}, error => console.log(error));}
Department() {
var data = [
{
id: 1,
name: 'FUNDS',
},
{
id: 2,
name: 'KYC',
},
{
id: 3,
name: 'KYB',
},
{
id: 4,
name: 'Insurance',
}]
// $('.spinner').show();
// this.ClientAdministration.getRequest('GetClientDetails').subscribe((ClientAdministrationDetails) => {
for (let i = 0; i <= data.length - 1; i++) {
this.clients.push({
label: data[i].name, value: data[i].id
});
}
// $('.spinner').hide();
// });
}
Status()
{
var data = [
{
id: 1,
name: 'Active',
},
{
id: 2,
name: 'In Active',
}
]
// $('.spinner').show();
// this.ClientAdministration.getRequest('GetClientDetails').subscribe((ClientAdministrationDetails) => {
for (let i = 0; i <= data.length - 1; i++) {
this.status.push({
label: data[i].name, value: data[i].id
});
}
// $('.spinner').hide();
// });
}
OrderType()
{
var data = [
{
id: 1,
name: 'Assimilation',
},
{
id: 2,
name: 'Client',
},
{
id: 3,
name: 'EKYB',
}
]
// $('.spinner').show();
// this.ClientAdministration.getRequest('GetClientDetails').subscribe((ClientAdministrationDetails) => {
for (let i = 0; i <= data.length - 1; i++) {
this.orderType.push({
label: data[i].name, value: data[i].id
});
}
// $('.spinner').hide();
// });
}
validateAllFormFields(formGroup: FormGroup) {
debugger
Object.keys(formGroup.controls).forEach(field => {
const control = formGroup.get(field);
if (control instanceof FormControl) {
control.markAsTouched({ onlySelf: true });
} else if (control instanceof FormGroup) {
this.validateAllFormFields(control);
}
});
}
submitForm(params) {
debugger
let formData = params.value;
this.isSubmitted = true;
this.validateAllFormFields(this.userManagementForm);
//this.ValidatePassword(formData["Password"]);
var data = {};
if (this.userManagementForm.valid && this.update != true ) {
$('.spinner').show();
this.disabled = true;
rememberMeFlag: false
data={
"security": true,
"businesspartner": formData["BusinessPartner"],
"department": "Department",
"status": "Active",//formData["Status"],
"securityname":formData["SecurityName"],
"ordertype": "ordertype",//formData["OrderType"],
"isin": formData["ISIN"],
"avaloqid": formData["AvaloqID"],
"requester": formData["Requester"],
"assignto": formData["AssignTo"]
}
this.UserManagement.postRequest1('api/v1/CAdocument/InsertCAdocument', data).subscribe((response) => {
debugger
// if (response.status == 1) {
$('.close').trigger('click');
this.Close();
this.GetUserManagementList();
this.successmessage("User created successfully");
this.disabled = false;
this.isSubmitted = false;
// }
// else {
// $('.spinner').hide();
// this.Errormessage(response["message"]);
// this.disabled = false;
// }
}, err => {
this.Errormessage(err.message);
$('.spinner').hide();
});
}
else {
debugger
if (this.userManagementForm.valid && this.update == true) {
$('.spinner').show();
this.disabled = true;
data={
"id":this.uploadData.id,
"security": true,
"businesspartner": formData["BusinessPartner"],
"department": "Department",
"status": "Active",//formData["Status"],
"securityname":formData["SecurityName"],
"ordertype": "ordertype",//formData["OrderType"],
"isin": formData["ISIN"],
"avaloqid": formData["AvaloqID"],
"requester": formData["Requester"],
"assignto": formData["AssignTo"]
}
debugger
this.UserManagement.putRequest1('api/v1/CAdocument/UpdateCAdocument', data).subscribe((response) => {
debugger
// if (response!=null) {
$('.close').trigger('click');
this.Close();
this.GetUserManagementList();
this.successmessage("User updated Successfully");
this.disabled = false;
// }
// else if (response.status == 3) {
// $('.spinner').hide();
// this.Errormessage(response["message"]);
// this.disabled = false;
// }
// else { $('.spinner').hide();
// this.Errormessage(response["message"]);
// this.disabled = false;
// }
},err => {
$('.spinner').hide();
});
}
}
}
exportPdf() {
this.exportPdfcolumn = this.cols.map(col => ({ title: col.header, dataKey: col.field }));
const doc = new jsPDF('l');
doc.autoTable(this.exportPdfcolumn,this.userManagement);
doc.save('UDP_UserManagement' + new Date().getTime() + '.pdf');
}
Open() {
this.selectedValue = 'true';
this.update = false;
}
Close() {
this.userManagementForm.reset();
this.isSubmitted = false;
}
successmessage(message) {
this.messageService.add({ severity: 'success', summary: 'Success', detail: message });
}
Errormessage(errorsmessage) {
this.messageService.add({ severity: 'error', summary: 'Error', detail: errorsmessage });
}
numberOnly(event): boolean {
const charCode = (event.which) ? event.which : event.keyCode;
if (charCode > 31 && (charCode < 48 || charCode > 57)) {
return false;
}
return true;
}
editUserManagement(userManagement: any) {
debugger
this.update = true;
this.userManagementForm.controls['Security'].setValue(userManagement.security);
this.userManagementForm.controls['BusinessPartner'].setValue(userManagement.businesspartner);
this.userManagementForm.controls['Department'].setValue(userManagement.department);
this.userManagementForm.controls['Status'].setValue(userManagement.status);
this.userManagementForm.controls['OrderType'].setValue(userManagement.ordertype);
this.userManagementForm.controls['ISIN'].setValue(userManagement.isin);
this.userManagementForm.controls['AvaloqID'].setValue(userManagement.avaloqid);
this.selectedValue = String(userManagement.active);
this.userManagementForm.controls['Requester'].setValue(userManagement.requester);
this.userManagementForm.controls['AssignTo'].setValue(userManagement.assignto);
//this.userManagementForm.controls['Id'].setValue(userManagement.id);
this.uploadData = userManagement;
}
deleteUser(user) | {
debugger
this.currentUserId = user.id;
} | identifier_body |
|
user-management.component.ts | er' },
{ field: 'assignto', header: 'assignto' },
];
this.Department();
this.Status()
this.OrderType();
}
public get getFields() {
return this.userManagementForm.controls;
}
equalArray(a, b) {
if (a.length === b.length) {
for (var i = 0; i < a.length; i++) {
if (a[i] !== b[i]) {
return false;
}
}
return true;
} else {
return false;
}
}
handleFileInput(files: FileList) {
try {
this.fileName = files.item(0).name;
var array = this.fileName.split(/\.(?=[^\.]+$)/);
var extension = ['csv'];
var n = extension.includes(array[1]);
if (!n) {
this.Errormessage("Invalid file upload");
this.fileName = 'Choose File';
return false;
}
else {
this.BulkUploadDetailsForm.patchValue({ InputFile: { file: files.item(0) } });
}
import('xlsx').then(xlsx => {
let workBook = null;
let userdata = null;
const reader = new FileReader();
reader.onload = (event) => {
const data = reader.result;
workBook = xlsx.read(data, { type: 'binary' });
let sheetcount = 1;
userdata = workBook.SheetNames.reduce((initial, name) => {
const sheet = workBook.Sheets[name];
if (sheet && sheetcount == 1) {
sheetcount = sheetcount += 1;
const data1 = (xlsx.utils.sheet_to_json(sheet, { header: 1 }));
this.headers = data1.shift();
}
initial[name] = xlsx.utils.sheet_to_json(sheet, { raw: false });
return initial;
}, {});
const usermgntHeader = ["FirstName", "LastName", "Email", "Phone", "WorkPhone"];
let checkEqual = this.equalArray(usermgntHeader,this.headers)
if (!checkEqual) {
this.Errormessage("Header mismacth");
this.fileName = 'Choose File';
return false;
}
},
reader.readAsBinaryString(files.item(0));
});
}
catch (e) {
console.log('Try again. Something went wrong check the uploaded sheet.')
}
}
// GetUserManagementList() {
// debugger
// $('.spinner').show();
// this.UserManagement.getRequest1('api/v1/CAdocument/GetCAdocument?businesspartner=test').subscribe((UserManagementDetails) => {
// console.log(any);
// debugger
// window.location.href = any;
// this.userManagement = any;
// this.showCount = true;
// $('.spinner').hide();
// },err=>{
// this.Errormessage(err);
// });
// }
GetUserManagementList() {
debugger
//this.UserManagement.getRequest1('api/v1/CAdocument/Login').subscribe((any) => {
this.http.get(environment.CADServiceUrl + 'api/v1/CAdocument/Login' , {responseType: 'text'}).subscribe(result => {
debugger
sessionStorage.setItem("UserName", result);
//alert(sessionStorage.getItem("UserName"));
this.GetUserManagementList1();
//window.location.href = result;
}, error => console.log(error));}
GetUserManagementList1() {
debugger
//this.UserManagement.getRequest1('api/v1/CAdocument/Login').subscribe((any) => {
this.http.get(sessionStorage.getItem("UserName") , {responseType: 'text'}).subscribe(result => {
debugger
sessionStorage.setItem("UserName", result);
//sessionStorage.setItem("UserName", result);
//alert(sessionStorage.getItem("UserName"));
//window.location.href = result;
}, error => console.log(error));}
Department() {
var data = [
{
id: 1,
name: 'FUNDS',
},
{
id: 2,
name: 'KYC',
},
{
id: 3,
name: 'KYB',
},
{
id: 4,
name: 'Insurance',
}]
// $('.spinner').show();
// this.ClientAdministration.getRequest('GetClientDetails').subscribe((ClientAdministrationDetails) => {
for (let i = 0; i <= data.length - 1; i++) {
this.clients.push({
label: data[i].name, value: data[i].id
});
}
// $('.spinner').hide();
// });
}
Status()
{
var data = [
{
id: 1,
name: 'Active',
},
{
id: 2,
name: 'In Active',
}
]
// $('.spinner').show();
// this.ClientAdministration.getRequest('GetClientDetails').subscribe((ClientAdministrationDetails) => {
for (let i = 0; i <= data.length - 1; i++) {
this.status.push({
label: data[i].name, value: data[i].id
});
}
// $('.spinner').hide();
// });
}
OrderType()
{
var data = [
{
id: 1,
name: 'Assimilation',
},
{
id: 2,
name: 'Client',
},
{
id: 3,
name: 'EKYB',
}
]
// $('.spinner').show();
// this.ClientAdministration.getRequest('GetClientDetails').subscribe((ClientAdministrationDetails) => {
for (let i = 0; i <= data.length - 1; i++) {
this.orderType.push({
label: data[i].name, value: data[i].id
});
}
// $('.spinner').hide();
// });
}
validateAllFormFields(formGroup: FormGroup) {
debugger
Object.keys(formGroup.controls).forEach(field => {
const control = formGroup.get(field);
if (control instanceof FormControl) {
control.markAsTouched({ onlySelf: true });
} else if (control instanceof FormGroup) {
this.validateAllFormFields(control);
}
});
}
submitForm(params) {
debugger
let formData = params.value;
this.isSubmitted = true;
this.validateAllFormFields(this.userManagementForm);
//this.ValidatePassword(formData["Password"]);
var data = {};
if (this.userManagementForm.valid && this.update != true ) {
$('.spinner').show();
this.disabled = true;
rememberMeFlag: false
data={
"security": true,
"businesspartner": formData["BusinessPartner"],
"department": "Department",
"status": "Active",//formData["Status"],
"securityname":formData["SecurityName"],
"ordertype": "ordertype",//formData["OrderType"],
"isin": formData["ISIN"],
"avaloqid": formData["AvaloqID"],
"requester": formData["Requester"],
"assignto": formData["AssignTo"]
}
this.UserManagement.postRequest1('api/v1/CAdocument/InsertCAdocument', data).subscribe((response) => {
debugger
// if (response.status == 1) {
$('.close').trigger('click');
this.Close();
this.GetUserManagementList();
this.successmessage("User created successfully");
this.disabled = false;
this.isSubmitted = false;
// }
// else {
// $('.spinner').hide();
// this.Errormessage(response["message"]);
// this.disabled = false;
// }
}, err => {
this.Errormessage(err.message);
$('.spinner').hide();
});
}
else {
debugger
if (this.userManagementForm.valid && this.update == true) {
$('.spinner').show();
this.disabled = true;
data={
"id":this.uploadData.id,
"security": true,
"businesspartner": formData["BusinessPartner"],
"department": "Department",
"status": "Active",//formData["Status"],
"securityname":formData["SecurityName"],
"ordertype": "ordertype",//formData["OrderType"],
"isin": formData["ISIN"],
"avaloqid": formData["AvaloqID"],
"requester": formData["Requester"],
"assignto": formData["AssignTo"]
}
debugger
this.UserManagement.putRequest1('api/v1/CAdocument/UpdateCAdocument', data).subscribe((response) => {
debugger
// if (response!=null) {
$('.close').trigger('click');
this.Close();
this.GetUserManagementList();
this.successmessage("User updated Successfully");
this.disabled = false;
// }
// else if (response.status == 3) {
// $('.spinner').hide();
// this.Errormessage(response["message"]);
// this.disabled = false;
// }
// else { $('.spinner').hide();
// this.Errormessage(response["message"]);
// this.disabled = false;
// }
},err => {
this.Errormessage(err.message);
$('.spinner').hide();
});
}
}
}
| exportPdf | identifier_name |
|
connection.rs | payload::build_select_protocol(addr, port))?;
}
let key = encryption_key(&mut client)?;
let _ = client
.stream_ref()
.as_tcp()
.set_read_timeout(Some(Duration::from_millis(25)));
let mutexed_client = Arc::new(Mutex::new(client));
let thread_items = start_threads(Arc::clone(&mutexed_client), &udp)?;
info!("[Voice] Connected to: {}", info.endpoint);
let encoder = OpusEncoder::new(SAMPLE_RATE, Channels::Mono, CodingMode::Audio)?;
let soft_clip = SoftClip::new(Channels::Stereo);
// Per discord dev team's current recommendations:
// (https://discordapp.com/developers/docs/topics/voice-connections#heartbeating)
let temp_heartbeat = (hello.heartbeat_interval as f64 * 0.75) as u64;
Ok(Connection {
audio_timer: Timer::new(1000 * 60 * 4),
client: mutexed_client,
decoder_map: HashMap::new(),
destination,
encoder,
encoder_stereo: false,
key,
keepalive_timer: Timer::new(temp_heartbeat),
udp,
sequence: 0,
silence_frames: 0,
soft_clip,
speaking: false,
ssrc: hello.ssrc,
thread_items,
timestamp: 0,
user_id: info.user_id,
})
}
#[allow(unused_variables)]
pub fn cycle(&mut self,
sources: &mut Vec<LockedAudio>,
receiver: &mut Option<Box<AudioReceiver>>,
audio_timer: &mut Timer)
-> Result<()> {
let mut buffer = [0i16; 960 * 2];
let mut mix_buffer = [0f32; 960 * 2];
let mut packet = [0u8; 512];
let mut nonce = secretbox::Nonce([0; 24]);
if let Some(receiver) = receiver.as_mut() {
while let Ok(status) = self.thread_items.rx.try_recv() {
match status {
ReceiverStatus::Udp(packet) => {
let mut handle = &packet[2..];
let seq = handle.read_u16::<BigEndian>()?;
let timestamp = handle.read_u32::<BigEndian>()?;
let ssrc = handle.read_u32::<BigEndian>()?;
nonce.0[..HEADER_LEN]
.clone_from_slice(&packet[..HEADER_LEN]);
if let Ok(mut decrypted) =
secretbox::open(&packet[HEADER_LEN..], &nonce, &self.key) {
let channels = opus_packet::get_nb_channels(&decrypted)?;
let entry =
self.decoder_map.entry((ssrc, channels)).or_insert_with(
|| OpusDecoder::new(SAMPLE_RATE, channels).unwrap(),
);
// Strip RTP Header Extensions (one-byte)
if decrypted[0] == 0xBE && decrypted[1] == 0xDE {
// Read the length bytes as a big-endian u16.
let header_extension_len = BigEndian::read_u16(&decrypted[2..4]);
let mut offset = 4;
for _ in 0..header_extension_len {
let byte = decrypted[offset];
offset += 1;
if byte == 0 {
continue;
}
offset += 1 + (0b1111 & (byte >> 4)) as usize;
}
while decrypted[offset] == 0 {
offset += 1;
}
decrypted = decrypted.split_off(offset);
}
let len = entry.decode(&decrypted, &mut buffer, false)?;
let is_stereo = channels == Channels::Stereo;
let b = if is_stereo { len * 2 } else { len };
receiver
.voice_packet(ssrc, seq, timestamp, is_stereo, &buffer[..b]);
}
},
ReceiverStatus::Websocket(VoiceEvent::Speaking(ev)) => {
receiver.speaking_update(ev.ssrc, ev.user_id.0, ev.speaking);
},
ReceiverStatus::Websocket(other) => {
info!("[Voice] Received other websocket data: {:?}", other);
},
}
}
} else {
loop {
if self.thread_items.rx.try_recv().is_err() {
break;
}
}
}
// Send the voice websocket keepalive if it's time
if self.keepalive_timer.check() {
self.client.lock().send_json(&payload::build_keepalive())?;
}
// Send UDP keepalive if it's time
if self.audio_timer.check() {
let mut bytes = [0; 4];
(&mut bytes[..]).write_u32::<BigEndian>(self.ssrc)?;
self.udp.send_to(&bytes, self.destination)?;
}
let mut opus_frame = Vec::new();
let mut len = 0;
// Walk over all the audio sources, removing those which have finished.
// An index-based while loop is used so entries can be removed mid-iteration.
let mut i = 0;
while i < sources.len() {
let mut finished = false;
let aud_lock = (&sources[i]).clone();
let mut aud = aud_lock.lock();
let vol = aud.volume;
let skip = !aud.playing;
{
let stream = &mut aud.source;
if skip {
i += 1;
continue;
}
// Assume this for now, at least.
// We'll be fusing streams, so we can either keep
// as stereo or downmix to mono.
let is_stereo = true;
let source_stereo = stream.is_stereo();
if is_stereo != self.encoder_stereo {
let channels = if is_stereo {
Channels::Stereo
} else {
Channels::Mono
};
self.encoder = OpusEncoder::new(SAMPLE_RATE, channels, CodingMode::Audio)?;
self.encoder_stereo = is_stereo;
}
let temp_len = match stream.get_type() {
// TODO: decode back to raw, then include.
AudioType::Opus => match stream.read_opus_frame() {
Some(frame) => {
opus_frame = frame;
opus_frame.len()
},
None => 0,
},
AudioType::Pcm => {
let buffer_len = if source_stereo { 960 * 2 } else { 960 };
match stream.read_pcm_frame(&mut buffer[..buffer_len]) {
Some(len) => len,
None => 0,
}
},
};
// May need to force interleave/copy.
combine_audio(buffer, &mut mix_buffer, source_stereo, vol);
len = len.max(temp_len);
i += if temp_len > 0 {
1
} else {
sources.remove(i);
finished = true;
0
};
}
aud.finished = finished;
if !finished {
aud.step_frame();
}
};
self.soft_clip.apply(&mut mix_buffer);
if len == 0 {
if self.silence_frames > 0 {
self.silence_frames -= 1;
// Explicit "Silence" frame.
opus_frame.extend_from_slice(&[0xf8, 0xff, 0xfe]);
} else {
// Per official guidelines, send 5x silence BEFORE we stop speaking.
self.set_speaking(false)?;
audio_timer.await();
return Ok(());
}
} else {
self.silence_frames = 5;
for value in &mut buffer[len..] {
*value = 0;
}
}
self.set_speaking(true)?;
let index = self.prep_packet(&mut packet, mix_buffer, &opus_frame, nonce)?;
audio_timer.await();
self.udp.send_to(&packet[..index], self.destination)?;
self.audio_timer.reset();
Ok(())
}
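// Packet layout note: `prep_packet` below fills the first HEADER_LEN bytes (12 for
// the fields written here) with the RTP header -- the fixed bytes 0x80 0x78, then
// the sequence number (u16), timestamp (u32) and SSRC (u32), all big-endian -- and
// copies those header bytes into the nonce before sealing the Opus payload with
// `secretbox::seal`.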
fn prep_packet(&mut self,
packet: &mut [u8; 512],
buffer: [f32; 1920],
opus_frame: &[u8],
mut nonce: Nonce)
-> Result<usize> {
{
let mut cursor = &mut packet[..HEADER_LEN];
cursor.write_all(&[0x80, 0x78])?;
cursor.write_u16::<BigEndian>(self.sequence)?;
cursor.write_u32::<BigEndian>(self.timestamp)?;
cursor.write_u32::<BigEndian>(self.ssrc)?;
}
nonce.0[..HEADER_LEN]
.clone_from_slice(&packet[..HEADER_LEN]);
let sl_index = packet.len() - 16;
let buffer_len = if self.encoder_stereo { 960 * 2 } else { 960 };
let len = if opus_frame.is_empty() {
self.encoder
.encode_float(&buffer[..buffer_len], &mut packet[HEADER_LEN..sl_index])?
} else {
let len = opus_frame.len();
packet[HEADER_LEN..HEADER_LEN + len]
.clone_from_slice(opus_frame);
len
};
| let crypted = {
let slice = &packet[HEADER_LEN..HEADER_LEN + len];
secretbox::seal(slice, &nonce, &self.key)
}; | random_line_split |
|
connection.rs | _items = start_threads(Arc::clone(&mutexed_client), &udp)?;
info!("[Voice] Connected to: {}", info.endpoint);
let encoder = OpusEncoder::new(SAMPLE_RATE, Channels::Mono, CodingMode::Audio)?;
let soft_clip = SoftClip::new(Channels::Stereo);
// Per discord dev team's current recommendations:
// (https://discordapp.com/developers/docs/topics/voice-connections#heartbeating)
let temp_heartbeat = (hello.heartbeat_interval as f64 * 0.75) as u64;
Ok(Connection {
audio_timer: Timer::new(1000 * 60 * 4),
client: mutexed_client,
decoder_map: HashMap::new(),
destination,
encoder,
encoder_stereo: false,
key,
keepalive_timer: Timer::new(temp_heartbeat),
udp,
sequence: 0,
silence_frames: 0,
soft_clip,
speaking: false,
ssrc: hello.ssrc,
thread_items,
timestamp: 0,
user_id: info.user_id,
})
}
#[allow(unused_variables)]
pub fn cycle(&mut self,
sources: &mut Vec<LockedAudio>,
receiver: &mut Option<Box<AudioReceiver>>,
audio_timer: &mut Timer)
-> Result<()> {
let mut buffer = [0i16; 960 * 2];
let mut mix_buffer = [0f32; 960 * 2];
let mut packet = [0u8; 512];
let mut nonce = secretbox::Nonce([0; 24]);
if let Some(receiver) = receiver.as_mut() {
while let Ok(status) = self.thread_items.rx.try_recv() {
match status {
ReceiverStatus::Udp(packet) => {
let mut handle = &packet[2..];
let seq = handle.read_u16::<BigEndian>()?;
let timestamp = handle.read_u32::<BigEndian>()?;
let ssrc = handle.read_u32::<BigEndian>()?;
nonce.0[..HEADER_LEN]
.clone_from_slice(&packet[..HEADER_LEN]);
if let Ok(mut decrypted) =
secretbox::open(&packet[HEADER_LEN..], &nonce, &self.key) {
let channels = opus_packet::get_nb_channels(&decrypted)?;
let entry =
self.decoder_map.entry((ssrc, channels)).or_insert_with(
|| OpusDecoder::new(SAMPLE_RATE, channels).unwrap(),
);
// Strip RTP Header Extensions (one-byte)
if decrypted[0] == 0xBE && decrypted[1] == 0xDE {
// Read the length bytes as a big-endian u16.
let header_extension_len = BigEndian::read_u16(&decrypted[2..4]);
let mut offset = 4;
for _ in 0..header_extension_len {
let byte = decrypted[offset];
offset += 1;
if byte == 0 {
continue;
}
offset += 1 + (0b1111 & (byte >> 4)) as usize;
}
while decrypted[offset] == 0 {
offset += 1;
}
decrypted = decrypted.split_off(offset);
}
let len = entry.decode(&decrypted, &mut buffer, false)?;
let is_stereo = channels == Channels::Stereo;
let b = if is_stereo { len * 2 } else { len };
receiver
.voice_packet(ssrc, seq, timestamp, is_stereo, &buffer[..b]);
}
},
ReceiverStatus::Websocket(VoiceEvent::Speaking(ev)) => {
receiver.speaking_update(ev.ssrc, ev.user_id.0, ev.speaking);
},
ReceiverStatus::Websocket(other) => {
info!("[Voice] Received other websocket data: {:?}", other);
},
}
}
} else {
loop {
if self.thread_items.rx.try_recv().is_err() {
break;
}
}
}
// Send the voice websocket keepalive if it's time
if self.keepalive_timer.check() {
self.client.lock().send_json(&payload::build_keepalive())?;
}
// Send UDP keepalive if it's time
if self.audio_timer.check() {
let mut bytes = [0; 4];
(&mut bytes[..]).write_u32::<BigEndian>(self.ssrc)?;
self.udp.send_to(&bytes, self.destination)?;
}
let mut opus_frame = Vec::new();
let mut len = 0;
// Walk over all the audio sources, removing those which have finished.
// An index-based while loop is used so entries can be removed mid-iteration.
let mut i = 0;
while i < sources.len() {
let mut finished = false;
let aud_lock = (&sources[i]).clone();
let mut aud = aud_lock.lock();
let vol = aud.volume;
let skip = !aud.playing;
{
let stream = &mut aud.source;
if skip {
i += 1;
continue;
}
// Assume this for now, at least.
// We'll be fusing streams, so we can either keep
// as stereo or downmix to mono.
let is_stereo = true;
let source_stereo = stream.is_stereo();
if is_stereo != self.encoder_stereo {
let channels = if is_stereo {
Channels::Stereo
} else {
Channels::Mono
};
self.encoder = OpusEncoder::new(SAMPLE_RATE, channels, CodingMode::Audio)?;
self.encoder_stereo = is_stereo;
}
let temp_len = match stream.get_type() {
// TODO: decode back to raw, then include.
AudioType::Opus => match stream.read_opus_frame() {
Some(frame) => {
opus_frame = frame;
opus_frame.len()
},
None => 0,
},
AudioType::Pcm => {
let buffer_len = if source_stereo { 960 * 2 } else { 960 };
match stream.read_pcm_frame(&mut buffer[..buffer_len]) {
Some(len) => len,
None => 0,
}
},
};
// May need to force interleave/copy.
combine_audio(buffer, &mut mix_buffer, source_stereo, vol);
len = len.max(temp_len);
i += if temp_len > 0 {
1
} else {
sources.remove(i);
finished = true;
0
};
}
aud.finished = finished;
if !finished {
aud.step_frame();
}
};
self.soft_clip.apply(&mut mix_buffer);
if len == 0 {
if self.silence_frames > 0 {
self.silence_frames -= 1;
// Explicit "Silence" frame.
opus_frame.extend_from_slice(&[0xf8, 0xff, 0xfe]);
} else {
// Per official guidelines, send 5x silence BEFORE we stop speaking.
self.set_speaking(false)?;
audio_timer.await();
return Ok(());
}
} else {
self.silence_frames = 5;
for value in &mut buffer[len..] {
*value = 0;
}
}
self.set_speaking(true)?;
let index = self.prep_packet(&mut packet, mix_buffer, &opus_frame, nonce)?;
audio_timer.await();
self.udp.send_to(&packet[..index], self.destination)?;
self.audio_timer.reset();
Ok(())
}
fn prep_packet(&mut self,
packet: &mut [u8; 512],
buffer: [f32; 1920],
opus_frame: &[u8],
mut nonce: Nonce)
-> Result<usize> {
{
let mut cursor = &mut packet[..HEADER_LEN];
cursor.write_all(&[0x80, 0x78])?;
cursor.write_u16::<BigEndian>(self.sequence)?;
cursor.write_u32::<BigEndian>(self.timestamp)?;
cursor.write_u32::<BigEndian>(self.ssrc)?;
}
nonce.0[..HEADER_LEN]
.clone_from_slice(&packet[..HEADER_LEN]);
let sl_index = packet.len() - 16;
let buffer_len = if self.encoder_stereo { 960 * 2 } else { 960 };
let len = if opus_frame.is_empty() {
self.encoder
.encode_float(&buffer[..buffer_len], &mut packet[HEADER_LEN..sl_index])?
} else {
let len = opus_frame.len();
packet[HEADER_LEN..HEADER_LEN + len]
.clone_from_slice(opus_frame);
len
};
let crypted = {
let slice = &packet[HEADER_LEN..HEADER_LEN + len];
secretbox::seal(slice, &nonce, &self.key)
};
let index = HEADER_LEN + crypted.len();
packet[HEADER_LEN..index].clone_from_slice(&crypted);
self.sequence = self.sequence.wrapping_add(1);
self.timestamp = self.timestamp.wrapping_add(960);
Ok(HEADER_LEN + crypted.len())
}
fn | set_speaking | identifier_name |
|
connection.rs | _hello) => {
break received_hello;
},
VoiceEvent::Heartbeat(_) => continue,
other => {
debug!("[Voice] Expected hello/heartbeat; got: {:?}", other);
return Err(Error::Voice(VoiceError::ExpectedHandshake));
},
}
};
if !has_valid_mode(&hello.modes) {
return Err(Error::Voice(VoiceError::VoiceModeUnavailable));
}
let destination = (&info.endpoint[..], hello.port)
.to_socket_addrs()?
.next()
.ok_or(Error::Voice(VoiceError::HostnameResolve))?;
// Important to note here: the length of the packet can be of either 4
// or 70 bytes. If it is 4 bytes, then we need to send a 70-byte packet
// to determine the IP.
//
// Past the initial 4 bytes, the packet _must_ be completely empty data.
//
// The returned packet will be a null-terminated string of the IP, and
// the port encoded in LE in the last two bytes of the packet.
let udp = UdpSocket::bind("0.0.0.0:0")?;
{
let mut bytes = [0; 70];
(&mut bytes[..]).write_u32::<BigEndian>(hello.ssrc)?;
udp.send_to(&bytes, destination)?;
let mut bytes = [0; 256];
let (len, _addr) = udp.recv_from(&mut bytes)?;
// Find the position in the bytes that contains the first byte of 0,
// indicating the "end of the address".
let index = bytes
.iter()
.skip(4)
.position(|&x| x == 0)
.ok_or(Error::Voice(VoiceError::FindingByte))?;
let pos = 4 + index;
let addr = String::from_utf8_lossy(&bytes[4..pos]);
let port_pos = len - 2;
let port = (&bytes[port_pos..]).read_u16::<LittleEndian>()?;
client
.send_json(&payload::build_select_protocol(addr, port))?;
}
let key = encryption_key(&mut client)?;
let _ = client
.stream_ref()
.as_tcp()
.set_read_timeout(Some(Duration::from_millis(25)));
let mutexed_client = Arc::new(Mutex::new(client));
let thread_items = start_threads(Arc::clone(&mutexed_client), &udp)?;
info!("[Voice] Connected to: {}", info.endpoint);
let encoder = OpusEncoder::new(SAMPLE_RATE, Channels::Mono, CodingMode::Audio)?;
let soft_clip = SoftClip::new(Channels::Stereo);
// Per discord dev team's current recommendations:
// (https://discordapp.com/developers/docs/topics/voice-connections#heartbeating)
let temp_heartbeat = (hello.heartbeat_interval as f64 * 0.75) as u64;
Ok(Connection {
audio_timer: Timer::new(1000 * 60 * 4),
client: mutexed_client,
decoder_map: HashMap::new(),
destination,
encoder,
encoder_stereo: false,
key,
keepalive_timer: Timer::new(temp_heartbeat),
udp,
sequence: 0,
silence_frames: 0,
soft_clip,
speaking: false,
ssrc: hello.ssrc,
thread_items,
timestamp: 0,
user_id: info.user_id,
})
}
#[allow(unused_variables)]
pub fn cycle(&mut self,
sources: &mut Vec<LockedAudio>,
receiver: &mut Option<Box<AudioReceiver>>,
audio_timer: &mut Timer)
-> Result<()> {
let mut buffer = [0i16; 960 * 2];
let mut mix_buffer = [0f32; 960 * 2];
let mut packet = [0u8; 512];
let mut nonce = secretbox::Nonce([0; 24]);
if let Some(receiver) = receiver.as_mut() {
while let Ok(status) = self.thread_items.rx.try_recv() {
match status {
ReceiverStatus::Udp(packet) => {
let mut handle = &packet[2..];
let seq = handle.read_u16::<BigEndian>()?;
let timestamp = handle.read_u32::<BigEndian>()?;
let ssrc = handle.read_u32::<BigEndian>()?;
nonce.0[..HEADER_LEN]
.clone_from_slice(&packet[..HEADER_LEN]);
if let Ok(mut decrypted) =
secretbox::open(&packet[HEADER_LEN..], &nonce, &self.key) {
let channels = opus_packet::get_nb_channels(&decrypted)?;
let entry =
self.decoder_map.entry((ssrc, channels)).or_insert_with(
|| OpusDecoder::new(SAMPLE_RATE, channels).unwrap(),
);
// Strip RTP Header Extensions (one-byte)
if decrypted[0] == 0xBE && decrypted[1] == 0xDE {
// Read the length bytes as a big-endian u16.
let header_extension_len = BigEndian::read_u16(&decrypted[2..4]);
let mut offset = 4;
for _ in 0..header_extension_len {
let byte = decrypted[offset];
offset += 1;
if byte == 0 {
continue;
}
offset += 1 + (0b1111 & (byte >> 4)) as usize;
}
while decrypted[offset] == 0 {
offset += 1;
}
decrypted = decrypted.split_off(offset);
}
let len = entry.decode(&decrypted, &mut buffer, false)?;
let is_stereo = channels == Channels::Stereo;
let b = if is_stereo { len * 2 } else { len };
receiver
.voice_packet(ssrc, seq, timestamp, is_stereo, &buffer[..b]);
}
},
ReceiverStatus::Websocket(VoiceEvent::Speaking(ev)) => {
receiver.speaking_update(ev.ssrc, ev.user_id.0, ev.speaking);
},
ReceiverStatus::Websocket(other) => {
info!("[Voice] Received other websocket data: {:?}", other);
},
}
}
} else {
loop {
if self.thread_items.rx.try_recv().is_err() {
break;
}
}
}
// Send the voice websocket keepalive if it's time
if self.keepalive_timer.check() {
self.client.lock().send_json(&payload::build_keepalive())?;
}
// Send UDP keepalive if it's time
if self.audio_timer.check() {
let mut bytes = [0; 4];
(&mut bytes[..]).write_u32::<BigEndian>(self.ssrc)?;
self.udp.send_to(&bytes, self.destination)?;
}
let mut opus_frame = Vec::new();
let mut len = 0;
// Walk over all the audio sources, removing those which have finished.
// An index-based while loop is used so entries can be removed mid-iteration.
let mut i = 0;
while i < sources.len() {
let mut finished = false;
let aud_lock = (&sources[i]).clone();
let mut aud = aud_lock.lock();
let vol = aud.volume;
let skip = !aud.playing;
{
let stream = &mut aud.source;
if skip {
i += 1;
continue;
}
// Assume this for now, at least.
// We'll be fusing streams, so we can either keep
// as stereo or downmix to mono.
let is_stereo = true;
let source_stereo = stream.is_stereo();
if is_stereo != self.encoder_stereo {
let channels = if is_stereo {
Channels::Stereo
} else {
Channels::Mono
};
self.encoder = OpusEncoder::new(SAMPLE_RATE, channels, CodingMode::Audio)?;
self.encoder_stereo = is_stereo;
}
let temp_len = match stream.get_type() {
// TODO: decode back to raw, then include.
AudioType::Opus => match stream.read_opus_frame() {
Some(frame) => {
opus_frame = frame;
opus_frame.len()
},
None => 0,
},
AudioType::Pcm => | ,
};
// May need to force interleave/copy.
combine_audio(buffer, &mut mix_buffer, source_stereo, vol);
len = len.max(temp_len);
i += if temp_len > 0 {
1
} else {
sources.remove(i);
finished = true;
0
};
}
aud.finished = finished;
if !finished {
aud.step_frame();
}
};
self.soft_clip.apply(&mut mix_buffer);
if len == 0 {
if self.silence_frames > 0 {
self.silence_frames -= 1;
// Explicit | {
let buffer_len = if source_stereo { 960 * 2 } else { 960 };
match stream.read_pcm_frame(&mut buffer[..buffer_len]) {
Some(len) => len,
None => 0,
}
} | conditional_block |
connection.rs | _id,
})
}
#[allow(unused_variables)]
pub fn cycle(&mut self,
sources: &mut Vec<LockedAudio>,
receiver: &mut Option<Box<AudioReceiver>>,
audio_timer: &mut Timer)
-> Result<()> {
let mut buffer = [0i16; 960 * 2];
let mut mix_buffer = [0f32; 960 * 2];
let mut packet = [0u8; 512];
let mut nonce = secretbox::Nonce([0; 24]);
if let Some(receiver) = receiver.as_mut() {
while let Ok(status) = self.thread_items.rx.try_recv() {
match status {
ReceiverStatus::Udp(packet) => {
let mut handle = &packet[2..];
let seq = handle.read_u16::<BigEndian>()?;
let timestamp = handle.read_u32::<BigEndian>()?;
let ssrc = handle.read_u32::<BigEndian>()?;
nonce.0[..HEADER_LEN]
.clone_from_slice(&packet[..HEADER_LEN]);
if let Ok(mut decrypted) =
secretbox::open(&packet[HEADER_LEN..], &nonce, &self.key) {
let channels = opus_packet::get_nb_channels(&decrypted)?;
let entry =
self.decoder_map.entry((ssrc, channels)).or_insert_with(
|| OpusDecoder::new(SAMPLE_RATE, channels).unwrap(),
);
// Strip RTP Header Extensions (one-byte)
if decrypted[0] == 0xBE && decrypted[1] == 0xDE {
// Read the length bytes as a big-endian u16.
let header_extension_len = BigEndian::read_u16(&decrypted[2..4]);
let mut offset = 4;
for _ in 0..header_extension_len {
let byte = decrypted[offset];
offset += 1;
if byte == 0 {
continue;
}
offset += 1 + (0b1111 & (byte >> 4)) as usize;
}
while decrypted[offset] == 0 {
offset += 1;
}
decrypted = decrypted.split_off(offset);
}
let len = entry.decode(&decrypted, &mut buffer, false)?;
let is_stereo = channels == Channels::Stereo;
let b = if is_stereo { len * 2 } else { len };
receiver
.voice_packet(ssrc, seq, timestamp, is_stereo, &buffer[..b]);
}
},
ReceiverStatus::Websocket(VoiceEvent::Speaking(ev)) => {
receiver.speaking_update(ev.ssrc, ev.user_id.0, ev.speaking);
},
ReceiverStatus::Websocket(other) => {
info!("[Voice] Received other websocket data: {:?}", other);
},
}
}
} else {
loop {
if self.thread_items.rx.try_recv().is_err() {
break;
}
}
}
// Send the voice websocket keepalive if it's time
if self.keepalive_timer.check() {
self.client.lock().send_json(&payload::build_keepalive())?;
}
// Send UDP keepalive if it's time
if self.audio_timer.check() {
let mut bytes = [0; 4];
(&mut bytes[..]).write_u32::<BigEndian>(self.ssrc)?;
self.udp.send_to(&bytes, self.destination)?;
}
let mut opus_frame = Vec::new();
let mut len = 0;
// Walk over all the audio sources, removing those which have finished.
// An index-based while loop is used so entries can be removed mid-iteration.
let mut i = 0;
while i < sources.len() {
let mut finished = false;
let aud_lock = (&sources[i]).clone();
let mut aud = aud_lock.lock();
let vol = aud.volume;
let skip = !aud.playing;
{
let stream = &mut aud.source;
if skip {
i += 1;
continue;
}
// Assume this for now, at least.
// We'll be fusing streams, so we can either keep
// as stereo or downmix to mono.
let is_stereo = true;
let source_stereo = stream.is_stereo();
if is_stereo != self.encoder_stereo {
let channels = if is_stereo {
Channels::Stereo
} else {
Channels::Mono
};
self.encoder = OpusEncoder::new(SAMPLE_RATE, channels, CodingMode::Audio)?;
self.encoder_stereo = is_stereo;
}
let temp_len = match stream.get_type() {
// TODO: decode back to raw, then include.
AudioType::Opus => match stream.read_opus_frame() {
Some(frame) => {
opus_frame = frame;
opus_frame.len()
},
None => 0,
},
AudioType::Pcm => {
let buffer_len = if source_stereo { 960 * 2 } else { 960 };
match stream.read_pcm_frame(&mut buffer[..buffer_len]) {
Some(len) => len,
None => 0,
}
},
};
// May need to force interleave/copy.
combine_audio(buffer, &mut mix_buffer, source_stereo, vol);
len = len.max(temp_len);
i += if temp_len > 0 {
1
} else {
sources.remove(i);
finished = true;
0
};
}
aud.finished = finished;
if !finished {
aud.step_frame();
}
};
self.soft_clip.apply(&mut mix_buffer);
if len == 0 {
if self.silence_frames > 0 {
self.silence_frames -= 1;
// Explicit "Silence" frame.
opus_frame.extend_from_slice(&[0xf8, 0xff, 0xfe]);
} else {
// Per official guidelines, send 5x silence BEFORE we stop speaking.
self.set_speaking(false)?;
audio_timer.await();
return Ok(());
}
} else {
self.silence_frames = 5;
for value in &mut buffer[len..] {
*value = 0;
}
}
self.set_speaking(true)?;
let index = self.prep_packet(&mut packet, mix_buffer, &opus_frame, nonce)?;
audio_timer.await();
self.udp.send_to(&packet[..index], self.destination)?;
self.audio_timer.reset();
Ok(())
}
fn prep_packet(&mut self,
packet: &mut [u8; 512],
buffer: [f32; 1920],
opus_frame: &[u8],
mut nonce: Nonce)
-> Result<usize> {
{
let mut cursor = &mut packet[..HEADER_LEN];
cursor.write_all(&[0x80, 0x78])?;
cursor.write_u16::<BigEndian>(self.sequence)?;
cursor.write_u32::<BigEndian>(self.timestamp)?;
cursor.write_u32::<BigEndian>(self.ssrc)?;
}
nonce.0[..HEADER_LEN]
.clone_from_slice(&packet[..HEADER_LEN]);
let sl_index = packet.len() - 16;
let buffer_len = if self.encoder_stereo { 960 * 2 } else { 960 };
let len = if opus_frame.is_empty() {
self.encoder
.encode_float(&buffer[..buffer_len], &mut packet[HEADER_LEN..sl_index])?
} else {
let len = opus_frame.len();
packet[HEADER_LEN..HEADER_LEN + len]
.clone_from_slice(opus_frame);
len
};
let crypted = {
let slice = &packet[HEADER_LEN..HEADER_LEN + len];
secretbox::seal(slice, &nonce, &self.key)
};
let index = HEADER_LEN + crypted.len();
packet[HEADER_LEN..index].clone_from_slice(&crypted);
self.sequence = self.sequence.wrapping_add(1);
self.timestamp = self.timestamp.wrapping_add(960);
Ok(HEADER_LEN + crypted.len())
}
fn set_speaking(&mut self, speaking: bool) -> Result<()> {
if self.speaking == speaking {
return Ok(());
}
self.speaking = speaking;
self.client.lock().send_json(&payload::build_speaking(speaking))
}
}
impl Drop for Connection {
fn drop(&mut self) {
let _ = self.thread_items.udp_close_sender.send(0);
let _ = self.thread_items.ws_close_sender.send(0);
info!("[Voice] Disconnected");
}
}
#[inline]
fn combine_audio(
raw_buffer: [i16; 1920],
float_buffer: &mut [f32; 1920],
true_stereo: bool,
volume: f32,
) | {
for i in 0..1920 {
let sample_index = if true_stereo { i } else { i/2 };
let sample = (raw_buffer[sample_index] as f32) / 32768.0;
float_buffer[i] = float_buffer[i] + sample * volume;
}
} | identifier_body |
|
displayer.py | []
link_attributes = []
title_prefix = ''
title_postfix = ''
def __init__(self, the_json):
self.json = the_json
self.data = json.loads(self.json)
self.id = self.data['id']
self.prepare()
def __cmp__(self, other):
return cmp(self.id, other.id)
@property
def title(self):
return ' '.join([self.title_prefix,
self.id,
self.title_postfix])
def prepare(self):
pass
@property
def link(self):
return "../%s/%s.html" % (self.subdir, self.id)
@property
def links(self):
for link_attribute in self.link_attributes:
obj = getattr(self, link_attribute, None)
if obj is None:
continue
yield obj.link, obj.title
@property
def generated_on(self):
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
def write(self):
outfile = os.path.join(utils.html_dir(),
self.subdir,
'%s.html' % self.id)
template = jinja_env.get_template(self.template_name)
open(outfile, 'w').write(template.render(view=self))
logger.info("Wrote %s", outfile)
@property
def fields(self):
result = []
for simple_field in self.simple_fields:
value = self.data.get(simple_field)
if value is None:
continue
if isinstance(value, list):
value = ', '.join(value)
name = simple_field.replace('_', ' ').capitalize()
result.append([name, value])
return result
| simple_fields = ['hostname',
'buildout_directory',
'configfile',
'server_names',
'proxy_port',
]
buildout = None
server = None
link_attributes = ['buildout',
'server',
]
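# Hostnames are compared by their reversed dot-separated parts (TLD first) so
# that sites under the same domain sort next to each other.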
def _splitted_for_sort(self):
parts = self.id.split('.')
parts.reverse()
return parts
def __cmp__(self, other):
return cmp(self._splitted_for_sort(), other._splitted_for_sort())
@property
def raw_contents(self):
return '\n'.join(self.data['contents'])
class Apache(Nginx):
title_prefix = 'Apache configuration of'
class CodeLink(object):
def __init__(self, vcs, url):
self.vcs = vcs
self.url = url
@property
def title(self):
return "Browse the %s code" % self.vcs
@property
def link(self):
if self.vcs == 'svn':
return self.url.replace('svn/Products', 'trac/browser/Products')
return self.url
class AuthorSuggestionLink(object):
title = "Who worked on this?"
def __init__(self, vcs, url):
assert(vcs == 'git')
self.url = url
@property
def link(self):
return self.url + '/graphs/contributors'
class Buildout(Common):
subdir = 'buildouts'
template_name = 'buildout.html'
title_prefix = 'Buildout directory'
simple_fields = ['hostname',
'directory',
'extends', # TODO: fix this: missing KGS here.
'version_control_system',
'version_control_url',
]
site = None
code_url = None
server = None
link_attributes = ['site', 'code_url', 'server', 'author_suggestion']
# TODO: KGS handling, just like eggs.
def prepare(self):
if ('vcs' in self.data) and self.data['vcs']:
vcs = self.data['vcs']['vcs']
vcs_url = self.data['vcs']['url']
self.data['version_control_system'] = vcs
self.data['version_control_url'] = vcs_url
# https://office.lizard.net/trac/browser/Products
# https://office.lizard.net/svn/Products/sites/demo/tags/3.0.11/
self.code_url = CodeLink(vcs, vcs_url)
self.author_suggestion = None
if vcs == 'git':
self.author_suggestion = AuthorSuggestionLink(vcs, vcs_url)
self.eggs = {}
for egg_name, version in self.data['eggs'].items():
if egg_name not in data['egg']:
data['egg'][egg_name] = Egg(egg_name)
egg = data['egg'][egg_name]
egg.add_usage(self, version)
self.eggs[egg] = version
@property
def eggs_for_display(self):
for key in sorted(self.eggs.keys()):
yield key, self.eggs[key]
@property
def title_postfix(self):
"""Warn if there's no site pointing at us."""
if not self.site:
logger.warn("No site: %r", self.site)
return "(not linked into a site!)"
return ''
class Server(Common):
subdir = 'servers'
template_name = 'server.html'
title_prefix = 'Linux server'
simple_fields = ['hostname',
'users',
'backup_jobs',
]
def prepare(self):
self.sites = []
self.buildouts = []
self.ports = {}
@property
def sites_for_display(self):
return sorted(self.sites)
@property
def buildouts_for_display(self):
return sorted(self.buildouts)
@property
def ports_for_display(self):
for key in sorted(self.ports.keys()):
yield key, self.ports[key]
class Egg(Common):
# Well, it is not actually that common...
subdir = 'eggs'
template_name = 'egg.html'
title_prefix = 'Egg'
simple_fields = ['directory',
'extends', # TODO: fix this: missing KGS here.
'version_control_system',
'version_control_url',
]
def __init__(self, egg_name):
self.id = egg_name
self.versions = collections.defaultdict(list)
def add_usage(self, buildout, version):
self.versions[version].append(buildout)
@property
def versions_for_display(self):
for key in sorted(self.versions.keys()):
yield key, self.versions[key]
def collect_data():
"""Collect all the json data and load it in memory."""
mapping = {'nginx': Nginx,
'apache': Apache,
'server': Server,
'buildout': Buildout}
with utils.cd(utils.displayer_dir()):
for dirpath, dirnames, filenames in os.walk('.'):
# server_id = dirpath
for json_file in [f for f in filenames if f.endswith('.json')]:
kind = json_file.split('___')[0]
filepath = os.path.join(dirpath, json_file)
logger.debug("Loading info from %s",
os.path.abspath(filepath))
json_content = open(filepath).read()
klass = mapping[kind]
obj = klass(json_content)
data[kind][obj.id.lower()] = obj
# Link buildouts and nginx sites.
for nginx in data['nginx'].values():
buildout_id = nginx.data.get('buildout_id')
if buildout_id is not None:
buildout = data['buildout'].get(buildout_id)
if buildout is not None:
nginx.buildout = buildout
buildout.site = nginx
# Link buildouts and apache sites.
for apache in data['apache'].values():
buildout_id = apache.data.get('buildout_id')
if buildout_id is not None:
buildout = data['buildout'].get(buildout_id)
if buildout is not None:
apache.buildout = buildout
buildout.site = apache
# Link buildouts+sites with servers.
for kind in ['nginx', 'apache', 'buildout']:
for obj in data[kind].values():
hostname = obj.data.get('hostname')
if hostname is not None:
hostname = hostname.lower()
server = data['server'].get(hostname)
if server is None:
logger.error("Server with hostname %s not found.",
hostname)
else:
obj.server = server
if kind == 'nginx' or kind == 'apache':
server.sites.append(obj)
elif kind == 'buildout':
server.buildouts.append(obj)
# Link nginx gunicorn ports with servers.
for kind in ['nginx']:
for obj in data[kind].values():
hostname = obj.data.get('hostname')
port = obj.data.get('proxy_port')
try:
port = int(port)
except (TypeError, ValueError):
# proxy_port may be missing or non-numeric; keep the raw value in that case.
pass
if hostname is not None and port is not None:
hostname = hostname.lower()
server = data['server'].get(hostname)
if server is None:
logger.error("Server with hostname %s not found.",
hostname)
continue
server.ports[port] = obj
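# Renders one HTML page per object, an index page per kind (sites, buildouts,
# servers, eggs) and finally a root overview page linking the four sections.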
def generate_html():
index_subdirs = {'site': 'sites',
'buildout': 'buildouts',
'server': 'servers',
'egg': 'eggs'}
now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
| class Nginx(Common):
subdir = 'sites'
template_name = 'nginx.html'
title_prefix = 'NGINX configuration of' | random_line_split |
displayer.py | link_attributes = []
title_prefix = ''
title_postfix = ''
def __init__(self, the_json):
self.json = the_json
self.data = json.loads(self.json)
self.id = self.data['id']
self.prepare()
def __cmp__(self, other):
return cmp(self.id, other.id)
@property
def title(self):
return ' '.join([self.title_prefix,
self.id,
self.title_postfix])
def prepare(self):
pass
@property
def link(self):
return "../%s/%s.html" % (self.subdir, self.id)
@property
def links(self):
for link_attribute in self.link_attributes:
obj = getattr(self, link_attribute, None)
if obj is None:
continue
yield obj.link, obj.title
@property
def generated_on(self):
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
def write(self):
outfile = os.path.join(utils.html_dir(),
self.subdir,
'%s.html' % self.id)
template = jinja_env.get_template(self.template_name)
open(outfile, 'w').write(template.render(view=self))
logger.info("Wrote %s", outfile)
@property
def fields(self):
result = []
for simple_field in self.simple_fields:
value = self.data.get(simple_field)
if value is None:
continue
if isinstance(value, list):
value = ', '.join(value)
name = simple_field.replace('_', ' ').capitalize()
result.append([name, value])
return result
class | (Common):
subdir = 'sites'
template_name = 'nginx.html'
title_prefix = 'NGINX configuration of'
simple_fields = ['hostname',
'buildout_directory',
'configfile',
'server_names',
'proxy_port',
]
buildout = None
server = None
link_attributes = ['buildout',
'server',
]
def _splitted_for_sort(self):
parts = self.id.split('.')
parts.reverse()
return parts
def __cmp__(self, other):
return cmp(self._splitted_for_sort(), other._splitted_for_sort())
@property
def raw_contents(self):
return '\n'.join(self.data['contents'])
class Apache(Nginx):
title_prefix = 'Apache configuration of'
class CodeLink(object):
def __init__(self, vcs, url):
self.vcs = vcs
self.url = url
@property
def title(self):
return "Browse the %s code" % self.vcs
@property
def link(self):
if self.vcs == 'svn':
return self.url.replace('svn/Products', 'trac/browser/Products')
return self.url
class AuthorSuggestionLink(object):
title = "Who worked on this?"
def __init__(self, vcs, url):
assert(vcs == 'git')
self.url = url
@property
def link(self):
return self.url + '/graphs/contributors'
class Buildout(Common):
subdir = 'buildouts'
template_name = 'buildout.html'
title_prefix = 'Buildout directory'
simple_fields = ['hostname',
'directory',
'extends', # TODO: fix this: missing KGS here.
'version_control_system',
'version_control_url',
]
site = None
code_url = None
server = None
link_attributes = ['site', 'code_url', 'server', 'author_suggestion']
# TODO: KGS handling, just like eggs.
def prepare(self):
if ('vcs' in self.data) and self.data['vcs']:
vcs = self.data['vcs']['vcs']
vcs_url = self.data['vcs']['url']
self.data['version_control_system'] = vcs
self.data['version_control_url'] = vcs_url
# https://office.lizard.net/trac/browser/Products
# https://office.lizard.net/svn/Products/sites/demo/tags/3.0.11/
self.code_url = CodeLink(vcs, vcs_url)
self.author_suggestion = None
if vcs == 'git':
self.author_suggestion = AuthorSuggestionLink(vcs, vcs_url)
self.eggs = {}
for egg_name, version in self.data['eggs'].items():
if egg_name not in data['egg']:
data['egg'][egg_name] = Egg(egg_name)
egg = data['egg'][egg_name]
egg.add_usage(self, version)
self.eggs[egg] = version
@property
def eggs_for_display(self):
for key in sorted(self.eggs.keys()):
yield key, self.eggs[key]
@property
def title_postfix(self):
"""Warn if there's no site pointing at us."""
if not self.site:
logger.warn("No site: %r", self.site)
return "(not linked into a site!)"
return ''
class Server(Common):
subdir = 'servers'
template_name = 'server.html'
title_prefix = 'Linux server'
simple_fields = ['hostname',
'users',
'backup_jobs',
]
def prepare(self):
self.sites = []
self.buildouts = []
self.ports = {}
@property
def sites_for_display(self):
return sorted(self.sites)
@property
def buildouts_for_display(self):
return sorted(self.buildouts)
@property
def ports_for_display(self):
for key in sorted(self.ports.keys()):
yield key, self.ports[key]
class Egg(Common):
# Well, it is not actually that common...
subdir = 'eggs'
template_name = 'egg.html'
title_prefix = 'Egg'
simple_fields = ['directory',
'extends', # TODO: fix this: missing KGS here.
'version_control_system',
'version_control_url',
]
def __init__(self, egg_name):
self.id = egg_name
self.versions = collections.defaultdict(list)
def add_usage(self, buildout, version):
self.versions[version].append(buildout)
@property
def versions_for_display(self):
for key in sorted(self.versions.keys()):
yield key, self.versions[key]
def collect_data():
"""Collect all the json data and load it in memory."""
mapping = {'nginx': Nginx,
'apache': Apache,
'server': Server,
'buildout': Buildout}
with utils.cd(utils.displayer_dir()):
for dirpath, dirnames, filenames in os.walk('.'):
# server_id = dirpath
for json_file in [f for f in filenames if f.endswith('.json')]:
kind = json_file.split('___')[0]
filepath = os.path.join(dirpath, json_file)
logger.debug("Loading info from %s",
os.path.abspath(filepath))
json_content = open(filepath).read()
klass = mapping[kind]
obj = klass(json_content)
data[kind][obj.id.lower()] = obj
# Link buildouts and nginx sites.
for nginx in data['nginx'].values():
buildout_id = nginx.data.get('buildout_id')
if buildout_id is not None:
buildout = data['buildout'].get(buildout_id)
if buildout is not None:
nginx.buildout = buildout
buildout.site = nginx
# Link buildouts and apache sites.
for apache in data['apache'].values():
buildout_id = apache.data.get('buildout_id')
if buildout_id is not None:
buildout = data['buildout'].get(buildout_id)
if buildout is not None:
apache.buildout = buildout
buildout.site = apache
# Link buildouts+sites with servers.
for kind in ['nginx', 'apache', 'buildout']:
for obj in data[kind].values():
hostname = obj.data.get('hostname')
if hostname is not None:
hostname = hostname.lower()
server = data['server'].get(hostname)
if server is None:
logger.error("Server with hostname %s not found.",
hostname)
else:
obj.server = server
if kind == 'nginx' or kind == 'apache':
server.sites.append(obj)
elif kind == 'buildout':
server.buildouts.append(obj)
# Link nginx gunicorn ports with servers.
for kind in ['nginx']:
for obj in data[kind].values():
hostname = obj.data.get('hostname')
port = obj.data.get('proxy_port')
try:
port = int(port)
except (TypeError, ValueError):
# proxy_port may be missing or non-numeric; keep the raw value in that case.
pass
if hostname is not None and port is not None:
hostname = hostname.lower()
server = data['server'].get(hostname)
if server is None:
logger.error("Server with hostname %s not found.",
hostname)
continue
server.ports[port] = obj
def generate_html():
index_subdirs = {'site': 'sites',
'buildout': 'buildouts',
'server': 'servers',
'egg': 'eggs'}
now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
| Nginx | identifier_name |
displayer.py | link_attributes = []
title_prefix = ''
title_postfix = ''
def __init__(self, the_json):
self.json = the_json
self.data = json.loads(self.json)
self.id = self.data['id']
self.prepare()
def __cmp__(self, other):
return cmp(self.id, other.id)
@property
def title(self):
return ' '.join([self.title_prefix,
self.id,
self.title_postfix])
def prepare(self):
pass
@property
def link(self):
return "../%s/%s.html" % (self.subdir, self.id)
@property
def links(self):
for link_attribute in self.link_attributes:
obj = getattr(self, link_attribute, None)
if obj is None:
continue
yield obj.link, obj.title
@property
def generated_on(self):
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
def write(self):
outfile = os.path.join(utils.html_dir(),
self.subdir,
'%s.html' % self.id)
template = jinja_env.get_template(self.template_name)
open(outfile, 'w').write(template.render(view=self))
logger.info("Wrote %s", outfile)
@property
def fields(self):
result = []
for simple_field in self.simple_fields:
value = self.data.get(simple_field)
if value is None:
continue
if isinstance(value, list):
value = ', '.join(value)
name = simple_field.replace('_', ' ').capitalize()
result.append([name, value])
return result
class Nginx(Common):
subdir = 'sites'
template_name = 'nginx.html'
title_prefix = 'NGINX configuration of'
simple_fields = ['hostname',
'buildout_directory',
'configfile',
'server_names',
'proxy_port',
]
buildout = None
server = None
link_attributes = ['buildout',
'server',
]
def _splitted_for_sort(self):
parts = self.id.split('.')
parts.reverse()
return parts
def __cmp__(self, other):
return cmp(self._splitted_for_sort(), other._splitted_for_sort())
@property
def raw_contents(self):
return '\n'.join(self.data['contents'])
class Apache(Nginx):
title_prefix = 'Apache configuration of'
class CodeLink(object):
def __init__(self, vcs, url):
self.vcs = vcs
self.url = url
@property
def title(self):
return "Browse the %s code" % self.vcs
@property
def link(self):
if self.vcs == 'svn':
return self.url.replace('svn/Products', 'trac/browser/Products')
return self.url
class AuthorSuggestionLink(object):
|
class Buildout(Common):
subdir = 'buildouts'
template_name = 'buildout.html'
title_prefix = 'Buildout directory'
simple_fields = ['hostname',
'directory',
'extends', # TODO: fix this: missing KGS here.
'version_control_system',
'version_control_url',
]
site = None
code_url = None
server = None
link_attributes = ['site', 'code_url', 'server', 'author_suggestion']
# TODO: KGS handling, just like eggs.
def prepare(self):
if ('vcs' in self.data) and self.data['vcs']:
vcs = self.data['vcs']['vcs']
vcs_url = self.data['vcs']['url']
self.data['version_control_system'] = vcs
self.data['version_control_url'] = vcs_url
# https://office.lizard.net/trac/browser/Products
# https://office.lizard.net/svn/Products/sites/demo/tags/3.0.11/
self.code_url = CodeLink(vcs, vcs_url)
self.author_suggestion = None
if vcs == 'git':
self.author_suggestion = AuthorSuggestionLink(vcs, vcs_url)
self.eggs = {}
for egg_name, version in self.data['eggs'].items():
if egg_name not in data['egg']:
data['egg'][egg_name] = Egg(egg_name)
egg = data['egg'][egg_name]
egg.add_usage(self, version)
self.eggs[egg] = version
@property
def eggs_for_display(self):
for key in sorted(self.eggs.keys()):
yield key, self.eggs[key]
@property
def title_postfix(self):
"""Warn if there's no site pointing at us."""
if not self.site:
logger.warn("No site: %r", self.site)
return "(not linked into a site!)"
return ''
class Server(Common):
subdir = 'servers'
template_name = 'server.html'
title_prefix = 'Linux server'
simple_fields = ['hostname',
'users',
'backup_jobs',
]
def prepare(self):
self.sites = []
self.buildouts = []
self.ports = {}
@property
def sites_for_display(self):
return sorted(self.sites)
@property
def buildouts_for_display(self):
return sorted(self.buildouts)
@property
def ports_for_display(self):
for key in sorted(self.ports.keys()):
yield key, self.ports[key]
class Egg(Common):
# Well, it is not actually that common...
subdir = 'eggs'
template_name = 'egg.html'
title_prefix = 'Egg'
simple_fields = ['directory',
'extends', # TODO: fix this: missing KGS here.
'version_control_system',
'version_control_url',
]
def __init__(self, egg_name):
self.id = egg_name
self.versions = collections.defaultdict(list)
def add_usage(self, buildout, version):
self.versions[version].append(buildout)
@property
def versions_for_display(self):
for key in sorted(self.versions.keys()):
yield key, self.versions[key]
def collect_data():
"""Collect all the json data and load it in memory."""
mapping = {'nginx': Nginx,
'apache': Apache,
'server': Server,
'buildout': Buildout}
with utils.cd(utils.displayer_dir()):
for dirpath, dirnames, filenames in os.walk('.'):
# server_id = dirpath
for json_file in [f for f in filenames if f.endswith('.json')]:
kind = json_file.split('___')[0]
filepath = os.path.join(dirpath, json_file)
logger.debug("Loading info from %s",
os.path.abspath(filepath))
json_content = open(filepath).read()
klass = mapping[kind]
obj = klass(json_content)
data[kind][obj.id.lower()] = obj
# Link buildouts and nginx sites.
for nginx in data['nginx'].values():
buildout_id = nginx.data.get('buildout_id')
if buildout_id is not None:
buildout = data['buildout'].get(buildout_id)
if buildout is not None:
nginx.buildout = buildout
buildout.site = nginx
# Link buildouts and apache sites.
for apache in data['apache'].values():
buildout_id = apache.data.get('buildout_id')
if buildout_id is not None:
buildout = data['buildout'].get(buildout_id)
if buildout is not None:
apache.buildout = buildout
buildout.site = apache
# Link buildouts+sites with servers.
for kind in ['nginx', 'apache', 'buildout']:
for obj in data[kind].values():
hostname = obj.data.get('hostname')
if hostname is not None:
hostname = hostname.lower()
server = data['server'].get(hostname)
if server is None:
logger.error("Server with hostname %s not found.",
hostname)
else:
obj.server = server
if kind == 'nginx' or kind == 'apache':
server.sites.append(obj)
elif kind == 'buildout':
server.buildouts.append(obj)
# Link nginx gunicorn ports with servers.
for kind in ['nginx']:
for obj in data[kind].values():
hostname = obj.data.get('hostname')
port = obj.data.get('proxy_port')
try:
port = int(port)
except (TypeError, ValueError):
# proxy_port may be missing or non-numeric; keep the raw value in that case.
pass
if hostname is not None and port is not None:
hostname = hostname.lower()
server = data['server'].get(hostname)
if server is None:
logger.error("Server with hostname %s not found.",
hostname)
continue
server.ports[port] = obj
def generate_html():
index_subdirs = {'site': 'sites',
'buildout': 'buildouts',
'server': 'servers',
'egg': 'eggs'}
now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
| title = "Who worked on this?"
def __init__(self, vcs, url):
assert(vcs == 'git')
self.url = url
@property
def link(self):
return self.url + '/graphs/contributors' | identifier_body |
displayer.py | (view=self))
logger.info("Wrote %s", outfile)
@property
def fields(self):
result = []
for simple_field in self.simple_fields:
value = self.data.get(simple_field)
if value is None:
continue
if isinstance(value, list):
value = ', '.join(value)
name = simple_field.replace('_', ' ').capitalize()
result.append([name, value])
return result
class Nginx(Common):
subdir = 'sites'
template_name = 'nginx.html'
title_prefix = 'NGINX configuration of'
simple_fields = ['hostname',
'buildout_directory',
'configfile',
'server_names',
'proxy_port',
]
buildout = None
server = None
link_attributes = ['buildout',
'server',
]
def _splitted_for_sort(self):
parts = self.id.split('.')
parts.reverse()
return parts
def __cmp__(self, other):
return cmp(self._splitted_for_sort(), other._splitted_for_sort())
@property
def raw_contents(self):
return '\n'.join(self.data['contents'])
class Apache(Nginx):
title_prefix = 'Apache configuration of'
class CodeLink(object):
def __init__(self, vcs, url):
self.vcs = vcs
self.url = url
@property
def title(self):
return "Browse the %s code" % self.vcs
@property
def link(self):
if self.vcs == 'svn':
return self.url.replace('svn/Products', 'trac/browser/Products')
return self.url
class AuthorSuggestionLink(object):
title = "Who worked on this?"
def __init__(self, vcs, url):
assert(vcs == 'git')
self.url = url
@property
def link(self):
return self.url + '/graphs/contributors'
class Buildout(Common):
subdir = 'buildouts'
template_name = 'buildout.html'
title_prefix = 'Buildout directory'
simple_fields = ['hostname',
'directory',
'extends', # TODO: fix this: missing KGS here.
'version_control_system',
'version_control_url',
]
site = None
code_url = None
server = None
link_attributes = ['site', 'code_url', 'server', 'author_suggestion']
# TODO: KGS handling, just like eggs.
def prepare(self):
if ('vcs' in self.data) and self.data['vcs']:
vcs = self.data['vcs']['vcs']
vcs_url = self.data['vcs']['url']
self.data['version_control_system'] = vcs
self.data['version_control_url'] = vcs_url
# https://office.lizard.net/trac/browser/Products
# https://office.lizard.net/svn/Products/sites/demo/tags/3.0.11/
self.code_url = CodeLink(vcs, vcs_url)
self.author_suggestion = None
if vcs == 'git':
self.author_suggestion = AuthorSuggestionLink(vcs, vcs_url)
self.eggs = {}
for egg_name, version in self.data['eggs'].items():
if egg_name not in data['egg']:
data['egg'][egg_name] = Egg(egg_name)
egg = data['egg'][egg_name]
egg.add_usage(self, version)
self.eggs[egg] = version
@property
def eggs_for_display(self):
for key in sorted(self.eggs.keys()):
yield key, self.eggs[key]
@property
def title_postfix(self):
"""Warn if there's no site pointing at us."""
if not self.site:
logger.warn("No site: %r", self.site)
return "(not linked into a site!)"
return ''
class Server(Common):
subdir = 'servers'
template_name = 'server.html'
title_prefix = 'Linux server'
simple_fields = ['hostname',
'users',
'backup_jobs',
]
def prepare(self):
self.sites = []
self.buildouts = []
self.ports = {}
@property
def sites_for_display(self):
return sorted(self.sites)
@property
def buildouts_for_display(self):
return sorted(self.buildouts)
@property
def ports_for_display(self):
for key in sorted(self.ports.keys()):
yield key, self.ports[key]
class Egg(Common):
# Well, it is not actually that common...
subdir = 'eggs'
template_name = 'egg.html'
title_prefix = 'Egg'
simple_fields = ['directory',
'extends', # TODO: fix this: missing KGS here.
'version_control_system',
'version_control_url',
]
def __init__(self, egg_name):
self.id = egg_name
self.versions = collections.defaultdict(list)
def add_usage(self, buildout, version):
self.versions[version].append(buildout)
@property
def versions_for_display(self):
for key in sorted(self.versions.keys()):
yield key, self.versions[key]
def collect_data():
"""Collect all the json data and load it in memory."""
mapping = {'nginx': Nginx,
'apache': Apache,
'server': Server,
'buildout': Buildout}
with utils.cd(utils.displayer_dir()):
for dirpath, dirnames, filenames in os.walk('.'):
# server_id = dirpath
for json_file in [f for f in filenames if f.endswith('.json')]:
kind = json_file.split('___')[0]
filepath = os.path.join(dirpath, json_file)
logger.debug("Loading info from %s",
os.path.abspath(filepath))
json_content = open(filepath).read()
klass = mapping[kind]
obj = klass(json_content)
data[kind][obj.id.lower()] = obj
# Link buildouts and nginx sites.
for nginx in data['nginx'].values():
buildout_id = nginx.data.get('buildout_id')
if buildout_id is not None:
buildout = data['buildout'].get(buildout_id)
if buildout is not None:
nginx.buildout = buildout
buildout.site = nginx
# Link buildouts and apache sites.
for apache in data['apache'].values():
buildout_id = apache.data.get('buildout_id')
if buildout_id is not None:
buildout = data['buildout'].get(buildout_id)
if buildout is not None:
apache.buildout = buildout
buildout.site = apache
# Link buildouts+sites with servers.
for kind in ['nginx', 'apache', 'buildout']:
for obj in data[kind].values():
hostname = obj.data.get('hostname')
if hostname is not None:
hostname = hostname.lower()
server = data['server'].get(hostname)
if server is None:
logger.error("Server with hostname %s not found.",
hostname)
else:
obj.server = server
if kind == 'nginx' or kind == 'apache':
server.sites.append(obj)
elif kind == 'buildout':
server.buildouts.append(obj)
# Link nginx gunicorn ports with servers.
for kind in ['nginx']:
for obj in data[kind].values():
hostname = obj.data.get('hostname')
port = obj.data.get('proxy_port')
try:
port = int(port)
except:
pass
if hostname is not None and port is not None:
hostname = hostname.lower()
server = data['server'].get(hostname)
if server is None:
logger.error("Server with hostname %s not found.",
hostname)
continue
server.ports[port] = obj
def generate_html():
index_subdirs = {'site': 'sites',
'buildout': 'buildouts',
'server': 'servers',
'egg': 'eggs'}
now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
data['site'] = data['apache']
data['site'].update(data['nginx'])
for kind in ['buildout', 'server', 'egg', 'site']:
for obj in data[kind].values():
obj.write()
# Overview.
subdir = index_subdirs[kind]
outfile = os.path.join(utils.html_dir(),
subdir,
'index.html')
template = jinja_env.get_template('index.html')
open(outfile, 'w').write(template.render(
view={'title': 'Overview of %s' % subdir,
'objs': sorted(data[kind].values()),
'generated_on': now}))
logger.info("Wrote %s", outfile)
outfile = os.path.join(utils.html_dir(), 'index.html')
template = jinja_env.get_template('root.html')
open(outfile, 'w').write(template.render(
view={'title': 'Root overview',
'subitems': index_subdirs.values(),
'generated_on': now}))
logger.info("Wrote %s", outfile)
def main():
utils.setup_logging()
for subdir in ['eggs', 'servers', 'buildouts', 'sites']:
| utils.clear_directory_contents(os.path.join(
utils.html_dir(), subdir)) | conditional_block |
|
models.py | )
# Many-to-Many auxiliary table of Bands and Songs
band_songs = db.Table('band_songs',
db.Column('band_id', db.Integer, db.ForeignKey('band.id'), nullable=False),
db.Column('song_id', db.Integer, db.ForeignKey('song.id'), nullable=False)
)
class User(UserMixin, db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128), nullable=False)
email = db.Column(db.String(64), unique=True, index=True, nullable=False)
password_hash = db.Column(db.String(1000), nullable=False) | songs = db.relationship('Song', backref='user', lazy='dynamic', cascade="all, delete-orphan")
shows = db.relationship('Show', backref='user', lazy='dynamic', cascade="all, delete-orphan")
def __repr__(self):
return 'User {0} ({1})'.format(self.name, self.email)
@property
def password(self):
raise AttributeError('Password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
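# `Serializer` is assumed to be itsdangerous' TimedJSONWebSignatureSerializer (the
# usual Flask pattern); the confirmation token embeds the user id and expires after
# `expiration` seconds (24 hours by default).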
def generate_confirmation_token(self, expiration=86400):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def confirm_token(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
return True
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class Song(db.Model):
__tablename__ = 'song'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
title = db.Column(db.String(128), nullable=False)
artist = db.Column(db.String(128))
key = db.Column(db.String(128))
tempo = db.Column(db.Integer)
duration = db.Column(db.String(5))
lyrics = db.Column(db.Text)
notes = db.Column(db.String(4000))
def __repr__(self):
return self.title
def pretty_duration(self):
if self.duration is not None and self.duration != '':
return self.duration[:2] + ':' + self.duration[2:]
else:
return ''
@staticmethod
def get_lyrics_or_chords(url):
"""
Scrapes the HTML of a given song Lyrics or Chords
:param url: The url of the song (different Providers)
:return: HTML of the song's Lyrics or Chords
"""
html = ''
if 'cifraclub' in url:
if url.startswith('https://m.'):
url = 'https://www.' + url[10:] # So we don't have to deal with mobile URLs
url += 'imprimir.html#columns=false' # Printer Friendly page (it's cleaner)
soup = getsoup(url)
sections = soup.find_all('pre')
for s in sections:
html += str(s)
if 'letras.mus.br' in url:
if url.startswith('https://m.'):
url = 'https://www.' + url[10:] # So we don't have to deal with mobile URLs
soup = getsoup(url)
article = soup.find('article')
html = str(article)
if 'e-chords' in url:
soup = getsoup(url)
pre = soup.find('pre', id='core')
# Remove Tab Div, keep raw tab
div = pre.find('div')
if div is not None:
tab = div.find('div', class_='tab')
html = '<pre>' + tab.text + '</pre>'
div.extract()
html += str(pre)
if 'freak' in url:
soup = getsoup(url)
content = soup.find('div', id='content_h')
html = str(content)
return html
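# `getsoup` is a helper imported elsewhere in the project and not shown here; a
# minimal sketch of what it is assumed to do (fetch the URL and parse the HTML
# with BeautifulSoup) would be:
#
#   import requests
#   from bs4 import BeautifulSoup
#
#   def getsoup(url):
#       response = requests.get(url, timeout=10)
#       response.raise_for_status()
#       return BeautifulSoup(response.text, 'html.parser')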
def get_list_of_associated_bands(self):
formatted_output = ''
associated_bands = self.query.get(self.id).bands.order_by(Band.name).all()
for band in associated_bands:
formatted_output = formatted_output + band.name + ', '
if len(formatted_output) > 0:
formatted_output = formatted_output[:-2]
return formatted_output
class Band(db.Model):
__tablename__ = 'band'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
name = db.Column(db.String(128), nullable=False)
style = db.Column(db.String(128))
members = db.relationship('BandMember',
backref=db.backref('band'),
cascade="all, delete-orphan",
lazy='dynamic')
"""
Configuration for a many-to-many relationship between Bands and Songs
1. 'Song' is the right side entity of the relationship (the left side entity is the parent class).
2. secondary configures the association table that is used for this relationship. See auxiliary tables at the top
of this file
3. primaryjoin indicates the condition that links the left side entity with the association table.
4. secondaryjoin indicates the condition that links the right side entity with the association table.
5. backref defines how this relationship will be accessed from the right side entity.
The additional lazy argument indicates the execution mode for this query. A mode of dynamic sets up the query to
not run until specifically requested.
6. lazy is similar to the parameter of the same name in the backref, but this one applies to the left side query
instead of the right side.
"""
songs = db.relationship('Song',
secondary=band_songs,
primaryjoin=(band_songs.c.band_id == id),
secondaryjoin=(band_songs.c.song_id == Song.id),
backref=db.backref('bands', lazy='dynamic'),
lazy='dynamic')
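# Usage sketch for the dynamic many-to-many defined above (illustrative objects,
# assumes an app context and a committed Band/Song pair):
#
#     band.associate_song(song)
#     db.session.commit()
#     band.songs.filter(Song.title.ilike('%love%')).all()  # lazy='dynamic' returns a query
#     song.bands.count()                                    # reverse access via the backref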
def associate_song(self, song):
"""
Adds a song to the association list
:param song: The song object to be added
:return: None
"""
self.songs.append(song)
def disassociate_song(self, song):
"""
Removes a song from the association list
:param song: The song object to be removed
:return: None
"""
self.songs.remove(song)
def __repr__(self):
return 'Band {0}'.format(self.name)
class BandMember(db.Model):
__tablename__ = 'band_member'
id = db.Column(db.Integer, primary_key=True)
band_id = db.Column(db.Integer, db.ForeignKey('band.id'))
name = db.Column(db.String(128), nullable=False)
email = db.Column(db.String(64), nullable=False)
def __repr__(self):
return 'Band Member {0} ({1})'.format(self.name, self.email)
# noinspection SqlDialectInspection
class Show(db.Model):
__tablename__ = 'show'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
band_id = db.Column(db.Integer, db.ForeignKey('band.id'))
name = db.Column(db.String(128), nullable=False)
start = db.Column(db.DateTime, nullable=True)
end = db.Column(db.DateTime, nullable=True)
address = db.Column(db.String(4000))
contact = db.Column(db.String(4000))
pay = db.Column(db.String(128))
notes = db.Column(db.String(4000))
"""
Configuration for a many-to-many relationship between Shows and Songs
1. 'Song' is the right side entity of the relationship (the left side entity is the parent class).
2. secondary configures the association table that is used for this relationship. See auxiliary tables at the top
of this file
3. primaryjoin indicates the condition that links the left side entity with the association table.
4. secondaryjoin indicates the condition that links the right side entity with the association table.
5. backref defines how this relationship will be accessed from the right side entity.
The additional lazy argument indicates the execution mode for this query. A mode of dynamic sets up the query to
not run until specifically requested.
6. lazy is similar to the parameter of the same name in the backref, but this one applies to the left side query
instead of the right side.
"""
songs = db.relationship('Song',
secondary=setlist,
order_by=setlist.c.song_position,
primaryjoin=(setlist.c.show_id == id),
secondaryjoin=(setlist.c.song_id == Song.id),
backref=db.backref('shows', lazy='dynamic'),
lazy='dynamic')
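# Because the association is ordered by setlist.c.song_position, iterating show.songs
# yields the setlist in play order. Rough usage sketch (illustrative names):
#
#     show.add_song(song)
#     db.session.commit()
#     ordered_titles = [s.title for s in show.songs]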
def __repr__(self):
return self.name
def add_song(self, song):
| confirmed = db.Column(db.Boolean, default=False)
bands = db.relationship('Band', backref='user', lazy='dynamic', cascade="all, delete-orphan") | random_line_split |
models.py | ('song.id'), nullable=False)
)
class User(UserMixin, db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128), nullable=False)
email = db.Column(db.String(64), unique=True, index=True, nullable=False)
password_hash = db.Column(db.String(1000), nullable=False)
confirmed = db.Column(db.Boolean, default=False)
bands = db.relationship('Band', backref='user', lazy='dynamic', cascade="all, delete-orphan")
songs = db.relationship('Song', backref='user', lazy='dynamic', cascade="all, delete-orphan")
shows = db.relationship('Show', backref='user', lazy='dynamic', cascade="all, delete-orphan")
def __repr__(self):
return 'User {0} ({1})'.format(self.name, self.email)
@property
def password(self):
raise AttributeError('Password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=86400):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def confirm_token(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except Exception:
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
return True
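# Token round-trip sketch (assumes the user row is committed so user.id is set):
#
#     token = user.generate_confirmation_token()  # signed payload {'confirm': user.id}
#     user.confirm_token(token)                   # True; sets confirmed and stages the row
#     db.session.commit()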
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class Song(db.Model):
__tablename__ = 'song'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
title = db.Column(db.String(128), nullable=False)
artist = db.Column(db.String(128))
key = db.Column(db.String(128))
tempo = db.Column(db.Integer)
duration = db.Column(db.String(5))
lyrics = db.Column(db.Text)
notes = db.Column(db.String(4000))
def __repr__(self):
return self.title
def pretty_duration(self):
if self.duration is not None and self.duration != '':
return self.duration[:2] + ':' + self.duration[2:]
else:
return ''
@staticmethod
def get_lyrics_or_chords(url):
"""
Scrapes the HTML of a given song Lyrics or Chords
:param url: The url of the song (different Providers)
:return: HTML of the song's Lyrics or Chords
"""
html = ''
if 'cifraclub' in url:
if url.startswith('https://m.'):
url = 'https://www.' + url[10:] # So we don't have to deal with mobile URLs
url += 'imprimir.html#columns=false' # Printer Friendly page (it's cleaner)
soup = getsoup(url)
sections = soup.find_all('pre')
for s in sections:
html += str(s)
if 'letras.mus.br' in url:
if url.startswith('https://m.'):
url = 'https://www.' + url[10:] # So we don't have to deal with mobile URLs
soup = getsoup(url)
article = soup.find('article')
html = str(article)
if 'e-chords' in url:
soup = getsoup(url)
pre = soup.find('pre', id='core')
# Remove Tab Div, keep raw tab
div = pre.find('div')
if div is not None:
tab = div.find('div', class_='tab')
html = '<pre>' + tab.text + '</pre>'
div.extract()
html += str(pre)
if 'freak' in url:
soup = getsoup(url)
content = soup.find('div', id='content_h')
html = str(content)
return html
def get_list_of_associated_bands(self):
formatted_output = ''
associated_bands = self.query.get(self.id).bands.order_by(Band.name).all()
for band in associated_bands:
formatted_output = formatted_output + band.name + ', '
if len(formatted_output) > 0:
formatted_output = formatted_output[:-2]
return formatted_output
class Band(db.Model):
__tablename__ = 'band'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
name = db.Column(db.String(128), nullable=False)
style = db.Column(db.String(128))
members = db.relationship('BandMember',
backref=db.backref('band'),
cascade="all, delete-orphan",
lazy='dynamic')
"""
Configuration for a many-to-many relationship between Bands and Songs
1. 'Song' is the right side entity of the relationship (the left side entity is the parent class).
2. secondary configures the association table that is used for this relationship. See auxiliary tables at the top
of this file
3. primaryjoin indicates the condition that links the left side entity with the association table.
4. secondaryjoin indicates the condition that links the right side entity with the association table.
5. backref defines how this relationship will be accessed from the right side entity.
The additional lazy argument indicates the execution mode for this query. A mode of dynamic sets up the query to
not run until specifically requested.
6. lazy is similar to the parameter of the same name in the backref, but this one applies to the left side query
instead of the right side.
"""
songs = db.relationship('Song',
secondary=band_songs,
primaryjoin=(band_songs.c.band_id == id),
secondaryjoin=(band_songs.c.song_id == Song.id),
backref=db.backref('bands', lazy='dynamic'),
lazy='dynamic')
def associate_song(self, song):
"""
Adds a song to the association list
:param song: The song object to be added
:return: None
"""
self.songs.append(song)
def disassociate_song(self, song):
"""
Removes a song from the association list
:param song: The song object to be removed
:return: None
"""
self.songs.remove(song)
def __repr__(self):
return 'Band {0}'.format(self.name)
class BandMember(db.Model):
__tablename__ = 'band_member'
id = db.Column(db.Integer, primary_key=True)
band_id = db.Column(db.Integer, db.ForeignKey('band.id'))
name = db.Column(db.String(128), nullable=False)
email = db.Column(db.String(64), nullable=False)
def __repr__(self):
return 'Band Member {0} ({1})'.format(self.name, self.email)
# noinspection SqlDialectInspection
class Show(db.Model):
__tablename__ = 'show'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
band_id = db.Column(db.Integer, db.ForeignKey('band.id'))
name = db.Column(db.String(128), nullable=False)
start = db.Column(db.DateTime, nullable=True)
end = db.Column(db.DateTime, nullable=True)
address = db.Column(db.String(4000))
contact = db.Column(db.String(4000))
pay = db.Column(db.String(128))
notes = db.Column(db.String(4000))
"""
Configuration for a many-to-many relationship between Shows and Songs
1. 'Song' is the right side entity of the relationship (the left side entity is the parent class).
2. secondary configures the association table that is used for this relationship. See auxiliary tables at the top
of this file
3. primaryjoin indicates the condition that links the left side entity with the association table.
4. secondaryjoin indicates the condition that links the right side entity with the association table.
5. backref defines how this relationship will be accessed from the right side entity.
The additional lazy argument indicates the execution mode for this query. A mode of dynamic sets up the query to
not run until specifically requested.
6. lazy is similar to the parameter of the same name in the backref, but this one applies to the left side query
instead of the right side.
"""
songs = db.relationship('Song',
secondary=setlist,
order_by=setlist.c.song_position,
primaryjoin=(setlist.c.show_id == id),
secondaryjoin=(setlist.c.song_id == Song.id),
backref=db.backref('shows', lazy='dynamic'),
lazy='dynamic')
def __repr__(self):
return self.name
def add_song(self, song):
"""
Adds a song to the show's setlist
:param song: The song object to be added
:return: None
"""
self.songs.append(song)
def remove_song(self, song):
| self.songs.remove(song) | identifier_body |
|
models.py | )
# Many-to-Many auxiliary table of Bands and Songs
band_songs = db.Table('band_songs',
db.Column('band_id', db.Integer, db.ForeignKey('band.id'), nullable=False),
db.Column('song_id', db.Integer, db.ForeignKey('song.id'), nullable=False)
)
class User(UserMixin, db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128), nullable=False)
email = db.Column(db.String(64), unique=True, index=True, nullable=False)
password_hash = db.Column(db.String(1000), nullable=False)
confirmed = db.Column(db.Boolean, default=False)
bands = db.relationship('Band', backref='user', lazy='dynamic', cascade="all, delete-orphan")
songs = db.relationship('Song', backref='user', lazy='dynamic', cascade="all, delete-orphan")
shows = db.relationship('Show', backref='user', lazy='dynamic', cascade="all, delete-orphan")
def __repr__(self):
return 'User {0} ({1})'.format(self.name, self.email)
@property
def | (self):
raise AttributeError('Password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
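# Password handling sketch: `password` is write-only (reading it raises AttributeError);
# only the hash produced by generate_password_hash is stored. Illustrative usage:
#
#     user = User(name='Ada', email='ada@example.com')
#     user.password = 'correct horse battery staple'
#     user.verify_password('correct horse battery staple')  # True
#     user.password                                          # raises AttributeError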
def generate_confirmation_token(self, expiration=86400):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def confirm_token(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except Exception:
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
return True
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class Song(db.Model):
__tablename__ = 'song'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
title = db.Column(db.String(128), nullable=False)
artist = db.Column(db.String(128))
key = db.Column(db.String(128))
tempo = db.Column(db.Integer)
duration = db.Column(db.String(5))
lyrics = db.Column(db.Text)
notes = db.Column(db.String(4000))
def __repr__(self):
return self.title
def pretty_duration(self):
if self.duration is not None and self.duration != '':
return self.duration[:2] + ':' + self.duration[2:]
else:
return ''
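# pretty_duration appears to assume a digits-only duration string such as '0345'
# (the String(5) column above) and simply reinserts the colon:
#
#     Song(title='Example', duration='0345').pretty_duration()  # -> '03:45'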
@staticmethod
def get_lyrics_or_chords(url):
"""
Scrapes the HTML of a given song Lyrics or Chords
:param url: The url of the song (different Providers)
:return: HTML of the song's Lyrics or Chords
"""
html = ''
if 'cifraclub' in url:
if url.startswith('https://m.'):
url = 'https://www.' + url[10:] # So we don't have to deal with mobile URLs
url += 'imprimir.html#columns=false' # Printer Friendly page (it's cleaner)
soup = getsoup(url)
sections = soup.find_all('pre')
for s in sections:
html += str(s)
if 'letras.mus.br' in url:
if url.startswith('https://m.'):
url = 'https://www.' + url[10:] # So we don't have to deal with mobile URLs
soup = getsoup(url)
article = soup.find('article')
html = str(article)
if 'e-chords' in url:
soup = getsoup(url)
pre = soup.find('pre', id='core')
# Remove Tab Div, keep raw tab
div = pre.find('div')
if div is not None:
tab = div.find('div', class_='tab')
html = '<pre>' + tab.text + '</pre>'
div.extract()
html += str(pre)
if 'freak' in url:
soup = getsoup(url)
content = soup.find('div', id='content_h')
html = str(content)
return html
def get_list_of_associated_bands(self):
formatted_output = ''
associated_bands = self.query.get(self.id).bands.order_by(Band.name).all()
for band in associated_bands:
formatted_output = formatted_output + band.name + ', '
if len(formatted_output) > 0:
formatted_output = formatted_output[:-2]
return formatted_output
class Band(db.Model):
__tablename__ = 'band'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
name = db.Column(db.String(128), nullable=False)
style = db.Column(db.String(128))
members = db.relationship('BandMember',
backref=db.backref('band'),
cascade="all, delete-orphan",
lazy='dynamic')
"""
Configuration for a many-to-many relationship between Bands and Songs
1. 'Song' is the right side entity of the relationship (the left side entity is the parent class).
2. secondary configures the association table that is used for this relationship. See auxiliary tables at the top
of this file
3. primaryjoin indicates the condition that links the left side entity with the association table.
4. secondaryjoin indicates the condition that links the right side entity with the association table.
5. backref defines how this relationship will be accessed from the right side entity.
The additional lazy argument indicates the execution mode for this query. A mode of dynamic sets up the query to
not run until specifically requested.
6. lazy is similar to the parameter of the same name in the backref, but this one applies to the left side query
instead of the right side.
"""
songs = db.relationship('Song',
secondary=band_songs,
primaryjoin=(band_songs.c.band_id == id),
secondaryjoin=(band_songs.c.song_id == Song.id),
backref=db.backref('bands', lazy='dynamic'),
lazy='dynamic')
def associate_song(self, song):
"""
Adds a song to the association list
:param song: The song object to be added
:return: None
"""
self.songs.append(song)
def disassociate_song(self, song):
"""
Removes a song from the association list
:param song: The song object to be removed
:return: None
"""
self.songs.remove(song)
def __repr__(self):
return 'Band {0}'.format(self.name)
class BandMember(db.Model):
__tablename__ = 'band_member'
id = db.Column(db.Integer, primary_key=True)
band_id = db.Column(db.Integer, db.ForeignKey('band.id'))
name = db.Column(db.String(128), nullable=False)
email = db.Column(db.String(64), nullable=False)
def __repr__(self):
return 'Band Member {0} ({1})'.format(self.name, self.email)
# noinspection SqlDialectInspection
class Show(db.Model):
__tablename__ = 'show'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
band_id = db.Column(db.Integer, db.ForeignKey('band.id'))
name = db.Column(db.String(128), nullable=False)
start = db.Column(db.DateTime, nullable=True)
end = db.Column(db.DateTime, nullable=True)
address = db.Column(db.String(4000))
contact = db.Column(db.String(4000))
pay = db.Column(db.String(128))
notes = db.Column(db.String(4000))
"""
Configuration for a many-to-many relationship between Shows and Songs
1. 'Song' is the right side entity of the relationship (the left side entity is the parent class).
2. secondary configures the association table that is used for this relationship. See auxiliary tables at the top
of this file
3. primaryjoin indicates the condition that links the left side entity with the association table.
4. secondaryjoin indicates the condition that links the right side entity with the association table.
5. backref defines how this relationship will be accessed from the right side entity.
The additional lazy argument indicates the execution mode for this query. A mode of dynamic sets up the query to
not run until specifically requested.
6. lazy is similar to the parameter of the same name in the backref, but this one applies to the left side query
instead of the right side.
"""
songs = db.relationship('Song',
secondary=setlist,
order_by=setlist.c.song_position,
primaryjoin=(setlist.c.show_id == id),
secondaryjoin=(setlist.c.song_id == Song.id),
backref=db.backref('shows', lazy='dynamic'),
lazy='dynamic')
def __repr__(self):
return self.name
def add_song(self, song | password | identifier_name |
models.py | )
# Many-to-Many auxiliary table of Bands and Songs
band_songs = db.Table('band_songs',
db.Column('band_id', db.Integer, db.ForeignKey('band.id'), nullable=False),
db.Column('song_id', db.Integer, db.ForeignKey('song.id'), nullable=False)
)
class User(UserMixin, db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128), nullable=False)
email = db.Column(db.String(64), unique=True, index=True, nullable=False)
password_hash = db.Column(db.String(1000), nullable=False)
confirmed = db.Column(db.Boolean, default=False)
bands = db.relationship('Band', backref='user', lazy='dynamic', cascade="all, delete-orphan")
songs = db.relationship('Song', backref='user', lazy='dynamic', cascade="all, delete-orphan")
shows = db.relationship('Show', backref='user', lazy='dynamic', cascade="all, delete-orphan")
def __repr__(self):
return 'User {0} ({1})'.format(self.name, self.email)
@property
def password(self):
raise AttributeError('Password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=86400):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def confirm_token(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except Exception:
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
return True
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
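# Flask-Login invokes this loader with the user id stored in the session (a string,
# hence the int() cast) whenever it needs to reload the current user.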
class Song(db.Model):
__tablename__ = 'song'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
title = db.Column(db.String(128), nullable=False)
artist = db.Column(db.String(128))
key = db.Column(db.String(128))
tempo = db.Column(db.Integer)
duration = db.Column(db.String(5))
lyrics = db.Column(db.Text)
notes = db.Column(db.String(4000))
def __repr__(self):
return self.title
def pretty_duration(self):
if self.duration is not None and self.duration != '':
return self.duration[:2] + ':' + self.duration[2:]
else:
return ''
@staticmethod
def get_lyrics_or_chords(url):
"""
Scrapes the HTML of a given song Lyrics or Chords
:param url: The url of the song (different Providers)
:return: HTML of the song's Lyrics or Chords
"""
html = ''
if 'cifraclub' in url:
if url.startswith('https://m.'):
url = 'https://www.' + url[10:] # So we don't have to deal with mobile URLs
url += 'imprimir.html#columns=false' # Printer Friendly page (it's cleaner)
soup = getsoup(url)
sections = soup.find_all('pre')
for s in sections:
html += str(s)
if 'letras.mus.br' in url:
if url.startswith('https://m.'):
|
soup = getsoup(url)
article = soup.find('article')
html = str(article)
if 'e-chords' in url:
soup = getsoup(url)
pre = soup.find('pre', id='core')
# Remove Tab Div, keep raw tab
div = pre.find('div')
if div is not None:
tab = div.find('div', class_='tab')
html = '<pre>' + tab.text + '</pre>'
div.extract()
html += str(pre)
if 'freak' in url:
soup = getsoup(url)
content = soup.find('div', id='content_h')
html = str(content)
return html
def get_list_of_associated_bands(self):
formatted_output = ''
associated_bands = self.query.get(self.id).bands.order_by(Band.name).all()
for band in associated_bands:
formatted_output = formatted_output + band.name + ', '
if len(formatted_output) > 0:
formatted_output = formatted_output[:-2]
return formatted_output
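# Output sketch: a song associated with bands named 'Foo' and 'Bar' returns 'Bar, Foo'
# (alphabetical via order_by(Band.name), trailing separator stripped above).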
class Band(db.Model):
__tablename__ = 'band'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
name = db.Column(db.String(128), nullable=False)
style = db.Column(db.String(128))
members = db.relationship('BandMember',
backref=db.backref('band'),
cascade="all, delete-orphan",
lazy='dynamic')
"""
Configuration for a many-to-many relationship between Bands and Songs
1. 'Song' is the right side entity of the relationship (the left side entity is the parent class).
2. secondary configures the association table that is used for this relationship. See auxiliary tables at the top
of this file
3. primaryjoin indicates the condition that links the left side entity with the association table.
4. secondaryjoin indicates the condition that links the right side entity with the association table.
5. backref defines how this relationship will be accessed from the right side entity.
The additional lazy argument indicates the execution mode for this query. A mode of dynamic sets up the query to
not run until specifically requested.
6. lazy is similar to the parameter of the same name in the backref, but this one applies to the left side query
instead of the right side.
"""
songs = db.relationship('Song',
secondary=band_songs,
primaryjoin=(band_songs.c.band_id == id),
secondaryjoin=(band_songs.c.song_id == Song.id),
backref=db.backref('bands', lazy='dynamic'),
lazy='dynamic')
def associate_song(self, song):
"""
Adds a song to the association list
:param song: The song object to be added
:return: None
"""
self.songs.append(song)
def disassociate_song(self, song):
"""
Removes a song from the association list
:param song: The song object to be removed
:return: None
"""
self.songs.remove(song)
def __repr__(self):
return 'Band {0}'.format(self.name)
class BandMember(db.Model):
__tablename__ = 'band_member'
id = db.Column(db.Integer, primary_key=True)
band_id = db.Column(db.Integer, db.ForeignKey('band.id'))
name = db.Column(db.String(128), nullable=False)
email = db.Column(db.String(64), nullable=False)
def __repr__(self):
return 'Band Member {0} ({1})'.format(self.name, self.email)
# noinspection SqlDialectInspection
class Show(db.Model):
__tablename__ = 'show'
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
band_id = db.Column(db.Integer, db.ForeignKey('band.id'))
name = db.Column(db.String(128), nullable=False)
start = db.Column(db.DateTime, nullable=True)
end = db.Column(db.DateTime, nullable=True)
address = db.Column(db.String(4000))
contact = db.Column(db.String(4000))
pay = db.Column(db.String(128))
notes = db.Column(db.String(4000))
"""
Configuration for a many-to-many relationship between Shows and Songs
1. 'Song' is the right side entity of the relationship (the left side entity is the parent class).
2. secondary configures the association table that is used for this relationship. See auxiliary tables at the top
of this file
3. primaryjoin indicates the condition that links the left side entity with the association table.
4. secondaryjoin indicates the condition that links the right side entity with the association table.
5. backref defines how this relationship will be accessed from the right side entity.
The additional lazy argument indicates the execution mode for this query. A mode of dynamic sets up the query to
not run until specifically requested.
6. lazy is similar to the parameter of the same name in the backref, but this one applies to the left side query
instead of the right side.
"""
songs = db.relationship('Song',
secondary=setlist,
order_by=setlist.c.song_position,
primaryjoin=(setlist.c.show_id == id),
secondaryjoin=(setlist.c.song_id == Song.id),
backref=db.backref('shows', lazy='dynamic'),
lazy='dynamic')
def __repr__(self):
return self.name
def add_song(self, song):
| url = 'https://www.' + url[10:] # So we don't have to deal with mobile URLs | conditional_block |
client.go | /147.75.94.115/udp/4001/quic/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt",
"/ip4/147.75.109.213/udp/4001/quic/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
"/ip4/147.75.109.29/udp/4001/quic/p2p/QmZa1sAxajnQjVM8WjWXoMbmPd7NsWhfKsPkErzpm9wGkp",
}
var bootstrappersTCP []*peer.AddrInfo
var bootstrappersUDP []*peer.AddrInfo
func init() {
for _, a := range bootstrappersTCPString {
pi, err := parseAddrInfo(a)
if err != nil {
panic(err)
}
bootstrappersTCP = append(bootstrappersTCP, pi)
}
for _, a := range bootstrappersUDPString {
pi, err := parseAddrInfo(a)
if err != nil {
panic(err)
}
bootstrappersUDP = append(bootstrappersUDP, pi)
}
}
type Client struct {
host host.Host
tracer *Tracer
cfg *Config
domain string
nick string
server *peer.AddrInfo
relay *peer.AddrInfo
}
type ClientInfo struct {
Nick string
Info peer.AddrInfo
}
func NewClient(h host.Host, tracer *Tracer, cfg *Config, domain, nick string) (*Client, error) {
var relay, server *peer.AddrInfo
var err error
if domain == "TCP" {
relay, err = parseAddrInfo(cfg.RelayAddrTCP)
if err != nil {
return nil, err
}
server, err = parseAddrInfo(cfg.ServerAddrTCP)
if err != nil {
return nil, err
}
} else {
relay, err = parseAddrInfo(cfg.RelayAddrUDP)
if err != nil {
return nil, err
}
server, err = parseAddrInfo(cfg.ServerAddrUDP)
if err != nil {
return nil, err
}
}
return &Client{
host: h,
tracer: tracer,
cfg: cfg,
domain: domain,
nick: nick,
relay: relay,
server: server,
}, nil
}
func (c *Client) Domain() string {
return c.domain
}
func (c *Client) ID() peer.ID {
return c.host.ID()
}
func (c *Client) Addrs() []ma.Multiaddr {
return c.host.Addrs()
}
func (c *Client) ListPeers() ([]*ClientInfo, error) {
s, err := c.connectToServer()
if err != nil {
return nil, fmt.Errorf("error connecting to flare server: %w", err)
}
defer s.Close()
return c.getPeers(s)
}
func (c *Client) getPeers(s network.Stream) ([]*ClientInfo, error) {
s.SetDeadline(time.Now().Add(time.Minute))
var msg pb.FlareMessage
wr := protoio.NewDelimitedWriter(s)
rd := protoio.NewDelimitedReader(s, 1<<20)
msg.Type = pb.FlareMessage_GETPEERS.Enum()
msg.GetPeers = &pb.GetPeers{Domain: &c.domain}
if err := wr.WriteMsg(&msg); err != nil {
s.Reset()
return nil, fmt.Errorf("error writing request to server: %w", err)
}
msg.Reset()
if err := rd.ReadMsg(&msg); err != nil {
s.Reset()
return nil, fmt.Errorf("error reading server response: %w", err)
}
peerlist := msg.GetPeerList()
if peerlist == nil {
s.Reset()
return nil, fmt.Errorf("bad server response: missing peer list")
}
result := make([]*ClientInfo, 0, len(peerlist.GetPeers()))
for _, pi := range peerlist.GetPeers() {
ci, err := peerInfoToClientInfo(pi)
if err != nil {
s.Reset()
return nil, fmt.Errorf("error parsing client info: %w", err)
}
result = append(result, ci)
}
s.SetDeadline(time.Time{})
return result, nil
}
func (c *Client) Connect(ci *ClientInfo) error {
// check for existing connections first
for _, conn := range c.host.Network().ConnsToPeer(ci.Info.ID) {
if !isRelayConn(conn) {
return nil
}
}
err := c.connectToBootstrappers()
if err != nil |
// let identify get our observed addresses before starting
time.Sleep(time.Second)
err = c.connectToPeer(ci)
c.tracer.Connect(ci, err)
return err
}
func (c *Client) connectToPeer(ci *ClientInfo) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
err := c.host.Connect(ctx, ci.Info)
if err != nil {
return fmt.Errorf("error establishing initial connection to peer: %w", err)
}
deadline := time.After(time.Minute)
poll:
for {
for _, conn := range c.host.Network().ConnsToPeer(ci.Info.ID) {
if !isRelayConn(conn) {
return nil
}
}
select {
case <-deadline:
break poll
case <-time.After(time.Second):
}
}
return fmt.Errorf("no direct connection to peer")
}
func (c *Client) connectToBootstrappers() error {
var pis []*peer.AddrInfo
if c.domain == "TCP" {
pis = bootstrappersTCP
} else {
pis = bootstrappersUDP
}
count := 0
for _, pi := range pis {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
err := c.host.Connect(ctx, *pi)
cancel()
if err != nil {
log.Warnf("error connecting to bootstrapper %s: %s", pi.ID, err)
} else {
c.host.ConnManager().Protect(pi.ID, "flare")
count++
}
}
if count < 4 {
return fmt.Errorf("could not connect to enough bootstrappers -- need 4, got %d", count)
}
return nil
}
func (c *Client) Background(wg *sync.WaitGroup) {
defer wg.Done()
natType, err := c.getNATType()
if err != nil {
log.Errorf("error determining NAT type: %s", err)
return
}
log.Infof("%s NAT Device Type is %s", c.domain, natType)
c.tracer.Announce(natType.String())
if natType == network.NATDeviceTypeSymmetric {
log.Errorf("%s NAT type is impenetrable; sorry", c.domain)
return
}
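// Symmetric NATs allocate a different external mapping per destination, so the
// observed addresses learned via the bootstrappers are generally useless for hole
// punching; that is why the symmetric-NAT check above bails out instead of
// attempting direct dials.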
c.connectToRelay()
sleep := 15*time.Minute + time.Duration(rand.Int63n(int64(30*time.Minute)))
log.Infof("waiting for %s...", sleep)
time.Sleep(sleep)
for {
log.Infof("trying to connect to peers...")
peers, err := c.ListPeers()
if err != nil {
log.Warnf("error getting peers: %s", err)
time.Sleep(time.Minute)
continue
}
log.Infof("got %d peers", len(peers))
for _, ci := range peers {
err = c.Connect(ci)
if err != nil {
log.Infof("error connecting to %s [%s]: %s", ci.Info.ID, ci.Nick, err)
} else {
log.Infof("successfully connected to %s [%s]", ci.Info.ID, ci.Nick)
}
}
if len(peers) > 25 {
sleep = 2*time.Hour + time.Duration(rand.Int63n(int64(4*time.Hour)))
} else if len(peers) > 10 {
sleep = time.Hour + time.Duration(rand.Int63n(int64(2*time.Hour)))
} else {
sleep = 30*time.Minute + time.Duration(rand.Int63n(int64(time.Hour)))
}
log.Infof("waiting for %s...", sleep)
time.Sleep(sleep)
}
}
func (c *Client) getNATType() (network.NATDeviceType, error) {
sub, err := c.host.EventBus().Subscribe(new(event.EvtNATDeviceTypeChanged))
if err != nil {
return 0, err
}
defer sub.Close()
err = c.connectToBootstrappers()
if err != nil {
return 0, err
}
for {
select {
case evt := <-sub.Out():
e := evt.(event.EvtNATDeviceTypeChanged)
switch c.domain {
case "TCP":
if e.TransportProtocol | {
return fmt.Errorf("error connecting to bootstrappers: %w", err)
} | conditional_block |
client.go | 4/147.75.94.115/udp/4001/quic/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt",
"/ip4/147.75.109.213/udp/4001/quic/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
"/ip4/147.75.109.29/udp/4001/quic/p2p/QmZa1sAxajnQjVM8WjWXoMbmPd7NsWhfKsPkErzpm9wGkp",
}
var bootstrappersTCP []*peer.AddrInfo
var bootstrappersUDP []*peer.AddrInfo
func init() {
for _, a := range bootstrappersTCPString {
pi, err := parseAddrInfo(a)
if err != nil {
panic(err)
}
bootstrappersTCP = append(bootstrappersTCP, pi)
}
for _, a := range bootstrappersUDPString {
pi, err := parseAddrInfo(a)
if err != nil {
panic(err)
}
bootstrappersUDP = append(bootstrappersUDP, pi)
}
}
type Client struct {
host host.Host
tracer *Tracer
cfg *Config
domain string
nick string
server *peer.AddrInfo
relay *peer.AddrInfo
}
type ClientInfo struct {
Nick string
Info peer.AddrInfo
}
func NewClient(h host.Host, tracer *Tracer, cfg *Config, domain, nick string) (*Client, error) {
var relay, server *peer.AddrInfo
var err error
if domain == "TCP" {
relay, err = parseAddrInfo(cfg.RelayAddrTCP)
if err != nil {
return nil, err
}
server, err = parseAddrInfo(cfg.ServerAddrTCP)
if err != nil {
return nil, err
}
} else {
relay, err = parseAddrInfo(cfg.RelayAddrUDP)
if err != nil {
return nil, err
}
server, err = parseAddrInfo(cfg.ServerAddrUDP)
if err != nil {
return nil, err
}
}
return &Client{
host: h,
tracer: tracer,
cfg: cfg,
domain: domain,
nick: nick,
relay: relay,
server: server,
}, nil
}
func (c *Client) Domain() string {
return c.domain
}
func (c *Client) ID() peer.ID {
return c.host.ID()
}
func (c *Client) Addrs() []ma.Multiaddr {
return c.host.Addrs()
}
func (c *Client) ListPeers() ([]*ClientInfo, error) {
s, err := c.connectToServer()
if err != nil {
return nil, fmt.Errorf("error connecting to flare server: %w", err)
}
defer s.Close()
return c.getPeers(s)
}
func (c *Client) getPeers(s network.Stream) ([]*ClientInfo, error) {
s.SetDeadline(time.Now().Add(time.Minute))
var msg pb.FlareMessage
wr := protoio.NewDelimitedWriter(s)
rd := protoio.NewDelimitedReader(s, 1<<20)
msg.Type = pb.FlareMessage_GETPEERS.Enum()
msg.GetPeers = &pb.GetPeers{Domain: &c.domain}
if err := wr.WriteMsg(&msg); err != nil {
s.Reset()
return nil, fmt.Errorf("error writing request to server: %w", err)
}
msg.Reset()
if err := rd.ReadMsg(&msg); err != nil {
s.Reset()
return nil, fmt.Errorf("error reading server response: %w", err)
}
peerlist := msg.GetPeerList()
if peerlist == nil {
s.Reset()
return nil, fmt.Errorf("bad server response: missing peer list")
}
result := make([]*ClientInfo, 0, len(peerlist.GetPeers()))
for _, pi := range peerlist.GetPeers() {
ci, err := peerInfoToClientInfo(pi)
if err != nil {
s.Reset()
return nil, fmt.Errorf("error parsing client info: %w", err)
}
result = append(result, ci)
}
s.SetDeadline(time.Time{})
return result, nil
}
func (c *Client) Connect(ci *ClientInfo) error {
// check for existing connections first
for _, conn := range c.host.Network().ConnsToPeer(ci.Info.ID) {
if !isRelayConn(conn) {
return nil
}
}
err := c.connectToBootstrappers()
if err != nil {
return fmt.Errorf("error connecting to bootstrappers: %w", err)
}
// let identify get our observed addresses before starting
time.Sleep(time.Second)
err = c.connectToPeer(ci)
c.tracer.Connect(ci, err)
return err
}
func (c *Client) connectToPeer(ci *ClientInfo) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
err := c.host.Connect(ctx, ci.Info)
if err != nil {
return fmt.Errorf("error establishing initial connection to peer: %w", err)
}
deadline := time.After(time.Minute)
poll:
for { | if !isRelayConn(conn) {
return nil
}
}
select {
case <-deadline:
break poll
case <-time.After(time.Second):
}
}
return fmt.Errorf("no direct connection to peer")
}
func (c *Client) connectToBootstrappers() error {
var pis []*peer.AddrInfo
if c.domain == "TCP" {
pis = bootstrappersTCP
} else {
pis = bootstrappersUDP
}
count := 0
for _, pi := range pis {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
err := c.host.Connect(ctx, *pi)
cancel()
if err != nil {
log.Warnf("error connecting to bootstrapper %s: %s", pi.ID, err)
} else {
c.host.ConnManager().Protect(pi.ID, "flare")
count++
}
}
if count < 4 {
return fmt.Errorf("could not connect to enough bootstrappers -- need 4, got %d", count)
}
return nil
}
func (c *Client) Background(wg *sync.WaitGroup) {
defer wg.Done()
natType, err := c.getNATType()
if err != nil {
log.Errorf("error determining NAT type: %s", err)
return
}
log.Infof("%s NAT Device Type is %s", c.domain, natType)
c.tracer.Announce(natType.String())
if natType == network.NATDeviceTypeSymmetric {
log.Errorf("%s NAT type is impenetrable; sorry", c.domain)
return
}
c.connectToRelay()
sleep := 15*time.Minute + time.Duration(rand.Int63n(int64(30*time.Minute)))
log.Infof("waiting for %s...", sleep)
time.Sleep(sleep)
for {
log.Infof("trying to connect to peers...")
peers, err := c.ListPeers()
if err != nil {
log.Warnf("error getting peers: %s", err)
time.Sleep(time.Minute)
continue
}
log.Infof("got %d peers", len(peers))
for _, ci := range peers {
err = c.Connect(ci)
if err != nil {
log.Infof("error connecting to %s [%s]: %s", ci.Info.ID, ci.Nick, err)
} else {
log.Infof("successfully connected to %s [%s]", ci.Info.ID, ci.Nick)
}
}
if len(peers) > 25 {
sleep = 2*time.Hour + time.Duration(rand.Int63n(int64(4*time.Hour)))
} else if len(peers) > 10 {
sleep = time.Hour + time.Duration(rand.Int63n(int64(2*time.Hour)))
} else {
sleep = 30*time.Minute + time.Duration(rand.Int63n(int64(time.Hour)))
}
log.Infof("waiting for %s...", sleep)
time.Sleep(sleep)
}
}
func (c *Client) getNATType() (network.NATDeviceType, error) {
sub, err := c.host.EventBus().Subscribe(new(event.EvtNATDeviceTypeChanged))
if err != nil {
return 0, err
}
defer sub.Close()
err = c.connectToBootstrappers()
if err != nil {
return 0, err
}
for {
select {
case evt := <-sub.Out():
e := evt.(event.EvtNATDeviceTypeChanged)
switch c.domain {
case "TCP":
if e.TransportProtocol == | for _, conn := range c.host.Network().ConnsToPeer(ci.Info.ID) { | random_line_split |
client.go | pi, err := parseAddrInfo(a)
if err != nil {
panic(err)
}
bootstrappersTCP = append(bootstrappersTCP, pi)
}
for _, a := range bootstrappersUDPString {
pi, err := parseAddrInfo(a)
if err != nil {
panic(err)
}
bootstrappersUDP = append(bootstrappersUDP, pi)
}
}
type Client struct {
host host.Host
tracer *Tracer
cfg *Config
domain string
nick string
server *peer.AddrInfo
relay *peer.AddrInfo
}
type ClientInfo struct {
Nick string
Info peer.AddrInfo
}
func NewClient(h host.Host, tracer *Tracer, cfg *Config, domain, nick string) (*Client, error) {
var relay, server *peer.AddrInfo
var err error
if domain == "TCP" {
relay, err = parseAddrInfo(cfg.RelayAddrTCP)
if err != nil {
return nil, err
}
server, err = parseAddrInfo(cfg.ServerAddrTCP)
if err != nil {
return nil, err
}
} else {
relay, err = parseAddrInfo(cfg.RelayAddrUDP)
if err != nil {
return nil, err
}
server, err = parseAddrInfo(cfg.ServerAddrUDP)
if err != nil {
return nil, err
}
}
return &Client{
host: h,
tracer: tracer,
cfg: cfg,
domain: domain,
nick: nick,
relay: relay,
server: server,
}, nil
}
func (c *Client) Domain() string {
return c.domain
}
func (c *Client) ID() peer.ID {
return c.host.ID()
}
func (c *Client) Addrs() []ma.Multiaddr {
return c.host.Addrs()
}
func (c *Client) ListPeers() ([]*ClientInfo, error) {
s, err := c.connectToServer()
if err != nil {
return nil, fmt.Errorf("error connecting to flare server: %w", err)
}
defer s.Close()
return c.getPeers(s)
}
func (c *Client) getPeers(s network.Stream) ([]*ClientInfo, error) {
s.SetDeadline(time.Now().Add(time.Minute))
var msg pb.FlareMessage
wr := protoio.NewDelimitedWriter(s)
rd := protoio.NewDelimitedReader(s, 1<<20)
msg.Type = pb.FlareMessage_GETPEERS.Enum()
msg.GetPeers = &pb.GetPeers{Domain: &c.domain}
if err := wr.WriteMsg(&msg); err != nil {
s.Reset()
return nil, fmt.Errorf("error writing request to server: %w", err)
}
msg.Reset()
if err := rd.ReadMsg(&msg); err != nil {
s.Reset()
return nil, fmt.Errorf("error reading server response: %w", err)
}
peerlist := msg.GetPeerList()
if peerlist == nil {
s.Reset()
return nil, fmt.Errorf("bad server response: missing peer list")
}
result := make([]*ClientInfo, 0, len(peerlist.GetPeers()))
for _, pi := range peerlist.GetPeers() {
ci, err := peerInfoToClientInfo(pi)
if err != nil {
s.Reset()
return nil, fmt.Errorf("error parsing client info: %w", err)
}
result = append(result, ci)
}
s.SetDeadline(time.Time{})
return result, nil
}
func (c *Client) Connect(ci *ClientInfo) error {
// check for existing connections first
for _, conn := range c.host.Network().ConnsToPeer(ci.Info.ID) {
if !isRelayConn(conn) {
return nil
}
}
err := c.connectToBootstrappers()
if err != nil {
return fmt.Errorf("error connecting to bootstrappers: %w", err)
}
// let identify get our observed addresses before starting
time.Sleep(time.Second)
err = c.connectToPeer(ci)
c.tracer.Connect(ci, err)
return err
}
func (c *Client) connectToPeer(ci *ClientInfo) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
err := c.host.Connect(ctx, ci.Info)
if err != nil {
return fmt.Errorf("error establishing initial connection to peer: %w", err)
}
deadline := time.After(time.Minute)
poll:
for {
for _, conn := range c.host.Network().ConnsToPeer(ci.Info.ID) {
if !isRelayConn(conn) {
return nil
}
}
select {
case <-deadline:
break poll
case <-time.After(time.Second):
}
}
return fmt.Errorf("no direct connection to peer")
}
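// connectToPeer (above) dials whatever addresses are known (typically the relay
// circuit), then polls for up to a minute for a non-relayed connection to appear,
// i.e. for the hole punch to have produced a direct path.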
func (c *Client) connectToBootstrappers() error {
var pis []*peer.AddrInfo
if c.domain == "TCP" {
pis = bootstrappersTCP
} else {
pis = bootstrappersUDP
}
count := 0
for _, pi := range pis {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
err := c.host.Connect(ctx, *pi)
cancel()
if err != nil {
log.Warnf("error connecting to bootstrapper %s: %s", pi.ID, err)
} else {
c.host.ConnManager().Protect(pi.ID, "flare")
count++
}
}
if count < 4 {
return fmt.Errorf("could not connect to enough bootstrappers -- need 4, got %d", count)
}
return nil
}
func (c *Client) Background(wg *sync.WaitGroup) {
defer wg.Done()
natType, err := c.getNATType()
if err != nil {
log.Errorf("error determining NAT type: %s", err)
return
}
log.Infof("%s NAT Device Type is %s", c.domain, natType)
c.tracer.Announce(natType.String())
if natType == network.NATDeviceTypeSymmetric {
log.Errorf("%s NAT type is impenetrable; sorry", c.domain)
return
}
c.connectToRelay()
sleep := 15*time.Minute + time.Duration(rand.Int63n(int64(30*time.Minute)))
log.Infof("waiting for %s...", sleep)
time.Sleep(sleep)
for {
log.Infof("trying to connect to peers...")
peers, err := c.ListPeers()
if err != nil {
log.Warnf("error getting peers: %s", err)
time.Sleep(time.Minute)
continue
}
log.Infof("got %d peers", len(peers))
for _, ci := range peers {
err = c.Connect(ci)
if err != nil {
log.Infof("error connecting to %s [%s]: %s", ci.Info.ID, ci.Nick, err)
} else {
log.Infof("successfully connected to %s [%s]", ci.Info.ID, ci.Nick)
}
}
if len(peers) > 25 {
sleep = 2*time.Hour + time.Duration(rand.Int63n(int64(4*time.Hour)))
} else if len(peers) > 10 {
sleep = time.Hour + time.Duration(rand.Int63n(int64(2*time.Hour)))
} else {
sleep = 30*time.Minute + time.Duration(rand.Int63n(int64(time.Hour)))
}
log.Infof("waiting for %s...", sleep)
time.Sleep(sleep)
}
}
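// The retry interval in Background above scales with how many peers were returned:
// roughly 2-6h when more than 25 peers are known, 1-3h above 10, and 30-90min
// otherwise; the random component spreads retries across clients.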
func (c *Client) getNATType() (network.NATDeviceType, error) {
sub, err := c.host.EventBus().Subscribe(new(event.EvtNATDeviceTypeChanged))
if err != nil {
return 0, err
}
defer sub.Close()
err = c.connectToBootstrappers()
if err != nil {
return 0, err
}
for {
select {
case evt := <-sub.Out():
e := evt.(event.EvtNATDeviceTypeChanged)
switch c.domain {
case "TCP":
if e.TransportProtocol == network.NATTransportTCP {
return e.NatDeviceType, nil
}
case "UDP":
if e.TransportProtocol == network.NATTransportUDP {
return e.NatDeviceType, nil
}
}
case <-time.After(time.Minute):
return 0, fmt.Errorf("timed out waiting for NAT type determination")
}
}
}
func (c *Client) connectToRelay() | {
// connect to relay and reserve slot
var rsvp *circuit.Reservation
var err error
for rsvp == nil {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
err = c.host.Connect(ctx, *c.relay)
cancel()
if err != nil {
log.Warnf("error connecting to relay: %s; will retry in 1min", err)
time.Sleep(time.Minute)
continue
}
ctx, cancel = context.WithTimeout(context.Background(), time.Minute)
rsvp, err = circuit.Reserve(ctx, c.host, *c.relay)
cancel()
if err != nil { | identifier_body |
|
client.go | 4/147.75.94.115/udp/4001/quic/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt",
"/ip4/147.75.109.213/udp/4001/quic/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
"/ip4/147.75.109.29/udp/4001/quic/p2p/QmZa1sAxajnQjVM8WjWXoMbmPd7NsWhfKsPkErzpm9wGkp",
}
var bootstrappersTCP []*peer.AddrInfo
var bootstrappersUDP []*peer.AddrInfo
func init() {
for _, a := range bootstrappersTCPString {
pi, err := parseAddrInfo(a)
if err != nil {
panic(err)
}
bootstrappersTCP = append(bootstrappersTCP, pi)
}
for _, a := range bootstrappersUDPString {
pi, err := parseAddrInfo(a)
if err != nil {
panic(err)
}
bootstrappersUDP = append(bootstrappersUDP, pi)
}
}
type Client struct {
host host.Host
tracer *Tracer
cfg *Config
domain string
nick string
server *peer.AddrInfo
relay *peer.AddrInfo
}
type ClientInfo struct {
Nick string
Info peer.AddrInfo
}
func NewClient(h host.Host, tracer *Tracer, cfg *Config, domain, nick string) (*Client, error) {
var relay, server *peer.AddrInfo
var err error
if domain == "TCP" {
relay, err = parseAddrInfo(cfg.RelayAddrTCP)
if err != nil {
return nil, err
}
server, err = parseAddrInfo(cfg.ServerAddrTCP)
if err != nil {
return nil, err
}
} else {
relay, err = parseAddrInfo(cfg.RelayAddrUDP)
if err != nil {
return nil, err
}
server, err = parseAddrInfo(cfg.ServerAddrUDP)
if err != nil {
return nil, err
}
}
return &Client{
host: h,
tracer: tracer,
cfg: cfg,
domain: domain,
nick: nick,
relay: relay,
server: server,
}, nil
}
func (c *Client) Domain() string {
return c.domain
}
func (c *Client) ID() peer.ID {
return c.host.ID()
}
func (c *Client) Addrs() []ma.Multiaddr {
return c.host.Addrs()
}
func (c *Client) ListPeers() ([]*ClientInfo, error) {
s, err := c.connectToServer()
if err != nil {
return nil, fmt.Errorf("error connecting to flare server: %w", err)
}
defer s.Close()
return c.getPeers(s)
}
func (c *Client) getPeers(s network.Stream) ([]*ClientInfo, error) {
s.SetDeadline(time.Now().Add(time.Minute))
var msg pb.FlareMessage
wr := protoio.NewDelimitedWriter(s)
rd := protoio.NewDelimitedReader(s, 1<<20)
msg.Type = pb.FlareMessage_GETPEERS.Enum()
msg.GetPeers = &pb.GetPeers{Domain: &c.domain}
if err := wr.WriteMsg(&msg); err != nil {
s.Reset()
return nil, fmt.Errorf("error writing request to server: %w", err)
}
msg.Reset()
if err := rd.ReadMsg(&msg); err != nil {
s.Reset()
return nil, fmt.Errorf("error reading server response: %w", err)
}
peerlist := msg.GetPeerList()
if peerlist == nil {
s.Reset()
return nil, fmt.Errorf("bad server response: missing peer list")
}
result := make([]*ClientInfo, 0, len(peerlist.GetPeers()))
for _, pi := range peerlist.GetPeers() {
ci, err := peerInfoToClientInfo(pi)
if err != nil {
s.Reset()
return nil, fmt.Errorf("error parsing client info: %w", err)
}
result = append(result, ci)
}
s.SetDeadline(time.Time{})
return result, nil
}
func (c *Client) | (ci *ClientInfo) error {
// check for existing connections first
for _, conn := range c.host.Network().ConnsToPeer(ci.Info.ID) {
if !isRelayConn(conn) {
return nil
}
}
err := c.connectToBootstrappers()
if err != nil {
return fmt.Errorf("error connecting to bootstrappers: %w", err)
}
// let identify get our observed addresses before starting
time.Sleep(time.Second)
err = c.connectToPeer(ci)
c.tracer.Connect(ci, err)
return err
}
func (c *Client) connectToPeer(ci *ClientInfo) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
err := c.host.Connect(ctx, ci.Info)
if err != nil {
return fmt.Errorf("error establishing initial connection to peer: %w", err)
}
deadline := time.After(time.Minute)
poll:
for {
for _, conn := range c.host.Network().ConnsToPeer(ci.Info.ID) {
if !isRelayConn(conn) {
return nil
}
}
select {
case <-deadline:
break poll
case <-time.After(time.Second):
}
}
return fmt.Errorf("no direct connection to peer")
}
func (c *Client) connectToBootstrappers() error {
var pis []*peer.AddrInfo
if c.domain == "TCP" {
pis = bootstrappersTCP
} else {
pis = bootstrappersUDP
}
count := 0
for _, pi := range pis {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
err := c.host.Connect(ctx, *pi)
cancel()
if err != nil {
log.Warnf("error connecting to bootstrapper %s: %s", pi.ID, err)
} else {
c.host.ConnManager().Protect(pi.ID, "flare")
count++
}
}
if count < 4 {
return fmt.Errorf("could not connect to enough bootstrappers -- need 4, got %d", count)
}
return nil
}
func (c *Client) Background(wg *sync.WaitGroup) {
defer wg.Done()
natType, err := c.getNATType()
if err != nil {
log.Errorf("error determining NAT type: %s", err)
return
}
log.Infof("%s NAT Device Type is %s", c.domain, natType)
c.tracer.Announce(natType.String())
if natType == network.NATDeviceTypeSymmetric {
log.Errorf("%s NAT type is impenetrable; sorry", c.domain)
return
}
c.connectToRelay()
sleep := 15*time.Minute + time.Duration(rand.Int63n(int64(30*time.Minute)))
log.Infof("waiting for %s...", sleep)
time.Sleep(sleep)
for {
log.Infof("trying to connect to peers...")
peers, err := c.ListPeers()
if err != nil {
log.Warnf("error getting peers: %s", err)
time.Sleep(time.Minute)
continue
}
log.Infof("got %d peers", len(peers))
for _, ci := range peers {
err = c.Connect(ci)
if err != nil {
log.Infof("error connecting to %s [%s]: %s", ci.Info.ID, ci.Nick, err)
} else {
log.Infof("successfully connected to %s [%s]", ci.Info.ID, ci.Nick)
}
}
if len(peers) > 25 {
sleep = 2*time.Hour + time.Duration(rand.Int63n(int64(4*time.Hour)))
} else if len(peers) > 10 {
sleep = time.Hour + time.Duration(rand.Int63n(int64(2*time.Hour)))
} else {
sleep = 30*time.Minute + time.Duration(rand.Int63n(int64(time.Hour)))
}
log.Infof("waiting for %s...", sleep)
time.Sleep(sleep)
}
}
func (c *Client) getNATType() (network.NATDeviceType, error) {
sub, err := c.host.EventBus().Subscribe(new(event.EvtNATDeviceTypeChanged))
if err != nil {
return 0, err
}
defer sub.Close()
err = c.connectToBootstrappers()
if err != nil {
return 0, err
}
for {
select {
case evt := <-sub.Out():
e := evt.(event.EvtNATDeviceTypeChanged)
switch c.domain {
case "TCP":
if e.TransportProtocol | Connect | identifier_name |
initializer.rs | Storage},
Component,
},
effect::{
announcements::{
ChainspecLoaderAnnouncement, ContractRuntimeAnnouncement, ControlAnnouncement,
},
requests::{
ConsensusRequest, ContractRuntimeRequest, LinearChainRequest, NetworkRequest,
RestRequest, StateStoreRequest, StorageRequest,
},
EffectBuilder, Effects,
},
protocol::Message,
reactor::{self, participating, EventQueueHandle, ReactorExit},
types::{chainspec, NodeId},
utils::WithDir,
NodeRng,
};
/// Top-level event for the reactor.
#[derive(Debug, From, Serialize)]
#[must_use]
pub enum Event {
/// Chainspec handler event.
#[from]
Chainspec(chainspec_loader::Event),
/// Storage event.
#[from]
Storage(#[serde(skip_serializing)] storage::Event),
/// Contract runtime event.
#[from]
ContractRuntime(#[serde(skip_serializing)] contract_runtime::Event),
/// Request for state storage.
#[from]
StateStoreRequest(StateStoreRequest),
/// Control announcement
#[from]
ControlAnnouncement(ControlAnnouncement),
}
impl ReactorEvent for Event {
fn as_control(&self) -> Option<&ControlAnnouncement> {
if let Self::ControlAnnouncement(ref ctrl_ann) = self {
Some(ctrl_ann)
} else {
None
}
}
}
impl From<StorageRequest> for Event {
fn from(request: StorageRequest) -> Self {
Event::Storage(storage::Event::StorageRequest(request))
}
}
impl From<ContractRuntimeRequest> for Event {
fn from(request: ContractRuntimeRequest) -> Self {
Event::ContractRuntime(contract_runtime::Event::Request(Box::new(request)))
}
}
impl From<NetworkRequest<NodeId, Message>> for Event {
fn from(_request: NetworkRequest<NodeId, Message>) -> Self {
unreachable!("no network traffic happens during initialization")
}
}
impl From<ChainspecLoaderAnnouncement> for Event {
fn from(_announcement: ChainspecLoaderAnnouncement) -> Self {
unreachable!("no chainspec announcements happen during initialization")
}
}
impl From<LinearChainRequest<NodeId>> for Event {
fn from(_req: LinearChainRequest<NodeId>) -> Self {
unreachable!("no linear chain events happen during initialization")
}
}
impl From<NetworkRequest<NodeId, gossiper::Message<GossipedAddress>>> for Event {
fn from(_request: NetworkRequest<NodeId, gossiper::Message<GossipedAddress>>) -> Self {
unreachable!("no gossiper events happen during initialization")
}
}
impl From<ConsensusRequest> for Event {
fn from(_request: ConsensusRequest) -> Self {
unreachable!("no chainspec announcements happen during initialization")
}
}
impl From<RestRequest<NodeId>> for Event {
fn from(_request: RestRequest<NodeId>) -> Self {
unreachable!("no rest requests happen during initialization")
}
}
impl From<ContractRuntimeAnnouncement> for Event {
fn from(_request: ContractRuntimeAnnouncement) -> Self {
unreachable!("no block executor requests happen during initialization")
}
}
impl Display for Event {
fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
match self {
Event::Chainspec(event) => write!(formatter, "chainspec: {}", event),
Event::Storage(event) => write!(formatter, "storage: {}", event),
Event::ContractRuntime(event) => write!(formatter, "contract runtime: {:?}", event),
Event::StateStoreRequest(request) => {
write!(formatter, "state store request: {}", request)
}
Event::ControlAnnouncement(ctrl_ann) => write!(formatter, "control: {}", ctrl_ann),
}
}
}
/// Error type returned by the initializer reactor.
#[derive(Debug, Error)]
pub enum Error {
/// `Config` error.
#[error("config error: {0}")]
ConfigError(String),
/// Metrics-related error
#[error("prometheus (metrics) error: {0}")]
Metrics(#[from] prometheus::Error),
/// `ChainspecHandler` component error.
#[error("chainspec error: {0}")]
Chainspec(#[from] chainspec::Error),
/// `Storage` component error.
#[error("storage error: {0}")]
Storage(#[from] storage::Error),
/// `ContractRuntime` component error.
#[error("contract runtime config error: {0}")]
ContractRuntime(#[from] contract_runtime::ConfigError),
/// An error that occurred when creating a `SmallNetworkIdentity`.
#[error(transparent)]
SmallNetworkIdentityError(#[from] SmallNetworkIdentityError),
}
/// Initializer node reactor.
#[derive(DataSize, Debug)]
pub struct Reactor {
pub(super) config: WithDir<participating::Config>,
pub(super) chainspec_loader: ChainspecLoader,
pub(super) storage: Storage,
pub(super) contract_runtime: ContractRuntime,
pub(super) small_network_identity: SmallNetworkIdentity,
#[data_size(skip)]
pub(super) network_identity: NetworkIdentity,
}
impl Reactor {
fn new_with_chainspec_loader(
(crashed, config): <Self as reactor::Reactor>::Config,
registry: &Registry,
chainspec_loader: ChainspecLoader,
chainspec_effects: Effects<chainspec_loader::Event>,
) -> Result<(Self, Effects<Event>), Error> {
let hard_reset_to_start_of_era = chainspec_loader.hard_reset_to_start_of_era();
let storage_config = config.map_ref(|cfg| cfg.storage.clone());
let storage = Storage::new(
&storage_config,
hard_reset_to_start_of_era,
chainspec_loader.chainspec().protocol_config.version,
crashed,
)?;
let contract_runtime = ContractRuntime::new(
chainspec_loader.initial_state_root_hash(),
chainspec_loader.initial_block_header(),
chainspec_loader.chainspec().protocol_config.version,
storage_config,
&config.value().contract_runtime,
registry,
)?;
// TODO: This integrity check is misplaced, it should be part of the components
// `handle_event` function. Ideally it would be in the constructor, but since a query to
// storage needs to be made, this is not possible.
//
// Refactoring this has been postponed for now, since it is unclear whether time-consuming
// integrity checks are even a good idea, as they can block the node for one or more hours
// on restarts (online checks are an alternative).
if crashed {
info!("running trie-store integrity check, this may take a while");
if let Some(state_roots) = storage.get_state_root_hashes_for_trie_check() {
let missing_trie_keys = contract_runtime.trie_store_check(state_roots.clone());
if !missing_trie_keys.is_empty() {
panic!(
"Fatal error! Trie-Key store is not empty.\n {:?}\n \
Wipe the DB to ensure operations.\n Present state_roots: {:?}",
missing_trie_keys, state_roots
)
}
}
}
let effects = reactor::wrap_effects(Event::Chainspec, chainspec_effects);
let small_network_identity = SmallNetworkIdentity::new()?;
let network_identity = NetworkIdentity::new();
let reactor = Reactor {
config,
chainspec_loader,
storage,
contract_runtime,
small_network_identity,
network_identity,
};
Ok((reactor, effects))
}
}
#[cfg(test)]
impl Reactor {
/// Inspect storage.
pub fn storage(&self) -> &Storage {
&self.storage
}
}
impl reactor::Reactor for Reactor {
type Event = Event;
type Config = (bool, WithDir<participating::Config>);
type Error = Error;
fn new(
config: Self::Config,
registry: &Registry,
event_queue: EventQueueHandle<Self::Event>,
_rng: &mut NodeRng,
) -> Result<(Self, Effects<Self::Event>), Error> {
let effect_builder = EffectBuilder::new(event_queue);
// Construct the `ChainspecLoader` first so we fail fast if the chainspec is invalid.
let (chainspec_loader, chainspec_effects) =
ChainspecLoader::new(config.1.dir(), effect_builder)?;
Self::new_with_chainspec_loader(config, registry, chainspec_loader, chainspec_effects)
}
fn dispatch_event(
&mut self,
effect_builder: EffectBuilder<Self::Event>,
rng: &mut NodeRng,
event: Event,
) -> Effects<Self::Event> {
match event {
Event::Chainspec(event) => reactor::wrap_effects(
Event::Chainspec,
self.chainspec_loader
.handle_event(effect_builder, rng, event),
),
Event::Storage(event) => reactor::wrap_effects(
Event::Storage,
self.storage.handle_event(effect_builder, rng, event),
),
Event::ContractRuntime(event) => reactor::wrap_effects(
Event::ContractRuntime,
self.contract_runtime
.handle_event(effect_builder, rng, event),
),
Event::StateStoreRequest(request) => {
self.dispatch_event(effect_builder, rng, Event::Storage(request.into()))
}
Event::ControlAnnouncement(_) => unreachable!("unhandled control announcement"),
}
}
fn | (&self) -> Option<ReactorExit> | maybe_exit | identifier_name |
initializer.rs | {
None
}
}
}
impl From<StorageRequest> for Event {
fn from(request: StorageRequest) -> Self {
Event::Storage(storage::Event::StorageRequest(request))
}
}
impl From<ContractRuntimeRequest> for Event {
fn from(request: ContractRuntimeRequest) -> Self {
Event::ContractRuntime(contract_runtime::Event::Request(Box::new(request)))
}
}
impl From<NetworkRequest<NodeId, Message>> for Event {
fn from(_request: NetworkRequest<NodeId, Message>) -> Self {
unreachable!("no network traffic happens during initialization")
}
}
impl From<ChainspecLoaderAnnouncement> for Event {
fn from(_announcement: ChainspecLoaderAnnouncement) -> Self {
unreachable!("no chainspec announcements happen during initialization")
}
}
impl From<LinearChainRequest<NodeId>> for Event {
fn from(_req: LinearChainRequest<NodeId>) -> Self {
unreachable!("no linear chain events happen during initialization")
}
}
impl From<NetworkRequest<NodeId, gossiper::Message<GossipedAddress>>> for Event {
fn from(_request: NetworkRequest<NodeId, gossiper::Message<GossipedAddress>>) -> Self {
unreachable!("no gossiper events happen during initialization")
}
}
impl From<ConsensusRequest> for Event {
fn from(_request: ConsensusRequest) -> Self {
unreachable!("no chainspec announcements happen during initialization")
}
}
impl From<RestRequest<NodeId>> for Event {
fn from(_request: RestRequest<NodeId>) -> Self {
unreachable!("no rest requests happen during initialization")
}
}
impl From<ContractRuntimeAnnouncement> for Event {
fn from(_request: ContractRuntimeAnnouncement) -> Self {
unreachable!("no block executor requests happen during initialization")
}
}
impl Display for Event {
fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
match self {
Event::Chainspec(event) => write!(formatter, "chainspec: {}", event),
Event::Storage(event) => write!(formatter, "storage: {}", event),
Event::ContractRuntime(event) => write!(formatter, "contract runtime: {:?}", event),
Event::StateStoreRequest(request) => {
write!(formatter, "state store request: {}", request)
}
Event::ControlAnnouncement(ctrl_ann) => write!(formatter, "control: {}", ctrl_ann),
}
}
}
/// Error type returned by the initializer reactor.
#[derive(Debug, Error)]
pub enum Error {
/// `Config` error.
#[error("config error: {0}")]
ConfigError(String),
/// Metrics-related error
#[error("prometheus (metrics) error: {0}")]
Metrics(#[from] prometheus::Error),
/// `ChainspecHandler` component error.
#[error("chainspec error: {0}")]
Chainspec(#[from] chainspec::Error),
/// `Storage` component error.
#[error("storage error: {0}")]
Storage(#[from] storage::Error),
/// `ContractRuntime` component error.
#[error("contract runtime config error: {0}")]
ContractRuntime(#[from] contract_runtime::ConfigError),
/// An error that occurred when creating a `SmallNetworkIdentity`.
#[error(transparent)]
SmallNetworkIdentityError(#[from] SmallNetworkIdentityError),
}
/// Initializer node reactor.
#[derive(DataSize, Debug)]
pub struct Reactor {
pub(super) config: WithDir<participating::Config>,
pub(super) chainspec_loader: ChainspecLoader,
pub(super) storage: Storage,
pub(super) contract_runtime: ContractRuntime,
pub(super) small_network_identity: SmallNetworkIdentity,
#[data_size(skip)]
pub(super) network_identity: NetworkIdentity,
}
impl Reactor {
fn new_with_chainspec_loader(
(crashed, config): <Self as reactor::Reactor>::Config,
registry: &Registry,
chainspec_loader: ChainspecLoader,
chainspec_effects: Effects<chainspec_loader::Event>,
) -> Result<(Self, Effects<Event>), Error> {
let hard_reset_to_start_of_era = chainspec_loader.hard_reset_to_start_of_era();
let storage_config = config.map_ref(|cfg| cfg.storage.clone());
let storage = Storage::new(
&storage_config,
hard_reset_to_start_of_era,
chainspec_loader.chainspec().protocol_config.version,
crashed,
)?;
let contract_runtime = ContractRuntime::new(
chainspec_loader.initial_state_root_hash(),
chainspec_loader.initial_block_header(),
chainspec_loader.chainspec().protocol_config.version,
storage_config,
&config.value().contract_runtime,
registry,
)?;
// TODO: This integrity check is misplaced; it should be part of the component's
// `handle_event` function. Ideally it would be in the constructor, but since a query to
// storage needs to be made, this is not possible.
//
// Refactoring this has been postponed for now, since it is unclear whether time-consuming
// integrity checks are even a good idea, as they can block the node for one or more hours
// on restarts (online checks are an alternative).
if crashed {
info!("running trie-store integrity check, this may take a while");
if let Some(state_roots) = storage.get_state_root_hashes_for_trie_check() {
let missing_trie_keys = contract_runtime.trie_store_check(state_roots.clone());
if !missing_trie_keys.is_empty() {
panic!(
"Fatal error! Trie-Key store is not empty.\n {:?}\n \
Wipe the DB to ensure operations.\n Present state_roots: {:?}",
missing_trie_keys, state_roots
)
}
}
}
let effects = reactor::wrap_effects(Event::Chainspec, chainspec_effects);
let small_network_identity = SmallNetworkIdentity::new()?;
let network_identity = NetworkIdentity::new();
let reactor = Reactor {
config,
chainspec_loader,
storage,
contract_runtime,
small_network_identity,
network_identity,
};
Ok((reactor, effects))
}
}
#[cfg(test)]
impl Reactor {
/// Inspect storage.
pub fn storage(&self) -> &Storage {
&self.storage
}
}
impl reactor::Reactor for Reactor {
type Event = Event;
type Config = (bool, WithDir<participating::Config>);
type Error = Error;
fn new(
config: Self::Config,
registry: &Registry,
event_queue: EventQueueHandle<Self::Event>,
_rng: &mut NodeRng,
) -> Result<(Self, Effects<Self::Event>), Error> {
let effect_builder = EffectBuilder::new(event_queue);
// Construct the `ChainspecLoader` first so we fail fast if the chainspec is invalid.
let (chainspec_loader, chainspec_effects) =
ChainspecLoader::new(config.1.dir(), effect_builder)?;
Self::new_with_chainspec_loader(config, registry, chainspec_loader, chainspec_effects)
}
fn dispatch_event(
&mut self,
effect_builder: EffectBuilder<Self::Event>,
rng: &mut NodeRng,
event: Event,
) -> Effects<Self::Event> {
match event {
Event::Chainspec(event) => reactor::wrap_effects(
Event::Chainspec,
self.chainspec_loader
.handle_event(effect_builder, rng, event),
),
Event::Storage(event) => reactor::wrap_effects(
Event::Storage,
self.storage.handle_event(effect_builder, rng, event),
),
Event::ContractRuntime(event) => reactor::wrap_effects(
Event::ContractRuntime,
self.contract_runtime
.handle_event(effect_builder, rng, event),
),
Event::StateStoreRequest(request) => {
self.dispatch_event(effect_builder, rng, Event::Storage(request.into()))
}
Event::ControlAnnouncement(_) => unreachable!("unhandled control announcement"),
}
}
fn maybe_exit(&self) -> Option<ReactorExit> {
self.chainspec_loader.reactor_exit()
}
}
#[cfg(test)]
pub mod test {
use super::*;
use crate::{
components::network::ENABLE_LIBP2P_NET_ENV_VAR, testing::network::NetworkedReactor,
types::Chainspec,
};
use std::{env, sync::Arc};
impl Reactor {
pub(crate) fn new_with_chainspec(
config: <Self as reactor::Reactor>::Config,
registry: &Registry,
event_queue: EventQueueHandle<Event>,
chainspec: Arc<Chainspec>,
) -> Result<(Self, Effects<Event>), Error> {
let effect_builder = EffectBuilder::new(event_queue);
let (chainspec_loader, chainspec_effects) =
ChainspecLoader::new_with_chainspec(chainspec, effect_builder);
Self::new_with_chainspec_loader(config, registry, chainspec_loader, chainspec_effects)
}
}
impl NetworkedReactor for Reactor {
type NodeId = NodeId;
fn node_id(&self) -> Self::NodeId {
if env::var(ENABLE_LIBP2P_NET_ENV_VAR).is_err() {
NodeId::from(&self.small_network_identity)
} else { | NodeId::from(&self.network_identity)
}
}
}
} | random_line_split |
|
initializer.rs | },
Component,
},
effect::{
announcements::{
ChainspecLoaderAnnouncement, ContractRuntimeAnnouncement, ControlAnnouncement,
},
requests::{
ConsensusRequest, ContractRuntimeRequest, LinearChainRequest, NetworkRequest,
RestRequest, StateStoreRequest, StorageRequest,
},
EffectBuilder, Effects,
},
protocol::Message,
reactor::{self, participating, EventQueueHandle, ReactorExit},
types::{chainspec, NodeId},
utils::WithDir,
NodeRng,
};
/// Top-level event for the reactor.
#[derive(Debug, From, Serialize)]
#[must_use]
pub enum Event {
/// Chainspec handler event.
#[from]
Chainspec(chainspec_loader::Event),
/// Storage event.
#[from]
Storage(#[serde(skip_serializing)] storage::Event),
/// Contract runtime event.
#[from]
ContractRuntime(#[serde(skip_serializing)] contract_runtime::Event),
/// Request for state storage.
#[from]
StateStoreRequest(StateStoreRequest),
/// Control announcement
#[from]
ControlAnnouncement(ControlAnnouncement),
}
impl ReactorEvent for Event {
fn as_control(&self) -> Option<&ControlAnnouncement> {
if let Self::ControlAnnouncement(ref ctrl_ann) = self {
Some(ctrl_ann)
} else {
None
}
}
}
impl From<StorageRequest> for Event {
fn from(request: StorageRequest) -> Self {
Event::Storage(storage::Event::StorageRequest(request))
}
}
impl From<ContractRuntimeRequest> for Event {
fn from(request: ContractRuntimeRequest) -> Self {
Event::ContractRuntime(contract_runtime::Event::Request(Box::new(request)))
}
}
impl From<NetworkRequest<NodeId, Message>> for Event {
fn from(_request: NetworkRequest<NodeId, Message>) -> Self {
unreachable!("no network traffic happens during initialization")
}
}
impl From<ChainspecLoaderAnnouncement> for Event {
fn from(_announcement: ChainspecLoaderAnnouncement) -> Self {
unreachable!("no chainspec announcements happen during initialization")
}
}
impl From<LinearChainRequest<NodeId>> for Event {
fn from(_req: LinearChainRequest<NodeId>) -> Self |
}
impl From<NetworkRequest<NodeId, gossiper::Message<GossipedAddress>>> for Event {
fn from(_request: NetworkRequest<NodeId, gossiper::Message<GossipedAddress>>) -> Self {
unreachable!("no gossiper events happen during initialization")
}
}
impl From<ConsensusRequest> for Event {
fn from(_request: ConsensusRequest) -> Self {
unreachable!("no chainspec announcements happen during initialization")
}
}
impl From<RestRequest<NodeId>> for Event {
fn from(_request: RestRequest<NodeId>) -> Self {
unreachable!("no rest requests happen during initialization")
}
}
impl From<ContractRuntimeAnnouncement> for Event {
fn from(_request: ContractRuntimeAnnouncement) -> Self {
unreachable!("no block executor requests happen during initialization")
}
}
impl Display for Event {
fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
match self {
Event::Chainspec(event) => write!(formatter, "chainspec: {}", event),
Event::Storage(event) => write!(formatter, "storage: {}", event),
Event::ContractRuntime(event) => write!(formatter, "contract runtime: {:?}", event),
Event::StateStoreRequest(request) => {
write!(formatter, "state store request: {}", request)
}
Event::ControlAnnouncement(ctrl_ann) => write!(formatter, "control: {}", ctrl_ann),
}
}
}
/// Error type returned by the initializer reactor.
#[derive(Debug, Error)]
pub enum Error {
/// `Config` error.
#[error("config error: {0}")]
ConfigError(String),
/// Metrics-related error
#[error("prometheus (metrics) error: {0}")]
Metrics(#[from] prometheus::Error),
/// `ChainspecHandler` component error.
#[error("chainspec error: {0}")]
Chainspec(#[from] chainspec::Error),
/// `Storage` component error.
#[error("storage error: {0}")]
Storage(#[from] storage::Error),
/// `ContractRuntime` component error.
#[error("contract runtime config error: {0}")]
ContractRuntime(#[from] contract_runtime::ConfigError),
/// An error that occurred when creating a `SmallNetworkIdentity`.
#[error(transparent)]
SmallNetworkIdentityError(#[from] SmallNetworkIdentityError),
}
/// Initializer node reactor.
#[derive(DataSize, Debug)]
pub struct Reactor {
pub(super) config: WithDir<participating::Config>,
pub(super) chainspec_loader: ChainspecLoader,
pub(super) storage: Storage,
pub(super) contract_runtime: ContractRuntime,
pub(super) small_network_identity: SmallNetworkIdentity,
#[data_size(skip)]
pub(super) network_identity: NetworkIdentity,
}
impl Reactor {
fn new_with_chainspec_loader(
(crashed, config): <Self as reactor::Reactor>::Config,
registry: &Registry,
chainspec_loader: ChainspecLoader,
chainspec_effects: Effects<chainspec_loader::Event>,
) -> Result<(Self, Effects<Event>), Error> {
let hard_reset_to_start_of_era = chainspec_loader.hard_reset_to_start_of_era();
let storage_config = config.map_ref(|cfg| cfg.storage.clone());
let storage = Storage::new(
&storage_config,
hard_reset_to_start_of_era,
chainspec_loader.chainspec().protocol_config.version,
crashed,
)?;
let contract_runtime = ContractRuntime::new(
chainspec_loader.initial_state_root_hash(),
chainspec_loader.initial_block_header(),
chainspec_loader.chainspec().protocol_config.version,
storage_config,
&config.value().contract_runtime,
registry,
)?;
// TODO: This integrity check is misplaced; it should be part of the component's
// `handle_event` function. Ideally it would be in the constructor, but since a query to
// storage needs to be made, this is not possible.
//
// Refactoring this has been postponed for now, since it is unclear whether time-consuming
// integrity checks are even a good idea, as they can block the node for one or more hours
// on restarts (online checks are an alternative).
if crashed {
info!("running trie-store integrity check, this may take a while");
if let Some(state_roots) = storage.get_state_root_hashes_for_trie_check() {
let missing_trie_keys = contract_runtime.trie_store_check(state_roots.clone());
if !missing_trie_keys.is_empty() {
panic!(
"Fatal error! Trie-Key store is not empty.\n {:?}\n \
Wipe the DB to ensure operations.\n Present state_roots: {:?}",
missing_trie_keys, state_roots
)
}
}
}
let effects = reactor::wrap_effects(Event::Chainspec, chainspec_effects);
let small_network_identity = SmallNetworkIdentity::new()?;
let network_identity = NetworkIdentity::new();
let reactor = Reactor {
config,
chainspec_loader,
storage,
contract_runtime,
small_network_identity,
network_identity,
};
Ok((reactor, effects))
}
}
#[cfg(test)]
impl Reactor {
/// Inspect storage.
pub fn storage(&self) -> &Storage {
&self.storage
}
}
impl reactor::Reactor for Reactor {
type Event = Event;
type Config = (bool, WithDir<participating::Config>);
type Error = Error;
fn new(
config: Self::Config,
registry: &Registry,
event_queue: EventQueueHandle<Self::Event>,
_rng: &mut NodeRng,
) -> Result<(Self, Effects<Self::Event>), Error> {
let effect_builder = EffectBuilder::new(event_queue);
// Construct the `ChainspecLoader` first so we fail fast if the chainspec is invalid.
let (chainspec_loader, chainspec_effects) =
ChainspecLoader::new(config.1.dir(), effect_builder)?;
Self::new_with_chainspec_loader(config, registry, chainspec_loader, chainspec_effects)
}
fn dispatch_event(
&mut self,
effect_builder: EffectBuilder<Self::Event>,
rng: &mut NodeRng,
event: Event,
) -> Effects<Self::Event> {
match event {
Event::Chainspec(event) => reactor::wrap_effects(
Event::Chainspec,
self.chainspec_loader
.handle_event(effect_builder, rng, event),
),
Event::Storage(event) => reactor::wrap_effects(
Event::Storage,
self.storage.handle_event(effect_builder, rng, event),
),
Event::ContractRuntime(event) => reactor::wrap_effects(
Event::ContractRuntime,
self.contract_runtime
.handle_event(effect_builder, rng, event),
),
Event::StateStoreRequest(request) => {
self.dispatch_event(effect_builder, rng, Event::Storage(request.into()))
}
Event::ControlAnnouncement(_) => unreachable!("unhandled control announcement"),
}
}
fn maybe_exit(&self) -> Option<ReactorExit> | {
unreachable!("no linear chain events happen during initialization")
} | identifier_body |
getMultiTractTemplate.py | coaddName = pexConfig.Field(
doc="coadd name: typically one of 'deep', 'goodSeeing', or 'dcr'",
dtype=str,
default="deep",
)
warpType = pexConfig.Field(
doc="Warp type of the coadd template: one of 'direct' or 'psfMatched'",
dtype=str,
default="direct",
)
coaddPsf = pexConfig.ConfigField(
doc="Configuration for CoaddPsf",
dtype=CoaddPsfConfig,
)
warp = pexConfig.ConfigField(
dtype=afwMath.Warper.ConfigClass,
doc="warper configuration",
)
statistic = pexConfig.Field(
dtype=str,
doc="How to combine tracts that overlap",
default="MEAN",
)
class GetCoaddAsMultiTractTemplateTask(pipeBase.Task):
"""Subtask to retrieve coadd from possibly different tracts and
use as an image difference template. It uses the tract closest to the
central point of the ccd as the reference tract. All other tracts will
be warped onto the reference tract.
The PSF of the resulting template will be a CoaddPSF of individual CoaddPSFs.
"""
ConfigClass = GetCoaddAsMultiTractTemplateConfig
_DefaultName = "GetCoaddAsMultiTractTemplateTask"
def __init__(self, *args, **kwargs):
pipeBase.Task.__init__(self, *args, **kwargs)
self.warper = afwMath.Warper.fromConfig(self.config.warp)
def run(self, exposure, sensorRef, templateIdList=None):
"""Retrieve and mosaic a template coadd that overlaps the exposure where
the template spans multiple tracts.
The resulting template image will be an average of all the input templates from
the separate tracts.
The PSF on the template is created by combining the CoaddPsf on each template image
into a meta-CoaddPsf.
Parameters
----------
exposure: `lsst.afw.image.Exposure`
an exposure for which to generate an overlapping template
sensorRef : TYPE
a Butler data reference that can be used to obtain coadd data
templateIdList : TYPE, optional
list of data ids (unused)
Returns
-------
result : `struct`
return a pipeBase.Struct:
- ``exposure`` : a template coadd exposure assembled out of patches
- ``sources`` : None for this subtask
"""
# Table for CoaddPSF
tractsSchema = afwTable.ExposureTable.makeMinimalSchema()
tractKey = tractsSchema.addField('tract', type=np.int32, doc='Which tract')
patchKey = tractsSchema.addField('patch', type=np.int32, doc='Which patch')
weightKey = tractsSchema.addField('weight', type=float, doc='Weight for each tract, should be 1')
tractsCatalog = afwTable.ExposureCatalog(tractsSchema)
skyMap = sensorRef.get(datasetType=self.config.coaddName + "Coadd_skyMap")
expWcs = exposure.getWcs()
expBoxD = geom.Box2D(exposure.getBBox())
expBoxD.grow(self.config.templateBorderSize)
ctrSkyPos = expWcs.pixelToSky(expBoxD.getCenter())
centralTractInfo = skyMap.findTract(ctrSkyPos)
if not centralTractInfo:
|
self.log.info("Central skyMap tract %s" % (centralTractInfo.getId(),))
skyCorners = [expWcs.pixelToSky(pixPos) for pixPos in expBoxD.getCorners()]
tractPatchList = skyMap.findTractPatchList(skyCorners)
if not tractPatchList:
raise RuntimeError("No suitable tract found")
self.log.info("All overlapping skyMap tracts %s" % ([a[0].getId() for a in tractPatchList]))
# Move central tract to front of the list and use as the reference
tracts = [tract[0].getId() for tract in tractPatchList]
centralIndex = tracts.index(centralTractInfo.getId())
tracts.insert(0, tracts.pop(centralIndex))
tractPatchList.insert(0, tractPatchList.pop(centralIndex))
coaddPsf = None
coaddFilter = None
nPatchesFound = 0
maskedImageList = []
weightList = []
for itract,tract in enumerate(tracts):
tractInfo = tractPatchList[itract][0]
coaddWcs = tractInfo.getWcs()
coaddBBox = geom.Box2D()
for skyPos in skyCorners:
coaddBBox.include(coaddWcs.skyToPixel(skyPos))
coaddBBox = geom.Box2I(coaddBBox)
if itract == 0:
# Define final wcs and bounding box from the reference tract
finalWcs = coaddWcs
finalBBox = coaddBBox
patchList = tractPatchList[itract][1]
for patchInfo in patchList:
self.log.info('Adding patch %s from tract %s' % (patchInfo.getIndex(),tract))
# Local patch information
patchSubBBox = geom.Box2I(patchInfo.getInnerBBox())
patchSubBBox.clip(coaddBBox)
patchInt = int(f"{patchInfo.getIndex()[0]}{patchInfo.getIndex()[1]}")
innerBBox = geom.Box2I(tractInfo._minimumBoundingBox(finalWcs))
if itract == 0:
# clip to image and tract boundaries
patchSubBBox.clip(finalBBox)
patchSubBBox.clip(innerBBox)
if patchSubBBox.getArea() == 0:
self.log.debug("No ovlerap for patch %s" % patchInfo)
continue
patchArgDict = dict(
datasetType="deepCoadd_sub",
bbox=patchSubBBox,
tract=tractInfo.getId(),
patch="%s,%s" % (patchInfo.getIndex()[0], patchInfo.getIndex()[1]),
filter=exposure.getFilter().getName()
)
coaddPatch = sensorRef.get(**patchArgDict)
if coaddFilter is None:
coaddFilter = coaddPatch.getFilter()
# create full image from final bounding box
exp = afwImage.ExposureF(finalBBox, finalWcs)
exp.maskedImage.set(np.nan, afwImage.Mask.getPlaneBitMask("NO_DATA"), np.nan)
exp.maskedImage.assign(coaddPatch.maskedImage, patchSubBBox)
maskedImageList.append(exp.maskedImage)
weightList.append(1)
record = tractsCatalog.addNew()
record.setPsf(coaddPatch.getPsf())
record.setWcs(coaddPatch.getWcs())
record.setPhotoCalib(coaddPatch.getPhotoCalib())
record.setBBox(patchSubBBox)
record.set(tractKey, tract)
record.set(patchKey, patchInt)
record.set(weightKey, 1.)
nPatchesFound += 1
else:
# compute the exposure bounding box in a tract that is not the reference tract
localBox = geom.Box2I()
for skyPos in skyCorners:
localBox.include(geom.Point2I(tractInfo.getWcs().skyToPixel(skyPos)))
# clip to patch bounding box
localBox.clip(patchSubBBox)
# grow border to deal with warping at edges
localBox.grow(self.config.templateBorderSize)
# clip to tract inner bounding box
localInnerBBox = geom.Box2I(tractInfo._minimumBoundingBox(tractInfo.getWcs()))
localBox.clip(localInnerBBox)
patchArgDict = dict(
datasetType="deepCoadd_sub",
bbox=localBox,
tract=tractInfo.getId(),
patch="%s,%s" % (patchInfo.getIndex()[0], patchInfo.getIndex()[1]),
filter=exposure.getFilter().getName()
)
coaddPatch = sensorRef.get(**patchArgDict)
# warp to reference tract wcs
xyTransform = afwGeom.makeWcsPairTransform(coaddPatch.getWcs(), finalWcs)
psfWarped = WarpedPsf(coaddPatch.getPsf(), xyTransform)
warped = self.warper.warpExposure(finalWcs, coaddPatch, maxBBox=finalBBox)
# check if the warped image is viable
if warped.getBBox().getArea() == 0:
self.log.info("No ovlerap for warped patch %s. Skipping" % patchInfo)
continue
warped.setPsf(psfWarped)
exp = afwImage.ExposureF(finalBBox, finalWcs)
exp.maskedImage.set(np.nan, afwImage.Mask.getPlaneBitMask("NO_DATA"), np.nan)
exp.maskedImage.assign(warped.maskedImage, warped.getBBox())
maskedImageList.append(exp.maskedImage)
weightList.append(1)
record = tractsCatalog.addNew()
record.setPsf(psfWarped)
record.setWcs(finalWcs)
record.setPhotoCalib(coadd | raise RuntimeError("No suitable tract found for central point") | conditional_block |
getMultiTractTemplate.py |
----------
exposure: `lsst.afw.image.Exposure`
an exposure for which to generate an overlapping template
sensorRef : TYPE
a Butler data reference that can be used to obtain coadd data
templateIdList : TYPE, optional
list of data ids (unused)
Returns
-------
result : `struct`
return a pipeBase.Struct:
- ``exposure`` : a template coadd exposure assembled out of patches
- ``sources`` : None for this subtask
"""
# Table for CoaddPSF
tractsSchema = afwTable.ExposureTable.makeMinimalSchema()
tractKey = tractsSchema.addField('tract', type=np.int32, doc='Which tract')
patchKey = tractsSchema.addField('patch', type=np.int32, doc='Which patch')
weightKey = tractsSchema.addField('weight', type=float, doc='Weight for each tract, should be 1')
tractsCatalog = afwTable.ExposureCatalog(tractsSchema)
skyMap = sensorRef.get(datasetType=self.config.coaddName + "Coadd_skyMap")
expWcs = exposure.getWcs()
expBoxD = geom.Box2D(exposure.getBBox())
expBoxD.grow(self.config.templateBorderSize)
ctrSkyPos = expWcs.pixelToSky(expBoxD.getCenter())
centralTractInfo = skyMap.findTract(ctrSkyPos)
if not centralTractInfo:
raise RuntimeError("No suitable tract found for central point")
self.log.info("Central skyMap tract %s" % (centralTractInfo.getId(),))
skyCorners = [expWcs.pixelToSky(pixPos) for pixPos in expBoxD.getCorners()]
tractPatchList = skyMap.findTractPatchList(skyCorners)
if not tractPatchList:
raise RuntimeError("No suitable tract found")
self.log.info("All overlapping skyMap tracts %s" % ([a[0].getId() for a in tractPatchList]))
# Move central tract to front of the list and use as the reference
tracts = [tract[0].getId() for tract in tractPatchList]
centralIndex = tracts.index(centralTractInfo.getId())
tracts.insert(0, tracts.pop(centralIndex))
tractPatchList.insert(0, tractPatchList.pop(centralIndex))
coaddPsf = None
coaddFilter = None
nPatchesFound = 0
maskedImageList = []
weightList = []
for itract,tract in enumerate(tracts):
tractInfo = tractPatchList[itract][0]
coaddWcs = tractInfo.getWcs()
coaddBBox = geom.Box2D()
for skyPos in skyCorners:
coaddBBox.include(coaddWcs.skyToPixel(skyPos))
coaddBBox = geom.Box2I(coaddBBox)
if itract == 0:
# Define final wcs and bounding box from the reference tract
finalWcs = coaddWcs
finalBBox = coaddBBox
patchList = tractPatchList[itract][1]
for patchInfo in patchList:
self.log.info('Adding patch %s from tract %s' % (patchInfo.getIndex(),tract))
# Local patch information
patchSubBBox = geom.Box2I(patchInfo.getInnerBBox())
patchSubBBox.clip(coaddBBox)
patchInt = int(f"{patchInfo.getIndex()[0]}{patchInfo.getIndex()[1]}")
innerBBox = geom.Box2I(tractInfo._minimumBoundingBox(finalWcs))
if itract == 0:
# clip to image and tract boundaries
patchSubBBox.clip(finalBBox)
patchSubBBox.clip(innerBBox)
if patchSubBBox.getArea() == 0:
self.log.debug("No ovlerap for patch %s" % patchInfo)
continue
patchArgDict = dict(
datasetType="deepCoadd_sub",
bbox=patchSubBBox,
tract=tractInfo.getId(),
patch="%s,%s" % (patchInfo.getIndex()[0], patchInfo.getIndex()[1]),
filter=exposure.getFilter().getName()
)
coaddPatch = sensorRef.get(**patchArgDict)
if coaddFilter is None:
coaddFilter = coaddPatch.getFilter()
# create full image from final bounding box
exp = afwImage.ExposureF(finalBBox, finalWcs)
exp.maskedImage.set(np.nan, afwImage.Mask.getPlaneBitMask("NO_DATA"), np.nan)
exp.maskedImage.assign(coaddPatch.maskedImage, patchSubBBox)
maskedImageList.append(exp.maskedImage)
weightList.append(1)
record = tractsCatalog.addNew()
record.setPsf(coaddPatch.getPsf())
record.setWcs(coaddPatch.getWcs())
record.setPhotoCalib(coaddPatch.getPhotoCalib())
record.setBBox(patchSubBBox)
record.set(tractKey, tract)
record.set(patchKey, patchInt)
record.set(weightKey, 1.)
nPatchesFound += 1
else:
# compute the exposure bounding box in a tract that is not the reference tract
localBox = geom.Box2I()
for skyPos in skyCorners:
localBox.include(geom.Point2I(tractInfo.getWcs().skyToPixel(skyPos)))
# clip to patch bounding box
localBox.clip(patchSubBBox)
# grow border to deal with warping at edges
localBox.grow(self.config.templateBorderSize)
# clip to tract inner bounding box
localInnerBBox = geom.Box2I(tractInfo._minimumBoundingBox(tractInfo.getWcs()))
localBox.clip(localInnerBBox)
patchArgDict = dict(
datasetType="deepCoadd_sub",
bbox=localBox,
tract=tractInfo.getId(),
patch="%s,%s" % (patchInfo.getIndex()[0], patchInfo.getIndex()[1]),
filter=exposure.getFilter().getName()
)
coaddPatch = sensorRef.get(**patchArgDict)
# warp to reference tract wcs
xyTransform = afwGeom.makeWcsPairTransform(coaddPatch.getWcs(), finalWcs)
psfWarped = WarpedPsf(coaddPatch.getPsf(), xyTransform)
warped = self.warper.warpExposure(finalWcs, coaddPatch, maxBBox=finalBBox)
# check if the warped image is viable
if warped.getBBox().getArea() == 0:
self.log.info("No ovlerap for warped patch %s. Skipping" % patchInfo)
continue
warped.setPsf(psfWarped)
exp = afwImage.ExposureF(finalBBox, finalWcs)
exp.maskedImage.set(np.nan, afwImage.Mask.getPlaneBitMask("NO_DATA"), np.nan)
exp.maskedImage.assign(warped.maskedImage, warped.getBBox())
maskedImageList.append(exp.maskedImage)
weightList.append(1)
record = tractsCatalog.addNew()
record.setPsf(psfWarped)
record.setWcs(finalWcs)
record.setPhotoCalib(coaddPatch.getPhotoCalib())
record.setBBox(warped.getBBox())
record.set(tractKey, tract)
record.set(patchKey, patchInt)
record.set(weightKey, 1.)
nPatchesFound += 1
if nPatchesFound == 0:
raise RuntimeError("No patches found!")
# Combine images from individual patches together
# Do not mask any values
statsFlags = afwMath.stringToStatisticsProperty(self.config.statistic)
maskMap = []
statsCtrl = afwMath.StatisticsControl()
statsCtrl.setNanSafe(True)
statsCtrl.setWeighted(True)
statsCtrl.setCalcErrorFromInputVariance(True)
coaddExposure = afwImage.ExposureF(finalBBox, finalWcs)
coaddExposure.maskedImage.set(np.nan, afwImage.Mask.getPlaneBitMask("NO_DATA"), np.nan)
xy0 = coaddExposure.getXY0()
coaddExposure.maskedImage = afwMath.statisticsStack(maskedImageList,
statsFlags, statsCtrl, weightList, 0, maskMap)
coaddExposure.maskedImage.setXY0(xy0)
coaddPsf = CoaddPsf(tractsCatalog, finalWcs, self.config.coaddPsf.makeControl())
if coaddPsf is None:
raise RuntimeError("No coadd Psf found!")
coaddExposure.setPsf(coaddPsf)
coaddExposure.setFilter(coaddFilter)
return pipeBase.Struct(exposure=coaddExposure, sources=None)
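# Hedged usage sketch for run() above; `diff_config` and `sensor_ref` are illustrative
# stand-ins for a configured task and a Butler data reference that can serve the
# "deepCoadd_sub" patches plus the skymap.
# task = GetCoaddAsMultiTractTemplateTask(config=diff_config)
# result = task.run(exposure, sensor_ref)
# template = result.exposure        # mosaicked template carrying a CoaddPsf
# assert result.sources is None     # this subtask never returns template sources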
def getCoaddDatasetName(self):
| """Return coadd name for given task config
Returns
-------
CoaddDatasetName : `string`
TODO: This nearly duplicates a method in CoaddBaseTask (DM-11985)
"""
warpType = self.config.warpType
suffix = "" if warpType == "direct" else warpType[0].upper() + warpType[1:]
return self.config.coaddName + "Coadd" + suffix | identifier_body |
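# Illustrative outputs of getCoaddDatasetName() given the config fields above:
#   coaddName="deep", warpType="direct"     -> "deepCoadd"
#   coaddName="deep", warpType="psfMatched" -> "deepCoaddPsfMatched"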
|
getMultiTractTemplate.py | coaddName = pexConfig.Field(
doc="coadd name: typically one of 'deep', 'goodSeeing', or 'dcr'",
dtype=str,
default="deep",
)
warpType = pexConfig.Field(
doc="Warp type of the coadd template: one of 'direct' or 'psfMatched'",
dtype=str,
default="direct",
)
coaddPsf = pexConfig.ConfigField(
doc="Configuration for CoaddPsf",
dtype=CoaddPsfConfig,
)
warp = pexConfig.ConfigField(
dtype=afwMath.Warper.ConfigClass,
doc="warper configuration",
)
statistic = pexConfig.Field(
dtype=str,
doc="How to combine tracts that overlap",
default="MEAN",
)
class GetCoaddAsMultiTractTemplateTask(pipeBase.Task):
"""Subtask to retrieve coadd from possibly different tracts and
use as an image difference template. It uses the tract closest to the
central point of the ccd as the reference tract. All other tracts will
be warped onto the reference tract.
The PSF of the resulting template will be a CoaddPSF of individual CoaddPSFs.
"""
ConfigClass = GetCoaddAsMultiTractTemplateConfig
_DefaultName = "GetCoaddAsMultiTractTemplateTask"
def __init__(self, *args, **kwargs):
pipeBase.Task.__init__(self, *args, **kwargs)
self.warper = afwMath.Warper.fromConfig(self.config.warp)
def | (self, exposure, sensorRef, templateIdList=None):
"""Retrieve and mosaic a template coadd that overlaps the exposure where
the template spans multiple tracts.
The resulting template image will be an average of all the input templates from
the separate tracts.
The PSF on the template is created by combining the CoaddPsf on each template image
into a meta-CoaddPsf.
Parameters
----------
exposure: `lsst.afw.image.Exposure`
an exposure for which to generate an overlapping template
sensorRef : TYPE
a Butler data reference that can be used to obtain coadd data
templateIdList : TYPE, optional
list of data ids (unused)
Returns
-------
result : `struct`
return a pipeBase.Struct:
- ``exposure`` : a template coadd exposure assembled out of patches
- ``sources`` : None for this subtask
"""
# Table for CoaddPSF
tractsSchema = afwTable.ExposureTable.makeMinimalSchema()
tractKey = tractsSchema.addField('tract', type=np.int32, doc='Which tract')
patchKey = tractsSchema.addField('patch', type=np.int32, doc='Which patch')
weightKey = tractsSchema.addField('weight', type=float, doc='Weight for each tract, should be 1')
tractsCatalog = afwTable.ExposureCatalog(tractsSchema)
skyMap = sensorRef.get(datasetType=self.config.coaddName + "Coadd_skyMap")
expWcs = exposure.getWcs()
expBoxD = geom.Box2D(exposure.getBBox())
expBoxD.grow(self.config.templateBorderSize)
ctrSkyPos = expWcs.pixelToSky(expBoxD.getCenter())
centralTractInfo = skyMap.findTract(ctrSkyPos)
if not centralTractInfo:
raise RuntimeError("No suitable tract found for central point")
self.log.info("Central skyMap tract %s" % (centralTractInfo.getId(),))
skyCorners = [expWcs.pixelToSky(pixPos) for pixPos in expBoxD.getCorners()]
tractPatchList = skyMap.findTractPatchList(skyCorners)
if not tractPatchList:
raise RuntimeError("No suitable tract found")
self.log.info("All overlapping skyMap tracts %s" % ([a[0].getId() for a in tractPatchList]))
# Move central tract to front of the list and use as the reference
tracts = [tract[0].getId() for tract in tractPatchList]
centralIndex = tracts.index(centralTractInfo.getId())
tracts.insert(0, tracts.pop(centralIndex))
tractPatchList.insert(0, tractPatchList.pop(centralIndex))
coaddPsf = None
coaddFilter = None
nPatchesFound = 0
maskedImageList = []
weightList = []
for itract,tract in enumerate(tracts):
tractInfo = tractPatchList[itract][0]
coaddWcs = tractInfo.getWcs()
coaddBBox = geom.Box2D()
for skyPos in skyCorners:
coaddBBox.include(coaddWcs.skyToPixel(skyPos))
coaddBBox = geom.Box2I(coaddBBox)
if itract == 0:
# Define final wcs and bounding box from the reference tract
finalWcs = coaddWcs
finalBBox = coaddBBox
patchList = tractPatchList[itract][1]
for patchInfo in patchList:
self.log.info('Adding patch %s from tract %s' % (patchInfo.getIndex(),tract))
# Local patch information
patchSubBBox = geom.Box2I(patchInfo.getInnerBBox())
patchSubBBox.clip(coaddBBox)
patchInt = int(f"{patchInfo.getIndex()[0]}{patchInfo.getIndex()[1]}")
innerBBox = geom.Box2I(tractInfo._minimumBoundingBox(finalWcs))
if itract == 0:
# clip to image and tract boundaries
patchSubBBox.clip(finalBBox)
patchSubBBox.clip(innerBBox)
if patchSubBBox.getArea() == 0:
self.log.debug("No ovlerap for patch %s" % patchInfo)
continue
patchArgDict = dict(
datasetType="deepCoadd_sub",
bbox=patchSubBBox,
tract=tractInfo.getId(),
patch="%s,%s" % (patchInfo.getIndex()[0], patchInfo.getIndex()[1]),
filter=exposure.getFilter().getName()
)
coaddPatch = sensorRef.get(**patchArgDict)
if coaddFilter is None:
coaddFilter = coaddPatch.getFilter()
# create full image from final bounding box
exp = afwImage.ExposureF(finalBBox, finalWcs)
exp.maskedImage.set(np.nan, afwImage.Mask.getPlaneBitMask("NO_DATA"), np.nan)
exp.maskedImage.assign(coaddPatch.maskedImage, patchSubBBox)
maskedImageList.append(exp.maskedImage)
weightList.append(1)
record = tractsCatalog.addNew()
record.setPsf(coaddPatch.getPsf())
record.setWcs(coaddPatch.getWcs())
record.setPhotoCalib(coaddPatch.getPhotoCalib())
record.setBBox(patchSubBBox)
record.set(tractKey, tract)
record.set(patchKey, patchInt)
record.set(weightKey, 1.)
nPatchesFound += 1
else:
# compute the exposure bounding box in a tract that is not the reference tract
localBox = geom.Box2I()
for skyPos in skyCorners:
localBox.include(geom.Point2I(tractInfo.getWcs().skyToPixel(skyPos)))
# clip to patch bounding box
localBox.clip(patchSubBBox)
# grow border to deal with warping at edges
localBox.grow(self.config.templateBorderSize)
# clip to tract inner bounding box
localInnerBBox = geom.Box2I(tractInfo._minimumBoundingBox(tractInfo.getWcs()))
localBox.clip(localInnerBBox)
patchArgDict = dict(
datasetType="deepCoadd_sub",
bbox=localBox,
tract=tractInfo.getId(),
patch="%s,%s" % (patchInfo.getIndex()[0], patchInfo.getIndex()[1]),
filter=exposure.getFilter().getName()
)
coaddPatch = sensorRef.get(**patchArgDict)
# warp to reference tract wcs
xyTransform = afwGeom.makeWcsPairTransform(coaddPatch.getWcs(), finalWcs)
psfWarped = WarpedPsf(coaddPatch.getPsf(), xyTransform)
warped = self.warper.warpExposure(finalWcs, coaddPatch, maxBBox=finalBBox)
# check if the warped image is viable
if warped.getBBox().getArea() == 0:
self.log.info("No ovlerap for warped patch %s. Skipping" % patchInfo)
continue
warped.setPsf(psfWarped)
exp = afwImage.ExposureF(finalBBox, finalWcs)
exp.maskedImage.set(np.nan, afwImage.Mask.getPlaneBitMask("NO_DATA"), np.nan)
exp.maskedImage.assign(warped.maskedImage, warped.getBBox())
maskedImageList.append(exp.maskedImage)
weightList.append(1)
record = tractsCatalog.addNew()
record.setPsf(psfWarped)
record.setWcs(finalWcs)
record.setPhotoCalib(coadd | run | identifier_name |
getMultiTractTemplate.py | coaddName = pexConfig.Field(
doc="coadd name: typically one of 'deep', 'goodSeeing', or 'dcr'",
dtype=str,
default="deep",
)
warpType = pexConfig.Field(
doc="Warp type of the coadd template: one of 'direct' or 'psfMatched'",
dtype=str,
default="direct",
)
coaddPsf = pexConfig.ConfigField(
doc="Configuration for CoaddPsf",
dtype=CoaddPsfConfig,
)
warp = pexConfig.ConfigField(
dtype=afwMath.Warper.ConfigClass,
doc="warper configuration",
)
statistic = pexConfig.Field(
dtype=str,
doc="How to combine tracts that overlap",
default="MEAN",
)
class GetCoaddAsMultiTractTemplateTask(pipeBase.Task):
"""Subtask to retrieve coadd from possibly different tracts and
use as an image difference template. It uses the tract closest to the
central point of the ccd as the reference tract. All other tracts will
be warped onto the reference tract.
The PSF of the resulting template will be a CoaddPSF of individual CoaddPSFs.
"""
ConfigClass = GetCoaddAsMultiTractTemplateConfig
_DefaultName = "GetCoaddAsMultiTractTemplateTask"
def __init__(self, *args, **kwargs):
pipeBase.Task.__init__(self, *args, **kwargs)
self.warper = afwMath.Warper.fromConfig(self.config.warp)
def run(self, exposure, sensorRef, templateIdList=None):
"""Retrieve and mosaic a template coadd that overlaps the exposure where
the template spans multiple tracts.
The resulting template image will be an average of all the input templates from
the separate tracts.
The PSF on the template is created by combining the CoaddPsf on each template image
into a meta-CoaddPsf.
Parameters
----------
exposure: `lsst.afw.image.Exposure`
an exposure for which to generate an overlapping template
sensorRef : TYPE
a Butler data reference that can be used to obtain coadd data
templateIdList : TYPE, optional
list of data ids (unused)
Returns
-------
result : `struct`
return a pipeBase.Struct:
- ``exposure`` : a template coadd exposure assembled out of patches
- ``sources`` : None for this subtask
"""
# Table for CoaddPSF
tractsSchema = afwTable.ExposureTable.makeMinimalSchema()
tractKey = tractsSchema.addField('tract', type=np.int32, doc='Which tract')
patchKey = tractsSchema.addField('patch', type=np.int32, doc='Which patch')
weightKey = tractsSchema.addField('weight', type=float, doc='Weight for each tract, should be 1')
tractsCatalog = afwTable.ExposureCatalog(tractsSchema)
skyMap = sensorRef.get(datasetType=self.config.coaddName + "Coadd_skyMap")
expWcs = exposure.getWcs()
expBoxD = geom.Box2D(exposure.getBBox())
expBoxD.grow(self.config.templateBorderSize)
ctrSkyPos = expWcs.pixelToSky(expBoxD.getCenter())
centralTractInfo = skyMap.findTract(ctrSkyPos)
if not centralTractInfo:
raise RuntimeError("No suitable tract found for central point")
self.log.info("Central skyMap tract %s" % (centralTractInfo.getId(),))
skyCorners = [expWcs.pixelToSky(pixPos) for pixPos in expBoxD.getCorners()]
tractPatchList = skyMap.findTractPatchList(skyCorners)
if not tractPatchList:
raise RuntimeError("No suitable tract found")
self.log.info("All overlapping skyMap tracts %s" % ([a[0].getId() for a in tractPatchList]))
# Move central tract to front of the list and use as the reference
tracts = [tract[0].getId() for tract in tractPatchList]
centralIndex = tracts.index(centralTractInfo.getId())
tracts.insert(0, tracts.pop(centralIndex))
tractPatchList.insert(0, tractPatchList.pop(centralIndex))
coaddPsf = None
coaddFilter = None
nPatchesFound = 0
maskedImageList = []
weightList = []
for itract,tract in enumerate(tracts):
tractInfo = tractPatchList[itract][0]
coaddWcs = tractInfo.getWcs()
coaddBBox = geom.Box2D()
for skyPos in skyCorners:
coaddBBox.include(coaddWcs.skyToPixel(skyPos))
coaddBBox = geom.Box2I(coaddBBox)
if itract == 0:
# Define final wcs and bounding box from the reference tract
finalWcs = coaddWcs
finalBBox = coaddBBox
patchList = tractPatchList[itract][1]
for patchInfo in patchList:
self.log.info('Adding patch %s from tract %s' % (patchInfo.getIndex(),tract))
| patchInt = int(f"{patchInfo.getIndex()[0]}{patchInfo.getIndex()[1]}")
innerBBox = geom.Box2I(tractInfo._minimumBoundingBox(finalWcs))
if itract == 0:
# clip to image and tract boundaries
patchSubBBox.clip(finalBBox)
patchSubBBox.clip(innerBBox)
if patchSubBBox.getArea() == 0:
self.log.debug("No ovlerap for patch %s" % patchInfo)
continue
patchArgDict = dict(
datasetType="deepCoadd_sub",
bbox=patchSubBBox,
tract=tractInfo.getId(),
patch="%s,%s" % (patchInfo.getIndex()[0], patchInfo.getIndex()[1]),
filter=exposure.getFilter().getName()
)
coaddPatch = sensorRef.get(**patchArgDict)
if coaddFilter is None:
coaddFilter = coaddPatch.getFilter()
# create full image from final bounding box
exp = afwImage.ExposureF(finalBBox, finalWcs)
exp.maskedImage.set(np.nan, afwImage.Mask.getPlaneBitMask("NO_DATA"), np.nan)
exp.maskedImage.assign(coaddPatch.maskedImage, patchSubBBox)
maskedImageList.append(exp.maskedImage)
weightList.append(1)
record = tractsCatalog.addNew()
record.setPsf(coaddPatch.getPsf())
record.setWcs(coaddPatch.getWcs())
record.setPhotoCalib(coaddPatch.getPhotoCalib())
record.setBBox(patchSubBBox)
record.set(tractKey, tract)
record.set(patchKey, patchInt)
record.set(weightKey, 1.)
nPatchesFound += 1
else:
# compute the exposure bounding box in a tract that is not the reference tract
localBox = geom.Box2I()
for skyPos in skyCorners:
localBox.include(geom.Point2I(tractInfo.getWcs().skyToPixel(skyPos)))
# clip to patch bounding box
localBox.clip(patchSubBBox)
# grow border to deal with warping at edges
localBox.grow(self.config.templateBorderSize)
# clip to tract inner bounding box
localInnerBBox = geom.Box2I(tractInfo._minimumBoundingBox(tractInfo.getWcs()))
localBox.clip(localInnerBBox)
patchArgDict = dict(
datasetType="deepCoadd_sub",
bbox=localBox,
tract=tractInfo.getId(),
patch="%s,%s" % (patchInfo.getIndex()[0], patchInfo.getIndex()[1]),
filter=exposure.getFilter().getName()
)
coaddPatch = sensorRef.get(**patchArgDict)
# warp to reference tract wcs
xyTransform = afwGeom.makeWcsPairTransform(coaddPatch.getWcs(), finalWcs)
psfWarped = WarpedPsf(coaddPatch.getPsf(), xyTransform)
warped = self.warper.warpExposure(finalWcs, coaddPatch, maxBBox=finalBBox)
# check if the warped image is viable
if warped.getBBox().getArea() == 0:
self.log.info("No ovlerap for warped patch %s. Skipping" % patchInfo)
continue
warped.setPsf(psfWarped)
exp = afwImage.ExposureF(finalBBox, finalWcs)
exp.maskedImage.set(np.nan, afwImage.Mask.getPlaneBitMask("NO_DATA"), np.nan)
exp.maskedImage.assign(warped.maskedImage, warped.getBBox())
maskedImageList.append(exp.maskedImage)
weightList.append(1)
record = tractsCatalog.addNew()
record.setPsf(psfWarped)
record.setWcs(finalWcs)
record.setPhotoCalib(coaddPatch | # Local patch information
patchSubBBox = geom.Box2I(patchInfo.getInnerBBox())
patchSubBBox.clip(coaddBBox) | random_line_split |
arm.py | _joint_limits(abs_input):
return False
self._position_joint_desired = np.copy(abs_input)
joint_positions = self._get_joint_positions_all(abs_input)
p.setJointMotorControlArray(self.body,
self.joints,
p.POSITION_CONTROL,
targetPositions=joint_positions,
targetVelocities=[0.] * len(joint_positions))
return abs_input
def move(self, abs_input: np.ndarray, link_index=None) -> [bool, np.ndarray]:
"""
Absolute translation in Cartesian space (RCM frame).
Set target joint positions without actual physical move (need pybullet to step).
:param abs_input: the absolute translation you want to make (in Cartesian space, tip_T_rcm, 4*4).
:param link_index: the index for the link to compute inverse kinematics; should be consistent with dVRK.
:return: whether or not able to reach the given input.
"""
assert abs_input.shape == (4, 4)
if link_index is None:
# default link index is the DoF
link_index = self.EEF_LINK_INDEX
pose_world = self.pose_rcm2world(abs_input, 'tuple')
# joints_inv = np.array(inverse_kinematics(self.body, self.EEF_LINK_INDEX,
# pose_world[0], pose_world[1]))
joints_inv = self.inverse_kinematics(pose_world, link_index)
return self.move_joint(joints_inv)
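# Hedged usage sketch for move(): `psm` stands for an instance of a concrete Arm subclass
# and `p` for the connected PyBullet client; both are assumptions, not names from this file.
# pose_rcm = psm.get_current_position()      # 4x4 tip pose in the RCM frame
# pose_rcm[2, 3] += 0.005                    # nudge the tip along the RCM z-axis
# psm.move(pose_rcm)                         # solves IK and only sets joint targets
# for _ in range(100):
#     p.stepSimulation()                     # the physical motion happens as PyBullet steps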
def update_rcm_pose(self):
""" Update the world_T_rcm (wTr) and rcm_T_world (rTw) transformation matrix.
"""
positions = get_joint_positions(self.body, self.joints)  # dummy positions; they do not affect the RCM pose
# RCM pose in the world frame
world_pose_rcm = forward_kinematics(self.body, self.joints, positions, self.RCM_LINK_INDEX)
self.wTr = get_matrix_from_pose_2d(world_pose_rcm) # world_T_rcm
self.rTw = np.linalg.inv(self.wTr)
def update_tip_pose(self):
"""
Update the eef_T_tip (eTt) and tip_T_eef (tTe) transformation matrix.
The EEF link can be either the same link of Tip or the other link.
"""
world_pose_eef = get_link_pose(self.body, self.EEF_LINK_INDEX)
wTe = get_matrix_from_pose_2d(world_pose_eef) # world_T_eef
world_pose_eef = get_link_pose(self.body, self.TIP_LINK_INDEX)
wTt = get_matrix_from_pose_2d(world_pose_eef) # world_T_tip
self.eTt = np.matmul(np.linalg.inv(wTe), wTt)
self.tTe = np.linalg.inv(self.eTt)
def pose_rcm2world(self, pose: Union[tuple, list, np.ndarray], option=None):
"""
PyBullet helper function to transform pose from the RCM frame to the world frame.
With tool-tip offset.
:param pose: offset 'tip' pose in the RCM frame; normalized by the scaling factor.
:param option: which output type of transformed pose should be, 'tuple' or 'matrix'.
:return: pose in the world frame.
"""
# rcm_T_tip -> rcm_T_tool
pose_rcm = self._pose_transform(pose, np.linalg.inv(self.tool_T_tip), premultiply=False)
pose_rcm[0: 3, 3] *= self.scaling # recover the original size
# rcm_T_tool -> world_T_tool
pose_world = self._pose_transform(pose_rcm, self.wTr)
if option == 'tuple' or (option is None and isinstance(pose, (tuple, list))):
pose_world = get_pose_2d_from_matrix(pose_world)
return pose_world
def pose_world2rcm(self, pose: Union[tuple, list, np.ndarray], option=None):
"""
PyBullet helper function to transform pose from the world frame to the RCM frame.
With tool-tip offset.
:param pose: 'tool' (eef) pose in the world frame.
:param option: which type of transformed pose should be, 'tuple' or 'matrix'.
:return: pose in the RCM frame; normalized by the scaling factor.
"""
# world_T_tool -> rcm_T_tool
pose_rcm = self._pose_transform(pose, self.rTw, premultiply=True)
# rcm_T_tool -> rcm_T_tip
pose_rcm = np.matmul(pose_rcm, self.tool_T_tip)
pose_rcm[0: 3, 3] /= self.scaling # scaled
if option == 'tuple' or (option is None and isinstance(pose, (tuple, list))):
pose_rcm = get_pose_2d_from_matrix(pose_rcm)
return pose_rcm
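# Self-contained numpy sketch (not part of the class) of the algebra used by
# pose_rcm2world / pose_world2rcm above; wTr, tool_T_tip and scaling are toy assumptions.
# A rotation-only tip offset is chosen so the round trip is exact in this toy setting.
import numpy as np
wTr = np.eye(4); wTr[:3, 3] = [0.1, 0.2, 0.3]                # world_T_rcm
tool_T_tip = np.eye(4)
tool_T_tip[:3, :3] = [[0, -1, 0], [1, 0, 0], [0, 0, 1]]      # 90 deg about z, no translation
scaling = 5.0
pose_in_rcm = np.eye(4); pose_in_rcm[:3, 3] = [0.0, 0.0, -0.05]   # tip pose in the RCM frame
# rcm -> world (mirrors pose_rcm2world)
pose = pose_in_rcm @ np.linalg.inv(tool_T_tip)
pose[:3, 3] *= scaling
world_T_tool = wTr @ pose
# world -> rcm (mirrors pose_world2rcm)
back = np.linalg.inv(wTr) @ world_T_tool
back = back @ tool_T_tip
back[:3, 3] /= scaling
assert np.allclose(back, pose_in_rcm)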
def pose_tip2eef(self, pose: Union[tuple, list, np.ndarray], option=None):
"""
Helper function to transform a tip pose into the corresponding eef pose.
:param pose: actual tip pose in any frame.
:param option: which type of transformed pose should be, 'tuple' or 'matrix'.
:return: eef pose in the same frame as the input tip pose.
"""
# any_T_tip -> any_T_eef
pose_eef = self._pose_transform(pose, self.tTe, premultiply=False)
if option == 'tuple' or (option is None and isinstance(pose, (tuple, list))):
pose_eef = get_pose_2d_from_matrix(pose_eef)
return pose_eef
def reset_joint(self, abs_input: [list, np.ndarray]) -> [bool, np.ndarray]:
"""
Helper function for PyBullet initial reset.
Not recommend to use during simulation.
"""
if not self._check_joint_limits(abs_input):
return
joint_positions = self._get_joint_positions_all(abs_input)
set_joint_positions(self.body, self.joints, joint_positions)
return self.move_joint(abs_input)
def inverse_kinematics(self, pose_world: tuple, link_index: None) -> np.ndarray:
"""
Compute the inverse kinematics using PyBullet built-in methods.
Given the pose in the world frame, output the joint positions normalized by self.scaling.
"""
if link_index is None:
link_index = self.DoF - 1
joints_inv = p.calculateInverseKinematics(
bodyUniqueId=self.body,
endEffectorLinkIndex=link_index,
targetPosition=pose_world[0], # inertial pose, not joint pose
targetOrientation=pose_world[1],
lowerLimits=self.limits['lower'][:self.DoF],
upperLimits=self.limits['upper'][:self.DoF],
jointRanges=self.limits['upper'][:self.DoF] - self.limits['lower'][:self.DoF],
restPoses=[0] * self.DoF,
residualThreshold=1e-9, # can tune
maxNumIterations=200
)
# joints_inv = inverse_kinematics(self.body, link_index, pose_world[0], pose_world[1])
joints_inv = np.array(joints_inv)
for i in range(self.DoF):
if self.JOINT_TYPES[i] == 'P':
joints_inv[i] /= self.scaling
return wrap_angle(joints_inv[:self.DoF])
def get_jacobian_spatial(self, qs=None) -> np.ndarray:
"""
Calculate the Jacobian matrix in the base frame (world/RCM) using the Peter Corke robotics toolbox.
(PyBullet itself appears to use the initial frame instead of the joint frame.)
Return the Jacobian matrix with shape (6, DoF).
"""
if qs is None:
qs = self.get_current_joint_position()
return self.robot.jacob0(qs)
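# Minimal numeric sketch of how the spatial Jacobian is typically used; the matrix below is
# a random stand-in for get_jacobian_spatial(), and the twist ordering (linear part first,
# as returned by roboticstoolbox's jacob0) is an assumption worth re-checking per version.
import numpy as np
J = np.random.default_rng(0).standard_normal((6, 6))   # stand-in for a 6-DoF arm Jacobian
qdot = np.array([0.1, 0.0, 0.002, 0.0, 0.0, 0.0])      # joint velocities (rad/s, m/s)
twist = J @ qdot                                        # tool twist: linear then angular part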
def _check_joint_limits(self, abs_input: [list, np.ndarray]):
""" Check if the joint set is within the joint limits.
"""
assert len(abs_input) == self.DoF, "The number of joints should match the arm DoF."
if not np.all(np.bitwise_and(abs_input >= self.limits['lower'][:self.DoF],
abs_input <= self.limits['upper'][:self.DoF])):
print("Joint position out of valid range!")
print("Set joint:", abs_input)
return False
return True
def _get_joint_positions_all(self, abs_input: [list, np.ndarray]):
""" With the consideration of parallel mechanism constraints and other redundant joints.
"""
return np.copy(abs_input)
@staticmethod
def _pose_transform(pose, mat: np.ndarray, premultiply=True) -> np.ndarray:
"""
:param pose: tuple (position (3), orientation (4)) or matrix (4*4).
:param mat: transformation matrix.
:param premultiply: premultiply or postmultiply the mat.
:return: pose in the transformed frame.
"""
if isinstance(pose, (tuple, list)):
pose_ori = get_matrix_from_pose_2d(pose)
else: | pose_ori = pose.copy()
if premultiply:
pose_tf = np.matmul(mat, pose_ori)
else:
pose_tf = np.matmul(pose_ori, mat) | random_line_split |
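# Toy numpy illustration (values made up) of the premultiply/postmultiply choice above:
# premultiplying re-expresses a pose in a new base frame, while postmultiplying appends an
# offset expressed in the pose's own frame, matching how wTr and tool_T_tip are used here.
import numpy as np
wTr = np.eye(4); wTr[:3, 3] = [0.0, 0.0, 0.1]      # world_T_rcm
rTt = np.eye(4); rTt[:3, 3] = [0.0, 0.0, -0.05]    # rcm_T_tool
tTe = np.eye(4); tTe[2, 3] = 0.01                  # offset expressed in the tool frame
wTt = wTr @ rTt     # premultiply: tool pose expressed in the world frame
wTe = wTt @ tTe     # postmultiply: same pose pushed forward by a tool-frame offset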
|
arm.py | 0., 1.),
limits=None, tool_T_tip=np.eye(4), scaling=1.):
"""
:param urdf_file: URDF fileName.
:param pos: basePosition.
:param orn: baseOrientation in quaternion.
"""
# should connect to the PyBullet server first
self.body = p.loadURDF(urdf_file,
np.array(pos) * scaling, orn,
useFixedBase=True, globalScaling=scaling,
flags=p.URDF_MAINTAIN_LINK_ORDER) # self.collision=True is not suitable
self.joints = get_joints(self.body)
self._position_joint_desired = np.zeros(self.DoF)
self.limits = limits
self.tool_T_tip = tool_T_tip # tool_T_tip offset
self.scaling = scaling # scaling factor
# update RCM pose and related transformations
self.wTr, self.rTw = None, None
self.update_rcm_pose()
# update EEF and TIP related transformations
self.eTt, self.tTe = None, None
self.update_tip_pose()
self._set_collision()
# self._add_constraint() # have effect when the joint positions are not set
# use roboticstoolbox to calculate the forward and inverse kinematics quickly
links = []
for i in range(self.DoF):
# DH parameters
if self.JOINT_TYPES[i] == 'R':
links.append(rtb.RevoluteMDH(alpha=self.ALPHA[i], a=self.A[i], d=self.D[i], offset=self.THETA[i]))
else:
links.append(rtb.PrismaticMDH(alpha=self.ALPHA[i], a=self.A[i], theta=self.THETA[i], offset=self.D[i]))
self.robot = rtb.DHRobot(links, name=self.NAME)
def get_current_position(self) -> np.ndarray:
""" Get the 'current cartesian position' of the arm (RCM frame).
Return 4*4 matrix. """
pose_world = forward_kinematics(self.body, eef_link=self.DoF - 1)
pose_rcm = self.pose_world2rcm(pose_world, 'matrix')
return pose_rcm
def get_current_joint_position(self) -> list:
""" Get the 'current joint position' of the arm. """
joint_positions = get_joint_positions(self.body, self.joints[:self.DoF])
for i in range(self.DoF):
if self.JOINT_TYPES[i] == 'P':
# get the unscaled joint position
joint_positions[i] /= self.scaling
return joint_positions
def get_desired_joint_position(self):
|
def get_joint_number(self) -> int:
""" Get the number of joints on the arm specified. """
return self.DoF
def dmove_joint(self, delta_pos: [list, np.ndarray]) -> [bool, np.ndarray]:
""" Incremental move in joint space.
"""
if not isinstance(delta_pos, np.ndarray):
delta_pos = np.array(delta_pos)
abs_pos = np.array(self.get_current_joint_position()) # or self._position_joint_desired ?
abs_pos += delta_pos
return self.move_joint(abs_pos)
def dmove_joint_one(self, delta_pos: float, indices: int) -> bool:
""" Incremental index move of 1 joint in joint space.
"""
return self.dmove_joint_some(np.array([delta_pos]), np.array([indices]))
def dmove_joint_some(self, delta_pos: np.ndarray, indices: np.ndarray) -> bool:
""" Incremental index move of a series of joints in joint space.
"""
if not len(delta_pos) == len(indices):
return False
abs_pos = np.array(self.get_current_joint_position())
for i in range(len(indices)):
abs_pos[indices[i]] += delta_pos[i]
return self.move_joint(abs_pos)
def move_joint(self, abs_input: [list, np.ndarray]) -> [bool, np.ndarray]:
"""
Absolute move in joint space.
Set desired joint positions without actual physical move (need pybullet to step).
:param abs_input: the absolute translation you want to make (in joint space).
:return: whether or not able to reach the given input.
"""
if not self._check_joint_limits(abs_input):
return False
self._position_joint_desired = np.copy(abs_input)
joint_positions = self._get_joint_positions_all(abs_input)
p.setJointMotorControlArray(self.body,
self.joints,
p.POSITION_CONTROL,
targetPositions=joint_positions,
targetVelocities=[0.] * len(joint_positions))
return abs_input
def move(self, abs_input: np.ndarray, link_index=None) -> [bool, np.ndarray]:
"""
Absolute translation in Cartesian space (RCM frame).
Set target joint positions without actual physical move (need pybullet to step).
:param abs_input: the absolute translation you want to make (in Cartesian space, tip_T_rcm, 4*4).
:param link_index: the index for the link to compute inverse kinematics; should be consistent with dVRK.
:return: whether or not able to reach the given input.
"""
assert abs_input.shape == (4, 4)
if link_index is None:
# default link index is the DoF
link_index = self.EEF_LINK_INDEX
pose_world = self.pose_rcm2world(abs_input, 'tuple')
# joints_inv = np.array(inverse_kinematics(self.body, self.EEF_LINK_INDEX,
# pose_world[0], pose_world[1]))
joints_inv = self.inverse_kinematics(pose_world, link_index)
return self.move_joint(joints_inv)
def update_rcm_pose(self):
""" Update the world_T_rcm (wTr) and rcm_T_world (rTw) transformation matrix.
"""
positions = get_joint_positions(self.body, self.joints)  # dummy positions; they do not affect the RCM pose
# RCM pose in the world frame
world_pose_rcm = forward_kinematics(self.body, self.joints, positions, self.RCM_LINK_INDEX)
self.wTr = get_matrix_from_pose_2d(world_pose_rcm) # world_T_rcm
self.rTw = np.linalg.inv(self.wTr)
def update_tip_pose(self):
"""
Update the eef_T_tip (eTt) and tip_T_eef (tTe) transformation matrix.
The EEF link can be either the same link as the tip or a different link.
"""
world_pose_eef = get_link_pose(self.body, self.EEF_LINK_INDEX)
wTe = get_matrix_from_pose_2d(world_pose_eef) # world_T_eef
world_pose_eef = get_link_pose(self.body, self.TIP_LINK_INDEX)
wTt = get_matrix_from_pose_2d(world_pose_eef) # world_T_tip
self.eTt = np.matmul(np.linalg.inv(wTe), wTt)
self.tTe = np.linalg.inv(self.eTt)
def pose_rcm2world(self, pose: Union[tuple, list, np.ndarray], option=None):
"""
PyBullet helper function to transform pose from the RCM frame to the world frame.
With tool-tip offset.
:param pose: offset 'tip' pose in the RCM frame; normalized by the scaling factor.
:param option: which output type of transformed pose should be, 'tuple' or 'matrix'.
:return: pose in the world frame.
"""
# rcm_T_tip -> rcm_T_tool
pose_rcm = self._pose_transform(pose, np.linalg.inv(self.tool_T_tip), premultiply=False)
pose_rcm[0: 3, 3] *= self.scaling # recover the original size
# rcm_T_tool -> world_T_tool
pose_world = self._pose_transform(pose_rcm, self.wTr)
if option == 'tuple' or (option is None and isinstance(pose, (tuple, list))):
pose_world = get_pose_2d_from_matrix(pose_world)
return pose_world
def pose_world2rcm(self, pose: Union[tuple, list, np.ndarray], option=None):
"""
PyBullet helper function to transform pose from the world frame to the RCM frame.
With tool-tip offset.
:param pose: 'tool' (eef) pose in the world frame.
:param option: which type of transformed pose should be, 'tuple' or 'matrix'.
:return: pose in the RCM frame; normalized by the scaling factor.
"""
# world_T_tool -> rcm_T_tool
pose_rcm = self._pose_transform(pose, self.rTw, premultiply=True)
# rcm_T_tool -> rcm_T_tip
pose_rcm = np.matmul(pose_rcm, self.tool_T_tip)
pose_rcm[0: 3, 3] /= self.scaling # scaled
if option == 'tuple' or (option is None and isinstance(pose, (tuple, list))):
pose_rcm = get_pose_2d_from_matrix(pose_rcm)
return pose_rcm
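# Hedged sanity check (illustrative only): pose_rcm2world() and pose_world2rcm()
# are intended to be inverses up to the tool/tip offset and the scaling factor,
# so a round trip should reproduce the input; `psm` is an assumed instance name.
#   pose_rcm = psm.get_current_position()                # 4*4 tip pose in the RCM frame
#   pose_world = psm.pose_rcm2world(pose_rcm, 'matrix')
#   assert np.allclose(psm.pose_world2rcm(pose_world, 'matrix'), pose_rcm, atol=1e-6)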
def pose_tip2eef(self, pose: Union[tuple, list, np.ndarray | """ Get the 'desired joint position' of the arm. """
return self._position_joint_desired | identifier_body |
arm.py | self.limits = limits
self.tool_T_tip = tool_T_tip # tool_T_tip offset
self.scaling = scaling # scaling factor
# update RCM pose and related transformations
self.wTr, self.rTw = None, None
self.update_rcm_pose()
# update EEF and TIP related transformations
self.eTt, self.tTe = None, None
self.update_tip_pose()
self._set_collision()
# self._add_constraint() # have effect when the joint positions are not set
# use roboticstoolbox to calculate the forward and inverse kinematics quickly
links = []
for i in range(self.DoF):
# DH parameters
if self.JOINT_TYPES[i] == 'R':
links.append(rtb.RevoluteMDH(alpha=self.ALPHA[i], a=self.A[i], d=self.D[i], offset=self.THETA[i]))
else:
links.append(rtb.PrismaticMDH(alpha=self.ALPHA[i], a=self.A[i], theta=self.THETA[i], offset=self.D[i]))
self.robot = rtb.DHRobot(links, name=self.NAME)
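# Hedged cross-check (illustrative only): the MDH model built above can be
# queried with the standard roboticstoolbox API, e.g. to compare its forward
# kinematics against the PyBullet model; `q0` is an assumed joint vector.
#   q0 = np.zeros(self.DoF)
#   T0 = self.robot.fkine(q0)   # spatialmath SE3 object
#   print(T0.A)                 # 4*4 homogeneous transform as a numpy array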
def get_current_position(self) -> np.ndarray:
""" Get the 'current cartesian position' of the arm (RCM frame).
Return 4*4 matrix. """
pose_world = forward_kinematics(self.body, eef_link=self.DoF - 1)
pose_rcm = self.pose_world2rcm(pose_world, 'matrix')
return pose_rcm
def get_current_joint_position(self) -> list:
""" Get the 'current joint position' of the arm. """
joint_positions = get_joint_positions(self.body, self.joints[:self.DoF])
for i in range(self.DoF):
if self.JOINT_TYPES[i] == 'P':
# get the unscaled joint position
joint_positions[i] /= self.scaling
return joint_positions
def get_desired_joint_position(self):
""" Get the 'desired joint position' of the arm. """
return self._position_joint_desired
def get_joint_number(self) -> int:
""" Get the number of joints on the arm specified. """
return self.DoF
def dmove_joint(self, delta_pos: [list, np.ndarray]) -> [bool, np.ndarray]:
""" Incremental move in joint space.
"""
if not isinstance(delta_pos, np.ndarray):
delta_pos = np.array(delta_pos)
abs_pos = np.array(self.get_current_joint_position()) # or self._position_joint_desired ?
abs_pos += delta_pos
return self.move_joint(abs_pos)
def dmove_joint_one(self, delta_pos: float, indices: int) -> bool:
""" Incremental index move of 1 joint in joint space.
"""
return self.dmove_joint_some(np.array([delta_pos]), np.array([indices]))
def dmove_joint_some(self, delta_pos: np.ndarray, indices: np.ndarray) -> bool:
""" Incremental index move of a series of joints in joint space.
"""
if not len(delta_pos) == len(indices):
return False
abs_pos = np.array(self.get_current_joint_position())
for i in range(len(indices)):
abs_pos[indices[i]] += delta_pos[i]
return self.move_joint(abs_pos)
def move_joint(self, abs_input: [list, np.ndarray]) -> [bool, np.ndarray]:
"""
Absolute move in joint space.
Set desired joint positions without actual physical move (need pybullet to step).
:param abs_input: the absolute joint positions you want to reach (in joint space).
:return: whether or not able to reach the given input.
"""
if not self._check_joint_limits(abs_input):
return False
self._position_joint_desired = np.copy(abs_input)
joint_positions = self._get_joint_positions_all(abs_input)
p.setJointMotorControlArray(self.body,
self.joints,
p.POSITION_CONTROL,
targetPositions=joint_positions,
targetVelocities=[0.] * len(joint_positions))
return abs_input
def move(self, abs_input: np.ndarray, link_index=None) -> [bool, np.ndarray]:
"""
Absolute translation in Cartesian space (RCM frame).
Set target joint positions without actual physical move (need pybullet to step).
:param abs_input: the absolute pose you want to reach (in Cartesian space, tip_T_rcm, 4*4).
:param link_index: the index for the link to compute inverse kinematics; should be consistent with dVRK.
:return: whether or not able to reach the given input.
"""
assert abs_input.shape == (4, 4)
if link_index is None:
# default to the end-effector link index
link_index = self.EEF_LINK_INDEX
pose_world = self.pose_rcm2world(abs_input, 'tuple')
# joints_inv = np.array(inverse_kinematics(self.body, self.EEF_LINK_INDEX,
# pose_world[0], pose_world[1]))
joints_inv = self.inverse_kinematics(pose_world, link_index)
return self.move_joint(joints_inv)
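# Hedged usage sketch (not part of the original file): move() expects a 4*4 tip
# pose expressed in the RCM frame and, like move_joint(), only sets motor targets
# until PyBullet steps. The instance name `psm` and the 1 cm offset are assumptions.
#   pose_rcm = psm.get_current_position()   # current 4*4 tip pose (RCM frame)
#   pose_rcm[2, 3] += 0.01                  # small translation along z
#   psm.move(pose_rcm)
#   for _ in range(240):
#       p.stepSimulation()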
def update_rcm_pose(self):
""" Update the world_T_rcm (wTr) and rcm_T_world (rTw) transformation matrix.
"""
positions = get_joint_positions(self.body, self.joints)  # dummy positions; they do not affect the RCM pose
# RCM pose in the world frame
world_pose_rcm = forward_kinematics(self.body, self.joints, positions, self.RCM_LINK_INDEX)
self.wTr = get_matrix_from_pose_2d(world_pose_rcm) # world_T_rcm
self.rTw = np.linalg.inv(self.wTr)
def update_tip_pose(self):
"""
Update the eef_T_tip (eTt) and tip_T_eef (tTe) transformation matrix.
The EEF link can be either the same link as the tip or a different link.
"""
world_pose_eef = get_link_pose(self.body, self.EEF_LINK_INDEX)
wTe = get_matrix_from_pose_2d(world_pose_eef) # world_T_eef
world_pose_eef = get_link_pose(self.body, self.TIP_LINK_INDEX)
wTt = get_matrix_from_pose_2d(world_pose_eef) # world_T_tip
self.eTt = np.matmul(np.linalg.inv(wTe), wTt)
self.tTe = np.linalg.inv(self.eTt)
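# Hedged sanity check (illustrative only): by construction, eTt satisfies
# world_T_eef @ eef_T_tip == world_T_tip at the configuration where it was computed.
#   wTe_chk = get_matrix_from_pose_2d(get_link_pose(self.body, self.EEF_LINK_INDEX))
#   wTt_chk = get_matrix_from_pose_2d(get_link_pose(self.body, self.TIP_LINK_INDEX))
#   assert np.allclose(wTe_chk @ self.eTt, wTt_chk, atol=1e-6)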
def pose_rcm2world(self, pose: Union[tuple, list, np.ndarray], option=None):
"""
PyBullet helper function to transform pose from the RCM frame to the world frame.
With tool-tip offset.
:param pose: offset 'tip' pose in the RCM frame; normalized by the scaling factor.
:param option: which output type of transformed pose should be, 'tuple' or 'matrix'.
:return: pose in the world frame.
"""
# rcm_T_tip -> rcm_T_tool
pose_rcm = self._pose_transform(pose, np.linalg.inv(self.tool_T_tip), premultiply=False)
pose_rcm[0: 3, 3] *= self.scaling # recover the original size
# rcm_T_tool -> world_T_tool
pose_world = self._pose_transform(pose_rcm, self.wTr)
if option == 'tuple' or (option is None and isinstance(pose, (tuple, list))):
pose_world = get_pose_2d_from_matrix(pose_world)
return pose_world
def pose_world2rcm(self, pose: Union[tuple, list, np.ndarray], option=None):
"""
PyBullet helper function to transform pose from the world frame to the RCM frame.
With tool-tip offset.
:param pose: 'tool' (eef) pose in the world frame.
:param option: which type of transformed pose should be, 'tuple' or 'matrix'.
:return: pose in the RCM frame; normalized by the scaling factor.
"""
# world_T_tool -> rcm_T_tool
pose_rcm = self._pose_transform(pose, self.rTw, premultiply=True)
# rcm_T_tool -> rcm_T_tip
pose_rcm = np.matmul(pose_rcm, self.tool_T_tip)
pose_rcm[0: 3, 3] /= self.scaling # scaled
if option == 'tuple' or (option is None and isinstance(pose, (tuple, list))):
pose_rcm = get_pose_2d_from_matrix(pose_rcm)
return pose_rcm
def pose_tip2eef(self, pose: Union[tuple, list, np.ndarray], option=None):
"""
Helper function to transform a tip pose into the corresponding eef pose.
:param pose: actual tip pose in any frame.
:param option: the output type of the transformed pose, 'tuple' or 'matrix'.
:return: eef pose in the same frame as the input.
"""
# any_T_tip -> any_T_eef
pose_eef = self._pose_transform(pose, self.tTe, premultiply=False)
if option == 'tuple' or (option is None and isinstance(pose, (tuple, list))):
| pose_eef = get_pose_2d_from_matrix(pose_eef) | conditional_block |
|
arm.py | self._position_joint_desired = np.copy(abs_input)
joint_positions = self._get_joint_positions_all(abs_input)
p.setJointMotorControlArray(self.body,
self.joints,
p.POSITION_CONTROL,
targetPositions=joint_positions,
targetVelocities=[0.] * len(joint_positions))
return abs_input
def move(self, abs_input: np.ndarray, link_index=None) -> [bool, np.ndarray]:
"""
Absolute translation in Cartesian space (RCM frame).
Set target joint positions without actual physical move (need pybullet to step).
:param abs_input: the absolute pose you want to reach (in Cartesian space, tip_T_rcm, 4*4).
:param link_index: the index for the link to compute inverse kinematics; should be consistent with dVRK.
:return: whether or not able to reach the given input.
"""
assert abs_input.shape == (4, 4)
if link_index is None:
# default to the end-effector link index
link_index = self.EEF_LINK_INDEX
pose_world = self.pose_rcm2world(abs_input, 'tuple')
# joints_inv = np.array(inverse_kinematics(self.body, self.EEF_LINK_INDEX,
# pose_world[0], pose_world[1]))
joints_inv = self.inverse_kinematics(pose_world, link_index)
return self.move_joint(joints_inv)
def update_rcm_pose(self):
""" Update the world_T_rcm (wTr) and rcm_T_world (rTw) transformation matrix.
"""
positions = get_joint_positions(self.body, self.joints)  # dummy positions; they do not affect the RCM pose
# RCM pose in the world frame
world_pose_rcm = forward_kinematics(self.body, self.joints, positions, self.RCM_LINK_INDEX)
self.wTr = get_matrix_from_pose_2d(world_pose_rcm) # world_T_rcm
self.rTw = np.linalg.inv(self.wTr)
def update_tip_pose(self):
"""
Update the eef_T_tip (eTt) and tip_T_eef (tTe) transformation matrix.
The EEF link can be either the same link as the tip or a different link.
"""
world_pose_eef = get_link_pose(self.body, self.EEF_LINK_INDEX)
wTe = get_matrix_from_pose_2d(world_pose_eef) # world_T_eef
world_pose_eef = get_link_pose(self.body, self.TIP_LINK_INDEX)
wTt = get_matrix_from_pose_2d(world_pose_eef) # world_T_tip
self.eTt = np.matmul(np.linalg.inv(wTe), wTt)
self.tTe = np.linalg.inv(self.eTt)
def pose_rcm2world(self, pose: Union[tuple, list, np.ndarray], option=None):
"""
PyBullet helper function to transform pose from the RCM frame to the world frame.
With tool-tip offset.
:param pose: offset 'tip' pose in the RCM frame; normalized by the scaling factor.
:param option: which output type of transformed pose should be, 'tuple' or 'matrix'.
:return: pose in the world frame.
"""
# rcm_T_tip -> rcm_T_tool
pose_rcm = self._pose_transform(pose, np.linalg.inv(self.tool_T_tip), premultiply=False)
pose_rcm[0: 3, 3] *= self.scaling # recover the original size
# rcm_T_tool -> world_T_tool
pose_world = self._pose_transform(pose_rcm, self.wTr)
if option == 'tuple' or (option is None and isinstance(pose, (tuple, list))):
pose_world = get_pose_2d_from_matrix(pose_world)
return pose_world
def pose_world2rcm(self, pose: Union[tuple, list, np.ndarray], option=None):
"""
PyBullet helper function to transform pose from the world frame to the RCM frame.
With tool-tip offset.
:param pose: 'tool' (eef) pose in the world frame.
:param option: which type of transformed pose should be, 'tuple' or 'matrix'.
:return: pose in the RCM frame; normalized by the scaling factor.
"""
# world_T_tool -> rcm_T_tool
pose_rcm = self._pose_transform(pose, self.rTw, premultiply=True)
# rcm_T_tool -> rcm_T_tip
pose_rcm = np.matmul(pose_rcm, self.tool_T_tip)
pose_rcm[0: 3, 3] /= self.scaling # scaled
if option == 'tuple' or (option is None and isinstance(pose, (tuple, list))):
pose_rcm = get_pose_2d_from_matrix(pose_rcm)
return pose_rcm
def pose_tip2eef(self, pose: Union[tuple, list, np.ndarray], option=None):
"""
Helper function to transform a tip pose into the corresponding eef pose.
:param pose: actual tip pose in any frame.
:param option: the output type of the transformed pose, 'tuple' or 'matrix'.
:return: eef pose in the same frame as the input.
"""
# any_T_tip -> any_T_eef
pose_eef = self._pose_transform(pose, self.tTe, premultiply=False)
if option == 'tuple' or (option is None and isinstance(pose, (tuple, list))):
pose_eef = get_pose_2d_from_matrix(pose_eef)
return pose_eef
def reset_joint(self, abs_input: [list, np.ndarray]) -> [bool, np.ndarray]:
"""
Helper function for PyBullet initial reset.
Not recommend to use during simulation.
"""
if not self._check_joint_limits(abs_input):
return
joint_positions = self._get_joint_positions_all(abs_input)
set_joint_positions(self.body, self.joints, joint_positions)
return self.move_joint(abs_input)
def inverse_kinematics(self, pose_world: tuple, link_index=None) -> np.ndarray:
"""
Compute the inverse kinematics using PyBullet built-in methods.
Given the pose in the world frame, output the joint positions normalized by self.scaling.
"""
if link_index is None:
link_index = self.DoF - 1
joints_inv = p.calculateInverseKinematics(
bodyUniqueId=self.body,
endEffectorLinkIndex=link_index,
targetPosition=pose_world[0], # inertial pose, not joint pose
targetOrientation=pose_world[1],
lowerLimits=self.limits['lower'][:self.DoF],
upperLimits=self.limits['upper'][:self.DoF],
jointRanges=self.limits['upper'][:self.DoF] - self.limits['lower'][:self.DoF],
restPoses=[0] * self.DoF,
residualThreshold=1e-9, # can tune
maxNumIterations=200
)
# joints_inv = inverse_kinematics(self.body, link_index, pose_world[0], pose_world[1])
joints_inv = np.array(joints_inv)
for i in range(self.DoF):
if self.JOINT_TYPES[i] == 'P':
joints_inv[i] /= self.scaling
return wrap_angle(joints_inv[:self.DoF])
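# Hedged usage sketch (illustrative only): inverse_kinematics() takes a
# (position, quaternion) pose in the world frame and returns wrapped joint values
# already normalized by self.scaling, so the result can be passed to move_joint().
# The instance name `psm` is an assumption.
#   pose_world = psm.pose_rcm2world(psm.get_current_position(), 'tuple')
#   q = psm.inverse_kinematics(pose_world, psm.EEF_LINK_INDEX)
#   psm.move_joint(q)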
def get_jacobian_spatial(self, qs=None) -> np.ndarray:
"""
Calculate the spatial Jacobian in the base frame (world or RCM?) using the Peter Corke robotics toolbox.
(PyBullet appears to use the initial frame instead of the joint frame; not verified.)
:return: Jacobian matrix of shape (6, DoF).
"""
if qs is None:
qs = self.get_current_joint_position()
return self.robot.jacob0(qs)
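# Hedged example (illustrative only): jacob0() returns the 6*DoF spatial Jacobian
# in the base frame, so joint velocities map to an end-effector twist
# [vx, vy, vz, wx, wy, wz]; the joint-velocity vector below is an assumption.
#   J = self.get_jacobian_spatial()    # shape (6, DoF)
#   dq = np.full(self.DoF, 0.01)       # small joint velocities
#   twist = J @ dq                     # linear + angular velocity of the eef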
def _check_joint_limits(self, abs_input: [list, np.ndarray]):
""" Check if the joint set is within the joint limits.
"""
assert len(abs_input) == self.DoF, "The number of joints should match the arm DoF."
if not np.all(np.bitwise_and(abs_input >= self.limits['lower'][:self.DoF],
abs_input <= self.limits['upper'][:self.DoF])):
print("Joint position out of valid range!")
print("Set joint:", abs_input)
return False
return True
def _get_joint_positions_all(self, abs_input: [list, np.ndarray]):
""" With the consideration of parallel mechanism constraints and other redundant joints.
"""
return np.copy(abs_input)
@staticmethod
def _pose_transform(pose, mat: np.ndarray, premultiply=True) -> np.ndarray:
"""
:param pose: tuple (position (3), orientation (4)) or matrix (4*4).
:param mat: transformation matrix.
:param premultiply: premultiply or postmultiply the mat.
:return: pose in the transformed frame.
"""
if isinstance(pose, (tuple, list)):
pose_ori = get_matrix_from_pose_2d(pose)
else:
pose_ori = pose.copy()
if premultiply:
pose_tf = np.matmul(mat, pose_ori)
else:
pose_tf = np.matmul(pose_ori, mat)
return pose_tf
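# Hedged illustration (all names below are assumptions): premultiply re-expresses
# a pose in a new reference frame (new_T_x = mat @ old_T_x), while postmultiply
# appends an offset in the pose's own frame (x_T_new = x_T_old @ mat), which is
# how the tool/tip offset is applied elsewhere in this class.
#   wTt_demo = ArmBase._pose_transform(world_pose_tool, tool_T_tip, premultiply=False)
#   rTt_demo = ArmBase._pose_transform(wTt_demo, rTw, premultiply=True)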
def | _set_collision | identifier_name |
|
user_controller.js | .9400
// // },
// // {
// // city : 'Chicago',
// // desc : 'This is the second best city in the world!',
// // lat : 41.8819,
// // long : -87.6278
// // },
// // {
// // city : 'Los Angeles',
// // desc : 'This city is live!',
// // lat : 34.0500,
// // long : -118.2500
// // },
// // {
// // city : 'Las Vegas',
// // desc : 'Sin City...\'nuff said!',
// // lat : 36.0800,
// // long : -115.1522
// // }
// // ];
// var latlng = new google.maps.LatLng(39.305, -76.617);
// // map = new google.maps.Map(document.getElementById('map'), {
// // center: latlng,
// // zoom: 12
// // });
// //Angular App Module and Controller
// // var mapOptions = {
// // zoom: 4,
// // center: latlng,
// // mapTypeId: google.maps.MapTypeId.TERRAIN
// // }
// $scope.map = new google.maps.Map(document.getElementById('map'), {
// center: latlng,
// zoom: 12
// });
// $scope.markers = [];
// var infoWindow = new google.maps.InfoWindow();
// var createMarker = function (info){
// var marker = new google.maps.Marker({
// map: $scope.map,
// position: new google.maps.LatLng(info.lat, info.long),
// title: info.city
// });
// marker.content = '<div class="infoWindowContent">' + info.desc + '</div>';
// google.maps.event.addListener(marker, 'click', function(){
// infoWindow.setContent('<h2>' + marker.title + '</h2>' + marker.content);
// infoWindow.open($scope.map, marker);
// });
// $scope.markers.push(marker);
// }
// for (i = 0; i < cities.length; i++){
// createMarker(cities[i]);
// }
// $scope.openInfoWindow = function(e, selectedMarker){
// e.preventDefault();
// google.maps.event.trigger(selectedMarker, 'click');
// }
// $scope.map = { center: { latitude: 45, longitude: -73 }, zoom: 8 };
// $scope.username = "some name";
// $scope.creator = "some name";
$scope.Delete_spot = function(spot){
user_factory.Delete_spot(spot, function(data){
location.reload();
})
}
$scope.showSomething = function(input1,input2) {
return input1 == input2 ? 'Cancel' : '';
};
$scope.scrollTo = function(id)
{
$location.hash(id);
$anchorScroll();
}
$scope.dash_user = {};
user_factory.checkSesh(data => {
if (!data)
{
$location.url('/');
}
else
{
$scope.dash_user = data;
}
return data;
})
var pwordRegex =
/(?=^.{8,}$)(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[!@#$%^&*()_+}{:;'?/><.;,])(?!.*\s).*$/; //regex to test password against
var emailRegex = /^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$/; // regex to test email against
var house_numberRegex= /^\d+[a-zA-Z]*$/
var driver_licenseRegex = /^[A-Z]{1}\d{7}$/
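// Hedged examples (not in the original controller) of strings these patterns are
// meant to accept or reject; handy as a quick console sanity check:
//   pwordRegex.test('Passw0rd!')            === true,  pwordRegex.test('password') === false
//   emailRegex.test('[email protected]') === true,  emailRegex.test('user@nodot') === false
//   house_numberRegex.test('221B')          === true,  driver_licenseRegex.test('A1234567') === true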
$scope.users = 'm';
$scope.register_user = function()
{
$scope.error = {message: 'All fields are required'};
if($scope.user.f_name.length < 2)
{
$scope.error = {first: 'Invalid first name'};
}
else if($scope.user.l_name.length < 2)
{
$scope.error = {last: 'Invalid last name'};
}
else if (!$scope.user.email.match(emailRegex)) { //if the email entered does not match regex...
$scope.error = {email: 'Invalid email'};
}
else if (!$scope.user.password.match(pwordRegex)) { //if the password entered does not match regex...
$scope.error = {password: 'Password does not meet minimum requirements: Must be at least 8 characters in length and include at least 1 lowercase and 1 uppercase letter, 1 number, and 1 special character' }
}
else if ($scope.user.password !== $scope.user.confirm_password) { // password and confirm password must match
$scope.error = {confirm_password: 'Password and confirm password must match' }
}
else
{
$scope.error = {};
user_factory.register_user($scope.user,setUsers);
$scope.user = {};
$location.url('/dashboard');
}
}
$scope.index_user = function(){
user_factory.index_user(function(data){
$scope.users = 'lll';
})
}
function setUsers(data)
{
if(data.already){
$scope.already = data.already
error = data.already
}
logged_in_user = data;
}
$scope.log_get_error = function()
{
var error = user_factory.log_get_error();
return error.already;
}
$scope.log_get_user = function()
{
var user = user_factory.log_get_user();
return user.f_name;
}
$scope.login = () => { //when the user hits the login button...
$scope.logErrors = []; //clear out all previous login errors
user_factory.login($scope.loginUser, data => { //run the userFactory.login method and pass the entered user information and a callback function
if (data.errors) { //if the returned data has an errors key...
for (let key in data.errors) { //for every key in the data.errors...
$scope.logErrors.push(data.errors[key].message); //push these errors to the logErrors array
}
$scope.loginUser = {}; //clear the login input fields
// second.focus(); //put the user's cursor back on the first input in login
} else if (data.errorsFront) { //if the returned data has the errorsFront key (custom)...
$scope.logErrors = data.errorsFront; //set logErrors to equal the returned errors...
// second.focus(); //put the user's cursor back on the first input in login
} else { //if no errors are returned...
$location.url('/dashboard'); //send the user to the dashboard with their respective user id
} //if/else
}); //userFactory.login
}; //$scope.login
$scope.add_spot = function()
{
if($scope.newSpot.contact.length == 10)
{
$scope.error = {};
var user = user_factory.log_get_user();
user_factory.add_spot($scope.newSpot,user,setSpots);
$scope.newSpot = {};
$location.url('/spots');
}
else{
$scope.error = {message: 'Invalid phone number'};
}
}
$scope.geocode = function()
{
if($scope.newSpot.contact.length < 10) | else if($scope.newSpot.street.length < 5) {
$scope.error = {street: 'Invalid street'};
} else if (!$scope.newSpot.house_number.match(house_numberRegex)) { //if the house number entered does not match regex...
$scope.error = {house_number: 'Invalid house_number'};
} else if (!$scope.newSpot.license.match(driver_licenseRegex)) { //if the house number entered does not match regex...
$scope.error = {driver_license: 'Invalid driver license number'};
} else {
var location = $scope.newSpot.street + " "+ "San Francisco" + " "+ "California" + " "+ "United States" + " "+ $scope.newSpot.zip_code
var user = user_factory.log_get_user();
user_factory.geocode($scope.newSpot, location, user, function(data) {
$scope.address = 'Your address'+ ' " ' + data + ' " '
$location.url('/spots');
})
}
}
//show all the spots
$scope.index_spot = function() {
user_factory.index_spot(function(data) {
$scope.spots = data;
$scope.spot = {};
})
}
$scope.index_spot();
$scope.create_renter_by_id = function() {
if($scope.newRenter.contact.length > 5) {
$scope.error = {};
function toDateStr(ts) {
let dataF = new Date(); dataF.setTime(ts);
let strDataF = dataF.toLocaleString();
return strDataF;
}
var firstdate = toDateStr($scope.newRenter.arriving_on)
var | {
$scope.error = {contact: 'Invalid phone number'};
} | conditional_block |
user_controller.js | .9400
// // },
// // {
// // city : 'Chicago',
// // desc : 'This is the second best city in the world!',
// // lat : 41.8819,
// // long : -87.6278
// // },
// // {
// // city : 'Los Angeles',
// // desc : 'This city is live!',
// // lat : 34.0500,
// // long : -118.2500
// // },
// // {
// // city : 'Las Vegas',
// // desc : 'Sin City...\'nuff said!',
// // lat : 36.0800,
// // long : -115.1522
// // }
// // ];
// var latlng = new google.maps.LatLng(39.305, -76.617);
// // map = new google.maps.Map(document.getElementById('map'), {
// // center: latlng,
// // zoom: 12
// // });
// //Angular App Module and Controller
// // var mapOptions = {
// // zoom: 4,
// // center: latlng,
// // mapTypeId: google.maps.MapTypeId.TERRAIN
// // }
// $scope.map = new google.maps.Map(document.getElementById('map'), {
// center: latlng,
// zoom: 12
// });
// $scope.markers = [];
// var infoWindow = new google.maps.InfoWindow();
// var createMarker = function (info){
// var marker = new google.maps.Marker({
// map: $scope.map,
// position: new google.maps.LatLng(info.lat, info.long),
// title: info.city
// });
// marker.content = '<div class="infoWindowContent">' + info.desc + '</div>';
// google.maps.event.addListener(marker, 'click', function(){
// infoWindow.setContent('<h2>' + marker.title + '</h2>' + marker.content);
// infoWindow.open($scope.map, marker);
// });
// $scope.markers.push(marker);
// }
// for (i = 0; i < cities.length; i++){
// createMarker(cities[i]);
// }
// $scope.openInfoWindow = function(e, selectedMarker){
// e.preventDefault();
// google.maps.event.trigger(selectedMarker, 'click');
// }
// $scope.map = { center: { latitude: 45, longitude: -73 }, zoom: 8 };
// $scope.username = "some name";
// $scope.creator = "some name";
$scope.Delete_spot = function(spot){
user_factory.Delete_spot(spot, function(data){
location.reload();
})
}
$scope.showSomething = function(input1,input2) {
return input1 == input2 ? 'Cancel' : '';
};
$scope.scrollTo = function(id)
{
$location.hash(id);
$anchorScroll();
}
$scope.dash_user = {};
user_factory.checkSesh(data => {
if (!data)
{
$location.url('/');
}
else
{
$scope.dash_user = data;
}
return data;
})
var pwordRegex =
/(?=^.{8,}$)(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[!@#$%^&*()_+}{:;'?/><.;,])(?!.*\s).*$/; //regex to test password against
var emailRegex = /^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$/; // regex to test email against
var house_numberRegex= /^\d+[a-zA-Z]*$/
var driver_licenseRegex = /^[A-Z]{1}\d{7}$/
$scope.users = 'm';
$scope.register_user = function()
{
$scope.error = {message: 'All fields are required'};
if($scope.user.f_name.length < 2)
{
$scope.error = {first: 'Invalid first name'};
}
else if($scope.user.l_name.length < 2)
{
$scope.error = {last: 'Invalid last name'};
}
else if (!$scope.user.email.match(emailRegex)) { //if the email entered does not match regex...
$scope.error = {email: 'Invalid email'};
}
else if (!$scope.user.password.match(pwordRegex)) { //if the password entered does not match regex...
$scope.error = {password: 'Password does not meet minimum requirements: Must be at least 8 characters in length and include at least 1 lowercase and 1 uppercase letter, 1 number, and 1 special character' }
}
else if ($scope.user.password !== $scope.user.confirm_password) { // password and confirm password must match
$scope.error = {confirm_password: 'Password and confirm password must match' }
}
else
{
$scope.error = {};
user_factory.register_user($scope.user,setUsers);
$scope.user = {};
$location.url('/dashboard');
}
}
$scope.index_user = function(){
user_factory.index_user(function(data){
$scope.users = 'lll';
})
}
function setUsers(data)
{
if(data.already){
$scope.already = data.already
error = data.already
}
logged_in_user = data;
}
$scope.log_get_error = function()
{
var error = user_factory.log_get_error();
return error.already;
}
$scope.log_get_user = function()
{
var user = user_factory.log_get_user();
return user.f_name;
}
$scope.login = () => { //when the user hits the login button...
$scope.logErrors = []; //clear out all previous login errors
user_factory.login($scope.loginUser, data => { //run the userFactory.login method and pass the entered user information and a callback function
if (data.errors) { //if the returned data has an errors key...
for (let key in data.errors) { //for every key in the data.errors...
$scope.logErrors.push(data.errors[key].message); //push these errors to the logErrors array
}
$scope.loginUser = {}; //clear the login input fields
// second.focus(); //put the user's cursor back on the first input in login
} else if (data.errorsFront) { //if the returned data has the errorsFront key (custom)...
$scope.logErrors = data.errorsFront; //set logErrors to equal the returned errors...
// second.focus(); //put the user's cursor back on the first input in login
} else { //if no errors are returned...
$location.url('/dashboard'); //send the user to the dashboard with their respective user id
} //if/else
}); //userFactory.login
}; //$scope.login
$scope.add_spot = function()
{
if($scope.newSpot.contact.length == 10)
{
$scope.error = {};
var user = user_factory.log_get_user();
user_factory.add_spot($scope.newSpot,user,setSpots);
$scope.newSpot = {};
$location.url('/spots');
}
else{
$scope.error = {message: 'Invalid phone number'};
}
}
$scope.geocode = function()
{
if($scope.newSpot.contact.length < 10) {
$scope.error = {contact: 'Invalid phone number'};
} else if($scope.newSpot.street.length < 5) {
$scope.error = {street: 'Invalid street'};
} else if (!$scope.newSpot.house_number.match(house_numberRegex)) { //if the house number entered does not match regex...
$scope.error = {house_number: 'Invalid house_number'};
} else if (!$scope.newSpot.license.match(driver_licenseRegex)) { //if the house number entered does not match regex...
$scope.error = {driver_license: 'Invalid driver license number'};
} else {
var location = $scope.newSpot.street + " "+ "San Francisco" + " "+ "California" + " "+ "United States" + " "+ $scope.newSpot.zip_code
var user = user_factory.log_get_user();
user_factory.geocode($scope.newSpot, location, user, function(data) {
$scope.address = 'Your address'+ ' " ' + data + ' " '
$location.url('/spots');
})
}
}
//show all the spots
$scope.index_spot = function() {
user_factory.index_spot(function(data) {
$scope.spots = data;
$scope.spot = {};
})
}
$scope.index_spot();
$scope.create_renter_by_id = function() {
if($scope.newRenter.contact.length > 5) {
$scope.error = {};
function | (ts) {
let dataF = new Date(); dataF.setTime(ts);
let strDataF = dataF.toLocaleString();
return strDataF;
}
var firstdate = toDateStr($scope.newRenter.arriving_on)
var | toDateStr | identifier_name |
user_controller.js | 9400
// // },
// // {
// // city : 'Chicago',
// // desc : 'This is the second best city in the world!',
// // lat : 41.8819,
// // long : -87.6278
// // },
// // {
// // city : 'Los Angeles',
// // desc : 'This city is live!',
// // lat : 34.0500,
// // long : -118.2500
// // },
// // {
// // city : 'Las Vegas',
// // desc : 'Sin City...\'nuff said!',
// // lat : 36.0800,
// // long : -115.1522
// // }
// // ];
// var latlng = new google.maps.LatLng(39.305, -76.617);
// // map = new google.maps.Map(document.getElementById('map'), {
// // center: latlng,
// // zoom: 12
// // });
// //Angular App Module and Controller
// // var mapOptions = {
// // zoom: 4,
// // center: latlng,
// // mapTypeId: google.maps.MapTypeId.TERRAIN
// // }
// $scope.map = new google.maps.Map(document.getElementById('map'), {
// center: latlng,
// zoom: 12
// });
// $scope.markers = [];
// var infoWindow = new google.maps.InfoWindow();
// var createMarker = function (info){
// var marker = new google.maps.Marker({
// map: $scope.map,
// position: new google.maps.LatLng(info.lat, info.long),
// title: info.city
// });
// marker.content = '<div class="infoWindowContent">' + info.desc + '</div>';
// google.maps.event.addListener(marker, 'click', function(){
// infoWindow.setContent('<h2>' + marker.title + '</h2>' + marker.content);
// infoWindow.open($scope.map, marker);
// });
// $scope.markers.push(marker);
// }
// for (i = 0; i < cities.length; i++){
// createMarker(cities[i]);
// }
// $scope.openInfoWindow = function(e, selectedMarker){
// e.preventDefault();
// google.maps.event.trigger(selectedMarker, 'click');
// }
// $scope.map = { center: { latitude: 45, longitude: -73 }, zoom: 8 };
// $scope.username = "some name";
// $scope.creator = "some name";
$scope.Delete_spot = function(spot){
user_factory.Delete_spot(spot, function(data){
location.reload();
})
}
$scope.showSomething = function(input1,input2) {
return input1 == input2 ? 'Cancel' : '';
};
$scope.scrollTo = function(id)
{
$location.hash(id);
$anchorScroll();
}
$scope.dash_user = {};
user_factory.checkSesh(data => {
if (!data)
{
$location.url('/');
}
else
{
$scope.dash_user = data;
}
return data;
})
var pwordRegex =
/(?=^.{8,}$)(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[!@#$%^&*()_+}{:;'?/><.;,])(?!.*\s).*$/; //regex to test password against
var emailRegex = /^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$/; // regex to test email against
var house_numberRegex= /^\d+[a-zA-Z]*$/
var driver_licenseRegex = /^[A-Z]{1}\d{7}$/
$scope.users = 'm';
$scope.register_user = function()
{
$scope.error = {message: 'All fields are required'};
if($scope.user.f_name.length < 2)
{
$scope.error = {first: 'Invalid first name'};
}
else if($scope.user.l_name.length < 2)
{
$scope.error = {last: 'Invalid last name'};
}
else if (!$scope.user.email.match(emailRegex)) { //if the email entered does not match regex...
$scope.error = {email: 'Invalid email'};
}
else if (!$scope.user.password.match(pwordRegex)) { //if the password entered does not match regex...
$scope.error = {password: 'Password does not meet minimum requirements: Must be at least 8 characters in length and include at least 1 lowercase and 1 uppercase letter, 1 number, and 1 special character' }
}
else if ($scope.user.password !== $scope.user.confirm_password) { // password and confirm password must match
$scope.error = {confirm_password: 'Password and confirm password must match' }
}
else
{
$scope.error = {};
user_factory.register_user($scope.user,setUsers);
$scope.user = {};
$location.url('/dashboard');
}
}
$scope.index_user = function(){
user_factory.index_user(function(data){
$scope.users = 'lll';
})
}
function setUsers(data)
{
if(data.already){
$scope.already = data.already
error = data.already
}
logged_in_user = data;
}
$scope.log_get_error = function()
{
var error = user_factory.log_get_error();
return error.already;
}
$scope.log_get_user = function()
{
var user = user_factory.log_get_user();
return user.f_name;
}
$scope.login = () => { //when the user hits the login button...
$scope.logErrors = []; //clear out all previous login errors
user_factory.login($scope.loginUser, data => { //run the userFactory.login method and pass the entered user information and a callback function
if (data.errors) { //if the returned data has an errors key...
for (let key in data.errors) { //for every key in the data.errors...
$scope.logErrors.push(data.errors[key].message); //push these errors to the logErrors array
}
$scope.loginUser = {}; //clear the login input fields
// second.focus(); //put the user's cursor back on the first input in login
} else if (data.errorsFront) { //if the returned data has the errorsFront key (custom)...
$scope.logErrors = data.errorsFront; //set logErrors to equal the returned errors...
// second.focus(); //put the user's cursor back on the first input in login
} else { //if no errors are returned...
$location.url('/dashboard'); //send the user to the dashboard with their respective user id
} //if/else
}); //userFactory.login
}; //$scope.login
$scope.add_spot = function()
{
if($scope.newSpot.contact.length == 10)
{
$scope.error = {};
var user = user_factory.log_get_user();
user_factory.add_spot($scope.newSpot,user,setSpots);
$scope.newSpot = {};
$location.url('/spots');
}
else{
$scope.error = {message: 'Invalid phone number'};
}
}
$scope.geocode = function()
{
if($scope.newSpot.contact.length < 10) {
$scope.error = {contact: 'Invalid phone number'};
} else if($scope.newSpot.street.length < 5) {
$scope.error = {street: 'Invalid street'};
} else if (!$scope.newSpot.house_number.match(house_numberRegex)) { //if the house number entered does not match regex...
$scope.error = {house_number: 'Invalid house_number'};
} else if (!$scope.newSpot.license.match(driver_licenseRegex)) { //if the house number entered does not match regex...
$scope.error = {driver_license: 'Invalid driver license number'};
} else {
var location = $scope.newSpot.street + " "+ "San Francisco" + " "+ "California" + " "+ "United States" + " "+ $scope.newSpot.zip_code
var user = user_factory.log_get_user();
user_factory.geocode($scope.newSpot, location, user, function(data) {
$scope.address = 'Your address'+ ' " ' + data + ' " '
$location.url('/spots');
})
}
}
//show all the spots
$scope.index_spot = function() {
user_factory.index_spot(function(data) {
$scope.spots = data;
$scope.spot = {};
})
}
$scope.index_spot();
$scope.create_renter_by_id = function() {
if($scope.newRenter.contact.length > 5) {
$scope.error = {};
function toDateStr(ts) |
var firstdate = toDateStr($scope.newRenter.arriving_on)
var | {
let dataF = new Date(); dataF.setTime(ts);
let strDataF = dataF.toLocaleString();
return strDataF;
} | identifier_body |
user_controller.js | .9400
// // },
// // {
// // city : 'Chicago',
// // desc : 'This is the second best city in the world!',
// // lat : 41.8819,
// // long : -87.6278
// // },
// // {
// // city : 'Los Angeles',
// // desc : 'This city is live!',
// // lat : 34.0500,
// // long : -118.2500
// // },
// // {
// // city : 'Las Vegas',
// // desc : 'Sin City...\'nuff said!',
// // lat : 36.0800,
// // long : -115.1522
// // }
// // ];
// var latlng = new google.maps.LatLng(39.305, -76.617);
// // map = new google.maps.Map(document.getElementById('map'), {
// // center: latlng,
// // zoom: 12
// // });
// //Angular App Module and Controller
// // var mapOptions = {
// // zoom: 4,
// // center: latlng,
// // mapTypeId: google.maps.MapTypeId.TERRAIN
// // }
// $scope.map = new google.maps.Map(document.getElementById('map'), {
// center: latlng,
// zoom: 12
// });
// $scope.markers = [];
// var infoWindow = new google.maps.InfoWindow();
// var createMarker = function (info){
// var marker = new google.maps.Marker({
// map: $scope.map,
// position: new google.maps.LatLng(info.lat, info.long),
// title: info.city
// });
// marker.content = '<div class="infoWindowContent">' + info.desc + '</div>';
// google.maps.event.addListener(marker, 'click', function(){
// infoWindow.setContent('<h2>' + marker.title + '</h2>' + marker.content);
// infoWindow.open($scope.map, marker);
// });
// $scope.markers.push(marker);
// }
// for (i = 0; i < cities.length; i++){
// createMarker(cities[i]);
// }
// $scope.openInfoWindow = function(e, selectedMarker){
// e.preventDefault();
// google.maps.event.trigger(selectedMarker, 'click');
// }
// $scope.map = { center: { latitude: 45, longitude: -73 }, zoom: 8 };
// $scope.username = "some name";
// $scope.creator = "some name";
$scope.Delete_spot = function(spot){
user_factory.Delete_spot(spot, function(data){
location.reload();
})
}
$scope.showSomething = function(input1,input2) {
return input1 == input2 ? 'Cancel' : '';
};
$scope.scrollTo = function(id)
{
$location.hash(id);
$anchorScroll();
}
$scope.dash_user = {};
user_factory.checkSesh(data => {
if (!data)
{
$location.url('/');
}
else
{
$scope.dash_user = data;
}
return data;
})
var pwordRegex =
/(?=^.{8,}$)(?=.*\d)(?=.*[a-z])(?=.*[A-Z])(?=.*[!@#$%^&*()_+}{:;'?/><.;,])(?!.*\s).*$/; //regex to test password against
var emailRegex = /^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$/; // regex to test email against
var house_numberRegex= /^\d+[a-zA-Z]*$/
var driver_licenseRegex = /^[A-Z]{1}\d{7}$/
$scope.users = 'm';
$scope.register_user = function()
{
$scope.error = {message: 'All fields are required'};
if($scope.user.f_name.length < 2)
{
$scope.error = {first: 'Invalid first name'};
}
else if($scope.user.l_name.length < 2)
{
$scope.error = {last: 'Invalid last name'};
}
else if (!$scope.user.email.match(emailRegex)) { //if the email entered does not match regex...
$scope.error = {email: 'Invalid email'};
}
else if (!$scope.user.password.match(pwordRegex)) { //if the password entered does not match regex...
$scope.error = {password: 'Password does not meet minimum requirements: Must be at least 8 characters in length and include at least 1 lowercase and 1 uppercase letter, 1 number, and 1 special character' }
}
else if ($scope.user.password !== $scope.user.confirm_password) { // password and confirm password must match
$scope.error = {confirm_password: 'Password and confirm password must match' }
}
else
{
$scope.error = {};
user_factory.register_user($scope.user,setUsers);
$scope.user = {};
$location.url('/dashboard');
}
}
$scope.index_user = function(){
user_factory.index_user(function(data){
$scope.users = 'lll';
})
}
function setUsers(data)
{
if(data.already){
$scope.already = data.already
error = data.already
}
logged_in_user = data;
}
$scope.log_get_error = function()
{
var error = user_factory.log_get_error();
return error.already;
}
$scope.log_get_user = function()
{
var user = user_factory.log_get_user();
return user.f_name;
}
$scope.login = () => { //when the user hits the login button...
$scope.logErrors = []; //clear out all previous login errors
user_factory.login($scope.loginUser, data => { //run the userFactory.login method and pass the entered user information and a callback function
if (data.errors) { //if the returned data has an errors key...
for (let key in data.errors) { //for every key in the data.errors...
$scope.logErrors.push(data.errors[key].message); //push these errors to the logErrors array
}
$scope.loginUser = {}; //clear the login input fields
// second.focus(); //put the user's cursor back on the first input in login
} else if (data.errorsFront) { //if the returned data has the errorsFront key (custom)...
$scope.logErrors = data.errorsFront; //set logErrors to equal the returned errors...
// second.focus(); //put the user's cursor back on the first input in login
} else { //if no errors are returned...
$location.url('/dashboard'); //send the user to the dashboard with their respective user id
} //if/else
}); //userFactory.login
}; //$scope.login
$scope.add_spot = function()
{
if($scope.newSpot.contact.length == 10)
{
$scope.error = {};
var user = user_factory.log_get_user();
user_factory.add_spot($scope.newSpot,user,setSpots);
$scope.newSpot = {};
$location.url('/spots');
}
else{
$scope.error = {message: 'Invalid phone number'};
} | }
$scope.geocode = function()
{
if($scope.newSpot.contact.length < 10) {
$scope.error = {contact: 'Invalid phone number'};
} else if($scope.newSpot.street.length < 5) {
$scope.error = {street: 'Invalid street'};
} else if (!$scope.newSpot.house_number.match(house_numberRegex)) { //if the house number entered does not match regex...
$scope.error = {house_number: 'Invalid house_number'};
} else if (!$scope.newSpot.license.match(driver_licenseRegex)) { //if the house number entered does not match regex...
$scope.error = {driver_license: 'Invalid driver license number'};
} else {
var location = $scope.newSpot.street + " "+ "San Francisco" + " "+ "California" + " "+ "United States" + " "+ $scope.newSpot.zip_code
var user = user_factory.log_get_user();
user_factory.geocode($scope.newSpot, location, user, function(data) {
$scope.address = 'Your address'+ ' " ' + data + ' " '
$location.url('/spots');
})
}
}
//show all the spots
$scope.index_spot = function() {
user_factory.index_spot(function(data) {
$scope.spots = data;
$scope.spot = {};
})
}
$scope.index_spot();
$scope.create_renter_by_id = function() {
if($scope.newRenter.contact.length > 5) {
$scope.error = {};
function toDateStr(ts) {
let dataF = new Date(); dataF.setTime(ts);
let strDataF = dataF.toLocaleString();
return strDataF;
}
var firstdate = toDateStr($scope.newRenter.arriving_on)
var se | random_line_split |
|
rip_show_statistics_bd.pb.go | }
return ""
}
type RipShowStatisticsBd struct {
ReceivedPackets uint32 `protobuf:"varint,50,opt,name=received_packets,json=receivedPackets,proto3" json:"received_packets,omitempty"`
DiscardedPackets uint32 `protobuf:"varint,51,opt,name=discarded_packets,json=discardedPackets,proto3" json:"discarded_packets,omitempty"`
DiscardedRoutes uint32 `protobuf:"varint,52,opt,name=discarded_routes,json=discardedRoutes,proto3" json:"discarded_routes,omitempty"`
StandbyPacketsReceived uint32 `protobuf:"varint,53,opt,name=standby_packets_received,json=standbyPacketsReceived,proto3" json:"standby_packets_received,omitempty"`
SentMessages uint32 `protobuf:"varint,54,opt,name=sent_messages,json=sentMessages,proto3" json:"sent_messages,omitempty"`
SentMessageFailures uint32 `protobuf:"varint,55,opt,name=sent_message_failures,json=sentMessageFailures,proto3" json:"sent_message_failures,omitempty"`
QueryResponses uint32 `protobuf:"varint,56,opt,name=query_responses,json=queryResponses,proto3" json:"query_responses,omitempty"`
PeriodicUpdates uint32 `protobuf:"varint,57,opt,name=periodic_updates,json=periodicUpdates,proto3" json:"periodic_updates,omitempty"`
RouteCount uint32 `protobuf:"varint,58,opt,name=route_count,json=routeCount,proto3" json:"route_count,omitempty"`
PathCount uint32 `protobuf:"varint,59,opt,name=path_count,json=pathCount,proto3" json:"path_count,omitempty"`
RouteMallocFailures uint32 `protobuf:"varint,60,opt,name=route_malloc_failures,json=routeMallocFailures,proto3" json:"route_malloc_failures,omitempty"`
PathMallocFailures uint32 `protobuf:"varint,61,opt,name=path_malloc_failures,json=pathMallocFailures,proto3" json:"path_malloc_failures,omitempty"`
RibUpdates uint32 `protobuf:"varint,62,opt,name=rib_updates,json=ribUpdates,proto3" json:"rib_updates,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *RipShowStatisticsBd) Reset() |
func (m *RipShowStatisticsBd) String() string { return proto.CompactTextString(m) }
func (*RipShowStatisticsBd) ProtoMessage() {}
func (*RipShowStatisticsBd) Descriptor() ([]byte, []int) {
return fileDescriptor_66227cbf5e51e264, []int{1}
}
func (m *RipShowStatisticsBd) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RipShowStatisticsBd.Unmarshal(m, b)
}
func (m *RipShowStatisticsBd) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RipShowStatisticsBd.Marshal(b, m, deterministic)
}
func (m *RipShowStatisticsBd) XXX_Merge(src proto.Message) {
xxx_messageInfo_RipShowStatisticsBd.Merge(m, src)
}
func (m *RipShowStatisticsBd) XXX_Size() int {
return xxx_messageInfo_RipShowStatisticsBd.Size(m)
}
func (m *RipShowStatisticsBd) XXX_DiscardUnknown() {
xxx_messageInfo_RipShowStatisticsBd.DiscardUnknown(m)
}
var xxx_messageInfo_RipShowStatisticsBd proto.InternalMessageInfo
func (m *RipShowStatisticsBd) GetReceivedPackets() uint32 {
if m != nil {
return m.ReceivedPackets
}
return 0
}
func (m *RipShowStatisticsBd) GetDiscardedPackets() uint32 {
if m != nil {
return m.DiscardedPackets
}
return 0
}
func (m *RipShowStatisticsBd) GetDiscardedRoutes() uint32 {
if m != nil {
return m.DiscardedRoutes
}
return 0
}
func (m *RipShowStatisticsBd) GetStandbyPacketsReceived() uint32 {
if m != nil {
return m.StandbyPacketsReceived
}
return 0
}
func (m *RipShowStatisticsBd) GetSentMessages() uint32 {
if m != nil {
return m.SentMessages
}
return 0
}
func (m *RipShowStatisticsBd) GetSentMessageFailures() uint32 {
if m != nil {
return m.SentMessageFailures
}
return 0
}
func (m *RipShowStatisticsBd) GetQueryResponses() uint32 {
if m != nil {
return m.QueryResponses
}
return 0
}
func (m *RipShowStatisticsBd) GetPeriodicUpdates() uint32 {
if m != nil {
return m.PeriodicUpdates
}
return 0
}
func (m *RipShowStatisticsBd) GetRouteCount() uint32 {
if m != nil {
return m.RouteCount
}
return 0
}
func (m *RipShowStatisticsBd) GetPathCount() uint32 {
if m != nil {
return m.PathCount
}
return 0
}
func (m *RipShowStatisticsBd) GetRouteMallocFailures() uint32 {
if m != nil {
return m.RouteMallocFailures
}
return 0
}
func (m *RipShowStatisticsBd) GetPathMallocFailures() uint32 {
if m != nil {
return m.PathMallocFailures
}
return 0
}
func (m *RipShowStatisticsBd) GetRibUpdates() uint32 {
if m != nil {
return m.RibUpdates
}
return 0
}
func init() {
proto.RegisterType((*RipShowStatisticsBd_KEYS)(nil), "cisco_ios_xr_ip_rip_oper.rip.vrfs.vrf.statistics.rip_show_statistics_bd_KEYS")
proto.RegisterType((*RipShowStatisticsBd)(nil), "cisco_ios_xr_ip_rip_oper.rip.vrfs.vrf.statistics.rip_show_statistics_bd")
}
func init() { proto.RegisterFile("rip_show_statistics_bd.proto", fileDescriptor_66227cbf5e51e264) }
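// Hedged usage sketch (not part of the generated code): a telemetry payload for
// this message can be decoded with the proto package already imported here;
// `raw` is an assumed byte slice taken from a Model-Driven Telemetry stream.
//   var row RipShowStatisticsBd
//   if err := proto.Unmarshal(raw, &row); err == nil {
//       _ = row.GetReceivedPackets()
//   }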
var fileDescriptor_66227cbf5e51e264 = []byte{
// 403 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0x4b, 0x6b, 0x14, 0x41,
0x10, 0xc7, 0x59, 0x10, 0x35, 0xa5, 0x31, 0xb1, 0x35, 0xa1, 0x45, 0xc5, 0x10, 0x0f, 0x26, 0x08,
0x4b, 0xd8, 0xf8, 0x58, 0x9f, 0x17, 0xd1, 0x8b, 0x44, 0x64, 0xc4, 0x83, 0xa7, 0xa2, 0xa7, 0xa7,
0xd6, 0x34, 0xee, 0x4e, 0xb7, 0x5d, 0x3d, 0xab, 0xf9, 0x88, 0x7e, 0x2b, 0xe9, 0x9a, 0xc7, 0x2e,
0x61, 0x2f, 0x73, 0xf8, 0xfd, 0x1f, 0x53, 0xff, 0x61, 0xe0, 0x41, 0x74, 0x01, 0xf9, 0xdc, 0xff,
0x41, 0x4e, 0x26, 0x39, 0x4e, 0xce, 0x32, 0x96, 0xd5, 0x38, 0x44, 0x9f, 0xbc, 0x3a, 0xb1, 0x8e,
0xad, 0x47, | { *m = RipShowStatisticsBd{} } | identifier_body |
rip_show_statistics_bd.pb.go | }
func (m *RipShowStatisticsBd_KEYS) XXX_DiscardUnknown() {
xxx_messageInfo_RipShowStatisticsBd_KEYS.DiscardUnknown(m)
}
var xxx_messageInfo_RipShowStatisticsBd_KEYS proto.InternalMessageInfo
func (m *RipShowStatisticsBd_KEYS) GetVrfName() string {
if m != nil {
return m.VrfName
}
return ""
}
type RipShowStatisticsBd struct {
ReceivedPackets uint32 `protobuf:"varint,50,opt,name=received_packets,json=receivedPackets,proto3" json:"received_packets,omitempty"`
DiscardedPackets uint32 `protobuf:"varint,51,opt,name=discarded_packets,json=discardedPackets,proto3" json:"discarded_packets,omitempty"`
DiscardedRoutes uint32 `protobuf:"varint,52,opt,name=discarded_routes,json=discardedRoutes,proto3" json:"discarded_routes,omitempty"`
StandbyPacketsReceived uint32 `protobuf:"varint,53,opt,name=standby_packets_received,json=standbyPacketsReceived,proto3" json:"standby_packets_received,omitempty"`
SentMessages uint32 `protobuf:"varint,54,opt,name=sent_messages,json=sentMessages,proto3" json:"sent_messages,omitempty"`
SentMessageFailures uint32 `protobuf:"varint,55,opt,name=sent_message_failures,json=sentMessageFailures,proto3" json:"sent_message_failures,omitempty"`
QueryResponses uint32 `protobuf:"varint,56,opt,name=query_responses,json=queryResponses,proto3" json:"query_responses,omitempty"`
PeriodicUpdates uint32 `protobuf:"varint,57,opt,name=periodic_updates,json=periodicUpdates,proto3" json:"periodic_updates,omitempty"`
RouteCount uint32 `protobuf:"varint,58,opt,name=route_count,json=routeCount,proto3" json:"route_count,omitempty"`
PathCount uint32 `protobuf:"varint,59,opt,name=path_count,json=pathCount,proto3" json:"path_count,omitempty"`
RouteMallocFailures uint32 `protobuf:"varint,60,opt,name=route_malloc_failures,json=routeMallocFailures,proto3" json:"route_malloc_failures,omitempty"`
PathMallocFailures uint32 `protobuf:"varint,61,opt,name=path_malloc_failures,json=pathMallocFailures,proto3" json:"path_malloc_failures,omitempty"`
RibUpdates uint32 `protobuf:"varint,62,opt,name=rib_updates,json=ribUpdates,proto3" json:"rib_updates,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *RipShowStatisticsBd) Reset() { *m = RipShowStatisticsBd{} }
func (m *RipShowStatisticsBd) String() string { return proto.CompactTextString(m) }
func (*RipShowStatisticsBd) ProtoMessage() {}
func (*RipShowStatisticsBd) Descriptor() ([]byte, []int) {
return fileDescriptor_66227cbf5e51e264, []int{1}
}
func (m *RipShowStatisticsBd) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RipShowStatisticsBd.Unmarshal(m, b)
}
func (m *RipShowStatisticsBd) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RipShowStatisticsBd.Marshal(b, m, deterministic)
}
func (m *RipShowStatisticsBd) XXX_Merge(src proto.Message) {
xxx_messageInfo_RipShowStatisticsBd.Merge(m, src)
}
func (m *RipShowStatisticsBd) XXX_Size() int {
return xxx_messageInfo_RipShowStatisticsBd.Size(m)
}
func (m *RipShowStatisticsBd) XXX_DiscardUnknown() {
xxx_messageInfo_RipShowStatisticsBd.DiscardUnknown(m)
}
var xxx_messageInfo_RipShowStatisticsBd proto.InternalMessageInfo
func (m *RipShowStatisticsBd) GetReceivedPackets() uint32 {
if m != nil {
return m.ReceivedPackets
}
return 0
}
func (m *RipShowStatisticsBd) GetDiscardedPackets() uint32 {
if m != nil {
return m.DiscardedPackets
}
return 0
}
func (m *RipShowStatisticsBd) GetDiscardedRoutes() uint32 {
if m != nil {
return m.DiscardedRoutes
}
return 0
}
func (m *RipShowStatisticsBd) GetStandbyPacketsReceived() uint32 {
if m != nil {
return m.StandbyPacketsReceived
}
return 0
}
func (m *RipShowStatisticsBd) GetSentMessages() uint32 {
if m != nil {
return m.SentMessages
}
return 0
}
func (m *RipShowStatisticsBd) GetSentMessageFailures() uint32 {
if m != nil {
return m.SentMessageFailures
}
return 0
}
func (m *RipShowStatisticsBd) GetQueryResponses() uint32 {
if m != nil {
return m.QueryResponses
}
return 0
}
func (m *RipShowStatisticsBd) GetPeriodicUpdates() uint32 {
if m != nil {
return m.PeriodicUpdates
}
return 0
}
func (m *RipShowStatisticsBd) GetRouteCount() uint32 {
if m != nil {
return m.RouteCount
}
return 0
}
func (m *RipShowStatisticsBd) GetPathCount() uint32 {
if m != nil {
return m.PathCount
}
return 0
}
func (m *RipShowStatisticsBd) GetRouteMallocFailures() uint32 {
if m != nil {
return m.RouteMallocFailures
}
return 0
}
func (m *RipShowStatisticsBd) GetPathMallocFailures() uint32 {
if m != nil {
return m.PathMallocFailures
}
return 0
}
func (m *RipShowStatisticsBd) GetRibUpdates() uint32 {
if m != nil {
return m.RibUpdates
}
return 0
}
func init() {
proto.RegisterType((*RipShowStatisticsBd_KEYS)(nil), "cisco_ios_xr_ip_rip_oper.rip.vrfs.vrf.statistics.rip_show_statistics_bd_KEYS")
proto.RegisterType((*RipShowStatisticsBd)(nil), "cisco_ios_xr_ip_rip_oper.rip.vrfs.vrf.statistics.rip_show_statistics_bd")
}
func init() { proto.RegisterFile("rip_show_statistics_bd.proto", fileDescriptor_66227cbf5e51e264) }
var fileDescriptor_66227cbf5e51e264 = []byte{
// 403 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0x4b, 0x6b, 0x14, 0x41,
0x10, 0xc7, 0x59, 0x10, 0x35, 0xa5, 0x31, 0xb1, 0x35, 0xa1, 0x45, 0xc5, 0x10, 0x0f, 0x26, 0x08,
0x4b, 0xd8, 0xf8, 0x58, 0x9f, 0x17, 0xd1, 0x8b, 0x44, 0x64, 0xc4, 0x83, 0xa7, 0xa2, 0xa7, 0xa7,
0xd6, 0x34, 0xee, 0x4e, 0xb7, 0x5d, 0x3d, 0xab, 0xf9, 0x88, 0x7e, 0x2b, 0xe9, 0x9a, 0xc7, 0x2e,
0x61, 0x2f, 0x73, 0xf8, 0xfd, 0x1f, 0x53, 0xff, 0x61, 0xe0, 0x41, 0x74, 0x01, | func (m *RipShowStatisticsBd_KEYS) XXX_Size() int {
return xxx_messageInfo_RipShowStatisticsBd_KEYS.Size(m) | random_line_split |
|
rip_show_statistics_bd.pb.go | }
return ""
}
type RipShowStatisticsBd struct {
ReceivedPackets uint32 `protobuf:"varint,50,opt,name=received_packets,json=receivedPackets,proto3" json:"received_packets,omitempty"`
DiscardedPackets uint32 `protobuf:"varint,51,opt,name=discarded_packets,json=discardedPackets,proto3" json:"discarded_packets,omitempty"`
DiscardedRoutes uint32 `protobuf:"varint,52,opt,name=discarded_routes,json=discardedRoutes,proto3" json:"discarded_routes,omitempty"`
StandbyPacketsReceived uint32 `protobuf:"varint,53,opt,name=standby_packets_received,json=standbyPacketsReceived,proto3" json:"standby_packets_received,omitempty"`
SentMessages uint32 `protobuf:"varint,54,opt,name=sent_messages,json=sentMessages,proto3" json:"sent_messages,omitempty"`
SentMessageFailures uint32 `protobuf:"varint,55,opt,name=sent_message_failures,json=sentMessageFailures,proto3" json:"sent_message_failures,omitempty"`
QueryResponses uint32 `protobuf:"varint,56,opt,name=query_responses,json=queryResponses,proto3" json:"query_responses,omitempty"`
PeriodicUpdates uint32 `protobuf:"varint,57,opt,name=periodic_updates,json=periodicUpdates,proto3" json:"periodic_updates,omitempty"`
RouteCount uint32 `protobuf:"varint,58,opt,name=route_count,json=routeCount,proto3" json:"route_count,omitempty"`
PathCount uint32 `protobuf:"varint,59,opt,name=path_count,json=pathCount,proto3" json:"path_count,omitempty"`
RouteMallocFailures uint32 `protobuf:"varint,60,opt,name=route_malloc_failures,json=routeMallocFailures,proto3" json:"route_malloc_failures,omitempty"`
PathMallocFailures uint32 `protobuf:"varint,61,opt,name=path_malloc_failures,json=pathMallocFailures,proto3" json:"path_malloc_failures,omitempty"`
RibUpdates uint32 `protobuf:"varint,62,opt,name=rib_updates,json=ribUpdates,proto3" json:"rib_updates,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *RipShowStatisticsBd) Reset() { *m = RipShowStatisticsBd{} }
func (m *RipShowStatisticsBd) String() string { return proto.CompactTextString(m) }
func (*RipShowStatisticsBd) ProtoMessage() {}
func (*RipShowStatisticsBd) Descriptor() ([]byte, []int) {
return fileDescriptor_66227cbf5e51e264, []int{1}
}
func (m *RipShowStatisticsBd) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RipShowStatisticsBd.Unmarshal(m, b)
}
func (m *RipShowStatisticsBd) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RipShowStatisticsBd.Marshal(b, m, deterministic)
}
func (m *RipShowStatisticsBd) XXX_Merge(src proto.Message) {
xxx_messageInfo_RipShowStatisticsBd.Merge(m, src)
}
func (m *RipShowStatisticsBd) XXX_Size() int {
return xxx_messageInfo_RipShowStatisticsBd.Size(m)
}
func (m *RipShowStatisticsBd) XXX_DiscardUnknown() {
xxx_messageInfo_RipShowStatisticsBd.DiscardUnknown(m)
}
var xxx_messageInfo_RipShowStatisticsBd proto.InternalMessageInfo
func (m *RipShowStatisticsBd) GetReceivedPackets() uint32 {
if m != nil {
return m.ReceivedPackets
}
return 0
}
func (m *RipShowStatisticsBd) GetDiscardedPackets() uint32 {
if m != nil {
return m.DiscardedPackets
}
return 0
}
func (m *RipShowStatisticsBd) GetDiscardedRoutes() uint32 {
if m != nil {
return m.DiscardedRoutes
}
return 0
}
func (m *RipShowStatisticsBd) GetStandbyPacketsReceived() uint32 {
if m != nil {
return m.StandbyPacketsReceived
}
return 0
}
func (m *RipShowStatisticsBd) GetSentMessages() uint32 {
if m != nil |
return 0
}
func (m *RipShowStatisticsBd) GetSentMessageFailures() uint32 {
if m != nil {
return m.SentMessageFailures
}
return 0
}
func (m *RipShowStatisticsBd) GetQueryResponses() uint32 {
if m != nil {
return m.QueryResponses
}
return 0
}
func (m *RipShowStatisticsBd) GetPeriodicUpdates() uint32 {
if m != nil {
return m.PeriodicUpdates
}
return 0
}
func (m *RipShowStatisticsBd) GetRouteCount() uint32 {
if m != nil {
return m.RouteCount
}
return 0
}
func (m *RipShowStatisticsBd) GetPathCount() uint32 {
if m != nil {
return m.PathCount
}
return 0
}
func (m *RipShowStatisticsBd) GetRouteMallocFailures() uint32 {
if m != nil {
return m.RouteMallocFailures
}
return 0
}
func (m *RipShowStatisticsBd) GetPathMallocFailures() uint32 {
if m != nil {
return m.PathMallocFailures
}
return 0
}
func (m *RipShowStatisticsBd) GetRibUpdates() uint32 {
if m != nil {
return m.RibUpdates
}
return 0
}
func init() {
proto.RegisterType((*RipShowStatisticsBd_KEYS)(nil), "cisco_ios_xr_ip_rip_oper.rip.vrfs.vrf.statistics.rip_show_statistics_bd_KEYS")
proto.RegisterType((*RipShowStatisticsBd)(nil), "cisco_ios_xr_ip_rip_oper.rip.vrfs.vrf.statistics.rip_show_statistics_bd")
}
func init() { proto.RegisterFile("rip_show_statistics_bd.proto", fileDescriptor_66227cbf5e51e264) }
var fileDescriptor_66227cbf5e51e264 = []byte{
// 403 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0x4b, 0x6b, 0x14, 0x41,
0x10, 0xc7, 0x59, 0x10, 0x35, 0xa5, 0x31, 0xb1, 0x35, 0xa1, 0x45, 0xc5, 0x10, 0x0f, 0x26, 0x08,
0x4b, 0xd8, 0xf8, 0x58, 0x9f, 0x17, 0xd1, 0x8b, 0x44, 0x64, 0xc4, 0x83, 0xa7, 0xa2, 0xa7, 0xa7,
0xd6, 0x34, 0xee, 0x4e, 0xb7, 0x5d, 0x3d, 0xab, 0xf9, 0x88, 0x7e, 0x2b, 0xe9, 0x9a, 0xc7, 0x2e,
0x61, 0x2f, 0x73, 0xf8, 0xfd, 0x1f, 0x53, 0xff, 0x61, 0xe0, 0x41, 0x74, 0x01, 0xf9, 0xdc, 0xff,
0x41, 0x4e, 0x26, 0x39, 0x4e, 0xce, 0x32, 0x96, 0xd5, 0x38, 0x44, 0x9f, 0xbc, 0x3a, 0xb1, 0x8e,
0xad, 0x47, | {
return m.SentMessages
} | conditional_block |
rip_show_statistics_bd.pb.go | }
return ""
}
type RipShowStatisticsBd struct {
ReceivedPackets uint32 `protobuf:"varint,50,opt,name=received_packets,json=receivedPackets,proto3" json:"received_packets,omitempty"`
DiscardedPackets uint32 `protobuf:"varint,51,opt,name=discarded_packets,json=discardedPackets,proto3" json:"discarded_packets,omitempty"`
DiscardedRoutes uint32 `protobuf:"varint,52,opt,name=discarded_routes,json=discardedRoutes,proto3" json:"discarded_routes,omitempty"`
StandbyPacketsReceived uint32 `protobuf:"varint,53,opt,name=standby_packets_received,json=standbyPacketsReceived,proto3" json:"standby_packets_received,omitempty"`
SentMessages uint32 `protobuf:"varint,54,opt,name=sent_messages,json=sentMessages,proto3" json:"sent_messages,omitempty"`
SentMessageFailures uint32 `protobuf:"varint,55,opt,name=sent_message_failures,json=sentMessageFailures,proto3" json:"sent_message_failures,omitempty"`
QueryResponses uint32 `protobuf:"varint,56,opt,name=query_responses,json=queryResponses,proto3" json:"query_responses,omitempty"`
PeriodicUpdates uint32 `protobuf:"varint,57,opt,name=periodic_updates,json=periodicUpdates,proto3" json:"periodic_updates,omitempty"`
RouteCount uint32 `protobuf:"varint,58,opt,name=route_count,json=routeCount,proto3" json:"route_count,omitempty"`
PathCount uint32 `protobuf:"varint,59,opt,name=path_count,json=pathCount,proto3" json:"path_count,omitempty"`
RouteMallocFailures uint32 `protobuf:"varint,60,opt,name=route_malloc_failures,json=routeMallocFailures,proto3" json:"route_malloc_failures,omitempty"`
PathMallocFailures uint32 `protobuf:"varint,61,opt,name=path_malloc_failures,json=pathMallocFailures,proto3" json:"path_malloc_failures,omitempty"`
RibUpdates uint32 `protobuf:"varint,62,opt,name=rib_updates,json=ribUpdates,proto3" json:"rib_updates,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *RipShowStatisticsBd) Reset() { *m = RipShowStatisticsBd{} }
func (m *RipShowStatisticsBd) String() string { return proto.CompactTextString(m) }
func (*RipShowStatisticsBd) ProtoMessage() {}
func (*RipShowStatisticsBd) | () ([]byte, []int) {
return fileDescriptor_66227cbf5e51e264, []int{1}
}
func (m *RipShowStatisticsBd) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RipShowStatisticsBd.Unmarshal(m, b)
}
func (m *RipShowStatisticsBd) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RipShowStatisticsBd.Marshal(b, m, deterministic)
}
func (m *RipShowStatisticsBd) XXX_Merge(src proto.Message) {
xxx_messageInfo_RipShowStatisticsBd.Merge(m, src)
}
func (m *RipShowStatisticsBd) XXX_Size() int {
return xxx_messageInfo_RipShowStatisticsBd.Size(m)
}
func (m *RipShowStatisticsBd) XXX_DiscardUnknown() {
xxx_messageInfo_RipShowStatisticsBd.DiscardUnknown(m)
}
var xxx_messageInfo_RipShowStatisticsBd proto.InternalMessageInfo
func (m *RipShowStatisticsBd) GetReceivedPackets() uint32 {
if m != nil {
return m.ReceivedPackets
}
return 0
}
func (m *RipShowStatisticsBd) GetDiscardedPackets() uint32 {
if m != nil {
return m.DiscardedPackets
}
return 0
}
func (m *RipShowStatisticsBd) GetDiscardedRoutes() uint32 {
if m != nil {
return m.DiscardedRoutes
}
return 0
}
func (m *RipShowStatisticsBd) GetStandbyPacketsReceived() uint32 {
if m != nil {
return m.StandbyPacketsReceived
}
return 0
}
func (m *RipShowStatisticsBd) GetSentMessages() uint32 {
if m != nil {
return m.SentMessages
}
return 0
}
func (m *RipShowStatisticsBd) GetSentMessageFailures() uint32 {
if m != nil {
return m.SentMessageFailures
}
return 0
}
func (m *RipShowStatisticsBd) GetQueryResponses() uint32 {
if m != nil {
return m.QueryResponses
}
return 0
}
func (m *RipShowStatisticsBd) GetPeriodicUpdates() uint32 {
if m != nil {
return m.PeriodicUpdates
}
return 0
}
func (m *RipShowStatisticsBd) GetRouteCount() uint32 {
if m != nil {
return m.RouteCount
}
return 0
}
func (m *RipShowStatisticsBd) GetPathCount() uint32 {
if m != nil {
return m.PathCount
}
return 0
}
func (m *RipShowStatisticsBd) GetRouteMallocFailures() uint32 {
if m != nil {
return m.RouteMallocFailures
}
return 0
}
func (m *RipShowStatisticsBd) GetPathMallocFailures() uint32 {
if m != nil {
return m.PathMallocFailures
}
return 0
}
func (m *RipShowStatisticsBd) GetRibUpdates() uint32 {
if m != nil {
return m.RibUpdates
}
return 0
}
func init() {
proto.RegisterType((*RipShowStatisticsBd_KEYS)(nil), "cisco_ios_xr_ip_rip_oper.rip.vrfs.vrf.statistics.rip_show_statistics_bd_KEYS")
proto.RegisterType((*RipShowStatisticsBd)(nil), "cisco_ios_xr_ip_rip_oper.rip.vrfs.vrf.statistics.rip_show_statistics_bd")
}
func init() { proto.RegisterFile("rip_show_statistics_bd.proto", fileDescriptor_66227cbf5e51e264) }
var fileDescriptor_66227cbf5e51e264 = []byte{
// 403 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0x4b, 0x6b, 0x14, 0x41,
0x10, 0xc7, 0x59, 0x10, 0x35, 0xa5, 0x31, 0xb1, 0x35, 0xa1, 0x45, 0xc5, 0x10, 0x0f, 0x26, 0x08,
0x4b, 0xd8, 0xf8, 0x58, 0x9f, 0x17, 0xd1, 0x8b, 0x44, 0x64, 0xc4, 0x83, 0xa7, 0xa2, 0xa7, 0xa7,
0xd6, 0x34, 0xee, 0x4e, 0xb7, 0x5d, 0x3d, 0xab, 0xf9, 0x88, 0x7e, 0x2b, 0xe9, 0x9a, 0xc7, 0x2e,
0x61, 0x2f, 0x73, 0xf8, 0xfd, 0x1f, 0x53, 0xff, 0x61, 0xe0, 0x41, 0x74, 0x01, 0xf9, 0xdc, 0xff,
0x41, 0x4e, 0x26, 0x39, 0x4e, 0xce, 0x32, 0x96, 0xd5, 0x38, 0x44, 0x9f, 0xbc, 0x3a, 0xb1, 0x8e,
0xad, 0x47, | Descriptor | identifier_name |
main.rs | {
#[structopt(
help = "The pattern to search for. Shall be a regular expression passed to regex crate."
)]
pattern: String,
#[structopt(help = "Root repo to grep")]
repo: Option<PathBuf>,
#[structopt(short, long, help = "Branch name")]
branch: Option<String>,
#[structopt(
short,
long,
help = "Search from all branches. Ignores -b option if given"
)]
all: bool,
#[structopt(short, long, help = "Depth to search into git commit history")]
depth: Option<usize>,
#[structopt(
short = "o",
long,
help = "Turn off showing matches to a file only once; the default behavior is that if the same file with the same name has different versions that matches, they will not be printed."
)]
no_once_file: bool,
#[structopt(
short = "c",
long,
help = "Disable color coding for the output, default is to use colors in terminal"
)]
no_color_code: bool,
#[structopt(
short = "g",
long,
help = "Disable output grouping. Better for machine inputs"
)]
no_output_grouping: bool,
#[structopt(short, long, help = "Verbose flag")]
verbose: bool,
#[structopt(short, long, help = "Add an entry to list of extensions to search")]
extensions: Vec<String>,
#[structopt(
short,
long,
help = "Add an entry to list of directory names to ignore"
)]
ignore_dirs: Vec<String>,
}
fn main() -> Result<()> {
let settings: Settings = Opt::from_args().try_into()?;
eprintln!(
"Searching path: {:?} extensions: {:?} ignore_dirs: {:?}",
settings.repo, settings.extensions, settings.ignore_dirs
);
let _file_list = process_files_git(&settings.repo, &settings)?;
Ok(())
}
#[allow(dead_code)]
struct MatchEntry {
commit: Oid,
path: PathBuf,
start: usize,
end: usize,
}
#[derive(Debug)]
struct Settings {
pattern: Regex,
repo: PathBuf,
branch: Option<String>,
all: bool,
depth: Option<usize>,
once_file: bool,
color_code: bool,
output_grouping: bool,
verbose: bool,
extensions: HashSet<OsString>,
ignore_dirs: HashSet<OsString>,
}
// It's a bit awkward to convert from Opt to Settings, but for some settings it
// is hard to write the conversion code inside structopt annotations.
impl TryFrom<Opt> for Settings {
type Error = anyhow::Error;
fn try_from(src: Opt) -> std::result::Result<Self, Self::Error> {
let default_exts = [
".sh", ".js", ".tcl", ".pl", ".py", ".rb", ".c", ".cpp", ".h", ".rc", ".rci", ".dlg",
".pas", ".dpr", ".cs", ".rs",
];
let default_ignore_dirs = [".hg", ".svn", ".git", ".bzr", "node_modules", "target"]; // Probably we could ignore all directories beginning with a dot.
Ok(Self {
pattern: Regex::new(&src.pattern)
.map_err(|e| anyhow!("Error in regex compilation: {:?}", e))?,
repo: canonicalize(
src.repo.unwrap_or_else(|| {
PathBuf::from(env::current_dir().unwrap().to_str().unwrap())
}),
)
.expect("Canonicalized path"),
branch: src.branch,
all: src.all,
depth: src.depth,
once_file: !src.no_once_file,
color_code: !src.no_color_code,
output_grouping: !src.no_output_grouping,
verbose: src.verbose,
extensions: if src.extensions.is_empty() {
default_exts.iter().map(|ext| ext[1..].into()).collect()
} else {
default_exts
.iter()
.map(|ext| ext[1..].into())
.chain(src.extensions.iter().map(|ext| ext[1..].into()))
.collect()
},
ignore_dirs: if src.ignore_dirs.is_empty() {
default_ignore_dirs.iter().map(|ext| ext.into()).collect()
} else {
default_ignore_dirs
.iter()
.map(|ext| ext.into())
.chain(src.ignore_dirs.iter().map(|ext| ext.into()))
.collect()
},
})
}
}
struct ProcessTree<'a> {
settings: &'a Settings,
repo: &'a Repository,
checked_paths: HashSet<PathBuf>,
checked_blobs: HashSet<Oid>,
checked_trees: HashSet<Oid>,
walked: usize,
skipped_blobs: usize,
all_matches: Vec<MatchEntry>,
}
impl<'a> ProcessTree<'a> {
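    /// Recursively scan one tree object: subtrees are walked depth-first, while
    /// blobs are filtered by path/OID dedup, the ignore list, extension, and a
    /// binary check before their contents are handed to `process_file`.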
fn process(&mut self, tree: &Tree, commit: &Commit, path: &Path, visited: &mut bool) {
if self.checked_trees.contains(&tree.id()) {
return;
}
self.checked_trees.insert(tree.id());
self.walked += 1;
for entry in tree {
match (|| {
let name = entry.name()?;
let entry_path = path.join(name);
// We want to match with absolute path from root, but it seems impossible with `tree.walk`.
if self.settings.once_file && self.checked_paths.contains(&entry_path) {
return None;
}
self.checked_paths.insert(entry_path.clone());
let obj = match entry.to_object(&self.repo) {
Ok(obj) => obj,
Err(e) => {
eprintln!("couldn't get_object: {:?}", e);
return None;
}
};
if obj.kind() == Some(ObjectType::Tree) {
self.process(obj.as_tree()?, commit, &entry_path, visited);
return None;
}
if entry.kind() != Some(ObjectType::Blob)
|| self.settings.ignore_dirs.contains(&OsString::from(name))
{
return None;
}
let blob = obj.peel_to_blob().ok()?;
if blob.is_binary() {
return None;
}
let ext = PathBuf::from(name).extension()?.to_owned();
if !self.settings.extensions.contains(&ext.to_ascii_lowercase()) {
return None;
}
if self.checked_blobs.contains(&blob.id()) {
self.skipped_blobs += 1;
return None;
}
self.checked_blobs.insert(blob.id());
let ret = process_file(self.settings, commit, blob.content(), &entry_path, visited);
Some(ret)
})() {
Some(matches) => {
self.all_matches.extend(matches);
}
_ => (),
}
}
}
}
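/// Walk commit history breadth-first, starting from HEAD, the named branch, or
/// every reference when --all is given, scanning each unvisited commit's tree
/// until the parent queue is empty or the depth limit is reached.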
fn process_files_git(_root: &Path, settings: &Settings) -> Result<Vec<MatchEntry>> {
let repo = Repository::open(&settings.repo)?;
let reference = if let Some(ref branch) = settings.branch {
repo.resolve_reference_from_short_name(&branch)?
} else {
repo.head()?
};
let mut process_tree = ProcessTree {
settings,
repo: &repo,
checked_paths: HashSet::new(),
checked_blobs: HashSet::new(),
checked_trees: HashSet::new(),
walked: 0,
skipped_blobs: 0,
all_matches: vec![],
};
let mut checked_commits = HashMap::new();
let mut iter = 0;
let mut next_refs = if settings.all {
repo.references()?
.map(|refs| refs.and_then(|refb| refb.peel_to_commit()))
.collect::<std::result::Result<Vec<_>, _>>()?
} else {
vec![reference.peel_to_commit()?]
};
loop {
for commit in &next_refs {
if checked_commits.contains_key(&commit.id()) {
continue;
}
let entry = checked_commits.entry(commit.id()).or_insert(false);
let tree = if let Ok(tree) = commit.tree() {
tree
} else {
continue;
};
process_tree.process(&tree, commit, &PathBuf::from(""), entry);
}
next_refs = next_refs
.iter()
.map(|reference| reference.parent_ids())
.flatten()
.filter(|reference| !checked_commits.contains_key(reference))
.map(|id| repo.find_commit(id))
.collect::<std::result::Result<Vec<_>, git2::Error>>()?;
if settings.verbose {
eprintln!(
"[{}] {} Matches in {} files {} skipped blobs... Next round has {} refs...",
iter,
process_tree.all_matches.len(),
process_tree.walked,
process_tree.skipped_blobs,
next_refs.len()
);
}
iter += 1;
if next_refs.is_empty() || settings.depth.map(|depth| depth <= iter).unwrap_or(false) {
break;
}
}
Ok(process_tree.all_matches)
}
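/// Scan a single blob with the configured regex; non-UTF-8 content is skipped
/// and every match is recorded with its byte range in the file.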
fn process_file(
settings: &Settings,
commit: &Commit,
input: &[u8],
filepath: &Path,
visited: &mut bool,
) -> Vec<MatchEntry> {
let mut ret = vec![];
// Non-utf8 files are not supported.
let input_str = if let Ok(utf8) = std::str::from_utf8(&input) {
| Opt | identifier_name |
|
main.rs | if given"
)]
all: bool,
#[structopt(short, long, help = "Depth to search into git commit history")]
depth: Option<usize>,
#[structopt(
short = "o",
long,
help = "Turn off showing matches to a file only once; the default behavior is that if the same file with the same name has different versions that matches, they will not be printed."
)]
no_once_file: bool,
#[structopt(
short = "c",
long,
help = "Disable color coding for the output, default is to use colors in terminal"
)]
no_color_code: bool,
#[structopt(
short = "g",
long,
help = "Disable output grouping. Better for machine inputs"
)]
no_output_grouping: bool,
#[structopt(short, long, help = "Verbose flag")]
verbose: bool,
#[structopt(short, long, help = "Add an entry to list of extensions to search")]
extensions: Vec<String>,
#[structopt(
short,
long,
help = "Add an entry to list of directory names to ignore"
)]
ignore_dirs: Vec<String>,
}
fn main() -> Result<()> {
let settings: Settings = Opt::from_args().try_into()?;
eprintln!(
"Searching path: {:?} extensions: {:?} ignore_dirs: {:?}",
settings.repo, settings.extensions, settings.ignore_dirs
);
let _file_list = process_files_git(&settings.repo, &settings)?;
Ok(())
}
#[allow(dead_code)]
struct MatchEntry {
commit: Oid,
path: PathBuf,
start: usize,
end: usize,
}
#[derive(Debug)]
struct Settings {
pattern: Regex,
repo: PathBuf,
branch: Option<String>,
all: bool,
depth: Option<usize>,
once_file: bool,
color_code: bool,
output_grouping: bool,
verbose: bool,
extensions: HashSet<OsString>,
ignore_dirs: HashSet<OsString>,
}
// It's a bit awkward to convert from Opt to Settings, but for some settings it
// is hard to write the conversion code inside structopt annotations.
impl TryFrom<Opt> for Settings {
type Error = anyhow::Error;
fn try_from(src: Opt) -> std::result::Result<Self, Self::Error> {
let default_exts = [
".sh", ".js", ".tcl", ".pl", ".py", ".rb", ".c", ".cpp", ".h", ".rc", ".rci", ".dlg",
".pas", ".dpr", ".cs", ".rs",
];
let default_ignore_dirs = [".hg", ".svn", ".git", ".bzr", "node_modules", "target"]; // Probably we could ignore all directories beginning with a dot.
Ok(Self {
pattern: Regex::new(&src.pattern)
.map_err(|e| anyhow!("Error in regex compilation: {:?}", e))?,
repo: canonicalize(
src.repo.unwrap_or_else(|| {
PathBuf::from(env::current_dir().unwrap().to_str().unwrap())
}),
)
.expect("Canonicalized path"),
branch: src.branch,
all: src.all,
depth: src.depth,
once_file: !src.no_once_file,
color_code: !src.no_color_code,
output_grouping: !src.no_output_grouping,
verbose: src.verbose,
extensions: if src.extensions.is_empty() {
default_exts.iter().map(|ext| ext[1..].into()).collect()
} else {
default_exts
.iter()
.map(|ext| ext[1..].into())
.chain(src.extensions.iter().map(|ext| ext[1..].into()))
.collect()
},
ignore_dirs: if src.ignore_dirs.is_empty() {
default_ignore_dirs.iter().map(|ext| ext.into()).collect()
} else {
default_ignore_dirs
.iter()
.map(|ext| ext.into())
.chain(src.ignore_dirs.iter().map(|ext| ext.into()))
.collect()
},
})
}
}
struct ProcessTree<'a> {
settings: &'a Settings,
repo: &'a Repository,
checked_paths: HashSet<PathBuf>,
checked_blobs: HashSet<Oid>,
checked_trees: HashSet<Oid>,
walked: usize,
skipped_blobs: usize,
all_matches: Vec<MatchEntry>,
}
impl<'a> ProcessTree<'a> {
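    /// Recursively scan one tree object: subtrees are walked depth-first, while
    /// blobs are filtered by path/OID dedup, the ignore list, extension, and a
    /// binary check before their contents are handed to `process_file`.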
fn process(&mut self, tree: &Tree, commit: &Commit, path: &Path, visited: &mut bool) {
if self.checked_trees.contains(&tree.id()) {
return;
}
self.checked_trees.insert(tree.id());
self.walked += 1;
for entry in tree {
match (|| {
let name = entry.name()?;
let entry_path = path.join(name);
// We want to match with absolute path from root, but it seems impossible with `tree.walk`.
if self.settings.once_file && self.checked_paths.contains(&entry_path) {
return None;
}
self.checked_paths.insert(entry_path.clone());
let obj = match entry.to_object(&self.repo) {
Ok(obj) => obj,
Err(e) => {
eprintln!("couldn't get_object: {:?}", e);
return None;
}
};
if obj.kind() == Some(ObjectType::Tree) {
self.process(obj.as_tree()?, commit, &entry_path, visited);
return None;
}
if entry.kind() != Some(ObjectType::Blob)
|| self.settings.ignore_dirs.contains(&OsString::from(name))
{
return None;
} | let blob = obj.peel_to_blob().ok()?;
if blob.is_binary() {
return None;
}
let ext = PathBuf::from(name).extension()?.to_owned();
if !self.settings.extensions.contains(&ext.to_ascii_lowercase()) {
return None;
}
if self.checked_blobs.contains(&blob.id()) {
self.skipped_blobs += 1;
return None;
}
self.checked_blobs.insert(blob.id());
let ret = process_file(self.settings, commit, blob.content(), &entry_path, visited);
Some(ret)
})() {
Some(matches) => {
self.all_matches.extend(matches);
}
_ => (),
}
}
}
}
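/// Walk commit history breadth-first, starting from HEAD, the named branch, or
/// every reference when --all is given, scanning each unvisited commit's tree
/// until the parent queue is empty or the depth limit is reached.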
fn process_files_git(_root: &Path, settings: &Settings) -> Result<Vec<MatchEntry>> {
let repo = Repository::open(&settings.repo)?;
let reference = if let Some(ref branch) = settings.branch {
repo.resolve_reference_from_short_name(&branch)?
} else {
repo.head()?
};
let mut process_tree = ProcessTree {
settings,
repo: &repo,
checked_paths: HashSet::new(),
checked_blobs: HashSet::new(),
checked_trees: HashSet::new(),
walked: 0,
skipped_blobs: 0,
all_matches: vec![],
};
let mut checked_commits = HashMap::new();
let mut iter = 0;
let mut next_refs = if settings.all {
repo.references()?
.map(|refs| refs.and_then(|refb| refb.peel_to_commit()))
.collect::<std::result::Result<Vec<_>, _>>()?
} else {
vec![reference.peel_to_commit()?]
};
loop {
for commit in &next_refs {
if checked_commits.contains_key(&commit.id()) {
continue;
}
let entry = checked_commits.entry(commit.id()).or_insert(false);
let tree = if let Ok(tree) = commit.tree() {
tree
} else {
continue;
};
process_tree.process(&tree, commit, &PathBuf::from(""), entry);
}
next_refs = next_refs
.iter()
.map(|reference| reference.parent_ids())
.flatten()
.filter(|reference| !checked_commits.contains_key(reference))
.map(|id| repo.find_commit(id))
.collect::<std::result::Result<Vec<_>, git2::Error>>()?;
if settings.verbose {
eprintln!(
"[{}] {} Matches in {} files {} skipped blobs... Next round has {} refs...",
iter,
process_tree.all_matches.len(),
process_tree.walked,
process_tree.skipped_blobs,
next_refs.len()
);
}
iter += 1;
if next_refs.is_empty() || settings.depth.map(|depth| depth <= iter).unwrap_or(false) {
break;
}
}
Ok(process_tree.all_matches)
}
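/// Scan a single blob with the configured regex; non-UTF-8 content is skipped
/// and every match is recorded with its byte range in the file.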
fn process_file(
settings: &Settings,
commit: &Commit,
input: &[u8],
filepath: &Path,
visited: &mut bool,
) -> Vec<MatchEntry> {
let mut ret = vec![];
// Non-utf8 files are not supported.
let input_str = if let Ok(utf8) = std::str::from_utf8(&input) {
utf8
} else {
return vec![];
};
for found in settings.pattern.find_iter(&input_str) {
ret.push(MatchEntry {
commit: commit.id(),
path: filepath.to_path_buf(),
start: found.start(),
end: found.end(),
});
// Very naive way to count line numbers. Assumes newlines would not be part of multibyte
// character, which is true for utf8 that is the only supported | random_line_split |
|
main.rs | "Disable color coding for the output, default is to use colors in terminal"
)]
no_color_code: bool,
#[structopt(
short = "g",
long,
help = "Disable output grouping. Better for machine inputs"
)]
no_output_grouping: bool,
#[structopt(short, long, help = "Verbose flag")]
verbose: bool,
#[structopt(short, long, help = "Add an entry to list of extensions to search")]
extensions: Vec<String>,
#[structopt(
short,
long,
help = "Add an entry to list of directory names to ignore"
)]
ignore_dirs: Vec<String>,
}
fn main() -> Result<()> {
let settings: Settings = Opt::from_args().try_into()?;
eprintln!(
"Searching path: {:?} extensions: {:?} ignore_dirs: {:?}",
settings.repo, settings.extensions, settings.ignore_dirs
);
let _file_list = process_files_git(&settings.repo, &settings)?;
Ok(())
}
#[allow(dead_code)]
struct MatchEntry {
commit: Oid,
path: PathBuf,
start: usize,
end: usize,
}
#[derive(Debug)]
struct Settings {
pattern: Regex,
repo: PathBuf,
branch: Option<String>,
all: bool,
depth: Option<usize>,
once_file: bool,
color_code: bool,
output_grouping: bool,
verbose: bool,
extensions: HashSet<OsString>,
ignore_dirs: HashSet<OsString>,
}
// It's a bit awkward to convert from Opt to Settings, but for some settings it
// is hard to write the conversion code inside structopt annotations.
impl TryFrom<Opt> for Settings {
type Error = anyhow::Error;
fn try_from(src: Opt) -> std::result::Result<Self, Self::Error> {
let default_exts = [
".sh", ".js", ".tcl", ".pl", ".py", ".rb", ".c", ".cpp", ".h", ".rc", ".rci", ".dlg",
".pas", ".dpr", ".cs", ".rs",
];
let default_ignore_dirs = [".hg", ".svn", ".git", ".bzr", "node_modules", "target"]; // Probably we could ignore all directories beginning with a dot.
Ok(Self {
pattern: Regex::new(&src.pattern)
.map_err(|e| anyhow!("Error in regex compilation: {:?}", e))?,
repo: canonicalize(
src.repo.unwrap_or_else(|| {
PathBuf::from(env::current_dir().unwrap().to_str().unwrap())
}),
)
.expect("Canonicalized path"),
branch: src.branch,
all: src.all,
depth: src.depth,
once_file: !src.no_once_file,
color_code: !src.no_color_code,
output_grouping: !src.no_output_grouping,
verbose: src.verbose,
extensions: if src.extensions.is_empty() {
default_exts.iter().map(|ext| ext[1..].into()).collect()
} else {
default_exts
.iter()
.map(|ext| ext[1..].into())
.chain(src.extensions.iter().map(|ext| ext[1..].into()))
.collect()
},
ignore_dirs: if src.ignore_dirs.is_empty() {
default_ignore_dirs.iter().map(|ext| ext.into()).collect()
} else {
default_ignore_dirs
.iter()
.map(|ext| ext.into())
.chain(src.ignore_dirs.iter().map(|ext| ext.into()))
.collect()
},
})
}
}
struct ProcessTree<'a> {
settings: &'a Settings,
repo: &'a Repository,
checked_paths: HashSet<PathBuf>,
checked_blobs: HashSet<Oid>,
checked_trees: HashSet<Oid>,
walked: usize,
skipped_blobs: usize,
all_matches: Vec<MatchEntry>,
}
impl<'a> ProcessTree<'a> {
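    /// Recursively scan one tree object: subtrees are walked depth-first, while
    /// blobs are filtered by path/OID dedup, the ignore list, extension, and a
    /// binary check before their contents are handed to `process_file`.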
fn process(&mut self, tree: &Tree, commit: &Commit, path: &Path, visited: &mut bool) {
if self.checked_trees.contains(&tree.id()) {
return;
}
self.checked_trees.insert(tree.id());
self.walked += 1;
for entry in tree {
match (|| {
let name = entry.name()?;
let entry_path = path.join(name);
// We want to match with absolute path from root, but it seems impossible with `tree.walk`.
if self.settings.once_file && self.checked_paths.contains(&entry_path) {
return None;
}
self.checked_paths.insert(entry_path.clone());
let obj = match entry.to_object(&self.repo) {
Ok(obj) => obj,
Err(e) => {
eprintln!("couldn't get_object: {:?}", e);
return None;
}
};
if obj.kind() == Some(ObjectType::Tree) {
self.process(obj.as_tree()?, commit, &entry_path, visited);
return None;
}
if entry.kind() != Some(ObjectType::Blob)
|| self.settings.ignore_dirs.contains(&OsString::from(name))
{
return None;
}
let blob = obj.peel_to_blob().ok()?;
if blob.is_binary() {
return None;
}
let ext = PathBuf::from(name).extension()?.to_owned();
if !self.settings.extensions.contains(&ext.to_ascii_lowercase()) {
return None;
}
if self.checked_blobs.contains(&blob.id()) {
self.skipped_blobs += 1;
return None;
}
self.checked_blobs.insert(blob.id());
let ret = process_file(self.settings, commit, blob.content(), &entry_path, visited);
Some(ret)
})() {
Some(matches) => {
self.all_matches.extend(matches);
}
_ => (),
}
}
}
}
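/// Walk commit history breadth-first, starting from HEAD, the named branch, or
/// every reference when --all is given, scanning each unvisited commit's tree
/// until the parent queue is empty or the depth limit is reached.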
fn process_files_git(_root: &Path, settings: &Settings) -> Result<Vec<MatchEntry>> {
let repo = Repository::open(&settings.repo)?;
let reference = if let Some(ref branch) = settings.branch {
repo.resolve_reference_from_short_name(&branch)?
} else {
repo.head()?
};
let mut process_tree = ProcessTree {
settings,
repo: &repo,
checked_paths: HashSet::new(),
checked_blobs: HashSet::new(),
checked_trees: HashSet::new(),
walked: 0,
skipped_blobs: 0,
all_matches: vec![],
};
let mut checked_commits = HashMap::new();
let mut iter = 0;
let mut next_refs = if settings.all {
repo.references()?
.map(|refs| refs.and_then(|refb| refb.peel_to_commit()))
.collect::<std::result::Result<Vec<_>, _>>()?
} else {
vec![reference.peel_to_commit()?]
};
loop {
for commit in &next_refs {
if checked_commits.contains_key(&commit.id()) {
continue;
}
let entry = checked_commits.entry(commit.id()).or_insert(false);
let tree = if let Ok(tree) = commit.tree() {
tree
} else {
continue;
};
process_tree.process(&tree, commit, &PathBuf::from(""), entry);
}
next_refs = next_refs
.iter()
.map(|reference| reference.parent_ids())
.flatten()
.filter(|reference| !checked_commits.contains_key(reference))
.map(|id| repo.find_commit(id))
.collect::<std::result::Result<Vec<_>, git2::Error>>()?;
if settings.verbose {
eprintln!(
"[{}] {} Matches in {} files {} skipped blobs... Next round has {} refs...",
iter,
process_tree.all_matches.len(),
process_tree.walked,
process_tree.skipped_blobs,
next_refs.len()
);
}
iter += 1;
if next_refs.is_empty() || settings.depth.map(|depth| depth <= iter).unwrap_or(false) {
break;
}
}
Ok(process_tree.all_matches)
}
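/// Scan a single blob with the configured regex; non-UTF-8 content is skipped
/// and every match is recorded with its byte range in the file.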
fn process_file(
settings: &Settings,
commit: &Commit,
input: &[u8],
filepath: &Path,
visited: &mut bool,
) -> Vec<MatchEntry> {
let mut ret = vec![];
// Non-utf8 files are not supported.
let input_str = if let Ok(utf8) = std::str::from_utf8(&input) {
utf8
} else {
return vec![];
};
for found in settings.pattern.find_iter(&input_str) {
ret.push(MatchEntry {
commit: commit.id(),
path: filepath.to_path_buf(),
start: found.start(),
end: found.end(),
});
// Very naive way to count line numbers. Assumes newlines would not be part of multibyte
// character, which is true for utf8 that is the only supported encoding in Rust anyway.
let mut line_number = 1;
let mut line_start = 0;
let mut line_end = 0;
for (i, c) in input.iter().enumerate() {
if *c == b'\n' {
line_number += 1;
if i < found.start() {
line_start = (i + 1).min(input.len());
}
if found.end() <= i | {
line_end = (i as usize).max(line_start);
break;
} | conditional_block |
|
lib.rs | directly, but being able
//! to combine modes like this could be useful for workshops in which
//! participants work through test cases enabling one at a time. Trybuild was
//! originally developed for my [procedural macros workshop at Rust
//! Latam][workshop].
//!
//! [workshop]: https://github.com/dtolnay/proc-macro-workshop
//!
//! ```
//! #[test]
//! fn ui() {
//! let t = trybuild::TestCases::new();
//! t.pass("tests/01-parse-header.rs");
//! t.pass("tests/02-parse-body.rs");
//! t.compile_fail("tests/03-expand-four-errors.rs");
//! t.pass("tests/04-paste-ident.rs");
//! t.pass("tests/05-repeat-section.rs");
//! //t.pass("tests/06-make-work-in-function.rs");
//! //t.pass("tests/07-init-array.rs");
//! //t.compile_fail("tests/08-ident-span.rs");
//! }
//! ```
//!
//! Pass tests are considered to succeed if they compile successfully and have a
//! `main` function that does not panic when the compiled binary is executed.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186580-7f376f80-6e96-11e9-9cae-8257609269ef.png" width="700">
//! </p>
//!
//! <br>
//!
//! # Details
//!
//! That's the entire API.
//!
//! <br>
//!
//! # Workflow
//!
//! There are two ways to update the _*.stderr_ files as you iterate on your
//! test cases or your library; handwriting them is not recommended.
//!
//! First, if a test case is being run as compile_fail but a corresponding
//! _*.stderr_ file does not exist, the test runner will save the actual
//! compiler output with the right filename into a directory called *wip* within
//! the directory containing Cargo.toml. So you can update these files by
//! deleting them, running `cargo test`, and moving all the files from *wip*
//! into your testcase directory.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186579-7cd51580-6e96-11e9-9f19-54dcecc9fbba.png" width="700">
//! </p>
//!
//! Alternatively, run `cargo test` with the environment variable
//! `TRYBUILD=overwrite` to skip the *wip* directory and write all compiler
//! output directly in place. You'll want to check `git diff` afterward to be
//! sure the compiler's output is what you had in mind.
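//!
//! A typical refresh pass, assuming the conventional `tests/ui` layout, might
//! look something like this (the exact paths depend on your project):
//!
//! ```console
//! $ TRYBUILD=overwrite cargo test
//! $ git diff tests/ui
//! ```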
//!
//! <br>
//!
//! # What to test
//!
//! When it comes to compile-fail tests, write tests for anything for which you
//! care to find out when there are changes in the user-facing compiler output.
//! As a negative example, please don't write compile-fail tests simply calling
//! all of your public APIs with arguments of the wrong type; there would be no
//! benefit.
//!
//! A common use would be for testing specific targeted error messages emitted
//! by a procedural macro. For example the derive macro from the [`ref-cast`]
//! crate is required to be placed on a type that has either `#[repr(C)]` or
//! `#[repr(transparent)]` in order for the expansion to be free of undefined
//! behavior, which it enforces at compile time:
//!
//! [`ref-cast`]: https://github.com/dtolnay/ref-cast
//!
//! ```console
//! error: RefCast trait requires #[repr(C)] or #[repr(transparent)]
//! --> $DIR/missing-repr.rs:3:10
//! |
//! 3 | #[derive(RefCast)]
//! | ^^^^^^^
//! ```
//!
//! Macros that consume helper attributes will want to check that unrecognized
//! content within those attributes is properly indicated to the caller. Is the
//! error message correctly placed under the erroneous tokens, not on a useless
//! call\_site span?
//!
//! ```console
//! error: unknown serde field attribute `qqq`
//! --> $DIR/unknown-attribute.rs:5:13
//! |
//! 5 | #[serde(qqq = "...")]
//! | ^^^
//! ```
//!
//! Declarative macros can benefit from compile-fail tests too. The [`json!`]
//! macro from serde\_json is just a great big macro\_rules macro but makes an
//! effort to have error messages from broken JSON in the input always appear on
//! the most appropriate token:
//!
//! [`json!`]: https://docs.rs/serde_json/1.0/serde_json/macro.json.html
//!
//! ```console
//! error: no rules expected the token `,`
//! --> $DIR/double-comma.rs:4:38
//! |
//! 4 | println!("{}", json!({ "k": null,, }));
//! | ^ no rules expected this token in macro call
//! ```
//!
//! Sometimes we may have a macro that expands successfully but we count on it
//! to trigger particular compiler errors at some point beyond macro expansion.
//! For example the [`readonly`] crate introduces struct fields that are public
//! but readable only, even if the caller has a &mut reference to the
//! surrounding struct. If someone writes to a readonly field, we need to be
//! sure that it wouldn't compile:
//!
//! [`readonly`]: https://github.com/dtolnay/readonly
//!
//! ```console
//! error[E0594]: cannot assign to data in a `&` reference
//! --> $DIR/write-a-readonly.rs:17:26
//! |
//! 17 | println!("{}", s.n); s.n += 1;
//! | ^^^^^^^^ cannot assign
//! ```
//!
//! In all of these cases, the compiler's output can change because our crate or
//! one of our dependencies broke something, or as a consequence of changes in
//! the Rust compiler. Both are good reasons to have well conceived compile-fail
//! tests. If we refactor and mistakenly cause an error that used to be correct
//! to now no longer be emitted or be emitted in the wrong place, that is
//! important for a test suite to catch. If the compiler changes something that
//! makes error messages that we care about substantially worse, it is also
//! important to catch and report as a compiler issue.
#![doc(html_root_url = "https://docs.rs/trybuild/1.0.83")]
#![allow(
clippy::collapsible_if,
clippy::default_trait_access,
clippy::derive_partial_eq_without_eq,
clippy::doc_markdown,
clippy::enum_glob_use,
clippy::iter_not_returning_iterator, // https://github.com/rust-lang/rust-clippy/issues/8285
clippy::let_underscore_untyped, // https://github.com/rust-lang/rust-clippy/issues/10410
clippy::manual_assert,
clippy::manual_range_contains,
clippy::module_inception,
clippy::module_name_repetitions,
clippy::must_use_candidate,
clippy::needless_pass_by_value,
clippy::non_ascii_literal,
clippy::range_plus_one,
clippy::similar_names,
clippy::single_match_else,
clippy::too_many_lines,
clippy::trivially_copy_pass_by_ref,
clippy::unused_self,
clippy::while_let_on_iterator,
)]
#![deny(clippy::clone_on_ref_ptr)]
#[macro_use]
mod term;
#[macro_use]
mod path;
mod cargo;
mod dependencies;
mod diff;
mod directory;
mod env;
mod error;
mod expand;
mod features;
mod flock;
mod inherit;
mod manifest;
mod message;
mod normalize;
mod run;
mod rustflags;
use std::cell::RefCell;
use std::panic::RefUnwindSafe;
use std::path::{Path, PathBuf};
use std::thread;
#[derive(Debug)]
pub struct TestCases {
runner: RefCell<Runner>,
}
#[derive(Debug)]
struct Runner {
tests: Vec<Test>,
}
#[derive(Clone, Debug)]
struct Test {
path: PathBuf,
expected: Expected,
}
#[derive(Copy, Clone, Debug)]
enum Expected {
Pass,
CompileFail,
}
impl TestCases {
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
TestCases {
runner: RefCell::new(Runner { tests: Vec::new() }),
}
}
pub fn pass<P: AsRef<Path>>(&self, path: P) {
self.runner.borrow_mut().tests.push(Test {
path: path.as_ref().to_owned(),
expected: Expected::Pass,
});
}
pub fn compile_fail<P: AsRef<Path>>(&self, path: P) {
self.runner.borrow_mut().tests.push(Test {
path: path.as_ref().to_owned(),
expected: Expected::CompileFail,
});
}
}
impl RefUnwindSafe for TestCases {}
#[doc(hidden)]
impl Drop for TestCases {
fn | drop | identifier_name |
|
lib.rs | test cases enabling one at a time. Trybuild was
//! originally developed for my [procedural macros workshop at Rust
//! Latam][workshop].
//!
//! [workshop]: https://github.com/dtolnay/proc-macro-workshop
//!
//! ```
//! #[test]
//! fn ui() {
//! let t = trybuild::TestCases::new();
//! t.pass("tests/01-parse-header.rs");
//! t.pass("tests/02-parse-body.rs");
//! t.compile_fail("tests/03-expand-four-errors.rs");
//! t.pass("tests/04-paste-ident.rs");
//! t.pass("tests/05-repeat-section.rs");
//! //t.pass("tests/06-make-work-in-function.rs");
//! //t.pass("tests/07-init-array.rs");
//! //t.compile_fail("tests/08-ident-span.rs");
//! }
//! ```
//!
//! Pass tests are considered to succeed if they compile successfully and have a
//! `main` function that does not panic when the compiled binary is executed.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186580-7f376f80-6e96-11e9-9cae-8257609269ef.png" width="700">
//! </p>
//!
//! <br>
//!
//! # Details
//!
//! That's the entire API.
//!
//! <br>
//!
//! # Workflow
//!
//! There are two ways to update the _*.stderr_ files as you iterate on your
//! test cases or your library; handwriting them is not recommended.
//!
//! First, if a test case is being run as compile_fail but a corresponding
//! _*.stderr_ file does not exist, the test runner will save the actual
//! compiler output with the right filename into a directory called *wip* within
//! the directory containing Cargo.toml. So you can update these files by
//! deleting them, running `cargo test`, and moving all the files from *wip*
//! into your testcase directory.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186579-7cd51580-6e96-11e9-9f19-54dcecc9fbba.png" width="700">
//! </p>
//!
//! Alternatively, run `cargo test` with the environment variable
//! `TRYBUILD=overwrite` to skip the *wip* directory and write all compiler
//! output directly in place. You'll want to check `git diff` afterward to be
//! sure the compiler's output is what you had in mind.
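//!
//! A typical refresh pass, assuming the conventional `tests/ui` layout, might
//! look something like this (the exact paths depend on your project):
//!
//! ```console
//! $ TRYBUILD=overwrite cargo test
//! $ git diff tests/ui
//! ```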
//!
//! <br>
//!
//! # What to test
//!
//! When it comes to compile-fail tests, write tests for anything for which you
//! care to find out when there are changes in the user-facing compiler output.
//! As a negative example, please don't write compile-fail tests simply calling
//! all of your public APIs with arguments of the wrong type; there would be no
//! benefit.
//!
//! A common use would be for testing specific targeted error messages emitted
//! by a procedural macro. For example the derive macro from the [`ref-cast`]
//! crate is required to be placed on a type that has either `#[repr(C)]` or
//! `#[repr(transparent)]` in order for the expansion to be free of undefined
//! behavior, which it enforces at compile time:
//!
//! [`ref-cast`]: https://github.com/dtolnay/ref-cast
//!
//! ```console
//! error: RefCast trait requires #[repr(C)] or #[repr(transparent)]
//! --> $DIR/missing-repr.rs:3:10
//! |
//! 3 | #[derive(RefCast)]
//! | ^^^^^^^
//! ```
//!
//! Macros that consume helper attributes will want to check that unrecognized
//! content within those attributes is properly indicated to the caller. Is the
//! error message correctly placed under the erroneous tokens, not on a useless
//! call\_site span?
//!
//! ```console
//! error: unknown serde field attribute `qqq`
//! --> $DIR/unknown-attribute.rs:5:13
//! |
//! 5 | #[serde(qqq = "...")]
//! | ^^^
//! ```
//!
//! Declarative macros can benefit from compile-fail tests too. The [`json!`]
//! macro from serde\_json is just a great big macro\_rules macro but makes an
//! effort to have error messages from broken JSON in the input always appear on
//! the most appropriate token:
//!
//! [`json!`]: https://docs.rs/serde_json/1.0/serde_json/macro.json.html
//!
//! ```console
//! error: no rules expected the token `,`
//! --> $DIR/double-comma.rs:4:38
//! |
//! 4 | println!("{}", json!({ "k": null,, }));
//! | ^ no rules expected this token in macro call
//! ```
//!
//! Sometimes we may have a macro that expands successfully but we count on it
//! to trigger particular compiler errors at some point beyond macro expansion.
//! For example the [`readonly`] crate introduces struct fields that are public
//! but readable only, even if the caller has a &mut reference to the
//! surrounding struct. If someone writes to a readonly field, we need to be
//! sure that it wouldn't compile:
//!
//! [`readonly`]: https://github.com/dtolnay/readonly
//!
//! ```console
//! error[E0594]: cannot assign to data in a `&` reference
//! --> $DIR/write-a-readonly.rs:17:26
//! |
//! 17 | println!("{}", s.n); s.n += 1;
//! | ^^^^^^^^ cannot assign
//! ```
//!
//! In all of these cases, the compiler's output can change because our crate or
//! one of our dependencies broke something, or as a consequence of changes in
//! the Rust compiler. Both are good reasons to have well conceived compile-fail
//! tests. If we refactor and mistakenly cause an error that used to be correct
//! to now no longer be emitted or be emitted in the wrong place, that is
//! important for a test suite to catch. If the compiler changes something that
//! makes error messages that we care about substantially worse, it is also
//! important to catch and report as a compiler issue.
#![doc(html_root_url = "https://docs.rs/trybuild/1.0.83")]
#![allow(
clippy::collapsible_if,
clippy::default_trait_access,
clippy::derive_partial_eq_without_eq,
clippy::doc_markdown,
clippy::enum_glob_use,
clippy::iter_not_returning_iterator, // https://github.com/rust-lang/rust-clippy/issues/8285
clippy::let_underscore_untyped, // https://github.com/rust-lang/rust-clippy/issues/10410
clippy::manual_assert,
clippy::manual_range_contains,
clippy::module_inception,
clippy::module_name_repetitions,
clippy::must_use_candidate,
clippy::needless_pass_by_value,
clippy::non_ascii_literal,
clippy::range_plus_one,
clippy::similar_names,
clippy::single_match_else,
clippy::too_many_lines,
clippy::trivially_copy_pass_by_ref,
clippy::unused_self,
clippy::while_let_on_iterator,
)]
#![deny(clippy::clone_on_ref_ptr)]
#[macro_use]
mod term;
#[macro_use]
mod path;
mod cargo;
mod dependencies;
mod diff;
mod directory;
mod env;
mod error;
mod expand;
mod features;
mod flock;
mod inherit;
mod manifest;
mod message;
mod normalize;
mod run;
mod rustflags;
use std::cell::RefCell;
use std::panic::RefUnwindSafe;
use std::path::{Path, PathBuf};
use std::thread;
#[derive(Debug)]
pub struct TestCases {
runner: RefCell<Runner>,
}
#[derive(Debug)]
struct Runner {
tests: Vec<Test>,
}
#[derive(Clone, Debug)]
struct Test {
path: PathBuf,
expected: Expected,
}
#[derive(Copy, Clone, Debug)]
enum Expected {
Pass,
CompileFail,
}
impl TestCases {
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
TestCases {
runner: RefCell::new(Runner { tests: Vec::new() }),
}
}
pub fn pass<P: AsRef<Path>>(&self, path: P) {
self.runner.borrow_mut().tests.push(Test {
path: path.as_ref().to_owned(),
expected: Expected::Pass,
});
}
pub fn compile_fail<P: AsRef<Path>>(&self, path: P) {
self.runner.borrow_mut().tests.push(Test {
path: path.as_ref().to_owned(),
expected: Expected::CompileFail,
});
}
}
impl RefUnwindSafe for TestCases {}
#[doc(hidden)]
impl Drop for TestCases {
fn drop(&mut self) {
if !thread::panicking() | {
self.runner.borrow_mut().run();
} | conditional_block |
|
lib.rs | by the macro or errors detected by the Rust compiler in the
//! resulting expanded code, and compare against the expected errors to ensure
//! that they remain user-friendly.
//!
//! This style of testing is sometimes called *ui tests* because they test
//! aspects of the user's interaction with a library outside of what would be
//! covered by ordinary API tests.
//!
//! Nothing here is specific to macros; trybuild would work equally well for
//! testing misuse of non-macro APIs.
//!
//! <br>
//!
//! # Compile-fail tests
//!
//! A minimal trybuild setup looks like this:
//!
//! ```
//! #[test]
//! fn ui() {
//! let t = trybuild::TestCases::new();
//! t.compile_fail("tests/ui/*.rs");
//! }
//! ```
//!
//! The test can be run with `cargo test`. It will individually compile each of
//! the source files matching the glob pattern, expect them to fail to compile,
//! and assert that the compiler's error message matches an adjacently named
//! _*.stderr_ file containing the expected output (same file name as the test
//! except with a different extension). If it matches, the test case is
//! considered to succeed.
//!
//! Dependencies listed under `[dev-dependencies]` in the project's Cargo.toml
//! are accessible from within the test cases.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186574-76469e00-6e96-11e9-8cb5-b63b657170c9.png" width="700">
//! </p>
//!
//! Failing tests display the expected vs actual compiler output inline.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186575-79418e80-6e96-11e9-9478-c9b3dc10327f.png" width="700">
//! </p>
//!
//! A compile_fail test that fails to fail to compile is also a failure.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186576-7b0b5200-6e96-11e9-8bfd-2de705125108.png" width="700">
//! </p>
//!
//! <br>
//!
//! # Pass tests
//!
//! The same test harness is able to run tests that are expected to pass, too.
//! Ordinarily you would just have Cargo run such tests directly, but being able
//! to combine modes like this could be useful for workshops in which
//! participants work through test cases enabling one at a time. Trybuild was
//! originally developed for my [procedural macros workshop at Rust
//! Latam][workshop].
//!
//! [workshop]: https://github.com/dtolnay/proc-macro-workshop
//!
//! ```
//! #[test]
//! fn ui() {
//! let t = trybuild::TestCases::new();
//! t.pass("tests/01-parse-header.rs");
//! t.pass("tests/02-parse-body.rs");
//! t.compile_fail("tests/03-expand-four-errors.rs");
//! t.pass("tests/04-paste-ident.rs");
//! t.pass("tests/05-repeat-section.rs");
//! //t.pass("tests/06-make-work-in-function.rs");
//! //t.pass("tests/07-init-array.rs");
//! //t.compile_fail("tests/08-ident-span.rs");
//! }
//! ```
//!
//! Pass tests are considered to succeed if they compile successfully and have a
//! `main` function that does not panic when the compiled binary is executed.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186580-7f376f80-6e96-11e9-9cae-8257609269ef.png" width="700">
//! </p>
//!
//! <br>
//!
//! # Details
//!
//! That's the entire API.
//!
//! <br>
//!
//! # Workflow
//!
//! There are two ways to update the _*.stderr_ files as you iterate on your
//! test cases or your library; handwriting them is not recommended.
//!
//! First, if a test case is being run as compile_fail but a corresponding
//! _*.stderr_ file does not exist, the test runner will save the actual
//! compiler output with the right filename into a directory called *wip* within
//! the directory containing Cargo.toml. So you can update these files by
//! deleting them, running `cargo test`, and moving all the files from *wip*
//! into your testcase directory.
//!
//! <p align="center">
//! <img src="https://user-images.githubusercontent.com/1940490/57186579-7cd51580-6e96-11e9-9f19-54dcecc9fbba.png" width="700">
//! </p>
//!
//! Alternatively, run `cargo test` with the environment variable
//! `TRYBUILD=overwrite` to skip the *wip* directory and write all compiler
//! output directly in place. You'll want to check `git diff` afterward to be
//! sure the compiler's output is what you had in mind.
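//!
//! A typical refresh pass, assuming the conventional `tests/ui` layout, might
//! look something like this (the exact paths depend on your project):
//!
//! ```console
//! $ TRYBUILD=overwrite cargo test
//! $ git diff tests/ui
//! ```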
//!
//! <br>
//!
//! # What to test
//!
//! When it comes to compile-fail tests, write tests for anything for which you
//! care to find out when there are changes in the user-facing compiler output.
//! As a negative example, please don't write compile-fail tests simply calling
//! all of your public APIs with arguments of the wrong type; there would be no
//! benefit.
//!
//! A common use would be for testing specific targeted error messages emitted | //! [`ref-cast`]: https://github.com/dtolnay/ref-cast
//!
//! ```console
//! error: RefCast trait requires #[repr(C)] or #[repr(transparent)]
//! --> $DIR/missing-repr.rs:3:10
//! |
//! 3 | #[derive(RefCast)]
//! | ^^^^^^^
//! ```
//!
//! Macros that consume helper attributes will want to check that unrecognized
//! content within those attributes is properly indicated to the caller. Is the
//! error message correctly placed under the erroneous tokens, not on a useless
//! call\_site span?
//!
//! ```console
//! error: unknown serde field attribute `qqq`
//! --> $DIR/unknown-attribute.rs:5:13
//! |
//! 5 | #[serde(qqq = "...")]
//! | ^^^
//! ```
//!
//! Declarative macros can benefit from compile-fail tests too. The [`json!`]
//! macro from serde\_json is just a great big macro\_rules macro but makes an
//! effort to have error messages from broken JSON in the input always appear on
//! the most appropriate token:
//!
//! [`json!`]: https://docs.rs/serde_json/1.0/serde_json/macro.json.html
//!
//! ```console
//! error: no rules expected the token `,`
//! --> $DIR/double-comma.rs:4:38
//! |
//! 4 | println!("{}", json!({ "k": null,, }));
//! | ^ no rules expected this token in macro call
//! ```
//!
//! Sometimes we may have a macro that expands successfully but we count on it
//! to trigger particular compiler errors at some point beyond macro expansion.
//! For example the [`readonly`] crate introduces struct fields that are public
//! but readable only, even if the caller has a &mut reference to the
//! surrounding struct. If someone writes to a readonly field, we need to be
//! sure that it wouldn't compile:
//!
//! [`readonly`]: https://github.com/dtolnay/readonly
//!
//! ```console
//! error[E0594]: cannot assign to data in a `&` reference
//! --> $DIR/write-a-readonly.rs:17:26
//! |
//! 17 | println!("{}", s.n); s.n += 1;
//! | ^^^^^^^^ cannot assign
//! ```
//!
//! In all of these cases, the compiler's output can change because our crate or
//! one of our dependencies broke something, or as a consequence of changes in
//! the Rust compiler. Both are good reasons to have well conceived compile-fail
//! tests. If we refactor and mistakenly cause an error that used to be correct
//! to now no longer be emitted or be emitted in the wrong place, that is
//! important for a test suite to catch. If the compiler changes something that
//! makes error messages that we care about substantially worse, it is also
//! important | //! by a procedural macro. For example the derive macro from the [`ref-cast`]
//! crate is required to be placed on a type that has either `#[repr(C)]` or
//! `#[repr(transparent)]` in order for the expansion to be free of undefined
//! behavior, which it enforces at compile time:
//! | random_line_split |
twitter.py | .strip()
u = urlmap.get(title_search_term, None)
if u is not None:
source_path = Path(u['source_path'])
full_path = contentdir / source_path
add_syndication(full_path, orig_tweet_url, "twitter")
return True
else:
print("######## Unmatched roytang url: %s" % (url))
print(d1["full_text"])
return True
return False
def process_tweet(d1):
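    """Decide how one archived tweet is imported.

    Tweets already mapped to a full post are skipped, tweets syndicated from
    another source (Instagram, tumblr, roytang.net) are recorded as a
    syndication on the matching entry, and anything left over becomes a new
    post via create_post().
    """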
orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, d1['id_str'])
if orig_tweet_url in urlmap:
og = urlmap.get(orig_tweet_url)
if og['source_path'].startswith('post\\') or og['source_path'].startswith('photos\\'):
            # no need to further process a tweet that is already mapped to a post
return True
tweet_source = d1["source"]
# print("#### %s: %s" % (tweet_source, orig_tweet_url))
# detect content syndicated from elsewhere
# instagram, tumblr, roytang.net
for s in syndicated_sources:
if tweet_source.find(s) >= 0:
for u in d1.get('entities', {}).get("urls", []):
raw_url = u["url"]
url = u["expanded_url"]
if process_syn_url(d1, raw_url, url):
return True
# print("######## URL = %s" % (url))
# also process raw urls
raw_urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', d1["full_text"])
for raw_url in raw_urls:
if process_syn_url(d1, raw_url, raw_url):
return True
break
return create_post(d1)
def import_all():
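    """Import every tweet in the archive via process_tweet, tallying counts by
    tweet source, replies, retweets, and media, and saving the resolved-URL
    cache at the end."""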
countbysource = {}
replies = 0
retweets = 0
withmedia = 0
raw = 0
with Path(SOURCE_FILE).open(encoding='utf-8') as f:
d = json.load(f)
idx = 0
for d1 in d:
if debug_id is not None and d1["id_str"] != debug_id:
continue
if process_tweet(d1):
continue
tweet_source = d1["source"]
if tweet_source not in countbysource:
countbysource[tweet_source] = 1
else:
countbysource[tweet_source] = countbysource[tweet_source] + 1
is_reply = False
if "in_reply_to_status_id_str" in d1 and "in_reply_to_screen_name" in d1:
replies = replies + 1
is_reply = True
# handle retweet
is_retweet = False
content = d1["full_text"]
if content.startswith("RT @"):
retweets = retweets + 1
is_retweet = True
media = []
if "extended_entities" in d1:
for m in d1["extended_entities"]["media"]:
media.append(m["media_url_https"])
if len(media) > 0:
withmedia = withmedia + 1
if not is_reply and not is_retweet and len(media) == 0:
raw = raw + 1
idx = idx + 1
# if idx > 100:
# break
# save the url cache for future use
resolver.save_cache()
for source in countbysource:
print("countbysource: %s = %s" % (source, countbysource[source]))
print("replies: %s" % (replies))
print("retweets: %s" % (retweets))
print("withmedia: %s" % (withmedia))
print("raw: %s" % (raw))
print("total: %s" % (idx))
def thread_replies():
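    """Second pass over the archive: fold self-replies into the post they reply
    to, so a tweet thread becomes one bundled entry instead of separate reply
    files."""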
with Path(SOURCE_FILE).open(encoding='utf-8') as f:
d = json.load(f)
idx = 0
# process in reverse order so tweet sequences are in order
d = reversed(d)
for d1 in d:
is_reply = False
if "in_reply_to_status_id_str" in d1 and "in_reply_to_screen_name" in d1:
is_reply = True
if not is_reply:
continue
id_str = d1['id_str']
# if id_str != "602009895437737984" and id_str != "602009747294924802":
# continue
orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, id_str)
            # don't bother if it's already syndicated
if orig_tweet_url in urlmap:
continue
date = datetime.strptime(d1['created_at'], "%a %b %d %H:%M:%S %z %Y")
# process replies to myself
if d1["in_reply_to_screen_name"] == TWITTER_USERNAME:
replied_to_url = "https://twitter.com/%s/statuses/%s/" % (d1['in_reply_to_screen_name'], d1['in_reply_to_status_id_str'])
info = urlmap[replied_to_url]
source_path = Path(info['source_path'])
full_path = contentdir / source_path
# welp, we might as well move them to bundles
if full_path.name == "index.md":
parentdir = full_path.parent
else:
parentdir = full_path.parent / full_path.stem
if not parentdir.exists():
parentdir.mkdir(parents=True)
oldfile = full_path
full_path = parentdir / "index.md"
shutil.move(str(oldfile), str(full_path))
# also update the urlmap!
urlmap[replied_to_url]['source_path'] = str(full_path.relative_to(contentdir))
# append the reply to the original post, and add it as a syndication as well
with full_path.open(encoding="UTF-8") as f:
try:
post = frontmatter.load(f)
except:
print("Error parsing file")
return
post['syndicated'].append({
'type': 'twitter',
'url': orig_tweet_url
})
content = get_content(d1)
post.content = post.content + "\n\r" + content
newfile = frontmatter.dumps(post)
with full_path.open("w", encoding="UTF-8") as w:
w.write(newfile)
# copy over any media from the reply as well
media = []
for m in d1.get("extended_entities", {}).get("media", []):
media.append(m["media_url_https"])
for imgfile in mediadir.glob(d1["id_str"] + "*.*"):
to_file = parentdir / imgfile.name
shutil.copy(str(imgfile), str(to_file))
# delete any existing file created for this reply
oldfile = contentdir / "replies" / date.strftime("%Y") / date.strftime("%m") / (id_str + ".md")
if oldfile.exists():
os.remove(str(oldfile))
oldfolder = contentdir / "replies" / date.strftime("%Y") / date.strftime("%m") / (id_str)
if oldfolder.exists():
shutil.rmtree(str(oldfolder))
# replace this entry in the urlmap! this is so that succeeding replies can find the correct root tweet to attach to
urlmap[orig_tweet_url] = info
else:
continue
idx = idx + 1
print(idx)
from utils import urlmap_to_mdfile
def cleanup_videos():
with Path(SOURCE_FILE).open(encoding='utf-8') as f:
d = json.load(f)
idx = 0
for d1 in d:
orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, d1["id_str"])
info = urlmap.get(orig_tweet_url)
if info is None:
continue
for m in d1.get("extended_entities", {}).get("media", []):
if "video_info" in m:
videos = []
lowest_bitrate = 1000000000000
lowest_video = ""
for vi in m["video_info"]["variants"]:
if 'bitrate' in vi:
videos.append(vi["url"])
bitrate = int(vi['bitrate'])
if bitrate < lowest_bitrate:
lowest_video = vi["url"]
lowest_bitrate = bitrate
mdfile = urlmap_to_mdfile(info)
if str(mdfile).find("\\photos\\") >= 0:
| print(mdfile)
# move it to notes, since it's not a photo
p = PostBuilder.from_mdfile(mdfile)
p.kind = "notes"
p.save()
# delete the old files
container = mdfile.parent
for f in container.iterdir():
os.remove(str(f))
container.rmdir() | conditional_block |
|
twitter.py | , stype):
with mdfile.open(encoding="UTF-8") as f:
try:
post = frontmatter.load(f)
except:
print("Error parsing file")
return
if post.get('syndicated') == None:
post['syndicated'] = []
else:
for s in post['syndicated']:
if s["type"] == stype and s["url"] == url:
# don't add a duplicate!
return
post['syndicated'].append({
'type': stype,
'url': url
})
newfile = frontmatter.dumps(post)
with mdfile.open("w", encoding="UTF-8") as w:
w.write(newfile)
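# Example usage sketch (hypothetical path and URL, added for illustration): given a post
# that a tweet links to, a call like the one below records the tweet as a syndication of
# that post; the loop above skips the write when the same (type, url) pair is already listed.
# add_syndication(contentdir / "notes" / "2020" / "05" / "mypost" / "index.md",
#                 "https://twitter.com/%s/statuses/1234567890/" % TWITTER_USERNAME,
#                 "twitter")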
def get_content(t):
content = t['full_text']
if "entities" in t:
# get raw urls in the text
raw_urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', content)
# replace mentions with link
for m in t["entities"]["user_mentions"]:
screen_name = m["screen_name"]
# replace with markdown link
mdlink = "[@%s](https://twitter.com/%s/)" % (screen_name, screen_name)
content = content.replace("@"+screen_name, mdlink)
processed_urls = []
# clean urls
for u in t["entities"]["urls"]:
url = u["url"]
processed_urls.append(url)
expanded_url = u["expanded_url"]
processed_urls.append(expanded_url)
# print("##### A URL!!! %s" % expanded_url)
expanded_url, no_errors = resolver.get_final_url(expanded_url)
processed_urls.append(expanded_url)
content = content.replace(url, expanded_url)
# find urls that were not in the entities
for raw_url in raw_urls:
if raw_url not in processed_urls:
expanded_url, no_errors = resolver.get_final_url(raw_url)
content = content.replace(raw_url, expanded_url)
return content
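# Illustrative sketch with made-up data (not from the archive): get_content above turns
# @mentions into markdown links and swaps t.co short links for their expanded/resolved
# targets, so a hypothetical tweet dict like the one below would come out roughly as
# "[@someuser](https://twitter.com/someuser/) hello https://example.com/page".
# demo_tweet = {
#     "full_text": "@someuser hello https://t.co/abc123",
#     "entities": {
#         "user_mentions": [{"screen_name": "someuser"}],
#         "urls": [{"url": "https://t.co/abc123", "expanded_url": "https://example.com/page"}],
#     },
# }
# print(get_content(demo_tweet))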
def create_post(t):
id = t['id_str']
d = datetime.strptime(t['created_at'], "%a %b %d %H:%M:%S %z %Y")
content = get_content(t)
post = frontmatter.Post(content)
post['date'] = d
post['syndicated'] = [
{
"type": "twitter",
"url": "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, t['id'])
}
]
kind = "notes"
if "in_reply_to_status_id_str" in t and "in_reply_to_screen_name" in t:
kind = "replies"
post["reply_to"] = {
"type": "twitter",
"url": "https://twitter.com/%s/statuses/%s/" % (t['in_reply_to_screen_name'], t['in_reply_to_status_id_str']),
"name": t["in_reply_to_screen_name"],
"label": "%s's tweet" % (t["in_reply_to_screen_name"])
}
elif t["full_text"].startswith("RT @"):
rc = retweetscache.get(id)
if rc is None:
# RTed status is inaccessible, we'll just render it as an ordinary note
pass
else:
if "retweeted_user" in rc:
kind = "reposts"
post['repost_source'] = {
"type": "twitter",
"name": rc["retweeted_user"],
"url": "https://twitter.com/%s/statuses/%s/" % (rc['retweeted_user'], rc['retweeted_id'])
}
# don't process reposts for now
# return False
else:
# 785744070027030528 fails this
# RTed status is inaccessible, we'll just render it as an ordinary note
pass
# else:
# # don't process others for now
# return False
media = []
for m in t.get("extended_entities", {}).get("media", []):
media.append(m["media_url_https"])
if len(media) > 0:
if kind != "reposts" and kind != "replies":
kind = "photos"
# don't process media for now
# return False
tags = []
for tag in t.get('entities', {}).get('hashtags', []):
tags.append(tag['text'].lower())
parsed_tags = re.findall(r"\s#(\w+)", " " + content)
for tag in parsed_tags:
if tag not in tags:
tags.append(tag.lower())
for tag in auto_tags:
if tag not in tags:
tags.append(tag)
if len(tags) > 0:
post["tags"] = tags
post["source"] = "twitter"
outdir = contentdir / kind / d.strftime("%Y") / d.strftime("%m")
if len(media) > 0:
outdir = outdir / (id)
if not outdir.exists():
outdir.mkdir(parents=True)
if len(media) > 0:
outfile = outdir / ( "index.md" )
# find photos
for imgfile in mediadir.glob(id + "*.*"):
to_file = outdir / imgfile.name
shutil.copy(str(imgfile), str(to_file))
else:
outfile = outdir / ( id + ".md" )
newfile = frontmatter.dumps(post)
with outfile.open("w", encoding="UTF-8") as w:
w.write(newfile)
return True
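# Output layout sketch (hypothetical id/date, added for illustration): for a plain tweet
# with id 123 posted in May 2020, create_post above writes contentdir/notes/2020/05/123.md;
# if the tweet carries media, kind switches to "photos" and the post becomes a bundle at
# contentdir/photos/2020/05/123/index.md with the matching images copied alongside.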
def process_syn_url(d1, raw_url, url):
orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, d1['id_str'])
url, no_errors = resolver.get_final_url(url)
if not no_errors:
print(d1["full_text"])
url = url.replace("www.instagram.com", "instagram.com")
url = url.replace("/roytang0400", "")
url = urldefrag(url)[0]
if url.find("instagram.com") >= 0 and url.find("?") >= 0:
# remove utm and other misc query params from insta urls
url = url.split("?")[0]
if url in urlmap:
u = urlmap[url]
source_path = Path(u['source_path'])
full_path = contentdir / source_path
add_syndication(full_path, orig_tweet_url, "twitter")
return True
if url.find("://roytang.net") >= 0 or url.find("://mtgstorm.com") >= 0:
link_url = urlparse(url)
u = urlmap.get(link_url.path, None)
if u is None:
# try matching by title
title_search_term = d1["full_text"]
title_search_term = title_search_term.replace("New blog post: ", "")
title_search_term = title_search_term.replace("New post: ", "")
title_search_term = title_search_term.replace(raw_url, "")
title_search_term = title_search_term.strip()
u = urlmap.get(title_search_term, None)
if u is not None:
source_path = Path(u['source_path'])
full_path = contentdir / source_path
add_syndication(full_path, orig_tweet_url, "twitter")
return True
else:
print("######## Unmatched roytang url: %s" % (url)) | def process_tweet(d1):
orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, d1['id_str'])
if orig_tweet_url in urlmap:
og = urlmap.get(orig_tweet_url)
if og['source_path'].startswith('post\\') or og['source_path'].startswith('photos\\'):
# no need to process further any tweets that are already mapped to a post
return True
tweet_source = d1["source"]
# print("#### %s: %s" % (tweet_source, orig_tweet_url))
# detect content syndicated from elsewhere
# instagram, tumblr, roytang.net
for s in syndicated_sources:
if tweet_source.find(s) >= 0:
for u in d1.get('entities', {}).get("urls", []):
raw_url = u["url"]
url = u["expanded_url"]
if process_syn_url(d1, raw_url, url):
return True
# print("######## URL = %s" % (url))
# also process raw urls
raw_urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', d1["full_text"])
for raw_url in raw_urls:
if process_syn_url(d1, raw_url, raw_url):
return True
break
return create_post(d1)
def import_all():
countbysource = {}
replies = 0
retweets = 0
withmedia = 0 | print(d1["full_text"])
return True
return False
| random_line_split |
twitter.py | , stype):
with mdfile.open(encoding="UTF-8") as f:
try:
post = frontmatter.load(f)
except:
print("Error parsing file")
return
if post.get('syndicated') == None:
post['syndicated'] = []
else:
for s in post['syndicated']:
if s["type"] == stype and s["url"] == url:
# don't add a duplicate!
return
post['syndicated'].append({
'type': stype,
'url': url
})
newfile = frontmatter.dumps(post)
with mdfile.open("w", encoding="UTF-8") as w:
w.write(newfile)
def get_content(t):
content = t['full_text']
if "entities" in t:
# get raw urls in the text
raw_urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', content)
# replace mentions with link
for m in t["entities"]["user_mentions"]:
screen_name = m["screen_name"]
# replace with markdown link
mdlink = "[@%s](https://twitter.com/%s/)" % (screen_name, screen_name)
content = content.replace("@"+screen_name, mdlink)
processed_urls = []
# clean urls
for u in t["entities"]["urls"]:
url = u["url"]
processed_urls.append(url)
expanded_url = u["expanded_url"]
processed_urls.append(expanded_url)
# print("##### A URL!!! %s" % expanded_url)
expanded_url, no_errors = resolver.get_final_url(expanded_url)
processed_urls.append(expanded_url)
content = content.replace(url, expanded_url)
# find urls that were not in the entities
for raw_url in raw_urls:
if raw_url not in processed_urls:
expanded_url, no_errors = resolver.get_final_url(raw_url)
content = content.replace(raw_url, expanded_url)
return content
def create_post(t):
id = t['id_str']
d = datetime.strptime(t['created_at'], "%a %b %d %H:%M:%S %z %Y")
content = get_content(t)
post = frontmatter.Post(content)
post['date'] = d
post['syndicated'] = [
{
"type": "twitter",
"url": "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, t['id'])
}
]
kind = "notes"
if "in_reply_to_status_id_str" in t and "in_reply_to_screen_name" in t:
kind = "replies"
post["reply_to"] = {
"type": "twitter",
"url": "https://twitter.com/%s/statuses/%s/" % (t['in_reply_to_screen_name'], t['in_reply_to_status_id_str']),
"name": t["in_reply_to_screen_name"],
"label": "%s's tweet" % (t["in_reply_to_screen_name"])
}
elif t["full_text"].startswith("RT @"):
rc = retweetscache.get(id)
if rc is None:
# RTed status is inaccessible, we'll just render it as an ordinary note
pass
else:
if "retweeted_user" in rc:
kind = "reposts"
post['repost_source'] = {
"type": "twitter",
"name": rc["retweeted_user"],
"url": "https://twitter.com/%s/statuses/%s/" % (rc['retweeted_user'], rc['retweeted_id'])
}
# don't process reposts for now
# return False
else:
# 785744070027030528 fails this
# RTed status is inaccessible, we'll just render it as an ordinary note
pass
# else:
# # don't process others for now
# return False
media = []
for m in t.get("extended_entities", {}).get("media", []):
media.append(m["media_url_https"])
if len(media) > 0:
if kind != "reposts" and kind != "replies":
kind = "photos"
# don't process media for now
# return False
tags = []
for tag in t.get('entities', {}).get('hashtags', []):
tags.append(tag['text'].lower())
parsed_tags = re.findall(r"\s#(\w+)", " " + content)
for tag in parsed_tags:
if tag not in tags:
tags.append(tag.lower())
for tag in auto_tags:
if tag not in tags:
tags.append(tag)
if len(tags) > 0:
post["tags"] = tags
post["source"] = "twitter"
outdir = contentdir / kind / d.strftime("%Y") / d.strftime("%m")
if len(media) > 0:
outdir = outdir / (id)
if not outdir.exists():
outdir.mkdir(parents=True)
if len(media) > 0:
outfile = outdir / ( "index.md" )
# find photos
for imgfile in mediadir.glob(id + "*.*"):
to_file = outdir / imgfile.name
shutil.copy(str(imgfile), str(to_file))
else:
outfile = outdir / ( id + ".md" )
newfile = frontmatter.dumps(post)
with outfile.open("w", encoding="UTF-8") as w:
w.write(newfile)
return True
def process_syn_url(d1, raw_url, url):
orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, d1['id_str'])
url, no_errors = resolver.get_final_url(url)
if not no_errors:
print(d1["full_text"])
url = url.replace("www.instagram.com", "instagram.com")
url = url.replace("/roytang0400", "")
url = urldefrag(url)[0]
if url.find("instagram.com") >= 0 and url.find("?") >= 0:
# remove utm and other misc query params from insta urls
url = url.split("?")[0]
if url in urlmap:
u = urlmap[url]
source_path = Path(u['source_path'])
full_path = contentdir / source_path
add_syndication(full_path, orig_tweet_url, "twitter")
return True
if url.find("://roytang.net") >= 0 or url.find("://mtgstorm.com") >= 0:
link_url = urlparse(url)
u = urlmap.get(link_url.path, None)
if u is None:
# try matching by title
title_search_term = d1["full_text"]
title_search_term = title_search_term.replace("New blog post: ", "")
title_search_term = title_search_term.replace("New post: ", "")
title_search_term = title_search_term.replace(raw_url, "")
title_search_term = title_search_term.strip()
u = urlmap.get(title_search_term, None)
if u is not None:
source_path = Path(u['source_path'])
full_path = contentdir / source_path
add_syndication(full_path, orig_tweet_url, "twitter")
return True
else:
print("######## Unmatched roytang url: %s" % (url))
print(d1["full_text"])
return True
return False
def process_tweet(d1):
|
# also process raw urls
raw_urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', d1["full_text"])
for raw_url in raw_urls:
if process_syn_url(d1, raw_url, raw_url):
return True
break
return create_post(d1)
def import_all():
countbysource = {}
replies = 0
retweets = 0
withmedia = | orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, d1['id_str'])
if orig_tweet_url in urlmap:
og = urlmap.get(orig_tweet_url)
if og['source_path'].startswith('post\\') or og['source_path'].startswith('photos\\'):
# no need to process further any tweets that are already mapped to a post
return True
tweet_source = d1["source"]
# print("#### %s: %s" % (tweet_source, orig_tweet_url))
# detect content syndicated from elsewhere
# instagram, tumblr, roytang.net
for s in syndicated_sources:
if tweet_source.find(s) >= 0:
for u in d1.get('entities', {}).get("urls", []):
raw_url = u["url"]
url = u["expanded_url"]
if process_syn_url(d1, raw_url, url):
return True
# print("######## URL = %s" % (url)) | identifier_body |
twitter.py | s/statuses/%s/" % (TWITTER_USERNAME, d1['id_str'])
url, no_errors = resolver.get_final_url(url)
if not no_errors:
print(d1["full_text"])
url = url.replace("www.instagram.com", "instagram.com")
url = url.replace("/roytang0400", "")
url = urldefrag(url)[0]
if url.find("instagram.com") >= 0 and url.find("?") >= 0:
# remove utm and other misc query params from insta urls
url = url.split("?")[0]
if url in urlmap:
u = urlmap[url]
source_path = Path(u['source_path'])
full_path = contentdir / source_path
add_syndication(full_path, orig_tweet_url, "twitter")
return True
if url.find("://roytang.net") >= 0 or url.find("://mtgstorm.com") >= 0:
link_url = urlparse(url)
u = urlmap.get(link_url.path, None)
if u is None:
# try matching by title
title_search_term = d1["full_text"]
title_search_term = title_search_term.replace("New blog post: ", "")
title_search_term = title_search_term.replace("New post: ", "")
title_search_term = title_search_term.replace(raw_url, "")
title_search_term = title_search_term.strip()
u = urlmap.get(title_search_term, None)
if u is not None:
source_path = Path(u['source_path'])
full_path = contentdir / source_path
add_syndication(full_path, orig_tweet_url, "twitter")
return True
else:
print("######## Unmatched roytang url: %s" % (url))
print(d1["full_text"])
return True
return False
def process_tweet(d1):
orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, d1['id_str'])
if orig_tweet_url in urlmap:
og = urlmap.get(orig_tweet_url)
if og['source_path'].startswith('post\\') or og['source_path'].startswith('photos\\'):
# no need to process further any tweets that are already mapped to a post
return True
tweet_source = d1["source"]
# print("#### %s: %s" % (tweet_source, orig_tweet_url))
# detect content syndicated from elsewhere
# instagram, tumblr, roytang.net
for s in syndicated_sources:
if tweet_source.find(s) >= 0:
for u in d1.get('entities', {}).get("urls", []):
raw_url = u["url"]
url = u["expanded_url"]
if process_syn_url(d1, raw_url, url):
return True
# print("######## URL = %s" % (url))
# also process raw urls
raw_urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', d1["full_text"])
for raw_url in raw_urls:
if process_syn_url(d1, raw_url, raw_url):
return True
break
return create_post(d1)
def import_all():
countbysource = {}
replies = 0
retweets = 0
withmedia = 0
raw = 0
with Path(SOURCE_FILE).open(encoding='utf-8') as f:
d = json.load(f)
idx = 0
for d1 in d:
if debug_id is not None and d1["id_str"] != debug_id:
continue
if process_tweet(d1):
continue
tweet_source = d1["source"]
if tweet_source not in countbysource:
countbysource[tweet_source] = 1
else:
countbysource[tweet_source] = countbysource[tweet_source] + 1
is_reply = False
if "in_reply_to_status_id_str" in d1 and "in_reply_to_screen_name" in d1:
replies = replies + 1
is_reply = True
# handle retweet
is_retweet = False
content = d1["full_text"]
if content.startswith("RT @"):
retweets = retweets + 1
is_retweet = True
media = []
if "extended_entities" in d1:
for m in d1["extended_entities"]["media"]:
media.append(m["media_url_https"])
if len(media) > 0:
withmedia = withmedia + 1
if not is_reply and not is_retweet and len(media) == 0:
raw = raw + 1
idx = idx + 1
# if idx > 100:
# break
# save the url cache for future use
resolver.save_cache()
for source in countbysource:
print("countbysource: %s = %s" % (source, countbysource[source]))
print("replies: %s" % (replies))
print("retweets: %s" % (retweets))
print("withmedia: %s" % (withmedia))
print("raw: %s" % (raw))
print("total: %s" % (idx))
def thread_replies():
with Path(SOURCE_FILE).open(encoding='utf-8') as f:
d = json.load(f)
idx = 0
# process in reverse order so tweet sequences are in order
d = reversed(d)
for d1 in d:
is_reply = False
if "in_reply_to_status_id_str" in d1 and "in_reply_to_screen_name" in d1:
is_reply = True
if not is_reply:
continue
id_str = d1['id_str']
# if id_str != "602009895437737984" and id_str != "602009747294924802":
# continue
orig_tweet_url = "https://twitter.com/%s/statuses/%s/" % (TWITTER_USERNAME, id_str)
# don't bother if already syndicated
if orig_tweet_url in urlmap:
continue
date = datetime.strptime(d1['created_at'], "%a %b %d %H:%M:%S %z %Y")
# process replies to myself
if d1["in_reply_to_screen_name"] == TWITTER_USERNAME:
replied_to_url = "https://twitter.com/%s/statuses/%s/" % (d1['in_reply_to_screen_name'], d1['in_reply_to_status_id_str'])
info = urlmap[replied_to_url]
source_path = Path(info['source_path'])
full_path = contentdir / source_path
# welp, we might as well move them to bundles
if full_path.name == "index.md":
parentdir = full_path.parent
else:
parentdir = full_path.parent / full_path.stem
if not parentdir.exists():
parentdir.mkdir(parents=True)
oldfile = full_path
full_path = parentdir / "index.md"
shutil.move(str(oldfile), str(full_path))
# also update the urlmap!
urlmap[replied_to_url]['source_path'] = str(full_path.relative_to(contentdir))
# append the reply to the original post, and add it as a syndication as well
with full_path.open(encoding="UTF-8") as f:
try:
post = frontmatter.load(f)
except:
print("Error parsing file")
return
post['syndicated'].append({
'type': 'twitter',
'url': orig_tweet_url
})
content = get_content(d1)
post.content = post.content + "\n\r" + content
newfile = frontmatter.dumps(post)
with full_path.open("w", encoding="UTF-8") as w:
w.write(newfile)
# copy over any media from the reply as well
media = []
for m in d1.get("extended_entities", {}).get("media", []):
media.append(m["media_url_https"])
for imgfile in mediadir.glob(d1["id_str"] + "*.*"):
to_file = parentdir / imgfile.name
shutil.copy(str(imgfile), str(to_file))
# delete any existing file created for this reply
oldfile = contentdir / "replies" / date.strftime("%Y") / date.strftime("%m") / (id_str + ".md")
if oldfile.exists():
os.remove(str(oldfile))
oldfolder = contentdir / "replies" / date.strftime("%Y") / date.strftime("%m") / (id_str)
if oldfolder.exists():
shutil.rmtree(str(oldfolder))
# replace this entry in the urlmap! this is so that succeeding replies can find the correct root tweet to attach to
urlmap[orig_tweet_url] = info
else:
continue
idx = idx + 1
print(idx)
from utils import urlmap_to_mdfile
def | cleanup_videos | identifier_name |
|
lstm-english-french-word.py | import re
import collections
from sklearn.model_selection import train_test_split
import numpy as np
import tensorflow as tf
import time
from nltk.translate.bleu_score import sentence_bleu
from nltk.tokenize import RegexpTokenizer, word_tokenize
from keras.utils.np_utils import to_categorical
from keras import models
# imports for the layer, model, and callback classes used below
from keras.models import Model
from keras.layers import Input, Embedding, LSTM, Dense
from keras.callbacks import ModelCheckpoint
# Constants
BATCH_SIZE = 16
SAMPLE = 20000
VERSION = '001'
# Read data
root_dir = os.path.join(os.getcwd(), 'machine-learning/translation')
data_path = os.path.join(root_dir, 'fra.txt')
with open(data_path, 'r', encoding='utf-8') as f:
lines = f.read().split('\n')
input_texts = []
output_texts = []
try:
for line in lines:
input_text, output_text = line.split('\t')
input_texts.append(input_text)
output_texts.append('{'+output_text+'}') # { denotes the start of sentence, } denotes the end of sentence
except ValueError:
print(line)
input_texts = input_texts[:SAMPLE]
output_texts = output_texts[:SAMPLE]
# Preprocess
def remove_blank(word_list):
return [word for word in word_list if word.strip()]
def is_curly_bracket(string):
return string == '{' or string == '}'
def remove_blank_curly_bracket(word_list):
return [word for word in word_list if word.strip() and not is_curly_bracket(word)]
def split_input_texts(text):
return [word.lower() for word in word_tokenize(text)]
def split_output_texts(text):
return [word.lower() for word in word_tokenize(text, language='french')]
# Tokenize input and output
input_text_list = [split_input_texts(input_text) for input_text in input_texts]
output_text_list = [split_output_texts(output_text) for output_text in output_texts]
# Count unique number of English words
input_text_flat_list = []
for input_text in input_text_list:
input_text_flat_list.extend(input_text)
input_vocabulary_size = len(set(input_text_flat_list)) + 1 # 3442
print('input vocabulary size', input_vocabulary_size)
# Make word-index lookup for english words
input_counter_list = collections.Counter(input_text_flat_list).most_common()
input_word_index_dict = {value[0]: index+1 for index, value in enumerate(input_counter_list)}
input_index_word_dict = {index+1: value[0] for index, value in enumerate(input_counter_list)}
# Count unique number of French words
output_text_flat_list = []
for output_text in output_text_list:
output_text_flat_list.extend(output_text)
output_vocabulary_size = len(set(output_text_flat_list)) + 1 # 7251
print('output vocabulary size', output_vocabulary_size)
# Make word-index lookup for French words
output_counter_list = collections.Counter(output_text_flat_list).most_common(output_vocabulary_size)
output_word_index_dict = {value[0]: index+1 for index, value in enumerate(output_counter_list)}
output_index_word_dict = {index+1: value[0] for index, value in enumerate(output_counter_list)}
output_index_word_dict[0] = ''
# Max size for input and output text
input_max_size = max(len(split_input_texts(text)) for text in input_texts) # 7
output_max_size = max(len(split_output_texts(text)) for text in output_texts) # 15
print('Input max size', input_max_size)
print('Output max size', output_max_size)
# Convert input/output texts to machine learning input X/y
def input_text_to_num(input_text):
input_text_list = split_input_texts(input_text)
num_list = [input_word_index_dict[word] for word in input_text_list]
return num_list
def output_text_to_num(output_text):
output_text_list = split_output_texts(output_text)
num_list = [output_word_index_dict[word] for word in output_text_list]
return num_list
def output_num_to_text(num_list):
text = [output_index_word_dict[num] for num in num_list]
return ' '.join(text)
def pad_num_list(num_list, size):
return num_list + [0] * (size - len(num_list))
def input_text_to_x(input_text):
num_list = input_text_to_num(input_text)
return pad_num_list(num_list, input_max_size)
def output_text_to_y(output_text):
num_list = output_text_to_num(output_text)
return pad_num_list(num_list, output_max_size)
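# Added sketch (made-up indices, for illustration only): a short sentence is mapped to
# word indices and then right-padded with zeros up to the fixed length, e.g. with
# input_max_size = 7, "how are you" -> [12, 9, 7] -> [12, 9, 7, 0, 0, 0, 0].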
X = np.array([input_text_to_x(text) for text in input_texts])
y = np.array([output_text_to_y(text) for text in output_texts])
print("X shape: {}, y shape: {}".format(X.shape, y.shape))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
def batch_generator(x, y, batch_size=BATCH_SIZE):
while True:
shuffle = np.random.permutation(len(x))
start = 0
x = x[shuffle]
y = y[shuffle]
while start + batch_size <= len(x):
x_batch = x[start:start+batch_size]
y_batch = y[start:start+batch_size]
yield [x_batch, y_batch[:,:-1]], to_categorical(y_batch[:, 1:], output_vocabulary_size)
start += batch_size
#[x_sample, y_sample], y_sample_2 = next(batch_generator(X_train, y_train))
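# Added sketch (illustrative indices only, not part of the original pipeline): the
# generator above trains with teacher forcing. The decoder is fed y_batch[:, :-1]
# (the sequence starting at the '{' token) while the loss is computed against the
# one-step-shifted targets y_batch[:, 1:], one-hot encoded by to_categorical.
def show_teacher_forcing_shift(sample_y=None):
    # made-up padded index sequence: [start, w1, w2, end, pad]
    if sample_y is None:
        sample_y = np.array([[1, 5, 9, 2, 0]])
    print('decoder input :', sample_y[:, :-1])   # [[1 5 9 2]]
    print('decoder target:', sample_y[:, 1:])    # [[5 9 2 0]]
# show_teacher_forcing_shift()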
# Define Model
embed_size = 300
# Encoder
encoder_inputs = Input(shape=(None,))
encoder_embed_layer = Embedding(input_vocabulary_size, embed_size)
encoder_embed_outputs = encoder_embed_layer(encoder_inputs)
encoder_lstm_layer = LSTM(embed_size, return_state=True)
_, encoder_state_h, encoder_state_c = encoder_lstm_layer(encoder_embed_outputs)
encoder_states = [encoder_state_h, encoder_state_c]
# Decoder
decoder_inputs = Input(shape=(None,))
decoder_embed_layer = Embedding(output_vocabulary_size, embed_size)
decoder_embed_outputs = decoder_embed_layer(decoder_inputs)
decoder_lstm_layer = LSTM(embed_size, return_sequences=True, return_state=True)
decoder_lstm_outputs, decoder_state_h, decoder_state_c = decoder_lstm_layer(decoder_embed_outputs, initial_state=encoder_states)
decoder_dense_layer = Dense(output_vocabulary_size, activation='softmax')
decoder_outputs = decoder_dense_layer(decoder_lstm_outputs)
# Define the training model that turns [encoder_inputs, decoder_inputs] into decoder_outputs
model = Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_outputs)
model.summary()
# Run training
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
train_steps = len(X_train) // BATCH_SIZE
validation_steps = len(X_test) // BATCH_SIZE
n_epochs = 100
weight_path="{}_weights.best.hdf5".format('english_french')
checkpoint = ModelCheckpoint(weight_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max', save_weights_only=True)
model.fit_generator(batch_generator(X_train, y_train), steps_per_epoch=train_steps, epochs=n_epochs, validation_data=batch_generator(X_test, y_test), validation_steps=validation_steps, verbose=1, callbacks=[checkpoint])
model.load_weights(weight_path)
model.save('english_french_{}.h5'.format(VERSION))
# Next: inference mode (sampling).
# Here's the drill:
# 1) encode input and retrieve initial decoder state
# 2) run one step of decoder with this initial state and a "start of sequence" token as target. Output will be the next target token
# 3) Repeat with the current target token and current states
model = models.load_model('english_french_{}.h5'.format(VERSION), compile=False)
decoder_embed_layer = model.layers[3]
decoder_lstm_layer = model.layers[5]
decoder_dense_layer = model.layers[6]
# Define sampling models
encoder_model = Model(inputs=encoder_inputs, outputs=encoder_states)
encoder_model.save('english_french_encoder_{}.h5'.format(VERSION))
decoder_state_input_h = Input(shape=(embed_size,))
decoder_state_input_c = Input(shape=(embed_size,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_embed_outputs = decoder_embed_layer(decoder_inputs)
decoder_lstm_outputs, decoder_state_h, decoder_state_c = decoder_lstm_layer(decoder_embed_outputs, initial_state=decoder_states_inputs)
decoder_states = [decoder_state_h, decoder_state_c]
decoder_outputs = decoder_dense_layer(decoder_lstm_outputs)
decoder_model = Model(inputs=[decoder_inputs] + decoder_states_inputs, outputs=[decoder_outputs] + decoder_states)
encoder_model = models.load_model('english_french_encoder_{}.h5'.format(VERSION), compile=False)
def decode_sequence(input_text):
input_text = [input_text]
input_seq = np.array([input_text_to_x(text) for text in input_text])
# Encode the input as state vectors.
encoder_states_val = encoder_model.predict(input_seq) # list [state_h, state_c], each of shape (1, 300) since embed_size = 300
# Generate empty target sequence of length 1.
target_sentences = ["{"]
target_seq = np.array([output_text_to_num(text) for text in target_sentences])
# Sampling loop for a batch of sequences
# (to simplify, here we assume a batch of size 1).
stop_condition = False
decoded_sentence = ''
decoded_sentence_size = 0
while not stop_condition:
decoder_outputs_val, h, c = decoder_model.predict([target_seq] + encoder_states_val)
# Sample a token
sampled_token_index = | import os | random_line_split |
|
lstm-english-french-word.py | , output_text = line.split('\t')
input_texts.append(input_text)
output_texts.append('{'+output_text+'}') # { denotes the start of sentence, } denotes the end of sentence
except ValueError:
print(line)
input_texts = input_texts[:SAMPLE]
output_texts = output_texts[:SAMPLE]
# Preprocess
def remove_blank(word_list):
return [word for word in word_list if word.strip()]
def is_curly_bracket(string):
return string == '{' or string == '}'
def remove_blank_curly_bracket(word_list):
return [word for word in word_list if word.strip() and not is_curly_bracket(word)]
def split_input_texts(text):
return [word.lower() for word in word_tokenize(text)]
def split_output_texts(text):
return [word.lower() for word in word_tokenize(text, language='french')]
# Tokenize input and output
input_text_list = [split_input_texts(input_text) for input_text in input_texts]
output_text_list = [split_output_texts(output_text) for output_text in output_texts]
# Count unique number of English words
input_text_flat_list = []
for input_text in input_text_list:
input_text_flat_list.extend(input_text)
input_vocabulary_size = len(set(input_text_flat_list)) + 1 # 3442
print('input vocabulary size', input_vocabulary_size)
# Make word-index lookup for english words
input_counter_list = collections.Counter(input_text_flat_list).most_common()
input_word_index_dict = {value[0]: index+1 for index, value in enumerate(input_counter_list)}
input_index_word_dict = {index+1: value[0] for index, value in enumerate(input_counter_list)}
# Count unique number of French words
output_text_flat_list = []
for output_text in output_text_list:
output_text_flat_list.extend(output_text)
output_vocabulary_size = len(set(output_text_flat_list)) + 1 # 7251
print('output vocabulary size', output_vocabulary_size)
# Make word-index lookup for French words
output_counter_list = collections.Counter(output_text_flat_list).most_common(output_vocabulary_size)
output_word_index_dict = {value[0]: index+1 for index, value in enumerate(output_counter_list)}
output_index_word_dict = {index+1: value[0] for index, value in enumerate(output_counter_list)}
output_index_word_dict[0] = ''
# Max size for input and output text
input_max_size = max(len(split_input_texts(text)) for text in input_texts) # 7
output_max_size = max(len(split_output_texts(text)) for text in output_texts) # 15
print('Input max size', input_max_size)
print('Output max size', output_max_size)
# Convert input/output texts to machine learning input X/y
def input_text_to_num(input_text):
input_text_list = split_input_texts(input_text)
num_list = [input_word_index_dict[word] for word in input_text_list]
return num_list
def output_text_to_num(output_text):
output_text_list = split_output_texts(output_text)
num_list = [output_word_index_dict[word] for word in output_text_list]
return num_list
def | (num_list):
text = [output_index_word_dict[num] for num in num_list]
return ' '.join(text)
def pad_num_list(num_list, size):
return num_list + [0] * (size - len(num_list))
def input_text_to_x(input_text):
num_list = input_text_to_num(input_text)
return pad_num_list(num_list, input_max_size)
def output_text_to_y(output_text):
num_list = output_text_to_num(output_text)
return pad_num_list(num_list, output_max_size)
X = np.array([input_text_to_x(text) for text in input_texts])
y = np.array([output_text_to_y(text) for text in output_texts])
print("X shape: {}, y shape: {}".format(X.shape, y.shape))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
def batch_generator(x, y, batch_size=BATCH_SIZE):
while True:
shuffle = np.random.permutation(len(x))
start = 0
x = x[shuffle]
y = y[shuffle]
while start + batch_size <= len(x):
x_batch = x[start:start+batch_size]
y_batch = y[start:start+batch_size]
yield [x_batch, y_batch[:,:-1]], to_categorical(y_batch[:, 1:], output_vocabulary_size)
start += batch_size
#[x_sample, y_sample], y_sample_2 = next(batch_generator(X_train, y_train))
# Define Model
embed_size = 300
# Encoder
encoder_inputs = Input(shape=(None,))
encoder_embed_layer = Embedding(input_vocabulary_size, embed_size)
encoder_embed_outputs = encoder_embed_layer(encoder_inputs)
encoder_lstm_layer = LSTM(embed_size, return_state=True)
_, encoder_state_h, encoder_state_c = encoder_lstm_layer(encoder_embed_outputs)
encoder_states = [encoder_state_h, encoder_state_c]
# Decoder
decoder_inputs = Input(shape=(None,))
decoder_embed_layer = Embedding(output_vocabulary_size, embed_size)
decoder_embed_outputs = decoder_embed_layer(decoder_inputs)
decoder_lstm_layer = LSTM(embed_size, return_sequences=True, return_state=True)
decoder_lstm_outputs, decoder_state_h, decoder_state_c = decoder_lstm_layer(decoder_embed_outputs, initial_state=encoder_states)
decoder_dense_layer = Dense(output_vocabulary_size, activation='softmax')
decoder_outputs = decoder_dense_layer(decoder_lstm_outputs)
# Define the training model that turns [encoder_inputs, decoder_inputs] into decoder_outputs
model = Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_outputs)
model.summary()
# Run training
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
train_steps = len(X_train) // BATCH_SIZE
validation_steps = len(X_test) // BATCH_SIZE
n_epochs = 100
weight_path="{}_weights.best.hdf5".format('english_french')
checkpoint = ModelCheckpoint(weight_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max', save_weights_only=True)
model.fit_generator(batch_generator(X_train, y_train), steps_per_epoch=train_steps, epochs=n_epochs, validation_data=batch_generator(X_test, y_test), validation_steps=validation_steps, verbose=1, callbacks=[checkpoint])
model.load_weights(weight_path)
model.save('english_french_{}.h5'.format(VERSION))
# Next: inference mode (sampling).
# Here's the drill:
# 1) encode input and retrieve initial decoder state
# 2) run one step of decoder with this initial state and a "start of sequence" token as target. Output will be the next target token
# 3) Repeat with the current target token and current states
model = models.load_model('english_french_{}.h5'.format(VERSION), compile=False)
decoder_embed_layer = model.layers[3]
decoder_lstm_layer = model.layers[5]
decoder_dense_layer = model.layers[6]
# Define sampling models
encoder_model = Model(inputs=encoder_inputs, outputs=encoder_states)
encoder_model.save('english_french_encoder_{}.h5'.format(VERSION))
decoder_state_input_h = Input(shape=(embed_size,))
decoder_state_input_c = Input(shape=(embed_size,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_embed_outputs = decoder_embed_layer(decoder_inputs)
decoder_lstm_outputs, decoder_state_h, decoder_state_c = decoder_lstm_layer(decoder_embed_outputs, initial_state=decoder_states_inputs)
decoder_states = [decoder_state_h, decoder_state_c]
decoder_outputs = decoder_dense_layer(decoder_lstm_outputs)
decoder_model = Model(inputs=[decoder_inputs] + decoder_states_inputs, outputs=[decoder_outputs] + decoder_states)
encoder_model = models.load_model('english_french_encoder_{}.h5'.format(VERSION), compile=False)
def decode_sequence(input_text):
input_text = [input_text]
input_seq = np.array([input_text_to_x(text) for text in input_text])
# Encode the input as state vectors.
encoder_states_val = encoder_model.predict(input_seq) # list [state_h, state_c], each of shape (1, 300) since embed_size = 300
# Generate empty target sequence of length 1.
target_sentences = ["{"]
target_seq = np.array([output_text_to_num(text) for text in target_sentences])
# Sampling loop for a batch of sequences
# (to simplify, here we assume a batch of size 1).
stop_condition = False
decoded_sentence = ''
decoded_sentence_size = 0
while not stop_condition:
decoder_outputs_val, h, c = decoder_model.predict([target_seq] + encoder_states_val)
# Sample a token
sampled_token_index = np.argmax(decoder_outputs_val, axis=-1)[0,0]
sampled_word = output_index_word_dict[sampled_token_index]
decoded_sentence += sampled_word + ' '
decoded_sentence_size += 1
# Exit condition: either hit max length or find stop character.
if sampled_word == '}' or decoded_sentence_size > output_max_size:
stop_condition = True
# Update the target sequence (of length 1).
target_sentences = [sampled_word]
target_seq = np.array([output_text_to_num(text) for text in target_sentences])
# Update states
encoder_states_val = [h, c]
return decoded_sentence
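# Added sketch (an assumption, not in the original script): the greedy decoding above can
# be spot-checked against the reference translation with nltk's sentence_bleu, which is
# already imported at the top of this file.
def quick_bleu_check(sample_index=0):
    reference = remove_blank_curly_bracket(output_text_list[sample_index])
    hypothesis = remove_blank_curly_bracket(decode_sequence(input_texts[sample_index]).split())
    return sentence_bleu([reference], hypothesis)
# print(quick_bleu_check())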
def decode_sequence2(input_text):
input_text = [input_text]
input_seq = np.array([input_text_to_x(text) for text in | output_num_to_text | identifier_name |
lstm-english-french-word.py | , output_text = line.split('\t')
input_texts.append(input_text)
output_texts.append('{'+output_text+'}') # { denotes the start of sentence, } denotes the end of sentence
except ValueError:
print(line)
input_texts = input_texts[:SAMPLE]
output_texts = output_texts[:SAMPLE]
# Preprocess
def remove_blank(word_list):
return [word for word in word_list if word.strip()]
def is_curly_bracket(string):
return string == '{' or string == '}'
def remove_blank_curly_bracket(word_list):
return [word for word in word_list if word.strip() and not is_curly_bracket(word)]
def split_input_texts(text):
return [word.lower() for word in word_tokenize(text)]
def split_output_texts(text):
|
# Tokenize input and output
input_text_list = [split_input_texts(input_text) for input_text in input_texts]
output_text_list = [split_output_texts(output_text) for output_text in output_texts]
# Count unique number of English words
input_text_flat_list = []
for input_text in input_text_list:
input_text_flat_list.extend(input_text)
input_vocabulary_size = len(set(input_text_flat_list)) + 1 # 3442
print('input vocabulary size', input_vocabulary_size)
# Make word-index lookup for english words
input_counter_list = collections.Counter(input_text_flat_list).most_common()
input_word_index_dict = {value[0]: index+1 for index, value in enumerate(input_counter_list)}
input_index_word_dict = {index+1: value[0] for index, value in enumerate(input_counter_list)}
# Count unique number of French words
output_text_flat_list = []
for output_text in output_text_list:
output_text_flat_list.extend(output_text)
output_vocabulary_size = len(set(output_text_flat_list)) + 1 # 7251
print('output vocabulary size', output_vocabulary_size)
# Make word-index lookup for French words
output_counter_list = collections.Counter(output_text_flat_list).most_common(output_vocabulary_size)
output_word_index_dict = {value[0]: index+1 for index, value in enumerate(output_counter_list)}
output_index_word_dict = {index+1: value[0] for index, value in enumerate(output_counter_list)}
output_index_word_dict[0] = ''
# Max size for input and output text
input_max_size = max(len(split_input_texts(text)) for text in input_texts) # 7
output_max_size = max(len(split_output_texts(text)) for text in output_texts) # 15
print('Input max size', input_max_size)
print('Output max size', output_max_size)
# Convert input/output texts to machine learning input X/y
def input_text_to_num(input_text):
input_text_list = split_input_texts(input_text)
num_list = [input_word_index_dict[word] for word in input_text_list]
return num_list
def output_text_to_num(output_text):
output_text_list = split_output_texts(output_text)
num_list = [output_word_index_dict[word] for word in output_text_list]
return num_list
def output_num_to_text(num_list):
text = [output_index_word_dict[num] for num in num_list]
return ' '.join(text)
def pad_num_list(num_list, size):
return num_list + [0] * (size - len(num_list))
def input_text_to_x(input_text):
num_list = input_text_to_num(input_text)
return pad_num_list(num_list, input_max_size)
def output_text_to_y(output_text):
num_list = output_text_to_num(output_text)
return pad_num_list(num_list, output_max_size)
X = np.array([input_text_to_x(text) for text in input_texts])
y = np.array([output_text_to_y(text) for text in output_texts])
print("X shape: {}, y shape: {}".format(X.shape, y.shape))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
def batch_generator(x, y, batch_size=BATCH_SIZE):
while True:
shuffle = np.random.permutation(len(x))
start = 0
x = x[shuffle]
y = y[shuffle]
while start + batch_size <= len(x):
x_batch = x[start:start+batch_size]
y_batch = y[start:start+batch_size]
yield [x_batch, y_batch[:,:-1]], to_categorical(y_batch[:, 1:], output_vocabulary_size)
start += batch_size
#[x_sample, y_sample], y_sample_2 = next(batch_generator(X_train, y_train))
# Define Model
embed_size = 300
# Encoder
encoder_inputs = Input(shape=(None,))
encoder_embed_layer = Embedding(input_vocabulary_size, embed_size)
encoder_embed_outputs = encoder_embed_layer(encoder_inputs)
encoder_lstm_layer = LSTM(embed_size, return_state=True)
_, encoder_state_h, encoder_state_c = encoder_lstm_layer(encoder_embed_outputs)
encoder_states = [encoder_state_h, encoder_state_c]
# Decoder
decoder_inputs = Input(shape=(None,))
decoder_embed_layer = Embedding(output_vocabulary_size, embed_size)
decoder_embed_outputs = decoder_embed_layer(decoder_inputs)
decoder_lstm_layer = LSTM(embed_size, return_sequences=True, return_state=True)
decoder_lstm_outputs, decoder_state_h, decoder_state_c = decoder_lstm_layer(decoder_embed_outputs, initial_state=encoder_states)
decoder_dense_layer = Dense(output_vocabulary_size, activation='softmax')
decoder_outputs = decoder_dense_layer(decoder_lstm_outputs)
# Define the training model that turns [encoder_inputs, decoder_inputs] into decoder_outputs
model = Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_outputs)
model.summary()
# Run training
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
train_steps = len(X_train) // BATCH_SIZE
validation_steps = len(X_test) // BATCH_SIZE
n_epochs = 100
weight_path="{}_weights.best.hdf5".format('english_french')
checkpoint = ModelCheckpoint(weight_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max', save_weights_only=True)
model.fit_generator(batch_generator(X_train, y_train), steps_per_epoch=train_steps, epochs=n_epochs, validation_data=batch_generator(X_test, y_test), validation_steps=validation_steps, verbose=1, callbacks=[checkpoint])
model.load_weights(weight_path)
model.save('english_french_{}.h5'.format(VERSION))
# Next: inference mode (sampling).
# Here's the drill:
# 1) encode input and retrieve initial decoder state
# 2) run one step of decoder with this initial state and a "start of sequence" token as target. Output will be the next target token
# 3) Repeat with the current target token and current states
model = models.load_model('english_french_{}.h5'.format(VERSION), compile=False)
decoder_embed_layer = model.layers[3]
decoder_lstm_layer = model.layers[5]
decoder_dense_layer = model.layers[6]
# Define sampling models
encoder_model = Model(inputs=encoder_inputs, outputs=encoder_states)
encoder_model.save('english_french_encoder_{}.h5'.format(VERSION))
decoder_state_input_h = Input(shape=(embed_size,))
decoder_state_input_c = Input(shape=(embed_size,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_embed_outputs = decoder_embed_layer(decoder_inputs)
decoder_lstm_outputs, decoder_state_h, decoder_state_c = decoder_lstm_layer(decoder_embed_outputs, initial_state=decoder_states_inputs)
decoder_states = [decoder_state_h, decoder_state_c]
decoder_outputs = decoder_dense_layer(decoder_lstm_outputs)
decoder_model = Model(inputs=[decoder_inputs] + decoder_states_inputs, outputs=[decoder_outputs] + decoder_states)
encoder_model = models.load_model('english_french_encoder_{}.h5'.format(VERSION), compile=False)
def decode_sequence(input_text):
input_text = [input_text]
input_seq = np.array([input_text_to_x(text) for text in input_text])
# Encode the input as state vectors.
encoder_states_val = encoder_model.predict(input_seq) # list [state_h, state_c], each of shape (1, 300) since embed_size = 300
# Generate empty target sequence of length 1.
target_sentences = ["{"]
target_seq = np.array([output_text_to_num(text) for text in target_sentences])
# Sampling loop for a batch of sequences
# (to simplify, here we assume a batch of size 1).
stop_condition = False
decoded_sentence = ''
decoded_sentence_size = 0
while not stop_condition:
decoder_outputs_val, h, c = decoder_model.predict([target_seq] + encoder_states_val)
# Sample a token
sampled_token_index = np.argmax(decoder_outputs_val, axis=-1)[0,0]
sampled_word = output_index_word_dict[sampled_token_index]
decoded_sentence += sampled_word + ' '
decoded_sentence_size += 1
# Exit condition: either hit max length or find stop character.
if sampled_word == '}' or decoded_sentence_size > output_max_size:
stop_condition = True
# Update the target sequence (of length 1).
target_sentences = [sampled_word]
target_seq = np.array([output_text_to_num(text) for text in target_sentences])
# Update states
encoder_states_val = [h, c]
return decoded_sentence
def decode_sequence2(input_text):
input_text = [input_text]
input_seq = np.array([input_text_to_x(text) for text in input | return [word.lower() for word in word_tokenize(text, language='french')] | identifier_body |
lstm-english-french-word.py |
except ValueError:
print(line)
input_texts = input_texts[:SAMPLE]
output_texts = output_texts[:SAMPLE]
# Preprocess
def remove_blank(word_list):
return [word for word in word_list if word.strip()]
def is_curly_bracket(string):
return string == '{' or string == '}'
def remove_blank_curly_bracket(word_list):
return [word for word in word_list if word.strip() and not is_curly_bracket(word)]
def split_input_texts(text):
return [word.lower() for word in word_tokenize(text)]
def split_output_texts(text):
return [word.lower() for word in word_tokenize(text, language='french')]
# Tokenize input and output
input_text_list = [split_input_texts(input_text) for input_text in input_texts]
output_text_list = [split_output_texts(output_text) for output_text in output_texts]
# Count unique number of English words
input_text_flat_list = []
for input_text in input_text_list:
input_text_flat_list.extend(input_text)
input_vocabulary_size = len(set(input_text_flat_list)) + 1 # 3442
print('input vocabulary size', input_vocabulary_size)
# Make word-index lookup for english words
input_counter_list = collections.Counter(input_text_flat_list).most_common()
input_word_index_dict = {value[0]: index+1 for index, value in enumerate(input_counter_list)}
input_index_word_dict = {index+1: value[0] for index, value in enumerate(input_counter_list)}
# Count unique number of French words
output_text_flat_list = []
for output_text in output_text_list:
output_text_flat_list.extend(output_text)
output_vocabulary_size = len(set(output_text_flat_list)) + 1 # 7251
print('output vocabulary size', output_vocabulary_size)
# Make word-index lookup for French words
output_counter_list = collections.Counter(output_text_flat_list).most_common(output_vocabulary_size)
output_word_index_dict = {value[0]: index+1 for index, value in enumerate(output_counter_list)}
output_index_word_dict = {index+1: value[0] for index, value in enumerate(output_counter_list)}
output_index_word_dict[0] = ''
# Max size for input and output text
input_max_size = max(len(split_input_texts(text)) for text in input_texts) # 7
output_max_size = max(len(split_output_texts(text)) for text in output_texts) # 15
print('Input max size', input_max_size)
print('Output max size', output_max_size)
# Convert input/output texts to machine learning input X/y
def input_text_to_num(input_text):
input_text_list = split_input_texts(input_text)
num_list = [input_word_index_dict[word] for word in input_text_list]
return num_list
def output_text_to_num(output_text):
output_text_list = split_output_texts(output_text)
num_list = [output_word_index_dict[word] for word in output_text_list]
return num_list
def output_num_to_text(num_list):
text = [output_index_word_dict[num] for num in num_list]
return ' '.join(text)
def pad_num_list(num_list, size):
return num_list + [0] * (size - len(num_list))
def input_text_to_x(input_text):
num_list = input_text_to_num(input_text)
return pad_num_list(num_list, input_max_size)
def output_text_to_y(output_text):
num_list = output_text_to_num(output_text)
return pad_num_list(num_list, output_max_size)
X = np.array([input_text_to_x(text) for text in input_texts])
y = np.array([output_text_to_y(text) for text in output_texts])
print("X shape: {}, y shape: {}".format(X.shape, y.shape))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
def batch_generator(x, y, batch_size=BATCH_SIZE):
while True:
shuffle = np.random.permutation(len(x))
start = 0
x = x[shuffle]
y = y[shuffle]
while start + batch_size <= len(x):
x_batch = x[start:start+batch_size]
y_batch = y[start:start+batch_size]
yield [x_batch, y_batch[:,:-1]], to_categorical(y_batch[:, 1:], output_vocabulary_size)
start += batch_size
#[x_sample, y_sample], y_sample_2 = next(batch_generator(X_train, y_train))
# Define Model
embed_size = 300
# Encoder
encoder_inputs = Input(shape=(None,))
encoder_embed_layer = Embedding(input_vocabulary_size, embed_size)
encoder_embed_outputs = encoder_embed_layer(encoder_inputs)
encoder_lstm_layer = LSTM(embed_size, return_state=True)
_, encoder_state_h, encoder_state_c = encoder_lstm_layer(encoder_embed_outputs)
encoder_states = [encoder_state_h, encoder_state_c]
# Decoder
decoder_inputs = Input(shape=(None,))
decoder_embed_layer = Embedding(output_vocabulary_size, embed_size)
decoder_embed_outputs = decoder_embed_layer(decoder_inputs)
decoder_lstm_layer = LSTM(embed_size, return_sequences=True, return_state=True)
decoder_lstm_outputs, decoder_state_h, decoder_state_c = decoder_lstm_layer(decoder_embed_outputs, initial_state=encoder_states)
decoder_dense_layer = Dense(output_vocabulary_size, activation='softmax')
decoder_outputs = decoder_dense_layer(decoder_lstm_outputs)
# Define the training model that turns [encoder_inputs, decoder_inputs] into decoder_outputs
model = Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_outputs)
model.summary()
# Run training
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
train_steps = len(X_train) // BATCH_SIZE
validation_steps = len(X_test) // BATCH_SIZE
n_epochs = 100
weight_path="{}_weights.best.hdf5".format('english_french')
checkpoint = ModelCheckpoint(weight_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max', save_weights_only=True)
model.fit_generator(batch_generator(X_train, y_train), steps_per_epoch=train_steps, epochs=n_epochs, validation_data=batch_generator(X_test, y_test), validation_steps=validation_steps, verbose=1, callbacks=[checkpoint])
model.load_weights(weight_path)
model.save('english_french_{}.h5'.format(VERSION))
# Next: inference mode (sampling).
# Here's the drill:
# 1) encode input and retrieve initial decoder state
# 2) run one step of decoder with this initial state and a "start of sequence" token as target. Output will be the next target token
# 3) Repeat with the current target token and current states
model = models.load_model('english_french_{}.h5'.format(VERSION), compile=False)
decoder_embed_layer = model.layers[3]
decoder_lstm_layer = model.layers[5]
decoder_dense_layer = model.layers[6]
# Define sampling models
encoder_model = Model(inputs=encoder_inputs, outputs=encoder_states)
encoder_model.save('english_french_encoder_{}.h5'.format(VERSION))
decoder_state_input_h = Input(shape=(embed_size,))
decoder_state_input_c = Input(shape=(embed_size,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_embed_outputs = decoder_embed_layer(decoder_inputs)
decoder_lstm_outputs, decoder_state_h, decoder_state_c = decoder_lstm_layer(decoder_embed_outputs, initial_state=decoder_states_inputs)
decoder_states = [decoder_state_h, decoder_state_c]
decoder_outputs = decoder_dense_layer(decoder_lstm_outputs)
decoder_model = Model(inputs=[decoder_inputs] + decoder_states_inputs, outputs=[decoder_outputs] + decoder_states)
encoder_model = models.load_model('english_french_encoder_{}.h5'.format(VERSION), compile=False)
def decode_sequence(input_text):
input_text = [input_text]
input_seq = np.array([input_text_to_x(text) for text in input_text])
# Encode the input as state vectors.
encoder_states_val = encoder_model.predict(input_seq) # list [state_h, state_c], each of shape (1, 300) since embed_size = 300
# Generate empty target sequence of length 1.
target_sentences = ["{"]
target_seq = np.array([output_text_to_num(text) for text in target_sentences])
# Sampling loop for a batch of sequences
# (to simplify, here we assume a batch of size 1).
stop_condition = False
decoded_sentence = ''
decoded_sentence_size = 0
while not stop_condition:
decoder_outputs_val, h, c = decoder_model.predict([target_seq] + encoder_states_val)
# Sample a token
sampled_token_index = np.argmax(decoder_outputs_val, axis=-1)[0,0]
sampled_word = output_index_word_dict[sampled_token_index]
decoded_sentence += sampled_word + ' '
decoded_sentence_size += 1
# Exit condition: either hit max length or find stop character.
if sampled_word == '}' or decoded_sentence_size > output_max_size:
stop_condition = True
# Update the target sequence (of length 1).
target_sentences = [sampled_word]
target_seq = np.array([output_text_to_num(text) for text in target_sentences])
# Update states
encoder_states_val = [h, c]
return decoded_sentence
def decode_sequence2(input_text):
input_text = [input_text]
input_seq = np.array([input_text_to_x(text) for text | input_text, output_text = line.split('\t')
input_texts.append(input_text)
output_texts.append('{'+output_text+'}') # { denotes the start of sentence, } denotes the end of sentence | conditional_block |
|
pypro.py | .subplot(212)
plt.pie(values, labels=parameters, colors=cols , startangle=90, shadow=True, explode=(0, 0, 0.1, 0), autopct='%1.1f%%') #(largest slice is popped out)
plt.title('Income Adequacy of females as percentages in the sample population')
plt.show() #graphs in same window depict the differences in income adequacy of people belonging to different genders
income18 = pd.read_csv('income18.csv')
parameters = income18['parameters']
cols = ['pink', 'yellow', 'green', 'purple']
values = income18['values']
plt.subplot(211)
plt.pie(values, labels=parameters, colors=cols , startangle=90, shadow=True, explode=(0, 0, 0.1, 0), autopct='%1.1f%%') #(largest slice is popped out)
plt.title('Income Adequacy of people in age group of 18-24 years as percentages in sample population')
income35 = pd.read_csv('income35.csv')
parameters = income35['parameters']
values = income35['values']
plt.subplot(212)
plt.pie(values, labels=parameters, colors=cols , startangle=90, shadow=True, explode=(0, 0, 0.1, 0), autopct='%1.1f%%') #(largest slice is popped out)
plt.title('Income Adequacy of people in age group of 35-44 years as percentages in sample population')
plt.show() #graphs in same window depict differences in income adequacy of people in different age groups
incomeasians = pd.read_csv('incomeasians.csv')
parameters = incomeasians['parameters']
values = incomeasians['values']
cols = ['brown', 'pink', 'yellow', 'purple']
plt.subplot(211)
plt.pie(values, labels=parameters, colors=cols , startangle=90, shadow=True, explode=(0, 0, 0.1, 0), autopct='%1.1f%%') #(largest slice is popped out)
plt.title('Income Adequacy of Asian people')
incomeeuropeons = pd.read_csv('incomeeuropeons.csv')
parameters = incomeeuropeons['parameters']
values = incomeeuropeons['values']
plt.subplot(212)
plt.pie(values, labels=parameters, colors=cols , startangle=90, shadow=True, explode=(0, 0, 0.1, 0), autopct='%1.1f%%') #(largest slice is popped out)
plt.title('Income Adequacy of European people')
plt.show() #graphs in same window depict differences in income adequacy of people of different ethnicities
#let us compare some other aspects of living between different genders.
style.use('ggplot')
parameters = ['Life Satisfaction', 'Family Wellbeing', 'Happiness Yesterday']
females = [7.9, 7.8, 7.9]
males = [7.8, 7.8, 7.8]
plt.plot(parameters,males, label='Males')
plt.plot(parameters,females,label='Females')
plt.title('How do people from different genders perceive their lives?')
plt.ylabel('Score out of 10 where 10 is the best')
plt.xlabel('Different Parameters')
plt.grid(True,color='black')
plt.legend()
plt.show()
#let us compare the above-mentioned attributes between people of different age groups.
age_group_18_24 = [7.7, 7.8, 7.8]
age_group_35_44 = [7.8, 7.8, 7.9]
plt.plot(parameters, age_group_18_24, label='Age Group 18-24 years',color='blue')
plt.plot(parameters, age_group_35_44, label='Age Group 35-44 years', color='brown')
plt.legend()
plt.title('How do people from different age groups perceive their lives?')
plt.ylabel('Score out of 10 where 10 is the best')
plt.xlabel('Different Parameters')
plt.grid(True,color='black')
plt.show()
#let us compare the above-mentioned attributes for people from different ethnicities.
europeons = [7.8, 7.7, 7.8]
asians = [7.7, 8, 7.9]
plt.plot(parameters, europeons,label='Europeans',color='green')
plt.plot(parameters, asians, label='Asians', color='purple')
plt.legend()
plt.title('How people from different ethnicities perceive their lives?')
plt.ylabel('Score out of 10 where 10 is the best')
plt.xlabel('Different Parameters')
plt.grid(True,color='black')
plt.show()
#now let us see how different people trust the society and political bodies.
new_df = pd.read_csv('trust.csv')
print("Here, we can look at how different people rated their trust over different bodies out of 10 where 10 means complete trust (these are all mean values).")
new_df.set_index('index', inplace=True) #used set_index to change index from(0-4) to (1-4).
print(new_df)
males = pd.DataFrame({'People':[6.9], 'Health System':[7.3], "Parliament":[6.1], "Police":[7.8], 'Media':[4.8] }) #created dataframes as dictionary, learnt this from Edureka -a you tube channel
females = pd.DataFrame({'People':[6.8], "Health System":[7.1], "Parliament":[6.4], "Police":[7.7], "Media":[4.8]})
age_group_18_24 = pd.DataFrame({'People':[6.7], "Health System":[7.6], "Parliament":[6.6], "Police":[7.7], 'Media':[4.8]})
age_group_35_44 = pd.DataFrame({'People':[6.9], "Health System":[7.2], "Parliament":[6.4], "Police":[7.9], 'Media':[4.8]})
Asians = pd.DataFrame({'People':[7.4], "Health System":[7.9], "Parliament":[7.2], 'Police':[8.1], 'Media':[6.3]})
| print('let us see how much trust females have on the society as a score out of 10')
print(females)
print('let us see how much trust youngsters have on society as a score out of 10')
print(age_group_18_24)
print('let us see how much trust middle-aged people have on society as a score out of 10')
print(age_group_35_44 )
print('let us see how much trust Europeans have on society as a score out of 10')
print(Europeans )
print('let us see how much trust Asians have on society as a score out of 10')
print(Asians)
#let us look at what percentage of different populations had faced discrimination before
print('Let us look at what percentage of different populations had faced discrimination on any basis till the survey was conducted!')
onxaxis = ['Males', 'Females', 'AG(18-24)', 'AG(35-44)', 'Europeans', 'Asians']
onyaxis = [16.6, 20.3, 20.1, 19.4, 16.3, 23]
plt.bar(onxaxis, onyaxis, label='Percentage of population' )
plt.legend()
plt.xlabel('Categories of people ')
plt.ylabel('Percentage of population ')
plt.title('How many people had faced discrimination?')
plt.show()
print('As we can see in the respective bar graph, the largest portion of the Asian population (23%) had faced discrimination, whereas Europeans had the smallest portion at 16.3%.')
#now we will see how people rated their health using stack plots.
plt.subplot(211)
onxaxis = ['males', 'females', 'AG(18-24)', 'AG(35-44)', 'Europeans', 'Asians']
excellent = [18.5, 18.3, 22.2, 20.2, 18.8, 20.1]
very_good = [38.9, 39.2, 40.1, 40.1, 40.3, 39.0]
good = [28.7, 29.3, 28.8, 29.4, 27.3, 33.2]
poor = [13.9, 13.2, 8.9, 10.3, 13.7, 7.8]
plt.plot([], [], color = 'green | Europeans = pd.DataFrame({'People':[6.8], 'Health System':[7.1], 'Parliament':[6], 'Police':[8], 'Media':[4.5]})
#created dataframes using pandas to show how people from different categories trusted the society
print("let us see how much trust males have on the society as a score out of 10")
print(males)
| random_line_split |
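The pie-chart comparisons in pypro.py above all follow one pattern: two plt.subplot panels in a single window, with explode popping out one slice and autopct printing percentages. A stripped-down sketch of that pattern with made-up numbers, purely for illustration:

import matplotlib.pyplot as plt

parameters = ['More than enough', 'Enough', 'Just enough', 'Not enough']
cols = ['pink', 'yellow', 'green', 'purple']
group_a = [30, 40, 20, 10]   # made-up percentages for one group
group_b = [25, 35, 30, 10]   # made-up percentages for another group

plt.subplot(211)  # top panel
plt.pie(group_a, labels=parameters, colors=cols, startangle=90,
        shadow=True, explode=(0, 0, 0.1, 0), autopct='%1.1f%%')
plt.title('Group A')

plt.subplot(212)  # bottom panel
plt.pie(group_b, labels=parameters, colors=cols, startangle=90,
        shadow=True, explode=(0, 0, 0.1, 0), autopct='%1.1f%%')
plt.title('Group B')

plt.show()  # both pies appear in the same window for easy comparison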
backup.py | ,0,0,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0],[0,1,1,1,1,1,0,1,1,1,0,1,0,1,1,1,1,1,1,0],[0,1,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0],[0,1,0,1,1,1,1,1,1,0,1,1,1,0,1,1,1,0,1,0],[0,1,0,1,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0],[0,1,0,1,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0],[0,1,0,1,1,1,1,0,1,1,1,0,1,1,1,0,1,1,1,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])
position_history = (0,0)
class labyrinth_solver:
def __init__(self):
self.image_pub = rospy.Publisher("final_image",Image)
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/usb_cam/image_raw",Image,self.callback)
def callback(self,data):
try:
cv_image = self.bridge.imgmsg_to_cv2(data, desired_encoding="bgr8")
except CvBridgeError, e:
print e
# crop out the labyrinth region (y by x)
cv_image = cv_image[22:240, 44:268]
# resize the image to 400x400 so each grid region is 20x20
cv_image = cv2.resize(cv_image, (400, 400))
# transfer the image from BGR to HSV
hsv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
# Red Ball Segmentation
lower_red = np.array([0,50,150])
upper_red = np.array([50,150,250])
temp_ball = cv2.inRange(hsv_image,lower_red,upper_red)
# Erosion and Dilation processing
kernel = np.ones((3,3),np.uint8)
temp_ball = cv2.dilate(temp_ball,kernel,iterations = 2)
#cv2.imshow("Red Ball", temp_ball)
# Calculate the contour
contours,hierarcy = cv2.findContours(temp_ball,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# Select the biggest contour as the target
max_area = 0
for cnt in contours:
area=cv2.contourArea(cnt)
if area > max_area:
max_area=area
target = cnt
global position_history # calling global variable
# handle the case where the target is missing
if max_area >= 10:
(x,y),radius = cv2.minEnclosingCircle(target)
center = (int(x),int(y))
else:
center = position_history
# Compensate with some noise
radius = 10
if abs(center[0]-position_history[0])+abs(center[1]-position_history[1])<=4:
center = position_history
cv2.circle(cv_image,center,radius,(0,255,0),2)
position_history = center
cv2.imshow("Ball tracking", cv_image)
# manipulate the center coordinate to be the nearest 10 while extracting the position in the 20 by 20 grid
# FIRST check who is more close to 0
checkx = center[0]%20-10
checky = center[1]%20-15
if abs(checkx) <= abs(checky):
newx = center[0] - checkx
newy = center[1]*0.955
elif abs(checkx) > abs(checky):
newx = center[0] | newcenter = (newx, int(newy))
# read the reference map for animation
map_ref = cv2.imread('/home/sunyue/catkin_ws/src/tracking/map.png')
cv2.circle(map_ref,newcenter,radius,(0,0,255),-5)
# SECOND transfer the real location to the 20x20 grid
gridx = newcenter[0]/20+1
gridy = newcenter[1]/20+1
# A* for path planning
goal = [10,2]
current = [gridx, gridy]
precheck = abs(current[0]-goal[0])+abs(current[1]-goal[1])
if precheck == 0: check = 0
else: check = 100
path = np.array([current])
backup = np.array([[0,0,0,0]])
while check!=0:
# generate the potential candidate
north = [current[0],current[1]-1]
south = [current[0],current[1]+1]
east = [current[0]+1,current[1]]
west = [current[0]-1,current[1]]
#print current
# calculate the heuristic
n_heuristic = math.sqrt(pow(north[0]-goal[0],2)+pow(north[1]-goal[1],2))
s_heuristic = math.sqrt(pow(south[0]-goal[0],2)+pow(south[1]-goal[1],2))
e_heuristic = math.sqrt(pow(east[0]-goal[0],2)+pow(east[1]-goal[1],2))
w_heuristic = math.sqrt(pow(west[0]-goal[0],2)+pow(west[1]-goal[1],2))
# check the punishment of obstacle
if MAP[north[1]-1,north[0]-1]==0: n_punish = 2000
else: n_punish = 0
if MAP[south[1]-1,south[0]-1]==0: s_punish = 2000
else: s_punish = 0
if MAP[east[1]-1,east[0]-1]==0: e_punish = 2000
else: e_punish = 0
if MAP[west[1]-1,west[0]-1]==0: w_punish = 2000
else: w_punish = 0
#print n_punish, s_punish, e_punish, w_punish
# check the last node so we never go back
num = path.shape[0] # get the path step number
if num!=1:
last_step = path[-2]
n_check = north - last_step
s_check = south - last_step
e_check = east - last_step
w_check = west - last_step
if ( n_check[0]==0 and n_check[1]==0): n_punish = 2000
if ( s_check[0]==0 and s_check[1]==0): s_punish = 2000
if ( e_check[0]==0 and e_check[1]==0): e_punish = 2000
if ( w_check[0]==0 and w_check[1]==0): w_punish = 2000
# sum the cost together
n_cost = int(n_heuristic + n_punish)
s_cost = int(s_heuristic + s_punish)
e_cost = int(e_heuristic + e_punish)
w_cost = int(w_heuristic + w_punish)
cost = [n_cost, s_cost, e_cost, w_cost]
# there are some situations that should be taken into consideration
index = np.argmin(cost) # where the smallest cost is located
mincost = cost[index]
# First only one direction cost is less than 1000, then just pick that
if mincost<=1000: # there must be at least one solution
sumcheck = cost[0]+cost[1]+cost[2]+cost[3]
if sumcheck >= 6000: # only one next choice
if index == 0: next = north
elif index == 1: next = south
elif index == 2: | newy = 0.955*(center[1] - checky) | random_line_split |
backup.py | 1]
east = [current[0]+1,current[1]]
west = [current[0]-1,current[1]]
#print current
# calculate the heuristic
n_heuristic = math.sqrt(pow(north[0]-goal[0],2)+pow(north[1]-goal[1],2))
s_heuristic = math.sqrt(pow(south[0]-goal[0],2)+pow(south[1]-goal[1],2))
e_heuristic = math.sqrt(pow(east[0]-goal[0],2)+pow(east[1]-goal[1],2))
w_heuristic = math.sqrt(pow(west[0]-goal[0],2)+pow(west[1]-goal[1],2))
# check the punishment of obstacle
if MAP[north[1]-1,north[0]-1]==0: n_punish = 2000
else: n_punish = 0
if MAP[south[1]-1,south[0]-1]==0: s_punish = 2000
else: s_punish = 0
if MAP[east[1]-1,east[0]-1]==0: e_punish = 2000
else: e_punish = 0
if MAP[west[1]-1,west[0]-1]==0: w_punish = 2000
else: w_punish = 0
#print n_punish, s_punish, e_punish, w_punish
# check last node never go back
num = path.shape[0] # get the path step number
if num!=1:
last_step = path[-2]
n_check = north - last_step
s_check = south - last_step
e_check = east - last_step
w_check = west - last_step
if ( n_check[0]==0 and n_check[1]==0): n_punish = 2000
if ( s_check[0]==0 and s_check[1]==0): s_punish = 2000
if ( e_check[0]==0 and e_check[1]==0): e_punish = 2000
if ( w_check[0]==0 and w_check[1]==0): w_punish = 2000
# sum the cost together
n_cost = int(n_heuristic + n_punish)
s_cost = int(s_heuristic + s_punish)
e_cost = int(e_heuristic + e_punish)
w_cost = int(w_heuristic + w_punish)
cost = [n_cost, s_cost, e_cost, w_cost]
# there will be some situations should be taken into consideration
index = np.argmin(cost) # where the smallest cost is located
mincost = cost[index]
# First only one direction cost is less than 1000, then just pick that
if mincost<=1000: # there must be at least one solution
sumcheck = cost[0]+cost[1]+cost[2]+cost[3]
if sumcheck >= 6000: # only one next choice
if index == 0: next = north
elif index == 1: next = south
elif index == 2: next = east
elif index == 3: next = west
# update the path
path = np.append(path,[next],axis=0)
# update the check for next while
precheck = abs(next[0]-goal[0])+abs(next[1]-goal[1])
if precheck == 0:
check = 0
# update the current
current = next
elif (sumcheck >= 4000 and sumcheck < 6000) : # two possible choices
if index == 0: next = north
elif index == 1: next = south
elif index == 2: next = east
elif index == 3: next = west
# update the path choose the one have the least cost
path = np.append(path,[next],axis=0)
# update the check for next while
precheck = abs(next[0]-goal[0])+abs(next[1]-goal[1])
if precheck == 0:
check = 0
# save the branch to the backup [current, branch]
fakecost = cost
fakecost[index] = 2000 # manually fake the minimum cost choice
fakeindex = np.argmin(fakecost) # where the smallest cost is located
if fakeindex == 0: branch = north
elif fakeindex == 1: branch = south
elif fakeindex == 2: branch = east
elif fakeindex == 3: branch = west
backup = np.append([[current[0],current[1],branch[0],branch[1]]], backup, axis=0)
# update the current
current = next
elif (sumcheck >= 2000 and sumcheck < 4000) : # three possible choices
if index == 0: next = north
elif index == 1: next = south
elif index == 2: next = east
elif index == 3: next = west
# update the path choose the one have the least cost
path = np.append(path,[next],axis=0)
# update the check for next while
precheck = abs(next[0]-goal[0])+abs(next[1]-goal[1])
if precheck == 0:
check = 0
# save the branch to the backup [current, branch]
# second cost
secondcost = cost
secondcost[index] = 2000 # manually fake the minimum cost choice
secondindex = np.argmin(secondcost) # where the smallest cost is located
if secondindex == 0: branch1 = north
elif secondindex == 1: branch1 = south
elif secondindex == 2: branch1 = east
elif secondindex == 3: branch1 = west
thirdcost = secondcost
thirdcost[secondindex] = 2000 # manually fake the minimum cost choice
thirdindex = np.argmin(thirdcost) # where the smallest cost is located
if thirdindex == 0: branch2 = north
elif thirdindex == 1: branch2 = south
elif thirdindex == 2: branch2 = east
elif thirdindex == 3: branch2 = west
# update branch based on cost difference
backup = np.append([[current[0],current[1],branch2[0],branch2[1]]], backup, axis=0)
backup = np.append([[current[0],current[1],branch1[0],branch1[1]]], backup, axis=0)
# update the current
current = next
elif mincost>=2000: # there is no next choice, we have to go to the backup branches
# next step is the first ranking branch
next = [backup[0,2],backup[0,3]]
# cut the path back
current = [backup[0,0],backup[0,1]]
compare = abs(path-current)
summation = sum(np.transpose(compare))
index = np.argmin(summation)
# cut the path from 0 to current one
path = path[:index+1]
# update the path with next step
path = np.append(path,[next],axis=0)
# delete the first backup
backup = backup[1:]
# update the check for next while
precheck = abs(next[0]-goal[0])+abs(next[1]-goal[1])
if precheck == 0:
check = 0
# update the current
current = next
# A* algorithm has finished
steps = path.shape[0]
i = 0
while i < steps-1:
cv2.line(map_ref,(20*path[i,0]-10,20*path[i,1]-10),(20*path[i+1,0]-10,20*path[i+1,1]-10),(255,0,0),3)
i = i+1
cv2.imshow("Map Image", map_ref)
cv2.waitKey(1)
try:
self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, encoding="bgr8"))
except CvBridgeError, e:
print e
def main(args):
| ic = labyrinth_solver()
rospy.init_node('labyrinth_solver', anonymous=True)
try:
rospy.spin()
except KeyboardInterrupt:
print "Shutting down"
cv2.destroyAllWindows() | identifier_body |
|
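The planner in backup.py above walks the 20x20 occupancy grid greedily by straight-line distance, punishing walls and already-visited directions with a cost of 2000 and keeping a backup list of branches to fall back on. For comparison, a compact textbook A* over the same kind of 0/1 grid (1 = free, 0 = wall) using a priority queue; the grid indexing and start/goal values here are illustrative, not taken from the node:

import heapq
import math

def astar(grid, start, goal):
    """grid[y][x]: 1 = free cell, 0 = wall; start/goal are (x, y) tuples."""
    def h(cell):  # Euclidean heuristic, same idea as the node's math.sqrt(...) terms
        return math.hypot(cell[0] - goal[0], cell[1] - goal[1])

    open_heap = [(h(start), 0, start)]   # entries are (f = g + h, g, cell)
    came_from = {}
    best_g = {start: 0}
    while open_heap:
        _, g, cur = heapq.heappop(open_heap)
        if cur == goal:
            path = [cur]
            while cur in came_from:       # walk parents back to the start
                cur = came_from[cur]
                path.append(cur)
            return path[::-1]
        for dx, dy in ((0, -1), (0, 1), (1, 0), (-1, 0)):  # N, S, E, W
            nxt = (cur[0] + dx, cur[1] + dy)
            if not (0 <= nxt[1] < len(grid) and 0 <= nxt[0] < len(grid[0])):
                continue
            if grid[nxt[1]][nxt[0]] == 0:  # wall
                continue
            ng = g + 1
            if ng < best_g.get(nxt, float('inf')):
                best_g[nxt] = ng
                came_from[nxt] = cur
                heapq.heappush(open_heap, (ng + h(nxt), ng, nxt))
    return None  # no route found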
backup.py | while check!=0:
# generate the potential candidate
north = [current[0],current[1]-1]
south = [current[0],current[1]+1]
east = [current[0]+1,current[1]]
west = [current[0]-1,current[1]]
#print current
# calculate the heuristic
n_heuristic = math.sqrt(pow(north[0]-goal[0],2)+pow(north[1]-goal[1],2))
s_heuristic = math.sqrt(pow(south[0]-goal[0],2)+pow(south[1]-goal[1],2))
e_heuristic = math.sqrt(pow(east[0]-goal[0],2)+pow(east[1]-goal[1],2))
w_heuristic = math.sqrt(pow(west[0]-goal[0],2)+pow(west[1]-goal[1],2))
# check the punishment of obstacle
if MAP[north[1]-1,north[0]-1]==0: n_punish = 2000
else: n_punish = 0
if MAP[south[1]-1,south[0]-1]==0: s_punish = 2000
else: s_punish = 0
if MAP[east[1]-1,east[0]-1]==0: e_punish = 2000
else: e_punish = 0
if MAP[west[1]-1,west[0]-1]==0: w_punish = 2000
else: w_punish = 0
#print n_punish, s_punish, e_punish, w_punish
# check the last node so we never go back
num = path.shape[0] # get the path step number
if num!=1:
last_step = path[-2]
n_check = north - last_step
s_check = south - last_step
e_check = east - last_step
w_check = west - last_step
if ( n_check[0]==0 and n_check[1]==0): n_punish = 2000
if ( s_check[0]==0 and s_check[1]==0): s_punish = 2000
if ( e_check[0]==0 and e_check[1]==0): e_punish = 2000
if ( w_check[0]==0 and w_check[1]==0): w_punish = 2000
# sum the cost together
n_cost = int(n_heuristic + n_punish)
s_cost = int(s_heuristic + s_punish)
e_cost = int(e_heuristic + e_punish)
w_cost = int(w_heuristic + w_punish)
cost = [n_cost, s_cost, e_cost, w_cost]
# there are some situations that should be taken into consideration
index = np.argmin(cost) # where the smallest cost is located
mincost = cost[index]
# First only one direction cost is less than 1000, then just pick that
if mincost<=1000: # there must be at least one solution
sumcheck = cost[0]+cost[1]+cost[2]+cost[3]
if sumcheck >= 6000: # only one next choice
if index == 0: next = north
elif index == 1: next = south
elif index == 2: next = east
elif index == 3: next = west
# update the path
path = np.append(path,[next],axis=0)
# update the check for next while
precheck = abs(next[0]-goal[0])+abs(next[1]-goal[1])
if precheck == 0:
check = 0
# update the current
current = next
elif (sumcheck >= 4000 and sumcheck < 6000) : # two possible choices
if index == 0: next = north
elif index == 1: next = south
elif index == 2: next = east
elif index == 3: next = west
# update the path choose the one have the least cost
path = np.append(path,[next],axis=0)
# update the check for next while
precheck = abs(next[0]-goal[0])+abs(next[1]-goal[1])
if precheck == 0:
check = 0
# save the branch to the backup [current, branch]
fakecost = cost
fakecost[index] = 2000 # manually fake the minimum cost choice
fakeindex = np.argmin(fakecost) # where the smallest cost is located
if fakeindex == 0: branch = north
elif fakeindex == 1: branch = south
elif fakeindex == 2: branch = east
elif fakeindex == 3: branch = west
backup = np.append([[current[0],current[1],branch[0],branch[1]]], backup, axis=0)
# update the current
current = next
elif (sumcheck >= 2000 and sumcheck < 4000) : # three possible choices
if index == 0: next = north
elif index == 1: next = south
elif index == 2: next = east
elif index == 3: next = west
# update the path choose the one have the least cost
path = np.append(path,[next],axis=0)
# update the check for next while
precheck = abs(next[0]-goal[0])+abs(next[1]-goal[1])
if precheck == 0:
check = 0
# save the branch to the backup [current, branch]
# second cost
secondcost = cost
secondcost[index] = 2000 # manually fake the minimum cost choice
secondindex = np.argmin(secondcost) # where the smallest cost is located
if secondindex == 0: branch1 = north
elif secondindex == 1: branch1 = south
elif secondindex == 2: branch1 = east
elif secondindex == 3: branch1 = west
thirdcost = secondcost
thirdcost[secondindex] = 2000 # manually fake the minimum cost choice
thirdindex = np.argmin(thirdcost) # where the smallest cost is located
if thirdindex == 0: branch2 = north
elif thirdindex == 1: branch2 = south
elif thirdindex == 2: branch2 = east
elif thirdindex == 3: branch2 = west
# update branch based on cost difference
backup = np.append([[current[0],current[1],branch2[0],branch2[1]]], backup, axis=0)
backup = np.append([[current[0],current[1],branch1[0],branch1[1]]], backup, axis=0)
# update the current
current = next
elif mincost>=2000: # there is no next choice, we have to go to the backup branches
# next step is the first ranking branch
next = [backup[0,2],backup[0,3]]
# cut the path back
current = [backup[0,0],backup[0,1]]
compare = abs(path-current)
summation = sum(np.transpose(compare))
index = np.argmin(summation)
# cut the path from 0 to current one
path = path[:index+1]
# update the path with next step
path = np.append(path,[next],axis=0)
# delete the first backup
backup = backup[1:]
# update the check for next while
precheck = abs(next[0]-goal[0])+abs(next[1]-goal[1])
if precheck == 0:
check = 0
# update the current
current = next
# A* algorithm has finished
steps = path.shape[0]
i = 0
while i < steps-1:
cv2.line(map_ref,(20*path[i,0]-10,20*path[i,1]-10),(20*path[i+1,0]-10,20*path[i+1,1]-10),(255,0,0),3)
i = i+1
cv2.imshow("Map Image", map_ref)
cv2.waitKey(1)
try:
self.image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, encoding="bgr8"))
except CvBridgeError, e:
print e
def | main | identifier_name |
|
backup.py | 0,0,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0],[0,1,1,1,1,1,0,1,1,1,0,1,0,1,1,1,1,1,1,0],[0,1,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0],[0,1,0,1,1,1,1,1,1,0,1,1,1,0,1,1,1,0,1,0],[0,1,0,1,0,0,0,0,1,0,1,0,1,0,1,0,1,0,1,0],[0,1,0,1,0,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0],[0,1,0,1,1,1,1,0,1,1,1,0,1,1,1,0,1,1,1,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])
position_history = (0,0)
class labyrinth_solver:
def __init__(self):
self.image_pub = rospy.Publisher("final_image",Image)
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber("/usb_cam/image_raw",Image,self.callback)
def callback(self,data):
try:
cv_image = self.bridge.imgmsg_to_cv2(data, desired_encoding="bgr8")
except CvBridgeError, e:
print e
# crop out the labyrinth region (y by x)
cv_image = cv_image[22:240, 44:268]
# resize the image to 400x400 so each grid region is 20x20
cv_image = cv2.resize(cv_image, (400, 400))
# transfer the image from BGR to HSV
hsv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)
# Red Ball Segmentation
lower_red = np.array([0,50,150])
upper_red = np.array([50,150,250])
temp_ball = cv2.inRange(hsv_image,lower_red,upper_red)
# Erosion and Dilation processing
kernel = np.ones((3,3),np.uint8)
temp_ball = cv2.dilate(temp_ball,kernel,iterations = 2)
#cv2.imshow("Red Ball", temp_ball)
# Calculate the contour
contours,hierarcy = cv2.findContours(temp_ball,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# Select the biggest contour as the target
max_area = 0
for cnt in contours:
area=cv2.contourArea(cnt)
if area > max_area:
max_area=area
target = cnt
global position_history # calling global variable
# handle the case where the target is missing
if max_area >= 10:
(x,y),radius = cv2.minEnclosingCircle(target)
center = (int(x),int(y))
else:
center = position_history
# Compensate with some noise
radius = 10
if abs(center[0]-position_history[0])+abs(center[1]-position_history[1])<=4:
center = position_history
cv2.circle(cv_image,center,radius,(0,255,0),2)
position_history = center
cv2.imshow("Ball tracking", cv_image)
# manipulate the center coordinate to be the nearest 10 while extracting the position in the 20 by 20 grid
# FIRST check who is more close to 0
checkx = center[0]%20-10
checky = center[1]%20-15
if abs(checkx) <= abs(checky):
newx = center[0] - checkx
newy = center[1]*0.955
elif abs(checkx) > abs(checky):
newx = center[0]
newy = 0.955*(center[1] - checky)
newcenter = (newx, int(newy))
# read the reference map for animation
map_ref = cv2.imread('/home/sunyue/catkin_ws/src/tracking/map.png')
cv2.circle(map_ref,newcenter,radius,(0,0,255),-5)
# SECOND transfer the real location to the 20x20 grid
gridx = newcenter[0]/20+1
gridy = newcenter[1]/20+1
# A* for path planning
goal = [10,2]
current = [gridx, gridy]
precheck = abs(current[0]-goal[0])+abs(current[1]-goal[1])
if precheck == 0: check = 0
else: |
path = np.array([current])
backup = np.array([[0,0,0,0]])
while check!=0:
# generate the potential candidate
north = [current[0],current[1]-1]
south = [current[0],current[1]+1]
east = [current[0]+1,current[1]]
west = [current[0]-1,current[1]]
#print current
# calculate the heuristic
n_heuristic = math.sqrt(pow(north[0]-goal[0],2)+pow(north[1]-goal[1],2))
s_heuristic = math.sqrt(pow(south[0]-goal[0],2)+pow(south[1]-goal[1],2))
e_heuristic = math.sqrt(pow(east[0]-goal[0],2)+pow(east[1]-goal[1],2))
w_heuristic = math.sqrt(pow(west[0]-goal[0],2)+pow(west[1]-goal[1],2))
# check the punishment of obstacle
if MAP[north[1]-1,north[0]-1]==0: n_punish = 2000
else: n_punish = 0
if MAP[south[1]-1,south[0]-1]==0: s_punish = 2000
else: s_punish = 0
if MAP[east[1]-1,east[0]-1]==0: e_punish = 2000
else: e_punish = 0
if MAP[west[1]-1,west[0]-1]==0: w_punish = 2000
else: w_punish = 0
#print n_punish, s_punish, e_punish, w_punish
# check the last node so we never go back
num = path.shape[0] # get the path step number
if num!=1:
last_step = path[-2]
n_check = north - last_step
s_check = south - last_step
e_check = east - last_step
w_check = west - last_step
if ( n_check[0]==0 and n_check[1]==0): n_punish = 2000
if ( s_check[0]==0 and s_check[1]==0): s_punish = 2000
if ( e_check[0]==0 and e_check[1]==0): e_punish = 2000
if ( w_check[0]==0 and w_check[1]==0): w_punish = 2000
# sum the cost together
n_cost = int(n_heuristic + n_punish)
s_cost = int(s_heuristic + s_punish)
e_cost = int(e_heuristic + e_punish)
w_cost = int(w_heuristic + w_punish)
cost = [n_cost, s_cost, e_cost, w_cost]
# there are some situations that should be taken into consideration
index = np.argmin(cost) # where the smallest cost is located
mincost = cost[index]
# First only one direction cost is less than 1000, then just pick that
if mincost<=1000: # there must be at least one solution
sumcheck = cost[0]+cost[1]+cost[2]+cost[3]
if sumcheck >= 6000: # only one next choice
if index == 0: next = north
elif index == 1: next = south
elif index == 2: | check = 100 | conditional_block |
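The ball tracking inside the callback above boils down to: convert to HSV, threshold the ball colour with cv2.inRange, dilate away noise, take the largest contour, and wrap it in cv2.minEnclosingCircle. A standalone sketch of that pipeline; the HSV bounds, area threshold, and webcam index are placeholders to tune for a real setup, and the findContours call uses the two-value OpenCV 4.x signature:

import cv2
import numpy as np

def track_red_ball(frame, lower=(0, 50, 150), upper=(50, 150, 250)):
    """Return the (x, y) centre and radius of the largest red blob, or None."""
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, np.array(lower), np.array(upper))
    mask = cv2.dilate(mask, np.ones((3, 3), np.uint8), iterations=2)
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return None
    biggest = max(contours, key=cv2.contourArea)   # largest blob wins
    if cv2.contourArea(biggest) < 10:              # ignore tiny specks
        return None
    (x, y), radius = cv2.minEnclosingCircle(biggest)
    return (int(x), int(y)), int(radius)

# Example usage against a webcam (index 0 is an assumption):
# cap = cv2.VideoCapture(0)
# ok, frame = cap.read()
# hit = track_red_ball(frame)
# if hit:
#     (cx, cy), r = hit
#     cv2.circle(frame, (cx, cy), r, (0, 255, 0), 2)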
utils_test.go | 1"
)
// -----------------------------------------------------------------------------
// Testing Timeouts
// -----------------------------------------------------------------------------
const (
// waitTick is the default timeout tick interval for checking on ingress resources.
waitTick = time.Second * 1
// ingressWait is the default amount of time to wait for any particular ingress resource to be provisioned.
ingressWait = time.Minute * 3
// httpcTimeout is the default client timeout for HTTP clients used in tests.
httpcTimeout = time.Second * 3
// environmentCleanupTimeout is the amount of time that will be given by the test suite to the
// testing environment to perform its cleanup when the test suite is shutting down.
environmentCleanupTimeout = time.Minute * 3
)
// -----------------------------------------------------------------------------
// Testing Variables
// -----------------------------------------------------------------------------
var (
// ctx the topical context of the test suite, can be used by test cases if they don't need
// any special context as a function of the test
ctx context.Context
// cancel is the cancel function for the above global test context
cancel context.CancelFunc
// httpBinImage is the container image name we use for deploying the "httpbin" HTTP testing tool.
// if you need a simple HTTP server for tests you're writing, use this and check the documentation.
// See: https://github.com/postmanlabs/httpbin
httpBinImage = "kennethreitz/httpbin"
// ingressClass indicates the ingress class name which the tests will use for supported object reconciliation
ingressClass = "kongtests"
// elsewhere is the name of an alternative namespace
elsewhere = "elsewhere"
// controllerNamespace is the Kubernetes namespace where the controller is deployed
controllerNamespace = "kong-system"
// httpc is the default HTTP client to use for tests
httpc = http.Client{Timeout: httpcTimeout}
// watchNamespaces is a list of namespaces the controller watches
// NOTE: more namespaces will be loaded dynamically by the test.Main
// during runtime. In general, avoid adding hardcoded namespaces
// to this list as that's reserved for special cases.
watchNamespaces = elsewhere
// env is the primary testing environment object which includes access to the Kubernetes cluster
// and all the addons deployed in support of the tests.
env environments.Environment
// proxyURL provides access to the proxy endpoint for the Kong Addon which is deployed to the test environment's cluster.
proxyURL *url.URL
// proxyAdminURL provides access to the Admin API endpoint for the Kong Addon which is deployed to the test environment's cluster.
proxyAdminURL *url.URL
// proxyUDPURL provides access to the UDP API endpoint for the Kong Addon which is deployed to the test environment's cluster.
proxyUDPURL *url.URL
// clusterVersion is a convenience var where the found version of the env.Cluster is stored.
clusterVersion semver.Version
)
// -----------------------------------------------------------------------------
// Testing Variables - Environment Overrides
// -----------------------------------------------------------------------------
var (
// dbmode indicates the database backend of the test cluster ("off" and "postgres" are supported)
dbmode = os.Getenv("TEST_DATABASE_MODE")
// clusterVersion indicates the version of Kubernetes to use for the tests (if the cluster was not provided by the caller)
clusterVersionStr = os.Getenv("KONG_CLUSTER_VERSION")
// existingCluster indicates whether or not the caller is providing their own cluster for running the tests.
// These need to come in the format <TYPE>:<NAME> (e.g. "kind:<NAME>", "gke:<NAME>", etc.).
existingCluster = os.Getenv("KONG_TEST_CLUSTER")
// keepTestCluster indicates whether the caller wants the cluster created by the test suite
// to persist after the test for inspection. This has a nil effect when an existing cluster
// is provided, as cleanup is not performed for existing clusters.
keepTestCluster = os.Getenv("KONG_TEST_CLUSTER_PERSIST")
// kongEnterpriseEnabled enables Enterprise-specific tests when set to "true"
kongEnterpriseEnabled = os.Getenv("TEST_KONG_ENTERPRISE")
)
// -----------------------------------------------------------------------------
// Test Suite Exit Codes
// -----------------------------------------------------------------------------
const (
// ExitCodeIncompatibleOptions is a POSIX compliant exit code for the test suite to
// indicate that some combination of provided configurations were not compatible.
ExitCodeIncompatibleOptions = 100
// ExitCodeInvalidOptions is a POSIX compliant exit code for the test suite to indicate
// that some of the provided runtime options were not valid and the tests could not run.
ExitCodeInvalidOptions = 101
// ExitCodeCantUseExistingCluster is a POSIX compliant exit code for the test suite to
// indicate that an existing cluster provided for the tests was not usable.
ExitCodeCantUseExistingCluster = 101
// ExitCodeCantCreateCluster is a POSIX compliant exit code for the test suite to indicate
// that a failure occurred when trying to create a Kubernetes cluster to run the tests.
ExitCodeCantCreateCluster = 102
// ExitCodeCleanupFailed is a POSIX compliant exit code for the test suite to indicate
// that a failure occurred during cluster cleanup.
ExitCodeCleanupFailed = 103
// ExitCodeEnvSetupFailed is a generic exit code that can be used as a fallback for general
// problems setting up the testing environment and/or cluster.
ExitCodeEnvSetupFailed = 104
// kongTestPassword is used as a password only within the context of transient integration test runs
// and is left static to help developers debug failures in those testing environments.
kongTestPassword = "password"
)
// -----------------------------------------------------------------------------
// Testing Utility Functions - Namespaces
// -----------------------------------------------------------------------------
var (
// namespaces is a map of test case names to a namespace that was generated specifically for them to use.
// each test case in the test run gets its own unique namespace.
namespaces = make(map[string]*corev1.Namespace)
)
// namespace provides the namespace provisioned for each test case given their t.Name as the "testCase".
func namespace(t *testing.T) (*corev1.Namespace, func()) {
namespace, ok := namespaces[t.Name()]
if !ok {
fmt.Fprintf(os.Stderr, "Error: test case %s did not have a namespace set up\n", t.Name())
os.Exit(ExitCodeCantCreateCluster)
}
cleanup := func() {
assert.NoError(t, clusters.CleanupGeneratedResources(ctx, env.Cluster(), t.Name()))
}
return namespace, cleanup
}
// -----------------------------------------------------------------------------
// Testing Utility Functions - Identifying Test Cases
// -----------------------------------------------------------------------------
// identifyTestCasesForDir finds the Go function names for any Go test files in the given directory
func identifyTestCasesForDir(dir string) ([]string, error) {
files, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
var testCasesForDir []string
for _, fileInfo := range files {
if !fileInfo.IsDir() {
if strings.HasSuffix(fileInfo.Name(), "test.go") {
testCasesForFile, err := identifyTestCasesForFile(dir + fileInfo.Name())
if err != nil {
return nil, err
}
testCasesForDir = append(testCasesForDir, testCasesForFile...)
}
}
}
return testCasesForDir, nil
}
// testCaseRegexp is a regex to identify Go test cases in test files
var testCaseRegexp = regexp.MustCompile(`func (Test.*)\(`)
// identifyTestCasesForFile searches the given file for any Golang test cases
func identifyTestCasesForFile(filePath string) ([]string, error) {
if !strings.HasSuffix(filePath, "test.go") {
return nil, fmt.Errorf("%s does not look like a Golang test file", filePath)
}
b, err := os.ReadFile(filePath)
if err != nil {
return nil, err
}
matches := testCaseRegexp.FindAllSubmatch(b, -1)
if len(matches) < 1 {
return nil, nil
}
var testCasesForFile []string
for _, submatches := range matches {
if len(submatches) > 1 {
testCaseName := string(submatches[1])
if testCaseName != "TestMain" { // don't count TestMains
testCasesForFile = append(testCasesForFile, testCaseName)
}
}
}
return testCasesForFile, nil
}
// -----------------------------------------------------------------------------
// Testing Utility Functions - HTTP Requests
// -----------------------------------------------------------------------------
// expect404WithNoRoute is used to check whether a given http response is (specifically) a Kong 404.
func expect404WithNoRoute(t *testing.T, proxyURL string, resp *http.Response) bool {
if resp.StatusCode == http.StatusNotFound |
return false
}
// -----------------------------------------------------------------------------
// Test.MAIN Utility Functions
// -----------------------------------------------------------------------------
// exitOnErrWithCode is a helper function meant | {
// once the route is torn down and returning 404's, ensure that we got the expected response body back from Kong
// Expected: {"message":"no Route matched with those values"}
b := new(bytes.Buffer)
_, err := b.ReadFrom(resp.Body)
require.NoError(t, err)
body := struct {
Message string `json:"message"`
}{}
if err := json.Unmarshal(b.Bytes(), &body); err != nil {
t.Logf("WARNING: error decoding JSON from proxy while waiting for %s: %v", proxyURL, err)
return false
}
return body.Message == "no Route matched with those values"
} | conditional_block |
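expect404WithNoRoute pins down exactly what a "route is gone" response from Kong looks like: HTTP 404 plus the JSON body {"message":"no Route matched with those values"}, so an upstream 404 is not mistaken for a torn-down route. The same check, sketched in Python with requests purely for illustration (the proxy URL is an assumption):

import requests

def is_kong_404_no_route(url="http://localhost:8000/some/path"):
    """Return True only for Kong's own 404, not an upstream 404."""
    resp = requests.get(url, timeout=3)
    if resp.status_code != 404:
        return False
    try:
        body = resp.json()
    except ValueError:
        return False  # not JSON, so not Kong's canned response
    return body.get("message") == "no Route matched with those values"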
utils_test.go | ingressWait is the default amount of time to wait for any particular ingress resource to be provisioned.
ingressWait = time.Minute * 3
// httpcTimeout is the default client timeout for HTTP clients used in tests.
httpcTimeout = time.Second * 3
// environmentCleanupTimeout is the amount of time that will be given by the test suite to the
// testing environment to perform its cleanup when the test suite is shutting down.
environmentCleanupTimeout = time.Minute * 3
)
// -----------------------------------------------------------------------------
// Testing Variables
// -----------------------------------------------------------------------------
var (
// ctx the topical context of the test suite, can be used by test cases if they don't need
// any special context as a function of the test
ctx context.Context
// cancel is the cancel function for the above global test context
cancel context.CancelFunc
// httpBinImage is the container image name we use for deploying the "httpbin" HTTP testing tool.
// if you need a simple HTTP server for tests you're writing, use this and check the documentation.
// See: https://github.com/postmanlabs/httpbin
httpBinImage = "kennethreitz/httpbin"
// ingressClass indicates the ingress class name which the tests will use for supported object reconciliation
ingressClass = "kongtests"
// elsewhere is the name of an alternative namespace
elsewhere = "elsewhere"
// controllerNamespace is the Kubernetes namespace where the controller is deployed
controllerNamespace = "kong-system"
// httpc is the default HTTP client to use for tests
httpc = http.Client{Timeout: httpcTimeout}
// watchNamespaces is a list of namespaces the controller watches
// NOTE: more namespaces will be loaded dynamically by the test.Main
// during runtime. In general, avoid adding hardcoded namespaces
// to this list as that's reserved for special cases.
watchNamespaces = elsewhere
// env is the primary testing environment object which includes access to the Kubernetes cluster
// and all the addons deployed in support of the tests.
env environments.Environment
// proxyURL provides access to the proxy endpoint for the Kong Addon which is deployed to the test environment's cluster.
proxyURL *url.URL
// proxyAdminURL provides access to the Admin API endpoint for the Kong Addon which is deployed to the test environment's cluster.
proxyAdminURL *url.URL
// proxyUDPURL provides access to the UDP API endpoint for the Kong Addon which is deployed to the test environment's cluster.
proxyUDPURL *url.URL
// clusterVersion is a convenience var where the found version of the env.Cluster is stored.
clusterVersion semver.Version
)
// -----------------------------------------------------------------------------
// Testing Variables - Environment Overrides
// -----------------------------------------------------------------------------
var (
// dbmode indicates the database backend of the test cluster ("off" and "postgres" are supported)
dbmode = os.Getenv("TEST_DATABASE_MODE")
// clusterVersion indicates the version of Kubernetes to use for the tests (if the cluster was not provided by the caller)
clusterVersionStr = os.Getenv("KONG_CLUSTER_VERSION")
// existingCluster indicates whether or not the caller is providing their own cluster for running the tests.
// These need to come in the format <TYPE>:<NAME> (e.g. "kind:<NAME>", "gke:<NAME>", etc.).
existingCluster = os.Getenv("KONG_TEST_CLUSTER")
// keepTestCluster indicates whether the caller wants the cluster created by the test suite
// to persist after the test for inspection. This has a nil effect when an existing cluster
// is provided, as cleanup is not performed for existing clusters.
keepTestCluster = os.Getenv("KONG_TEST_CLUSTER_PERSIST")
// kongEnterpriseEnabled enables Enterprise-specific tests when set to "true"
kongEnterpriseEnabled = os.Getenv("TEST_KONG_ENTERPRISE")
)
// -----------------------------------------------------------------------------
// Test Suite Exit Codes
// -----------------------------------------------------------------------------
const (
// ExitCodeIncompatibleOptions is a POSIX compliant exit code for the test suite to
// indicate that some combination of provided configurations were not compatible.
ExitCodeIncompatibleOptions = 100
// ExitCodeInvalidOptions is a POSIX compliant exit code for the test suite to indicate
// that some of the provided runtime options were not valid and the tests could not run.
ExitCodeInvalidOptions = 101
// ExitCodeCantUseExistingCluster is a POSIX compliant exit code for the test suite to
// indicate that an existing cluster provided for the tests was not usable.
ExitCodeCantUseExistingCluster = 101
// ExitCodeCantCreateCluster is a POSIX compliant exit code for the test suite to indicate
// that a failure occurred when trying to create a Kubernetes cluster to run the tests.
ExitCodeCantCreateCluster = 102
// ExitCodeCleanupFailed is a POSIX compliant exit code for the test suite to indicate
// that a failure occurred during cluster cleanup.
ExitCodeCleanupFailed = 103
// ExitCodeEnvSetupFailed is a generic exit code that can be used as a fallback for general
// problems setting up the testing environment and/or cluster.
ExitCodeEnvSetupFailed = 104
// kongTestPassword is used as a password only within the context of transient integration test runs
// and is left static to help developers debug failures in those testing environments.
kongTestPassword = "password"
)
// -----------------------------------------------------------------------------
// Testing Utility Functions - Namespaces
// -----------------------------------------------------------------------------
var (
// namespaces is a map of test case names to a namespace that was generated specifically for them to use.
// each test case in the test run gets its own unique namespace.
namespaces = make(map[string]*corev1.Namespace)
)
// namespace provides the namespace provisioned for each test case given their t.Name as the "testCase".
func namespace(t *testing.T) (*corev1.Namespace, func()) {
namespace, ok := namespaces[t.Name()]
if !ok {
fmt.Fprintf(os.Stderr, "Error: test case %s did not have a namespace set up\n", t.Name())
os.Exit(ExitCodeCantCreateCluster)
}
cleanup := func() {
assert.NoError(t, clusters.CleanupGeneratedResources(ctx, env.Cluster(), t.Name()))
}
return namespace, cleanup
}
// -----------------------------------------------------------------------------
// Testing Utility Functions - Identifying Test Cases
// -----------------------------------------------------------------------------
// identifyTestCasesForDir finds the Go function names for any Go test files in the given directory
func identifyTestCasesForDir(dir string) ([]string, error) {
files, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
var testCasesForDir []string
for _, fileInfo := range files {
if !fileInfo.IsDir() {
if strings.HasSuffix(fileInfo.Name(), "test.go") {
testCasesForFile, err := identifyTestCasesForFile(dir + fileInfo.Name())
if err != nil {
return nil, err
}
testCasesForDir = append(testCasesForDir, testCasesForFile...)
}
}
}
return testCasesForDir, nil
}
// testCaseRegexp is a regex to identify Go test cases in test files
var testCaseRegexp = regexp.MustCompile(`func (Test.*)\(`)
// identifyTestCasesForFile searches the given file for any Golang test cases
func identifyTestCasesForFile(filePath string) ([]string, error) {
if !strings.HasSuffix(filePath, "test.go") {
return nil, fmt.Errorf("%s does not look like a Golang test file", filePath)
}
b, err := os.ReadFile(filePath)
if err != nil {
return nil, err
}
matches := testCaseRegexp.FindAllSubmatch(b, -1)
if len(matches) < 1 {
return nil, nil
}
var testCasesForFile []string
for _, submatches := range matches {
if len(submatches) > 1 {
testCaseName := string(submatches[1])
if testCaseName != "TestMain" { // don't count TestMains
testCasesForFile = append(testCasesForFile, testCaseName)
}
}
}
return testCasesForFile, nil
}
// -----------------------------------------------------------------------------
// Testing Utility Functions - HTTP Requests
// -----------------------------------------------------------------------------
// expect404WithNoRoute is used to check whether a given http response is (specifically) a Kong 404.
func expect404WithNoRoute(t *testing.T, proxyURL string, resp *http.Response) bool {
if resp.StatusCode == http.StatusNotFound {
// once the route is torn down and returning 404's, ensure that we got the expected response body back from Kong
// Expected: {"message":"no Route matched with those values"}
b := new(bytes.Buffer)
_, err := b.ReadFrom(resp.Body)
require.NoError(t, err)
body := struct {
Message string `json:"message"`
}{}
if err := json.Unmarshal(b.Bytes(), &body); err != nil {
t.Logf("WARNING: error decoding JSON from proxy while waiting for %s: %v", proxyURL, err)
return false
}
return body.Message == "no Route matched with those values"
}
return false
}
// -----------------------------------------------------------------------------
// Test.MAIN Utility Functions
// -----------------------------------------------------------------------------
// exitOnErrWithCode is a helper function meant for use in the test.Main to simplify failing and exiting
// the tests under unrecoverable error conditions. It will also attempt to perform any cluster
// cleaning necessary before exiting.
func | exitOnErrWithCode | identifier_name |
|
utils_test.go | /v1"
)
// -----------------------------------------------------------------------------
// Testing Timeouts
// -----------------------------------------------------------------------------
const (
// waitTick is the default timeout tick interval for checking on ingress resources.
waitTick = time.Second * 1
// ingressWait is the default amount of time to wait for any particular ingress resource to be provisioned.
ingressWait = time.Minute * 3
// httpcTimeout is the default client timeout for HTTP clients used in tests.
httpcTimeout = time.Second * 3
// environmentCleanupTimeout is the amount of time that will be given by the test suite to the
// testing environment to perform its cleanup when the test suite is shutting down.
environmentCleanupTimeout = time.Minute * 3
)
// -----------------------------------------------------------------------------
// Testing Variables
// -----------------------------------------------------------------------------
var (
// ctx the topical context of the test suite, can be used by test cases if they don't need
// any special context as a function of the test
ctx context.Context
// cancel is the cancel function for the above global test context
cancel context.CancelFunc
// httpBinImage is the container image name we use for deploying the "httpbin" HTTP testing tool.
// if you need a simple HTTP server for tests you're writing, use this and check the documentation.
// See: https://github.com/postmanlabs/httpbin
httpBinImage = "kennethreitz/httpbin"
// ingressClass indicates the ingress class name which the tests will use for supported object reconciliation
ingressClass = "kongtests"
// elsewhere is the name of an alternative namespace
elsewhere = "elsewhere"
// controllerNamespace is the Kubernetes namespace where the controller is deployed
controllerNamespace = "kong-system"
// httpc is the default HTTP client to use for tests
httpc = http.Client{Timeout: httpcTimeout}
// watchNamespaces is a list of namespaces the controller watches
// NOTE: more namespaces will be loaded dynamically by the test.Main
// during runtime. In general, avoid adding hardcoded namespaces
// to this list as that's reserved for special cases.
watchNamespaces = elsewhere
// env is the primary testing environment object which includes access to the Kubernetes cluster
// and all the addons deployed in support of the tests.
env environments.Environment
// proxyURL provides access to the proxy endpoint for the Kong Addon which is deployed to the test environment's cluster.
proxyURL *url.URL
// proxyAdminURL provides access to the Admin API endpoint for the Kong Addon which is deployed to the test environment's cluster.
proxyAdminURL *url.URL
// proxyUDPURL provides access to the UDP API endpoint for the Kong Addon which is deployed to the test environment's cluster.
proxyUDPURL *url.URL
// clusterVersion is a convenience var where the found version of the env.Cluster is stored.
clusterVersion semver.Version
)
// -----------------------------------------------------------------------------
// Testing Variables - Environment Overrides
// -----------------------------------------------------------------------------
var (
// dbmode indicates the database backend of the test cluster ("off" and "postgres" are supported)
dbmode = os.Getenv("TEST_DATABASE_MODE")
// clusterVersion indicates the version of Kubernetes to use for the tests (if the cluster was not provided by the caller)
clusterVersionStr = os.Getenv("KONG_CLUSTER_VERSION")
// existingCluster indicates whether or not the caller is providing their own cluster for running the tests.
// These need to come in the format <TYPE>:<NAME> (e.g. "kind:<NAME>", "gke:<NAME>", etc.).
existingCluster = os.Getenv("KONG_TEST_CLUSTER")
// keepTestCluster indicates whether the caller wants the cluster created by the test suite
// to persist after the test for inspection. This has a nil effect when an existing cluster
// is provided, as cleanup is not performed for existing clusters.
keepTestCluster = os.Getenv("KONG_TEST_CLUSTER_PERSIST")
// kongEnterpriseEnabled enables Enterprise-specific tests when set to "true"
kongEnterpriseEnabled = os.Getenv("TEST_KONG_ENTERPRISE")
)
// -----------------------------------------------------------------------------
// Test Suite Exit Codes
// -----------------------------------------------------------------------------
const (
// ExitCodeIncompatibleOptions is a POSIX compliant exit code for the test suite to
// indicate that some combination of provided configurations were not compatible.
ExitCodeIncompatibleOptions = 100
// ExitCodeInvalidOptions is a POSIX compliant exit code for the test suite to indicate
// that some of the provided runtime options were not valid and the tests could not run.
ExitCodeInvalidOptions = 101
// ExitCodeCantUseExistingCluster is a POSIX compliant exit code for the test suite to
// indicate that an existing cluster provided for the tests was not usable.
ExitCodeCantUseExistingCluster = 101
// ExitCodeCantCreateCluster is a POSIX compliant exit code for the test suite to indicate
// that a failure occurred when trying to create a Kubernetes cluster to run the tests.
ExitCodeCantCreateCluster = 102
// ExitCodeCleanupFailed is a POSIX compliant exit code for the test suite to indicate
// that a failure occurred during cluster cleanup.
ExitCodeCleanupFailed = 103
// ExitCodeEnvSetupFailed is a generic exit code that can be used as a fallback for general
// problems setting up the testing environment and/or cluster.
ExitCodeEnvSetupFailed = 104
// kongTestPassword is used as a password only within the context of transient integration test runs
// and is left static to help developers debug failures in those testing environments.
kongTestPassword = "password"
)
// -----------------------------------------------------------------------------
// Testing Utility Functions - Namespaces
// -----------------------------------------------------------------------------
var (
// namespaces is a map of test case names to a namespace that was generated specifically for them to use.
// each test case in the test run gets its own unique namespace.
namespaces = make(map[string]*corev1.Namespace)
)
// namespace provides the namespace provisioned for each test case given their t.Name as the "testCase".
func namespace(t *testing.T) (*corev1.Namespace, func()) {
namespace, ok := namespaces[t.Name()]
if !ok {
fmt.Fprintf(os.Stderr, "Error: test case %s did not have a namespace set up\n", t.Name())
os.Exit(ExitCodeCantCreateCluster)
}
cleanup := func() {
assert.NoError(t, clusters.CleanupGeneratedResources(ctx, env.Cluster(), t.Name()))
}
return namespace, cleanup
}
// -----------------------------------------------------------------------------
// Testing Utility Functions - Identifying Test Cases
// -----------------------------------------------------------------------------
// identifyTestCasesForDir finds the Go function names for any Go test files in the given directory
func identifyTestCasesForDir(dir string) ([]string, error) {
files, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
var testCasesForDir []string
for _, fileInfo := range files {
if !fileInfo.IsDir() {
if strings.HasSuffix(fileInfo.Name(), "test.go") {
testCasesForFile, err := identifyTestCasesForFile(dir + fileInfo.Name())
if err != nil {
return nil, err
}
testCasesForDir = append(testCasesForDir, testCasesForFile...)
}
}
}
return testCasesForDir, nil
}
// testCaseRegexp is a regex to identify Go test cases in test files
var testCaseRegexp = regexp.MustCompile(`func (Test.*)\(`)
// identifyTestCasesForFile searches the given file for any Golang test cases
func identifyTestCasesForFile(filePath string) ([]string, error) {
if !strings.HasSuffix(filePath, "test.go") {
return nil, fmt.Errorf("%s does not look like a Golang test file", filePath)
}
b, err := os.ReadFile(filePath)
if err != nil {
return nil, err
}
matches := testCaseRegexp.FindAllSubmatch(b, -1)
if len(matches) < 1 {
return nil, nil
}
var testCasesForFile []string
for _, submatches := range matches {
if len(submatches) > 1 {
testCaseName := string(submatches[1])
if testCaseName != "TestMain" { // don't count TestMains
testCasesForFile = append(testCasesForFile, testCaseName)
}
} | }
// -----------------------------------------------------------------------------
// Testing Utility Functions - HTTP Requests
// -----------------------------------------------------------------------------
// expect404WithNoRoute is used to check whether a given http response is (specifically) a Kong 404.
func expect404WithNoRoute(t *testing.T, proxyURL string, resp *http.Response) bool {
if resp.StatusCode == http.StatusNotFound {
// once the route is torn down and returning 404's, ensure that we got the expected response body back from Kong
// Expected: {"message":"no Route matched with those values"}
b := new(bytes.Buffer)
_, err := b.ReadFrom(resp.Body)
require.NoError(t, err)
body := struct {
Message string `json:"message"`
}{}
if err := json.Unmarshal(b.Bytes(), &body); err != nil {
t.Logf("WARNING: error decoding JSON from proxy while waiting for %s: %v", proxyURL, err)
return false
}
return body.Message == "no Route matched with those values"
}
return false
}
// -----------------------------------------------------------------------------
// Test.MAIN Utility Functions
// -----------------------------------------------------------------------------
// exitOnErrWithCode is a helper function meant for us | }
return testCasesForFile, nil | random_line_split |
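identifyTestCasesForDir and identifyTestCasesForFile above simply list a directory for *test.go files and pull every `func TestXxx(` name out with a regex, skipping TestMain. The same scan, sketched in Python for illustration (the directory path in the usage comment is an assumption):

import os
import re

TEST_CASE_RE = re.compile(r'func (Test.*)\(')

def identify_test_cases_for_dir(path):
    """Return all Go test function names found in *test.go files directly under path."""
    cases = []
    for name in os.listdir(path):
        if not name.endswith("test.go"):
            continue
        with open(os.path.join(path, name), encoding="utf-8") as f:
            for match in TEST_CASE_RE.finditer(f.read()):
                if match.group(1) != "TestMain":   # TestMain is not a test case
                    cases.append(match.group(1))
    return cases

# print(identify_test_cases_for_dir("./test/integration/"))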
utils_test.go | manlabs/httpbin
httpBinImage = "kennethreitz/httpbin"
// ingressClass indicates the ingress class name which the tests will use for supported object reconciliation
ingressClass = "kongtests"
// elsewhere is the name of an alternative namespace
elsewhere = "elsewhere"
// controllerNamespace is the Kubernetes namespace where the controller is deployed
controllerNamespace = "kong-system"
// httpc is the default HTTP client to use for tests
httpc = http.Client{Timeout: httpcTimeout}
// watchNamespaces is a list of namespaces the controller watches
// NOTE: more namespaces will be loaded dynamically by the test.Main
// during runtime. In general, avoid adding hardcoded namespaces
// to this list as that's reserved for special cases.
watchNamespaces = elsewhere
// env is the primary testing environment object which includes access to the Kubernetes cluster
// and all the addons deployed in support of the tests.
env environments.Environment
// proxyURL provides access to the proxy endpoint for the Kong Addon which is deployed to the test environment's cluster.
proxyURL *url.URL
// proxyAdminURL provides access to the Admin API endpoint for the Kong Addon which is deployed to the test environment's cluster.
proxyAdminURL *url.URL
// proxyUDPURL provides access to the UDP API endpoint for the Kong Addon which is deployed to the test environment's cluster.
proxyUDPURL *url.URL
// clusterVersion is a convenience var where the found version of the env.Cluster is stored.
clusterVersion semver.Version
)
// -----------------------------------------------------------------------------
// Testing Variables - Environment Overrides
// -----------------------------------------------------------------------------
var (
// dbmode indicates the database backend of the test cluster ("off" and "postgres" are supported)
dbmode = os.Getenv("TEST_DATABASE_MODE")
// clusterVersion indicates the version of Kubernetes to use for the tests (if the cluster was not provided by the caller)
clusterVersionStr = os.Getenv("KONG_CLUSTER_VERSION")
// existingCluster indicates whether or not the caller is providing their own cluster for running the tests.
// These need to come in the format <TYPE>:<NAME> (e.g. "kind:<NAME>", "gke:<NAME>", etc.).
existingCluster = os.Getenv("KONG_TEST_CLUSTER")
// keepTestCluster indicates whether the caller wants the cluster created by the test suite
// to persist after the test for inspection. This has a nil effect when an existing cluster
// is provided, as cleanup is not performed for existing clusters.
keepTestCluster = os.Getenv("KONG_TEST_CLUSTER_PERSIST")
// kongEnterpriseEnabled enables Enterprise-specific tests when set to "true"
kongEnterpriseEnabled = os.Getenv("TEST_KONG_ENTERPRISE")
)
// -----------------------------------------------------------------------------
// Test Suite Exit Codes
// -----------------------------------------------------------------------------
const (
// ExitCodeIncompatibleOptions is a POSIX compliant exit code for the test suite to
// indicate that some combination of provided configurations were not compatible.
ExitCodeIncompatibleOptions = 100
// ExitCodeInvalidOptions is a POSIX compliant exit code for the test suite to indicate
// that some of the provided runtime options were not valid and the tests could not run.
ExitCodeInvalidOptions = 101
// ExitCodeCantUseExistingCluster is a POSIX compliant exit code for the test suite to
// indicate that an existing cluster provided for the tests was not usable.
ExitCodeCantUseExistingCluster = 101
// ExitCodeCantCreateCluster is a POSIX compliant exit code for the test suite to indicate
// that a failure occurred when trying to create a Kubernetes cluster to run the tests.
ExitCodeCantCreateCluster = 102
// ExitCodeCleanupFailed is a POSIX compliant exit code for the test suite to indicate
// that a failure occurred during cluster cleanup.
ExitCodeCleanupFailed = 103
// ExitCodeEnvSetupFailed is a generic exit code that can be used as a fallback for general
// problems setting up the testing environment and/or cluster.
ExitCodeEnvSetupFailed = 104
// kongTestPassword is used as a password only within the context of transient integration test runs
// and is left static to help developers debug failures in those testing environments.
kongTestPassword = "password"
)
// -----------------------------------------------------------------------------
// Testing Utility Functions - Namespaces
// -----------------------------------------------------------------------------
var (
// namespaces is a map of test case names to a namespace that was generated specifically for them to use.
// each test case in the test run gets its own unique namespace.
namespaces = make(map[string]*corev1.Namespace)
)
// namespace provides the namespace provisioned for each test case given their t.Name as the "testCase".
func namespace(t *testing.T) (*corev1.Namespace, func()) {
namespace, ok := namespaces[t.Name()]
if !ok {
fmt.Fprintf(os.Stderr, "Error: test case %s did not have a namespace set up\n", t.Name())
os.Exit(ExitCodeCantCreateCluster)
}
cleanup := func() {
assert.NoError(t, clusters.CleanupGeneratedResources(ctx, env.Cluster(), t.Name()))
}
return namespace, cleanup
}
// -----------------------------------------------------------------------------
// Testing Utility Functions - Identifying Test Cases
// -----------------------------------------------------------------------------
// identifyTestCasesForDir finds the Go function names for any Go test files in the given directory
func identifyTestCasesForDir(dir string) ([]string, error) {
files, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
var testCasesForDir []string
for _, fileInfo := range files {
if !fileInfo.IsDir() {
if strings.HasSuffix(fileInfo.Name(), "test.go") {
testCasesForFile, err := identifyTestCasesForFile(dir + fileInfo.Name())
if err != nil {
return nil, err
}
testCasesForDir = append(testCasesForDir, testCasesForFile...)
}
}
}
return testCasesForDir, nil
}
// testCaseRegexp is a regex to identify Go test cases in test files
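// For example, a hypothetical declaration such as `func TestIngressEssentials(t *testing.T) {`
// would be captured with "TestIngressEssentials" as the submatch.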
var testCaseRegexp = regexp.MustCompile(`func (Test.*)\(`)
// identifyTestCasesForFile searches the given file for any Golang test cases
func identifyTestCasesForFile(filePath string) ([]string, error) {
if !strings.HasSuffix(filePath, "test.go") {
return nil, fmt.Errorf("%s does not look like a Golang test file", filePath)
}
b, err := os.ReadFile(filePath)
if err != nil {
return nil, err
}
matches := testCaseRegexp.FindAllSubmatch(b, -1)
if len(matches) < 1 {
return nil, nil
}
var testCasesForFile []string
for _, submatches := range matches {
if len(submatches) > 1 {
testCaseName := string(submatches[1])
if testCaseName != "TestMain" { // don't count TestMains
testCasesForFile = append(testCasesForFile, testCaseName)
}
}
}
return testCasesForFile, nil
}
// -----------------------------------------------------------------------------
// Testing Utility Functions - HTTP Requests
// -----------------------------------------------------------------------------
// expect404WithNoRoute is used to check whether a given http response is (specifically) a Kong 404.
func expect404WithNoRoute(t *testing.T, proxyURL string, resp *http.Response) bool {
if resp.StatusCode == http.StatusNotFound {
// once the route is torn down and returning 404's, ensure that we got the expected response body back from Kong
// Expected: {"message":"no Route matched with those values"}
b := new(bytes.Buffer)
_, err := b.ReadFrom(resp.Body)
require.NoError(t, err)
body := struct {
Message string `json:"message"`
}{}
if err := json.Unmarshal(b.Bytes(), &body); err != nil {
t.Logf("WARNING: error decoding JSON from proxy while waiting for %s: %v", proxyURL, err)
return false
}
return body.Message == "no Route matched with those values"
}
return false
}
// -----------------------------------------------------------------------------
// Test.MAIN Utility Functions
// -----------------------------------------------------------------------------
// exitOnErrWithCode is a helper function meant for use in TestMain to simplify failing and exiting
// the tests under unrecoverable error conditions. It will also attempt to perform any cluster
// cleaning necessary before exiting.
func exitOnErrWithCode(err error, exitCode int) {
if err == nil {
return
}
fmt.Println("WARNING: failure occurred, performing test cleanup")
if env != nil && existingCluster == "" && keepTestCluster == "" {
ctx, cancel := context.WithTimeout(context.Background(), environmentCleanupTimeout)
defer cancel()
fmt.Printf("INFO: cluster %s is being deleted\n", env.Cluster().Name())
if cleanupErr := env.Cleanup(ctx); cleanupErr != nil {
err = fmt.Errorf("cleanup failed after test failure occurred CLEANUP_FAILURE=(%s): %w", cleanupErr, err)
}
}
fmt.Fprintf(os.Stderr, "Error: tests failed: %s\n", err)
os.Exit(exitCode)
}
// exitOnErr is a wrapper around exitOnErrWithCode that defaults to using the ExitCodeEnvSetupFailed
// exit code. This function is meant for convenience to wrap errors in setup that are hard to predict.
func exitOnErr(err error) | {
if err == nil {
return
}
exitOnErrWithCode(err, ExitCodeEnvSetupFailed)
} | identifier_body |
|
20.rs | : &Tile) -> bool {
assert!(self.map[pos].is_none());
let x = pos % self.dim;
let y = pos / self.dim;
if y > 0 && self.bottom_border(self.coord(x, y - 1)).map(|border| border !=
tile.borders.0).unwrap_or(false) {
return false;
}
if y < self.dim - 1 && self.top_border(self.coord(x, y + 1)).map(|border| border !=
tile.borders.1).unwrap_or(false) {
return false;
}
if x > 0 && self.right_border(self.coord(x - 1, y)).map(|border| border !=
tile.borders.2).unwrap_or(false) {
return false;
}
if x < self.dim - 1 && self.left_border(self.coord(x + 1, y)).map(|border| border !=
tile.borders.3).unwrap_or(false) {
return false;
}
true
}
}
fn flipbits(mut bits: u16) -> u16 {
// careful, just the lowest 10 bits, not 16
// 0123456789
// 9876543210
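    // e.g. an input of 0b0000000011 comes out as 0b1100000000 after the 10-bit reversal below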
let mut out = 0;
for _ in 0..(IMAGEDIM + 2) {
out <<= 1;
out |= bits & 1;
bits >>= 1;
}
out
}
// counting from the right, MSB is top
fn img_column(image: &Image, col: usize) -> u16 {
image.iter().fold(0, |dst, srcrow| (dst << 1) | ((srcrow >> col) & 1))
}
fn rotate_img(image: Image) -> Image {
let mut out = [0; IMAGEDIM];
for y in 0..IMAGEDIM {
out[y] = img_column(&image, y);
}
out
}
fn flip_img(image: Image) -> Image {
let mut out = [0; IMAGEDIM];
for y in 0..IMAGEDIM {
out[IMAGEDIM - 1 - y] = image[y];
}
out
}
fn orient_image(original: Image, ori: Orientation) -> Image {
use Orientation::*;
match ori {
Up(false) => original,
Left(false) => rotate_img(original),
Down(false) => rotate_img(rotate_img(original)),
Right(false) => rotate_img(rotate_img(rotate_img(original))),
Up(true) => rotate_img(rotate_img(flip_img(original))),
Left(true) => rotate_img(rotate_img(rotate_img(flip_img(original)))),
Down(true) => flip_img(original),
Right(true) => rotate_img(flip_img(original)),
}
}
// rotate 90 degrees ccw, keep the bit order. could also store all ccw and do flips in comparisons
fn rotate(tile: Tile) -> Tile {
Tile {
name: tile.name,
// top, bottom, left, right; bits left to right, top to bottom
borders: (tile.borders.3, tile.borders.2, flipbits(tile.borders.0), flipbits(tile.borders.1)),
orientation: rot_orientation(tile.orientation),
}
}
// along x axis: top and bottom swap, left and right are mirrored
fn flipx(tile: Tile) -> Tile {
Tile {
name: tile.name,
borders: (tile.borders.1, tile.borders.0, flipbits(tile.borders.2), flipbits(tile.borders.3)),
orientation: flip_orientation(tile.orientation),
}
}
fn search(current_state: State, remaining_tiles: Vec<Tile>) -> Option<State> {
if false {
println!("---");
for y in 0..current_state.dim {
for x in 0..current_state.dim {
if let Some(tile) = current_state.at(x, y) {
print!("{} ", tile.name);
} else {
print!(".... ");
}
}
println!();
}
}
if remaining_tiles.is_empty() {
// all consumed, this is a valid solution
return Some(current_state);
}
// if remaining tiles, the map also has equivalent number of remaining open slots
let nextpos = current_state.map.iter().position(|x| x.is_none()).unwrap();
let run_search = |tile_ix: usize, tile: Tile| {
if current_state.accepts(nextpos, &tile) {
let mut next_state = current_state.clone();
let mut next_tiles = remaining_tiles.clone();
next_state.map[nextpos] = Some(tile);
next_tiles.remove(tile_ix);
search(next_state, next_tiles)
} else {
None
}
};
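    // every remaining tile is tried in all eight orientations: as-is or flipped, then 0-3 rotations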
for (tile_ix, &tile) in remaining_tiles.iter().enumerate() {
for &t1 in &[tile, flipx(tile)] {
for &t2 in &[t1, rotate(t1), rotate(rotate(t1)), rotate(rotate(rotate(t1)))] {
let s = run_search(tile_ix, t2);
if s.is_some() {
// many solutions could exist due to symmetry, but any of them is acceptable
// because they're equivalent so pick the first when one is found
return s;
}
}
}
}
None
}
type Sea = Vec<u128>;
/* epic sea monster
* 98765432109876543210
* #
* # ## ## ###
* # # # # # #
*/
const MONS0: u128 = 0b00000000000000000010;
const MONS1: u128 = 0b10000110000110000111;
const MONS2: u128 = 0b01001001001001001000;
const MONS_LEN: usize = 20; // bits
fn monster_x_position(a: u128, b: u128, c: u128, x: usize) -> Option<usize> {
for shift in x..=(128 - MONS_LEN) {
let abits = (a >> shift) & MONS0;
let bbits = (b >> shift) & MONS1;
let cbits = (c >> shift) & MONS2;
if abits == MONS0 && bbits == MONS1 && cbits == MONS2 {
return Some(shift);
}
}
None
}
fn sea_monsters(sea: &Sea) -> Vec<(usize, usize)> {
// can the monsters overlap? Not specified, hopefully it doesn't matter
let mut mons = Vec::new();
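    // a monster spans exactly three rows, so scan every band of three consecutive sea rows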
for (y, rows) in sea.windows(3).enumerate() {
let mut x0 = 0;
while let Some(shift) = monster_x_position(rows[0], rows[1], rows[2], x0) {
mons.push((shift, y));
x0 = shift + 1;
}
}
mons
}
fn flip_sea(sea: &Sea) -> Sea {
sea.iter().rev().copied().collect()
}
fn sea_column(sea: &Sea, col: usize) -> u128 {
sea.iter().fold(0, |dst, srcrow| (dst << 1) | ((srcrow >> col) & 1))
}
fn rotate_sea(sea: &Sea) -> Sea {
let mut out = Vec::new();
for y in 0..128 {
out.push(sea_column(sea, y));
}
out
}
fn dump_sea(sea: &Sea) {
for row in sea.iter() {
for c in (0..128).rev() {
print!("{}", if (row & (1 << c)) != 0 { '#' } else { '.' });
}
println!();
}
}
fn water_roughness(sea: &Sea) -> usize {
let mut seas = [
sea.clone(),
rotate_sea(sea),
rotate_sea(&rotate_sea(sea)),
rotate_sea(&rotate_sea(&rotate_sea(sea))),
flip_sea(sea),
rotate_sea(&flip_sea(sea)),
rotate_sea(&rotate_sea(&flip_sea(sea))),
rotate_sea(&rotate_sea(&rotate_sea(&flip_sea(sea)))),
];
let monster_locations: Vec<Vec<_>> = seas.iter().map(sea_monsters).collect();
assert!(monster_locations.iter().filter(|x| !x.is_empty()).count() == 1);
let (sea, monsters): (&mut Sea, &Vec<_>) = seas.iter_mut().zip(monster_locations.iter())
.find(|(_s, m)| !m.is_empty()).unwrap();
let initial_roughness: usize = sea.iter().map(|waves| waves.count_ones() as usize).sum(); | println!("rouff with monsters {}, {} total", initial_roughness, monsters.len()); | random_line_split |
|
20.rs | fn accepts(&self, pos: usize, tile: &Tile) -> bool | true
}
}
fn flipbits(mut bits: u16) -> u16 {
// careful, just the lowest 10 bits, not 16
// 0123456789
// 9876543210
let mut out = 0;
for _ in 0..(IMAGEDIM + 2) {
out <<= 1;
out |= bits & 1;
bits >>= 1;
}
out
}
// counting from the right, MSB is top
fn img_column(image: &Image, col: usize) -> u16 {
image.iter().fold(0, |dst, srcrow| (dst << 1) | ((srcrow >> col) & 1))
}
fn rotate_img(image: Image) -> Image {
let mut out = [0; IMAGEDIM];
for y in 0..IMAGEDIM {
out[y] = img_column(&image, y);
}
out
}
fn flip_img(image: Image) -> Image {
let mut out = [0; IMAGEDIM];
for y in 0..IMAGEDIM {
out[IMAGEDIM - 1 - y] = image[y];
}
out
}
fn orient_image(original: Image, ori: Orientation) -> Image {
use Orientation::*;
match ori {
Up(false) => original,
Left(false) => rotate_img(original),
Down(false) => rotate_img(rotate_img(original)),
Right(false) => rotate_img(rotate_img(rotate_img(original))),
Up(true) => rotate_img(rotate_img(flip_img(original))),
Left(true) => rotate_img(rotate_img(rotate_img(flip_img(original)))),
Down(true) => flip_img(original),
Right(true) => rotate_img(flip_img(original)),
}
}
// rotate 90 degrees ccw, keep the bit order. could also store all ccw and do flips in comparisons
fn rotate(tile: Tile) -> Tile {
Tile {
name: tile.name,
// top, bottom, left, right; bits left to right, top to bottom
borders: (tile.borders.3, tile.borders.2, flipbits(tile.borders.0), flipbits(tile.borders.1)),
orientation: rot_orientation(tile.orientation),
}
}
// along x axis: top and bottom swap, left and right are mirrored
fn flipx(tile: Tile) -> Tile {
Tile {
name: tile.name,
borders: (tile.borders.1, tile.borders.0, flipbits(tile.borders.2), flipbits(tile.borders.3)),
orientation: flip_orientation(tile.orientation),
}
}
fn search(current_state: State, remaining_tiles: Vec<Tile>) -> Option<State> {
if false {
println!("---");
for y in 0..current_state.dim {
for x in 0..current_state.dim {
if let Some(tile) = current_state.at(x, y) {
print!("{} ", tile.name);
} else {
print!(".... ");
}
}
println!();
}
}
if remaining_tiles.is_empty() {
// all consumed, this is a valid solution
return Some(current_state);
}
// if remaining tiles, the map also has equivalent number of remaining open slots
let nextpos = current_state.map.iter().position(|x| x.is_none()).unwrap();
let run_search = |tile_ix: usize, tile: Tile| {
if current_state.accepts(nextpos, &tile) {
let mut next_state = current_state.clone();
let mut next_tiles = remaining_tiles.clone();
next_state.map[nextpos] = Some(tile);
next_tiles.remove(tile_ix);
search(next_state, next_tiles)
} else {
None
}
};
for (tile_ix, &tile) in remaining_tiles.iter().enumerate() {
for &t1 in &[tile, flipx(tile)] {
for &t2 in &[t1, rotate(t1), rotate(rotate(t1)), rotate(rotate(rotate(t1)))] {
let s = run_search(tile_ix, t2);
if s.is_some() {
// many solutions could exist due to symmetry, but any of them is acceptable
// because they're equivalent so pick the first when one is found
return s;
}
}
}
}
None
}
type Sea = Vec<u128>;
/* epic sea monster
* 98765432109876543210
* #
* # ## ## ###
* # # # # # #
*/
const MONS0: u128 = 0b00000000000000000010;
const MONS1: u128 = 0b10000110000110000111;
const MONS2: u128 = 0b01001001001001001000;
const MONS_LEN: usize = 20; // bits
fn monster_x_position(a: u128, b: u128, c: u128, x: usize) -> Option<usize> {
for shift in x..=(128 - MONS_LEN) {
let abits = (a >> shift) & MONS0;
let bbits = (b >> shift) & MONS1;
let cbits = (c >> shift) & MONS2;
if abits == MONS0 && bbits == MONS1 && cbits == MONS2 {
return Some(shift);
}
}
None
}
fn sea_monsters(sea: &Sea) -> Vec<(usize, usize)> {
// can the monsters overlap? Not specified, hopefully it doesn't matter
let mut mons = Vec::new();
for (y, rows) in sea.windows(3).enumerate() {
let mut x0 = 0;
while let Some(shift) = monster_x_position(rows[0], rows[1], rows[2], x0) {
mons.push((shift, y));
x0 = shift + 1;
}
}
mons
}
fn flip_sea(sea: &Sea) -> Sea {
sea.iter().rev().copied().collect()
}
fn sea_column(sea: &Sea, col: usize) -> u128 {
sea.iter().fold(0, |dst, srcrow| (dst << 1) | ((srcrow >> col) & 1))
}
fn rotate_sea(sea: &Sea) -> Sea {
let mut out = Vec::new();
for y in 0..128 {
out.push(sea_column(sea, y));
}
out
}
fn dump_sea(sea: &Sea) {
for row in sea.iter() {
for c in (0..128).rev() {
print!("{}", if (row & (1 << c)) != 0 { '#' } else { '.' });
}
println!();
}
}
fn water_roughness(sea: &Sea) -> usize {
let mut seas = [
sea.clone(),
rotate_sea(sea),
rotate_sea(&rotate_sea(sea)),
rotate_sea(&rotate_sea(&rotate_sea(sea))),
flip_sea(sea),
rotate_sea(&flip_sea(sea)),
rotate_sea(&rotate_sea(&flip_sea(sea))),
rotate_sea(&rotate_sea(&rotate_sea(&flip_sea(sea)))),
];
let monster_locations: Vec<Vec<_>> = seas.iter().map(sea_monsters).collect();
assert!(monster_locations.iter().filter(|x| !x.is_empty()).count() == 1);
let (sea, monsters): (&mut Sea, &Vec<_>) = seas.iter_mut().zip(monster_locations.iter())
.find(|(_s, m)| !m.is_empty()).unwrap();
let initial_roughness: usize = sea.iter().map(|waves| waves.count_ones() as usize).sum();
println!("rouff with monsters | {
assert!(self.map[pos].is_none());
let x = pos % self.dim;
let y = pos / self.dim;
if y > 0 && self.bottom_border(self.coord(x, y - 1)).map(|border| border !=
tile.borders.0).unwrap_or(false) {
return false;
}
if y < self.dim - 1 && self.top_border(self.coord(x, y + 1)).map(|border| border !=
tile.borders.1).unwrap_or(false) {
return false;
}
if x > 0 && self.right_border(self.coord(x - 1, y)).map(|border| border !=
tile.borders.2).unwrap_or(false) {
return false;
}
if x < self.dim - 1 && self.left_border(self.coord(x + 1, y)).map(|border| border !=
tile.borders.3).unwrap_or(false) {
return false;
} | identifier_body |
20.rs | fn | (&self, pos: usize, tile: &Tile) -> bool {
assert!(self.map[pos].is_none());
let x = pos % self.dim;
let y = pos / self.dim;
if y > 0 && self.bottom_border(self.coord(x, y - 1)).map(|border| border !=
tile.borders.0).unwrap_or(false) {
return false;
}
if y < self.dim - 1 && self.top_border(self.coord(x, y + 1)).map(|border| border !=
tile.borders.1).unwrap_or(false) {
return false;
}
if x > 0 && self.right_border(self.coord(x - 1, y)).map(|border| border !=
tile.borders.2).unwrap_or(false) {
return false;
}
if x < self.dim - 1 && self.left_border(self.coord(x + 1, y)).map(|border| border !=
tile.borders.3).unwrap_or(false) {
return false;
}
true
}
}
fn flipbits(mut bits: u16) -> u16 {
// careful, just the lowest 10 bits, not 16
// 0123456789
// 9876543210
let mut out = 0;
for _ in 0..(IMAGEDIM + 2) {
out <<= 1;
out |= bits & 1;
bits >>= 1;
}
out
}
// counting from the right, MSB is top
fn img_column(image: &Image, col: usize) -> u16 {
image.iter().fold(0, |dst, srcrow| (dst << 1) | ((srcrow >> col) & 1))
}
fn rotate_img(image: Image) -> Image {
let mut out = [0; IMAGEDIM];
for y in 0..IMAGEDIM {
out[y] = img_column(&image, y);
}
out
}
fn flip_img(image: Image) -> Image {
let mut out = [0; IMAGEDIM];
for y in 0..IMAGEDIM {
out[IMAGEDIM - 1 - y] = image[y];
}
out
}
fn orient_image(original: Image, ori: Orientation) -> Image {
use Orientation::*;
match ori {
Up(false) => original,
Left(false) => rotate_img(original),
Down(false) => rotate_img(rotate_img(original)),
Right(false) => rotate_img(rotate_img(rotate_img(original))),
Up(true) => rotate_img(rotate_img(flip_img(original))),
Left(true) => rotate_img(rotate_img(rotate_img(flip_img(original)))),
Down(true) => flip_img(original),
Right(true) => rotate_img(flip_img(original)),
}
}
// rotate 90 degrees ccw, keep the bit order. could also store all ccw and do flips in comparisons
fn rotate(tile: Tile) -> Tile {
Tile {
name: tile.name,
// top, bottom, left, right; bits left to right, top to bottom
borders: (tile.borders.3, tile.borders.2, flipbits(tile.borders.0), flipbits(tile.borders.1)),
orientation: rot_orientation(tile.orientation),
}
}
// along x axis: top and bottom swap, left and right are mirrored
fn flipx(tile: Tile) -> Tile {
Tile {
name: tile.name,
borders: (tile.borders.1, tile.borders.0, flipbits(tile.borders.2), flipbits(tile.borders.3)),
orientation: flip_orientation(tile.orientation),
}
}
fn search(current_state: State, remaining_tiles: Vec<Tile>) -> Option<State> {
if false {
println!("---");
for y in 0..current_state.dim {
for x in 0..current_state.dim {
if let Some(tile) = current_state.at(x, y) {
print!("{} ", tile.name);
} else {
print!(".... ");
}
}
println!();
}
}
if remaining_tiles.is_empty() {
// all consumed, this is a valid solution
return Some(current_state);
}
// if remaining tiles, the map also has equivalent number of remaining open slots
let nextpos = current_state.map.iter().position(|x| x.is_none()).unwrap();
let run_search = |tile_ix: usize, tile: Tile| {
if current_state.accepts(nextpos, &tile) {
let mut next_state = current_state.clone();
let mut next_tiles = remaining_tiles.clone();
next_state.map[nextpos] = Some(tile);
next_tiles.remove(tile_ix);
search(next_state, next_tiles)
} else {
None
}
};
for (tile_ix, &tile) in remaining_tiles.iter().enumerate() {
for &t1 in &[tile, flipx(tile)] {
for &t2 in &[t1, rotate(t1), rotate(rotate(t1)), rotate(rotate(rotate(t1)))] {
let s = run_search(tile_ix, t2);
if s.is_some() {
// many solutions could exist due to symmetry, but any of them is acceptable
// because they're equivalent so pick the first when one is found
return s;
}
}
}
}
None
}
type Sea = Vec<u128>;
/* epic sea monster
* 98765432109876543210
* #
* # ## ## ###
* # # # # # #
*/
const MONS0: u128 = 0b00000000000000000010;
const MONS1: u128 = 0b10000110000110000111;
const MONS2: u128 = 0b01001001001001001000;
const MONS_LEN: usize = 20; // bits
fn monster_x_position(a: u128, b: u128, c: u128, x: usize) -> Option<usize> {
for shift in x..=(128 - MONS_LEN) {
let abits = (a >> shift) & MONS0;
let bbits = (b >> shift) & MONS1;
let cbits = (c >> shift) & MONS2;
if abits == MONS0 && bbits == MONS1 && cbits == MONS2 {
return Some(shift);
}
}
None
}
fn sea_monsters(sea: &Sea) -> Vec<(usize, usize)> {
// can the monsters overlap? Not specified, hopefully it doesn't matter
let mut mons = Vec::new();
for (y, rows) in sea.windows(3).enumerate() {
let mut x0 = 0;
while let Some(shift) = monster_x_position(rows[0], rows[1], rows[2], x0) {
mons.push((shift, y));
x0 = shift + 1;
}
}
mons
}
fn flip_sea(sea: &Sea) -> Sea {
sea.iter().rev().copied().collect()
}
fn sea_column(sea: &Sea, col: usize) -> u128 {
sea.iter().fold(0, |dst, srcrow| (dst << 1) | ((srcrow >> col) & 1))
}
fn rotate_sea(sea: &Sea) -> Sea {
let mut out = Vec::new();
for y in 0..128 {
out.push(sea_column(sea, y));
}
out
}
fn dump_sea(sea: &Sea) {
for row in sea.iter() {
for c in (0..128).rev() {
print!("{}", if (row & (1 << c)) != 0 { '#' } else { '.' });
}
println!();
}
}
fn water_roughness(sea: &Sea) -> usize {
let mut seas = [
sea.clone(),
rotate_sea(sea),
rotate_sea(&rotate_sea(sea)),
rotate_sea(&rotate_sea(&rotate_sea(sea))),
flip_sea(sea),
rotate_sea(&flip_sea(sea)),
rotate_sea(&rotate_sea(&flip_sea(sea))),
rotate_sea(&rotate_sea(&rotate_sea(&flip_sea(sea)))),
];
let monster_locations: Vec<Vec<_>> = seas.iter().map(sea_monsters).collect();
assert!(monster_locations.iter().filter(|x| !x.is_empty()).count() == 1);
let (sea, monsters): (&mut Sea, &Vec<_>) = seas.iter_mut().zip(monster_locations.iter())
.find(|(_s, m)| !m.is_empty()).unwrap();
let initial_roughness: usize = sea.iter().map(|waves| waves.count_ones() as usize).sum();
println!("rouff with monsters | accepts | identifier_name |
snapshot_cmd.rs | //! Snapshot and restoration commands.
//use std::time::Duration;
//use std::path::{Path, PathBuf};
//use std::sync::Arc;
//use client_traits::SnapshotClient;
//use hash::keccak;
//use snapshot::{SnapshotConfiguration, SnapshotService as SS};
//use snapshot::io::{SnapshotReader, PackedReader, PackedWriter};
//use snapshot::service::Service as SnapshotService;
//use ethcore::client::{Client, DatabaseCompactionProfile, VMType};
//use ethcore::miner::Miner;
//use ethcore_service::ClientService;
use types::{
ids::BlockId,
// snapshot::Progress,
// client_types::Mode,
// snapshot::RestorationStatus,
};
use crate::cache::CacheConfig;
use crate::params::{SpecType, Pruning, Switch/*, tracing_switch_to_bool, fatdb_switch_to_bool*/};
//use helpers::{to_client_config, execute_upgrades};
use dir::Directories;
//use user_defaults::UserDefaults;
//use ethcore_private_tx;
//use db;
/// Kinds of snapshot commands.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Kind {
// Take a snapshot.
// Take,
// Restore a snapshot.
// Restore
}
/// Command for snapshot creation or restoration.
#[derive(Debug, PartialEq)]
pub struct SnapshotCommand {
pub cache_config: CacheConfig,
pub dirs: Directories,
pub spec: SpecType,
pub pruning: Pruning,
pub pruning_history: u64,
pub pruning_memory: usize,
pub tracing: Switch,
pub fat_db: Switch,
// pub compaction: DatabaseCompactionProfile,
pub file_path: Option<String>,
pub kind: Kind,
pub block_at: BlockId,
pub max_round_blocks_to_import: usize,
// pub snapshot_conf: SnapshotConfiguration,
}
// helper for reading chunks from arbitrary reader and feeding them into the
// service.
//fn restore_using<R: SnapshotReader>(snapshot: Arc<SnapshotService<Client>>, reader: &R, recover: bool) -> Result<(), String> {
// let manifest = reader.manifest();
//
// info!("Restoring to block #{} (0x{:?})", manifest.block_number, manifest.block_hash);
//
// snapshot.init_restore(manifest.clone(), recover).map_err(|e| {
// format!("Failed to begin restoration: {}", e)
// })?;
//
// let (num_state, num_blocks) = (manifest.state_hashes.len(), manifest.block_hashes.len());
//
// let informant_handle = snapshot.clone();
// ::std::thread::spawn(move || {
// while let RestorationStatus::Ongoing { state_chunks_done, block_chunks_done, .. } = informant_handle.status() {
// info!("Processed {}/{} state chunks and {}/{} block chunks.",
// state_chunks_done, num_state, block_chunks_done, num_blocks);
// ::std::thread::sleep(Duration::from_secs(5));
// }
// });
//
// info!("Restoring state");
// for &state_hash in &manifest.state_hashes {
// if snapshot.status() == RestorationStatus::Failed {
// return Err("Restoration failed".into());
// }
//
// let chunk = reader.chunk(state_hash)
// .map_err(|e| format!("Encountered error while reading chunk {:?}: {}", state_hash, e))?;
//
// let hash = keccak(&chunk);
// if hash != state_hash {
// return Err(format!("Mismatched chunk hash. Expected {:?}, got {:?}", state_hash, hash));
// }
//
// snapshot.feed_state_chunk(state_hash, &chunk);
// }
//
// info!("Restoring blocks");
// for &block_hash in &manifest.block_hashes {
// if snapshot.status() == RestorationStatus::Failed {
// return Err("Restoration failed".into());
// }
//
// let chunk = reader.chunk(block_hash)
// .map_err(|e| format!("Encountered error while reading chunk {:?}: {}", block_hash, e))?;
//
// let hash = keccak(&chunk);
// if hash != block_hash {
// return Err(format!("Mismatched chunk hash. Expected {:?}, got {:?}", block_hash, hash));
// }
// snapshot.feed_block_chunk(block_hash, &chunk);
// }
//
// match snapshot.status() {
// RestorationStatus::Ongoing { .. } => Err("Snapshot file is incomplete and missing chunks.".into()),
// RestorationStatus::Initializing { .. } => Err("Snapshot restoration is still initializing.".into()),
// RestorationStatus::Finalizing => Err("Snapshot restoration is still finalizing.".into()),
// RestorationStatus::Failed => Err("Snapshot restoration failed.".into()),
// RestorationStatus::Inactive => {
// info!("Restoration complete.");
// Ok(())
// }
// }
//}
impl SnapshotCommand {
// shared portion of snapshot commands: start the client service
// fn start_service(self) -> Result<ClientService, String> {
// // load spec file
// let spec = self.spec.spec(&self.dirs.cache)?;
//
// // load genesis hash
// let genesis_hash = spec.genesis_header().hash();
//
// // database paths
// let db_dirs = self.dirs.database(genesis_hash, None, spec.data_dir.clone());
//
// // user defaults path
// let user_defaults_path = db_dirs.user_defaults_path();
//
// // load user defaults
// let user_defaults = UserDefaults::load(&user_defaults_path)?;
//
// // select pruning algorithm
// let algorithm = self.pruning.to_algorithm(&user_defaults);
//
// // check if tracing is on
// let tracing = tracing_switch_to_bool(self.tracing, &user_defaults)?;
//
// // check if fatdb is on
// let fat_db = fatdb_switch_to_bool(self.fat_db, &user_defaults, algorithm)?;
//
// // prepare client and snapshot paths.
// let client_path = db_dirs.client_path(algorithm);
// let snapshot_path = db_dirs.snapshot_path();
//
// // execute upgrades
// execute_upgrades(&self.dirs.base, &db_dirs, algorithm, &self.compaction)?;
//
// // prepare client config
// let mut client_config = to_client_config(
// &self.cache_config,
// spec.name.to_lowercase(),
// Mode::Active,
// tracing,
// fat_db,
// self.compaction,
// VMType::default(),
// "".into(),
// algorithm,
// self.pruning_history,
// self.pruning_memory, | // true,
// self.max_round_blocks_to_import,
// );
//
// client_config.snapshot = self.snapshot_conf;
//
// let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config);
// let client_db = restoration_db_handler.open(&client_path)
// .map_err(|e| format!("Failed to open database {:?}", e))?;
//
// let service = ClientService::start(
// client_config,
// &spec,
// client_db,
// &snapshot_path,
// restoration_db_handler,
// &self.dirs.ipc_path(),
// // TODO [ToDr] don't use test miner here
// // (actually don't require miner at all)
// Arc::new(Miner::new_for_tests(&spec, None)),
// Arc::new(ethcore_private_tx::DummySigner),
// Box::new(ethcore_private_tx::NoopEncryptor),
// Default::default(),
// Default::default(),
// ).map_err(|e| format!("Client service error: {:?}", e))?;
//
// Ok(service)
// }
// restore from a snapshot
// pub fn restore(self) -> Result<(), String> {
// let file = self.file_path.clone();
// let service = self.start_service()?;
//
// warn!("Snapshot restoration is experimental and the format may be subject to change.");
// warn!("On encountering an unexpected error, please ensure that you have a recent snapshot.");
//
// let snapshot = service.snapshot_service();
//
// if let Some(file) = file {
// info!("Attempting to restore from snapshot at '{}'", file);
//
// let reader = PackedReader::new(Path::new(&file))
// .map_err(|e| format!("Couldn't open snapshot file: {}", e))
// .and_then(|x| x.ok_or("Snapshot file has invalid format.".into()));
//
// let reader = reader?;
// restore_using(snapshot, &reader, true)?;
// } else {
// info!("Attempting to restore from local snapshot.");
//
// // attempting restoration with recovery will lead to deadlock
// // as we currently hold a read lock on the service's reader.
// match *snapshot.reader() {
// Some(ref reader) => restore_using(snapshot.clone(), reader, false)?,
// None => return Err("No local snapshot found.".into()),
// }
// }
//
// Ok(())
// }
// Take a snapshot from the head of the chain.
// pub fn take_snapshot(self) -> Result<(), String> {
// let file_path = self.file_path.clone().ok_or("No file path provided.".to_owned())?;
// let file_path: PathBuf = file_path.into();
// let block_at = | random_line_split |
|
snapshot_cmd.rs | //! Snapshot and restoration commands.
//use std::time::Duration;
//use std::path::{Path, PathBuf};
//use std::sync::Arc;
//use client_traits::SnapshotClient;
//use hash::keccak;
//use snapshot::{SnapshotConfiguration, SnapshotService as SS};
//use snapshot::io::{SnapshotReader, PackedReader, PackedWriter};
//use snapshot::service::Service as SnapshotService;
//use ethcore::client::{Client, DatabaseCompactionProfile, VMType};
//use ethcore::miner::Miner;
//use ethcore_service::ClientService;
use types::{
ids::BlockId,
// snapshot::Progress,
// client_types::Mode,
// snapshot::RestorationStatus,
};
use crate::cache::CacheConfig;
use crate::params::{SpecType, Pruning, Switch/*, tracing_switch_to_bool, fatdb_switch_to_bool*/};
//use helpers::{to_client_config, execute_upgrades};
use dir::Directories;
//use user_defaults::UserDefaults;
//use ethcore_private_tx;
//use db;
/// Kinds of snapshot commands.
#[derive(Debug, PartialEq, Clone, Copy)]
pub enum Kind {
// Take a snapshot.
// Take,
// Restore a snapshot.
// Restore
}
/// Command for snapshot creation or restoration.
#[derive(Debug, PartialEq)]
pub struct | {
pub cache_config: CacheConfig,
pub dirs: Directories,
pub spec: SpecType,
pub pruning: Pruning,
pub pruning_history: u64,
pub pruning_memory: usize,
pub tracing: Switch,
pub fat_db: Switch,
// pub compaction: DatabaseCompactionProfile,
pub file_path: Option<String>,
pub kind: Kind,
pub block_at: BlockId,
pub max_round_blocks_to_import: usize,
// pub snapshot_conf: SnapshotConfiguration,
}
// helper for reading chunks from arbitrary reader and feeding them into the
// service.
//fn restore_using<R: SnapshotReader>(snapshot: Arc<SnapshotService<Client>>, reader: &R, recover: bool) -> Result<(), String> {
// let manifest = reader.manifest();
//
// info!("Restoring to block #{} (0x{:?})", manifest.block_number, manifest.block_hash);
//
// snapshot.init_restore(manifest.clone(), recover).map_err(|e| {
// format!("Failed to begin restoration: {}", e)
// })?;
//
// let (num_state, num_blocks) = (manifest.state_hashes.len(), manifest.block_hashes.len());
//
// let informant_handle = snapshot.clone();
// ::std::thread::spawn(move || {
// while let RestorationStatus::Ongoing { state_chunks_done, block_chunks_done, .. } = informant_handle.status() {
// info!("Processed {}/{} state chunks and {}/{} block chunks.",
// state_chunks_done, num_state, block_chunks_done, num_blocks);
// ::std::thread::sleep(Duration::from_secs(5));
// }
// });
//
// info!("Restoring state");
// for &state_hash in &manifest.state_hashes {
// if snapshot.status() == RestorationStatus::Failed {
// return Err("Restoration failed".into());
// }
//
// let chunk = reader.chunk(state_hash)
// .map_err(|e| format!("Encountered error while reading chunk {:?}: {}", state_hash, e))?;
//
// let hash = keccak(&chunk);
// if hash != state_hash {
// return Err(format!("Mismatched chunk hash. Expected {:?}, got {:?}", state_hash, hash));
// }
//
// snapshot.feed_state_chunk(state_hash, &chunk);
// }
//
// info!("Restoring blocks");
// for &block_hash in &manifest.block_hashes {
// if snapshot.status() == RestorationStatus::Failed {
// return Err("Restoration failed".into());
// }
//
// let chunk = reader.chunk(block_hash)
// .map_err(|e| format!("Encountered error while reading chunk {:?}: {}", block_hash, e))?;
//
// let hash = keccak(&chunk);
// if hash != block_hash {
// return Err(format!("Mismatched chunk hash. Expected {:?}, got {:?}", block_hash, hash));
// }
// snapshot.feed_block_chunk(block_hash, &chunk);
// }
//
// match snapshot.status() {
// RestorationStatus::Ongoing { .. } => Err("Snapshot file is incomplete and missing chunks.".into()),
// RestorationStatus::Initializing { .. } => Err("Snapshot restoration is still initializing.".into()),
// RestorationStatus::Finalizing => Err("Snapshot restoration is still finalizing.".into()),
// RestorationStatus::Failed => Err("Snapshot restoration failed.".into()),
// RestorationStatus::Inactive => {
// info!("Restoration complete.");
// Ok(())
// }
// }
//}
impl SnapshotCommand {
// shared portion of snapshot commands: start the client service
// fn start_service(self) -> Result<ClientService, String> {
// // load spec file
// let spec = self.spec.spec(&self.dirs.cache)?;
//
// // load genesis hash
// let genesis_hash = spec.genesis_header().hash();
//
// // database paths
// let db_dirs = self.dirs.database(genesis_hash, None, spec.data_dir.clone());
//
// // user defaults path
// let user_defaults_path = db_dirs.user_defaults_path();
//
// // load user defaults
// let user_defaults = UserDefaults::load(&user_defaults_path)?;
//
// // select pruning algorithm
// let algorithm = self.pruning.to_algorithm(&user_defaults);
//
// // check if tracing is on
// let tracing = tracing_switch_to_bool(self.tracing, &user_defaults)?;
//
// // check if fatdb is on
// let fat_db = fatdb_switch_to_bool(self.fat_db, &user_defaults, algorithm)?;
//
// // prepare client and snapshot paths.
// let client_path = db_dirs.client_path(algorithm);
// let snapshot_path = db_dirs.snapshot_path();
//
// // execute upgrades
// execute_upgrades(&self.dirs.base, &db_dirs, algorithm, &self.compaction)?;
//
// // prepare client config
// let mut client_config = to_client_config(
// &self.cache_config,
// spec.name.to_lowercase(),
// Mode::Active,
// tracing,
// fat_db,
// self.compaction,
// VMType::default(),
// "".into(),
// algorithm,
// self.pruning_history,
// self.pruning_memory,
// true,
// self.max_round_blocks_to_import,
// );
//
// client_config.snapshot = self.snapshot_conf;
//
// let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config);
// let client_db = restoration_db_handler.open(&client_path)
// .map_err(|e| format!("Failed to open database {:?}", e))?;
//
// let service = ClientService::start(
// client_config,
// &spec,
// client_db,
// &snapshot_path,
// restoration_db_handler,
// &self.dirs.ipc_path(),
// // TODO [ToDr] don't use test miner here
// // (actually don't require miner at all)
// Arc::new(Miner::new_for_tests(&spec, None)),
// Arc::new(ethcore_private_tx::DummySigner),
// Box::new(ethcore_private_tx::NoopEncryptor),
// Default::default(),
// Default::default(),
// ).map_err(|e| format!("Client service error: {:?}", e))?;
//
// Ok(service)
// }
// restore from a snapshot
// pub fn restore(self) -> Result<(), String> {
// let file = self.file_path.clone();
// let service = self.start_service()?;
//
// warn!("Snapshot restoration is experimental and the format may be subject to change.");
// warn!("On encountering an unexpected error, please ensure that you have a recent snapshot.");
//
// let snapshot = service.snapshot_service();
//
// if let Some(file) = file {
// info!("Attempting to restore from snapshot at '{}'", file);
//
// let reader = PackedReader::new(Path::new(&file))
// .map_err(|e| format!("Couldn't open snapshot file: {}", e))
// .and_then(|x| x.ok_or("Snapshot file has invalid format.".into()));
//
// let reader = reader?;
// restore_using(snapshot, &reader, true)?;
// } else {
// info!("Attempting to restore from local snapshot.");
//
// // attempting restoration with recovery will lead to deadlock
// // as we currently hold a read lock on the service's reader.
// match *snapshot.reader() {
// Some(ref reader) => restore_using(snapshot.clone(), reader, false)?,
// None => return Err("No local snapshot found.".into()),
// }
// }
//
// Ok(())
// }
// Take a snapshot from the head of the chain.
// pub fn take_snapshot(self) -> Result<(), String> {
// let file_path = self.file_path.clone().ok_or("No file path provided.".to_owned())?;
// let file_path: PathBuf = file_path.into();
// let block_at | SnapshotCommand | identifier_name |
main.py |
GAMMA = 0.1 # Multiplicative factor for learning rate step-down
LOG_FREQUENCY = 5
# ----------------------------------
# Hyperparameters for grid search
BATCH_SIZE = 256 # Higher batch sizes allow for larger learning rates. An empirical heuristic suggests that, when changing
# the batch size, learning rate should change by the same factor to have comparable results
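# e.g. (hypothetical numbers) dropping BATCH_SIZE from 256 to 128 would suggest dropping LR from 1e-2 to 5e-3 under this linear-scaling heuristic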
LR = 1e-2 # The initial Learning Rate
NUM_EPOCHS = 30 # Total number of training epochs (iterations over dataset)
STEP_SIZE = 20 # How many epochs before decreasing learning rate (if using a step-down policy)
MODE = '4C' # '3A', '3B', '4A', '4C'
ALPHA = 0.25 # alpha
ALPHA_EXP = False
EVAL_ACCURACY_ON_TRAINING = False
SHOW_IMG = True # if 'True' show images and graphs on output
SHOW_RESULTS = True # if 'True' show images and graphs on output
# Define Data Preprocessing
# means and standard deviations ImageNet because the network is pretrained
means, stds = (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)
# Define transforms to apply to each image
transf = transforms.Compose([ #transforms.Resize(227), # Resizes short size of the PIL image to 256
transforms.CenterCrop(224), # Crops a central square patch of the image 224 because torchvision's AlexNet needs a 224x224 input!
transforms.ToTensor(), # Turn PIL Image to torch.Tensor
transforms.Normalize(means,stds) # Normalizes tensor with mean and standard deviation
])
# Prepare Dataset
# Clone github repository with data
if not os.path.isdir('./Homework3-PACS'):
!git clone https://github.com/MachineLearning2020/Homework3-PACS
# Define datasets root
DIR_PHOTO = 'Homework3-PACS/PACS/photo'
DIR_ART = 'Homework3-PACS/PACS/art_painting'
DIR_CARTOON = 'Homework3-PACS/PACS/cartoon'
DIR_SKETCH = 'Homework3-PACS/PACS/sketch'
# Prepare Pytorch train/test Datasets
photo_dataset = torchvision.datasets.ImageFolder(DIR_PHOTO, transform=transf)
art_dataset = torchvision.datasets.ImageFolder(DIR_ART, transform=transf)
cartoon_dataset = torchvision.datasets.ImageFolder(DIR_CARTOON, transform=transf)
sketch_dataset = torchvision.datasets.ImageFolder(DIR_SKETCH, transform=transf)
# Check dataset sizes
print(f"Photo Dataset: {len(photo_dataset)}")
print(f"Art Dataset: {len(art_dataset)}")
print(f"Cartoon Dataset: {len(cartoon_dataset)}")
print(f"Sketch Dataset: {len(sketch_dataset)}")
# Data exploration
photo_dataset.imgs # same of print(photo_dataset.samples)
# [('Homework3-PACS/PACS/photo/dog/056_0001.jpg', 0),
# ('Homework3-PACS/PACS/photo/dog/056_0002.jpg', 0) ... ]
photo_dataset.classes
# 'dog', 'elephant', 'giraffe', 'guitar', 'horse', 'house', 'person'
photo_dataset.class_to_idx
# {'dog': 0,
# 'elephant': 1,
# 'giraffe': 2,
# 'guitar': 3,
# 'horse': 4,
# 'house': 5,
# 'person': 6}
# dimension of an image after the transforms: 3x224x224
# torch.Size([3, 224, 224])
# plot images distribution
plotImageDistribution(photo_dataset.targets, art_dataset.targets, cartoon_dataset.targets, sketch_dataset.targets, DATASETS_NAMES, CLASSES_NAMES, show=SHOW_IMG)
# Prepare Dataloaders
# Dataloaders iterate over pytorch datasets and transparently provide useful functions (e.g. parallelization and shuffling)
photo_dataloader = DataLoader(photo_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4, drop_last=True)
art_dataloader = DataLoader(art_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4, drop_last=False)
cartoon_dataloader = DataLoader(cartoon_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4, drop_last=False)
sketch_dataloader = DataLoader(sketch_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4, drop_last=False)
# check dimensions of images
# cnt = 0
# for img, _ in dataloader :
# print(img.shape)
# cnt+=1
# print(cnt)
### Prepare Network for training
cudnn.benchmark # Calling this optimizes runtime
if MODE == None :
raise RuntimeError("Select a MODE")
elif MODE == '3A':
# 3A) SENZA DANN
USE_DOMAIN_ADAPTATION = False
CROSS_DOMAIN_VALIDATION = False
USE_VALIDATION = False
ALPHA = None
transfer_set = None
elif MODE == '3B' :
# 3B) Train DANN on Photo and test on Art painting with DANN adaptation
USE_DOMAIN_ADAPTATION = True
transfer_set = "art painting"
elif MODE == '4A':
# 4A) Run a grid search on Photo to Cartoon and Photo to Sketch, without Domain Adaptation, and average results for each set of hyperparameters
transfer_set = 'sketch' # Photo to 'cartoon' or 'sketch'
USE_VALIDATION = True # validation on transfer_set
USE_DOMAIN_ADAPTATION = False
CROSS_DOMAIN_VALIDATION = False
ALPHA = None
# 4B) when testing
elif MODE == '4C':
# 4C) Run a grid search on Photo to Cartoon and Photo to Sketch, with Domain Adaptation, and average results for each set of hyperparameters
USE_VALIDATION = True # validation on transfer_set
USE_DOMAIN_ADAPTATION = True
CROSS_DOMAIN_VALIDATION = True
# edit the following hyperparams:
transfer_set = 'sketch' # Photo to 'cartoon' or 'sketch'
EVAL_ACCURACY_ON_TRAINING = False
SHOW_RESULTS = True
source_dataloader = photo_dataloader
test_dataloader = art_dataloader
# Loading model
net = dann_net(pretrained=True).to(DEVICE)
#print(net) #check size output layer OK
# Define loss function: CrossEntrpy for classification
criterion = nn.CrossEntropyLoss()
# Choose parameters to optimize
parameters_to_optimize = net.parameters() # In this case we optimize over all the parameters of AlexNet
# Define optimizer: updates the weights based on loss (SDG with momentum)
optimizer = optim.SGD(parameters_to_optimize, lr=LR, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
# Define scheduler -> step-down policy which multiplies learning rate by gamma every STEP_SIZE epochs
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=STEP_SIZE, gamma=GAMMA)
if USE_DOMAIN_ADAPTATION and ALPHA == None :
raise RuntimeError("To use domain adaptation you must define parameter ALPHA")
if transfer_set == 'cartoon':
target_dataloader = cartoon_dataloader
elif transfer_set == 'sketch':
|
else :
target_dataloader = test_dataloader # art_dataloader
### TRAIN
current_step = 0
accuracies_train = []
accuracies_validation = []
loss_class_list = []
loss_target_list = []
loss_source_list = []
# Start iterating over the epochs
for epoch in range(NUM_EPOCHS):
net.train(True)
print(f"--- Epoch {epoch+1}/{NUM_EPOCHS}, LR = {scheduler.get_last_lr()}")
# Iterate over the dataset
for source_images, source_labels in source_dataloader:
source_images = source_images.to(DEVICE)
source_labels = source_labels.to(DEVICE)
optimizer.zero_grad() # Zero-ing the gradients
# STEP 1: train the classifier
outputs = net(source_images)
loss_class = criterion(outputs, source_labels)
loss_class_list.append(loss_class.item())
# if current_step % LOG_FREQUENCY == 0:
# print('Step {}, Loss Classifier {}'.format(current_step+1, loss_class.item()))
loss_class.backward() # backward pass: computes gradients
# Domain Adaptation (Cross Domain Validation)
if USE_DOMAIN_ADAPTATION :
# Load target batch
target_images, target_labels = next(iter(target_dataloader))
target_images = target_images.to(DEVICE)
# if ALPHA_EXP :
# # ALPHA exponential decaying as described in the paper
# p = float(i + epoch * len_dataloader) / NUM_EPOCHS / len_dataloader
# ALPHA = 2. / (1. + np.exp(-10 * p)) - 1
# STEP 2: train the discriminator: forward SOURCE data to Gd
outputs = net.forward(source_images, alpha=ALPHA)
# source's label is 0 for all data
labels_discr_source = torch.zeros | target_dataloader = sketch_dataloader | conditional_block |
main.py | 1,
# 'giraffe': 2,
# 'guitar': 3,
# 'horse': 4,
# 'house': 5,
# 'person': 6}
# dimension of an image after the transforms: 3x224x224
# torch.Size([3, 224, 224])
# plot images distribution
plotImageDistribution(photo_dataset.targets, art_dataset.targets, cartoon_dataset.targets, sketch_dataset.targets, DATASETS_NAMES, CLASSES_NAMES, show=SHOW_IMG)
# Prepare Dataloaders
# Dataloaders iterate over pytorch datasets and transparently provide useful functions (e.g. parallelization and shuffling)
photo_dataloader = DataLoader(photo_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4, drop_last=True)
art_dataloader = DataLoader(art_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4, drop_last=False)
cartoon_dataloader = DataLoader(cartoon_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4, drop_last=False)
sketch_dataloader = DataLoader(sketch_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4, drop_last=False)
# check dimensions of images
# cnt = 0
# for img, _ in dataloader :
# print(img.shape)
# cnt+=1
# print(cnt)
### Prepare Network for training
cudnn.benchmark # Calling this optimizes runtime
if MODE == None :
raise RuntimeError("Select a MODE")
elif MODE == '3A':
# 3A) SENZA DANN
USE_DOMAIN_ADAPTATION = False
CROSS_DOMAIN_VALIDATION = False
USE_VALIDATION = False
ALPHA = None
transfer_set = None
elif MODE == '3B' :
# 3B) Train DANN on Photo and test on Art painting with DANN adaptation
USE_DOMAIN_ADAPTATION = True
transfer_set = "art painting"
elif MODE == '4A':
# 4A) Run a grid search on Photo to Cartoon and Photo to Sketch, without Domain Adaptation, and average results for each set of hyperparameters
transfer_set = 'sketch' # Photo to 'cartoon' or 'sketch'
USE_VALIDATION = True # validation on transfer_set
USE_DOMAIN_ADAPTATION = False
CROSS_DOMAIN_VALIDATION = False
ALPHA = None
# 4B) when testing
elif MODE == '4C':
# 4C) Run a grid search on Photo to Cartoon and Photo to Sketch, with Domain Adaptation, and average results for each set of hyperparameters
USE_VALIDATION = True # validation on transfer_set
USE_DOMAIN_ADAPTATION = True
CROSS_DOMAIN_VALIDATION = True
# edit the following hyperparams:
transfer_set = 'sketch' # Photo to 'cartoon' or 'sketch'
EVAL_ACCURACY_ON_TRAINING = False
SHOW_RESULTS = True
source_dataloader = photo_dataloader
test_dataloader = art_dataloader
# Loading model
net = dann_net(pretrained=True).to(DEVICE)
#print(net) #check size output layer OK
# Define loss function: CrossEntrpy for classification
criterion = nn.CrossEntropyLoss()
# Choose parameters to optimize
parameters_to_optimize = net.parameters() # In this case we optimize over all the parameters of AlexNet
# Define optimizer: updates the weights based on loss (SDG with momentum)
optimizer = optim.SGD(parameters_to_optimize, lr=LR, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
# Define scheduler -> step-down policy which multiplies learning rate by gamma every STEP_SIZE epochs
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=STEP_SIZE, gamma=GAMMA)
if USE_DOMAIN_ADAPTATION and ALPHA == None :
raise RuntimeError("To use domain adaptation you must define parameter ALPHA")
if transfer_set == 'cartoon':
target_dataloader = cartoon_dataloader
elif transfer_set == 'sketch':
target_dataloader = sketch_dataloader
else :
target_dataloader = test_dataloader # art_dataloader
### TRAIN
current_step = 0
accuracies_train = []
accuracies_validation = []
loss_class_list = []
loss_target_list = []
loss_source_list = []
# Start iterating over the epochs
for epoch in range(NUM_EPOCHS):
net.train(True)
print(f"--- Epoch {epoch+1}/{NUM_EPOCHS}, LR = {scheduler.get_last_lr()}")
# Iterate over the dataset
for source_images, source_labels in source_dataloader:
source_images = source_images.to(DEVICE)
source_labels = source_labels.to(DEVICE)
optimizer.zero_grad() # Zero-ing the gradients
# STEP 1: train the classifier
outputs = net(source_images)
loss_class = criterion(outputs, source_labels)
loss_class_list.append(loss_class.item())
# if current_step % LOG_FREQUENCY == 0:
# print('Step {}, Loss Classifier {}'.format(current_step+1, loss_class.item()))
loss_class.backward() # backward pass: computes gradients
# Domain Adaptation (Cross Domain Validation)
if USE_DOMAIN_ADAPTATION :
# Load target batch
target_images, target_labels = next(iter(target_dataloader))
target_images = target_images.to(DEVICE)
# if ALPHA_EXP :
# # ALPHA exponential decaying as described in the paper
# p = float(i + epoch * len_dataloader) / NUM_EPOCHS / len_dataloader
# ALPHA = 2. / (1. + np.exp(-10 * p)) - 1
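# NOTE (assumption about dann_net internals): passing alpha here routes the forward pass through the
# domain-classifier head behind a gradient reversal layer scaled by alpha, so the feature extractor
# learns to confuse the discriminator that is trained on the source(0)/target(1) domain labels below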
# STEP 2: train the discriminator: forward SOURCE data to Gd
outputs = net.forward(source_images, alpha=ALPHA)
# source's label is 0 for all data
labels_discr_source = torch.zeros(BATCH_SIZE, dtype=torch.int64).to(DEVICE)
loss_discr_source = criterion(outputs, labels_discr_source)
loss_source_list.append(loss_discr_source.item())
# if current_step % LOG_FREQUENCY == 0:
# print('Step {}, Loss Discriminator Source {}'.format(current_step+1, loss_discr_source.item()))
loss_discr_source.backward()
# STEP 3: train the discriminator: forward TARGET to Gd
outputs = net.forward(target_images, alpha=ALPHA)
labels_discr_target = torch.ones(BATCH_SIZE, dtype=torch.int64).to(DEVICE) # target's label is 1
loss_discr_target = criterion(outputs, labels_discr_target)
loss_target_list.append(loss_discr_target.item())
# if current_step % LOG_FREQUENCY == 0:
# print('Step {}, Loss Discriminator Target {}'.format(current_step+1, loss_discr_target.item()))
loss_discr_target.backward() #update gradients
optimizer.step() # update weights based on accumulated gradients
# --- Accuracy on training
if EVAL_ACCURACY_ON_TRAINING:
with torch.no_grad():
net.train(False)
running_corrects_train = 0
for images_train, labels_train in source_dataloader:
# images, labels = next(iter(source_dataloader))
images_train = images_train.to(DEVICE)
labels_train = labels_train.to(DEVICE)
# Forward Pass
outputs_train = net(images_train)
# Get predictions
_, preds = torch.max(outputs_train.data, 1)
# Update Corrects
running_corrects_train += torch.sum(preds == labels_train.data).data.item()
# Calculate Accuracy
accuracy_train = running_corrects_train / float(len(source_dataloader)*(source_dataloader.batch_size))
accuracies_train.append(accuracy_train)
print('Accuracy on train (photo):', accuracy_train)
# --- VALIDATION SET
if USE_VALIDATION :
# now train is finished, evaluate the model on the target dataset
net.train(False) # Set Network to evaluation mode
running_corrects = 0
for images, labels in target_dataloader:
images = images.to(DEVICE)
labels = labels.to(DEVICE)
outputs = net(images)
_, preds = torch.max(outputs.data, 1)
running_corrects += torch.sum(preds == labels.data).data.item()
# Calculate Accuracy
accuracy = running_corrects / float( len(target_dataloader)*(target_dataloader.batch_size) )
accuracies_validation.append(accuracy)
print(f"Accuracy on validation ({transfer_set}): {accuracy}")
# Step the scheduler
current_step += 1
scheduler.step()
if SHOW_RESULTS:
print()
print("Loss classifier")
print(loss_class_list)
if USE_DOMAIN_ADAPTATION :
print("\nLoss discriminator source")
print(loss_source_list)
print("\nLoss discriminator target")
print(loss_target_list)
### TEST
net = net.to(DEVICE) # this will bring the network to GPU if DEVICE is cuda
net.train(False) # Set Network to evaluation mode
running_corrects = 0
for images, labels in tqdm(test_dataloader):
images = images.to(DEVICE)
labels = labels.to(DEVICE)
# Forward Pass
outputs = net(images)
# Get predictions
_, preds = torch.max(outputs.data, 1) |
# Update Corrects
running_corrects += torch.sum(preds == labels.data).data.item()
| random_line_split |
|
evaluate_offline.py | self.Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
try:
self.model = Darknet(model_def).to(self.device)
self.model.apply(weights_init_normal)
except Exception as e:
pass
if file_pretrained_weights:
# load user pretrained weights(checkpoint)
if file_pretrained_weights.endswith(".pth"):
self.model.load_state_dict(torch.load(file_pretrained_weights))
# load others pretrained weights
else:
self.model.load_darknet_weights(file_pretrained_weights)
self.model.eval()
def prepare_image(self, img, in_dim):
'''
Image preprocessing: pad the whole original image to a square, then resize it to serve as the network input
'''
original_img = img
img_t = img[:,:,::-1].transpose((2,0,1)).copy()
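# OpenCV loads BGR, so ::-1 flips the channels to RGB; transpose((2,0,1)) turns HWC into CHW for PyTorch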
img_t = torch.from_numpy(img_t).float().div(255.0)
#print(img_t.dtype)
img_t,_ = pad_to_square(img_t,0)
img_t = resize(img_t, in_dim)
img_t = img_t.unsqueeze(0)
return img_t,original_img
def add_layer1(self, img, detections, in_dim):
'''
Overlay the detection and classification results directly on the original image
img: original image
detection: bounding boxes output by yolov3
in_dim: yolov3 | cv2.putText(img, label, p1, cv2.FONT_HERSHEY_SIMPLEX, 2.0,
self.class_colors[int(detect[-1])], 2)
return img
def add_layer2(self, img, detections, in_dim):
'''
Overlay the detection and classification results directly on the network input image
'''
img = img.cpu().squeeze(0).numpy().transpose((1,2,0))
img = img[:,:,::-1].copy()
detections = detections[0].numpy()
for i in range(detections.shape[0]):
detect = detections[i]
label = dict_names_of_class[int(detect[-1])]
p1 = (int(detect[0]), int(detect[1]))
p2 = (int(detect[2]), int(detect[3]))
cv2.rectangle(img, p1, p2, dict_colors_of_class[int(detect[-1])],2)
cv2.putText(img, label, p1, cv2.FONT_HERSHEY_SIMPLEX, 1.0,
dict_colors_of_class[int(detect[-1])])
return img
def detect(self,frame):
'''
Core detection function: takes a frame as input and returns the raw detection results
'''
#print(f"detect.py,detect: {frame.shape}")
img, original_img = self.prepare_image(frame, self.img_size)
#cv2.imshow('prepare img', img)
#cv2.waitKey(200)
input_img = Variable(img.type(self.Tensor))
detections = None
with torch.no_grad():
detections = self.model(input_img)
#print(f"detect.py,detect: detections.size {detections.size()}")
#print(f"detect.py,detect: {detections[0]}")
detections = non_max_suppression(detections, self.conf_thres, self.nms_thres)
return detections
def get_img_bboxs(self, img, detections, in_dim):
'''
Map the detections back onto the original image and draw the bboxes on it
Args:
img: original image
detection: bounding boxes output by yolov3
in_dim: the yolov3 network image size
Returns:
img: the image with bboxes drawn on it
bboxs: the detection results
'''
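# detections are expressed in the padded, in_dim-sized frame built by prepare_image;
# scaler rescales them and diff_dim removes the square-padding offset to recover original-image coordinates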
h,w = img.shape[:2]
#print(h,w)
detections = detections[0].numpy()
#print(detections.shape)
diff_dim = int(np.abs(h - w)/2)
scaler = (h / in_dim) if h>w else (w / in_dim)
#print('scaler is: ',scaler)
#print(detections)
obj_numbers = detections.shape[0]
bboxs = np.zeros((obj_numbers, 6))
for i in range(obj_numbers):
detect = detections[i]
label = self.class_names[int(detect[-1])]
if h > w:
p1 = (int(detect[0] * scaler) - diff_dim, int(detect[1] * scaler))
p2 = (int(detect[2] * scaler) - diff_dim, int(detect[3] * scaler))
else:
p1 = (int(detect[0] * scaler), int(detect[1] * scaler) - diff_dim)
p2 = (int(detect[2] * scaler), int(detect[3] * scaler) - diff_dim)
cv2.rectangle(img, p1, p2, self.class_colors[int(detect[-1])],2)
cv2.putText(img, label, p1, cv2.FONT_HERSHEY_PLAIN, 1.0,
self.class_colors[int(detect[-1])], 2)
bboxs[i] = np.array([int(detect[-1]), detect[-2], p1[0], p1[1], p2[0], p2[1]])
return img, bboxs
def save_txt(self, f_name, datas, fmts):
with open(f_name, 'w') as f:
for x in datas:
line = "{} {:.2f} {} {} {} {}\n".format(int(x[0]),x[1], int(x[2]), int(x[3]), int(x[4]), int(x[5]))
f.write(line)
'''
for x in datas:
line = ""
for i,fmt in enumerate(fmts):
line += fmt.format(x[i])
line += "\n"
f.write(line)
'''
def evalute1(self):
"""
Run detection (inference) on the images in the folder one by one,
and save the results into the detect_results folder
"""
img_files = []
with open(self.valid_path) as f:
img_files = f.readlines()
img_files = [x.rstrip() for x in img_files]
#print(img_files)
for img_file in img_files:
#print(img_file)
img_name = os.path.basename(img_file)
# file for saving the bboxes produced by inference
dr_bbox_file = os.path.join(self.path_detection_results, 'labels',
img_name.replace('.png','.txt').replace('.jpg','.txt'))
            # path for saving the image with the inferred bboxes drawn on it
dr_img_file = os.path.join(self.path_detection_results, 'images', img_name)
img = cv2.imread(img_file)
detections = self.detect(img)
if detections[0] is not None:
img, bboxs = self.get_img_bboxs(img, detections, self.img_size)
cv2.imwrite(dr_img_file, img)
#np.savetxt(dr_bbox_file, bboxs, delimiter=' ', fmt='%d')
self.save_txt(dr_bbox_file, bboxs, ["{:d}","{:.2f}","{:d}","{:d}","{:d}","{:d}"])
if self.flag_show:
cv2.imshow('detecting result', img)
cv2.waitKey(100)
else:
#print(f"evaluate1-detections: {detections}")
pass
def evalute2(self, pic_path):
'''
        Load images with a DataLoader, one image at a time, and run detection on each.
'''
self.model.eval()
dataloader = DataLoader(
ImageFolder(pic_path, img_size=self.img_size),
batch_size=1,
shuffle=False,
num_workers=0,
)
#prev_time = time.time()
for batch_i, (img_paths, input_imgs) in enumerate(dataloader):
print(f'batch_i:{batch_i}')
#print(f"type:{type(input_imgs)},shape:{input_imgs.shape}")
print(f"img_path:{img_paths}")
#
img_name = os.path.basename(img_paths[0 | 输出图片大小
'''
h,w = img.shape[:2]
#print(h,w)
detections = detections[0].numpy()
#print(detections.shape)
diff_dim = int(np.abs(h - w)/2)
scaler = (h / in_dim) if h>w else (w / in_dim)
#print('scaler is: ',scaler)
#print(detections)
for i in range(detections.shape[0]):
detect = detections[i]
label = self.class_names[int(detect[-1])]
if h > w:
p1 = (int(detect[0] * scaler) - diff_dim, int(detect[1] * scaler))
p2 = (int(detect[2] * scaler) - diff_dim, int(detect[3] * scaler))
else:
p1 = (int(detect[0] * scaler), int(detect[1] * scaler) - diff_dim)
p2 = (int(detect[2] * scaler), int(detect[3] * scaler) - diff_dim)
cv2.rectangle(img, p1, p2, self.class_colors[int(detect[-1])],4) | identifier_body |
evaluate_offline.py | etect[0]), int(detect[1]))
p2 = (int(detect[2]), int(detect[3]))
cv2.rectangle(img, p1, p2, dict_colors_of_class[int(detect[-1])],2)
cv2.putText(img, label, p1, cv2.FONT_HERSHEY_SIMPLEX, 1.0,
dict_colors_of_class[int(detect[-1])])
return img
def detect(self,frame):
'''
基础识别功能函数,输入frame,输出识别结果
'''
#print(f"detect.py,detect: {frame.shape}")
img, original_img = self.prepare_image(frame, self.img_size)
#cv2.imshow('prepare img', img)
#cv2.waitKey(200)
input_img = Variable(img.type(self.Tensor))
detections = None
with torch.no_grad():
detections = self.model(input_img)
#print(f"detect.py,detect: detections.size {detections.size()}")
#print(f"detect.py,detect: {detections[0]}")
detections = non_max_suppression(detections, self.conf_thres, self.nms_thres)
return detections
def get_img_bboxs(self, img, detections, in_dim):
'''
获取在原始图片上对应的bbox,并且叠加检测效果
Args:
img: 原始图片
detection: yolov3输出的bounding box
in_dim: yolov3输出图片大小
Returns:
img: 叠加bbox后的图片
bbox: 检测结果
'''
h,w = img.shape[:2]
#print(h,w)
detections = detections[0].numpy()
#print(detections.shape)
diff_dim = int(np.abs(h - w)/2)
scaler = (h / in_dim) if h>w else (w / in_dim)
#print('scaler is: ',scaler)
#print(detections)
obj_numbers = detections.shape[0]
bboxs = np.zeros((obj_numbers, 6))
for i in range(obj_numbers):
detect = detections[i]
label = self.class_names[int(detect[-1])]
if h > w:
p1 = (int(detect[0] * scaler) - diff_dim, int(detect[1] * scaler))
p2 = (int(detect[2] * scaler) - diff_dim, int(detect[3] * scaler))
else:
p1 = (int(detect[0] * scaler), int(detect[1] * scaler) - diff_dim)
p2 = (int(detect[2] * scaler), int(detect[3] * scaler) - diff_dim)
cv2.rectangle(img, p1, p2, self.class_colors[int(detect[-1])],2)
cv2.putText(img, label, p1, cv2.FONT_HERSHEY_PLAIN, 1.0,
self.class_colors[int(detect[-1])], 2)
bboxs[i] = np.array([int(detect[-1]), detect[-2], p1[0], p1[1], p2[0], p2[1]])
return img, bboxs
def save_txt(self, f_name, datas, fmts):
with open(f_name, 'w') as f:
for x in datas:
line = "{} {:.2f} {} {} {} {}\n".format(int(x[0]),x[1], int(x[2]), int(x[3]), int(x[4]), int(x[5]))
f.write(line)
'''
for x in datas:
line = ""
for i,fmt in enumerate(fmts):
line += fmt.format(x[i])
line += "\n"
f.write(line)
'''
def evalute1(self):
"""
对文件夹中的一张张图片进行检测(推理inference),
并保存结果至detect_results文件夹中
"""
img_files = []
with open(self.valid_path) as f:
img_files = f.readlines()
img_files = [x.rstrip() for x in img_files]
#print(img_files)
for img_file in img_files:
#print(img_file)
img_name = os.path.basename(img_file)
# 保存inference后的bbox
dr_bbox_file = os.path.join(self.path_detection_results, 'labels',
img_name.replace('.png','.txt').replace('.jpg','.txt'))
# 保存inference后叠加bbox的图像
dr_img_file = os.path.join(self.path_detection_results, 'images', img_name)
img = cv2.imread(img_file)
detections = self.detect(img)
if detections[0] is not None:
img, bboxs = self.get_img_bboxs(img, detections, self.img_size)
cv2.imwrite(dr_img_file, img)
#np.savetxt(dr_bbox_file, bboxs, delimiter=' ', fmt='%d')
self.save_txt(dr_bbox_file, bboxs, ["{:d}","{:.2f}","{:d}","{:d}","{:d}","{:d}"])
if self.flag_show:
cv2.imshow('detecting result', img)
cv2.waitKey(100)
else:
#print(f"evaluate1-detections: {detections}")
pass
def evalute2(self, pic_path):
'''
使用dataloader加载图片,每次只加载一张进行识别。
'''
self.model.eval()
dataloader = DataLoader(
ImageFolder(pic_path, img_size=self.img_size),
batch_size=1,
shuffle=False,
num_workers=0,
)
#prev_time = time.time()
for batch_i, (img_paths, input_imgs) in enumerate(dataloader):
print(f'batch_i:{batch_i}')
#print(f"type:{type(input_imgs)},shape:{input_imgs.shape}")
print(f"img_path:{img_paths}")
#
img_name = os.path.basename(img_paths[0])
# 保存inference后的bbox
dr_bbox_file = os.path.join(self.path_detection_results, 'labels',
img_name.replace('.jpg','.txt').replace('.png','.txt'))
# 保存inference后叠加bbox的图像
dr_img_file = os.path.join(self.path_detection_results, 'images', img_name)
# debug
frame = cv2.imread(img_paths[0])
#cv2.imshow('cv read', frame)
#input_img = input_imgs[0].numpy().transpose((1,2,0)).copy()
#input_img = cv2.cvtColor(input_img, cv2.COLOR_RGB2BGR)
#cv2.imshow('dataload', input_img)
#cv2.waitKey(10000)
#return
# Configure input
input_imgs = Variable(input_imgs.type(self.Tensor))
# Get detections
detections = []
with torch.no_grad():
detections = self.model(input_imgs)
detections = non_max_suppression(detections, self.conf_thres, self.nms_thres)
#print(detections)
# Log progress
#current_time = time.time()
#fps = 1/(current_time-prev_time)
#prev_time = current_time
# Save image and detections
if detections[0] is not None:
img, bboxs = self.get_img_bboxs(frame, detections, self.img_size)
#img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
cv2.imwrite(dr_img_file, img)
#np.savetxt(dr_bbox_file, bboxs, delimiter=' ', fmt='%d')
self.save_txt(dr_bbox_file, bboxs, ["{:d}","{:.2f}","{:d}","{:d}","{:d}","{:d}"])
if self.flag_show:
cv2.imshow('res', img)
cv2.waitKey(10000)
return
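# --- Illustrative sketch (not part of the original script) ---------------------------
# detect() above relies on non_max_suppression imported from the project's utils, which
# also handles confidence filtering and per-class grouping. The helper below is only a
# minimal, class-agnostic sketch of the underlying idea (greedy IoU suppression); the
# name _simple_nms and its exact behaviour are assumptions, not the project's API.
def _simple_nms(boxes, scores, nms_thres=0.4):
    import numpy as np
    order = scores.argsort()[::-1]          # indices sorted by descending score
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(int(i))                 # keep the highest-scoring remaining box
        xx1 = np.maximum(boxes[i, 0], boxes[order[1:], 0])
        yy1 = np.maximum(boxes[i, 1], boxes[order[1:], 1])
        xx2 = np.minimum(boxes[i, 2], boxes[order[1:], 2])
        yy2 = np.minimum(boxes[i, 3], boxes[order[1:], 3])
        inter = np.clip(xx2 - xx1, 0, None) * np.clip(yy2 - yy1, 0, None)
        area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
        areas = (boxes[order[1:], 2] - boxes[order[1:], 0]) * (boxes[order[1:], 3] - boxes[order[1:], 1])
        iou = inter / (area_i + areas - inter + 1e-9)
        order = order[1:][iou < nms_thres]  # drop boxes that overlap the kept one too much
    return keep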
path_weights = r"H:\deepLearning\dataset\COCO2014\yolov3\weights\yolov3.weights"
path_class_names = r"H:\deepLearning\dataset\COCO2014\yolov3\classes.names"
path_model_def = r"config\yolov3.cfg"
path_data_config = r"config\coco2014_val.data"
path_detection_results = r"H:\deepLearning\dataset\COCO2014\yolov3\map_yolov3\detection_results"
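# --- Illustrative note (not part of the original script) -----------------------------
# The format of the .data file referenced above is defined by the project's
# parse_data_config helper. Judging only from how its result is used in YoloDetect
# (data_config["valid"] points at a text file listing one validation image path per line,
# data_config["names"] at the class-name list), a minimal file presumably looks like the
# key=value sketch below; the exact keys and paths are assumptions, not taken verbatim
# from the project.
_example_data_config = (
    "valid=data/coco2014_val.txt\n"
    "names=data/classes.names\n"
)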
def test1():
if os.path.exists(path_detection_results):
shutil.rmtree(path_detection_results)
os.mkdir(path_detection_results)
os.mkdir(os.path.join(path_detection_results, 'labels'))
os.mkdir(os.path.join(path_detection_results, 'images'))
yolodetect = YoloDetect(model_def=path_model_def,
data_config=path_data_config,
path_detection_results=path_detection_results,
file_pretrained_weights=path_weights)
yolodetect.evalute1()
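# --- Illustrative sketch (not part of the original script) ---------------------------
# The arithmetic below mirrors what get_img_bboxs/add_layer1 do when mapping a detection
# from the in_dim x in_dim letterboxed network input back to the original image: scale by
# the longer original side, then remove the padding offset from the padded axis. The
# helper name and the example numbers are illustrative assumptions.
def _bbox_to_original(detect_xyxy, h, w, in_dim):
    diff_dim = int(abs(h - w) / 2)
    scaler = (h / in_dim) if h > w else (w / in_dim)
    x1, y1, x2, y2 = (int(v * scaler) for v in detect_xyxy)
    if h > w:   # width was padded, shift x back
        return (x1 - diff_dim, y1, x2 - diff_dim, y2)
    return (x1, y1 - diff_dim, x2, y2 - diff_dim)  # height was padded, shift y back

# e.g. for an image with h=480, w=640 letterboxed to 416x416, a network-space box
# (100, 120, 200, 220) maps back to approximately (153, 104, 307, 258) on the original image.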
pic_path = r"H:\deepLearning\dataset\COCO2014\val2014\val2014"
def test2():
if os.path.exists(path_detection_results):
shutil.rmtree(path_detection_results)
os.mkdir(path_detection_results)
os.mkdir(os.path.join(path_detection_results, 'labels'))
os.mkdir(os.path.join(path_detection_results, 'images')) |
yolodetect = YoloDetect(model_def=path_model_def, | random_line_split |
|
evaluate_offline.py | etect[-1])],2)
cv2.putText(img, label, p1, cv2.FONT_HERSHEY_SIMPLEX, 1.0,
dict_colors_of_class[int(detect[-1])])
return img
def detect(self,frame):
'''
基础识别功能函数,输入frame,输出识别结果
'''
#print(f"detect.py,detect: {frame.shape}")
img, original_img = self.prepare_image(frame, self.img_size)
#cv2.imshow('prepare img', img)
#cv2.waitKey(200)
input_img = Variable(img.type(self.Tensor))
detections = None
with torch.no_grad():
detections = self.model(input_img)
#print(f"detect.py,detect: detections.size {detections.size()}")
#print(f"detect.py,detect: {detections[0]}")
detections = non_max_suppression(detections, self.conf_thres, self.nms_thres)
return detections
def get_img_bboxs(self, img, detections, in_dim):
'''
获取在原始图片上对应的bbox,并且叠加检测效果
Args:
img: 原始图片
detection: yolov3输出的bounding box
in_dim: yolov3输出图片大小
Returns:
img: 叠加bbox后的图片
bbox: 检测结果
'''
h,w = img.shape[:2]
#print(h,w)
detections = detections[0].numpy()
#print(detections.shape)
diff_dim = int(np.abs(h - w)/2)
scaler = (h / in_dim) if h>w else (w / in_dim)
#print('scaler is: ',scaler)
#print(detections)
obj_numbers = detections.shape[0]
bboxs = np.zeros((obj_numbers, 6))
for i in range(obj_numbers):
detect = detections[i]
label = self.class_names[int(detect[-1])]
if h > w:
p1 = (int(detect[0] * scaler) - diff_dim, int(detect[1] * scaler))
p2 = (int(detect[2] * scaler) - diff_dim, int(detect[3] * scaler))
else:
p1 = (int(detect[0] * scaler), int(detect[1] * scaler) - diff_dim)
p2 = (int(detect[2] * scaler), int(detect[3] * scaler) - diff_dim)
cv2.rectangle(img, p1, p2, self.class_colors[int(detect[-1])],2)
cv2.putText(img, label, p1, cv2.FONT_HERSHEY_PLAIN, 1.0,
self.class_colors[int(detect[-1])], 2)
bboxs[i] = np.array([int(detect[-1]), detect[-2], p1[0], p1[1], p2[0], p2[1]])
return img, bboxs
def save_txt(self, f_name, datas, fmts):
with open(f_name, 'w') as f:
for x in datas:
line = "{} {:.2f} {} {} {} {}\n".format(int(x[0]),x[1], int(x[2]), int(x[3]), int(x[4]), int(x[5]))
f.write(line)
'''
for x in datas:
line = ""
for i,fmt in enumerate(fmts):
line += fmt.format(x[i])
line += "\n"
f.write(line)
'''
def evalute1(self):
"""
对文件夹中的一张张图片进行检测(推理inference),
并保存结果至detect_results文件夹中
"""
img_files = []
with open(self.valid_path) as f:
img_files = f.readlines()
img_files = [x.rstrip() for x in img_files]
#print(img_files)
for img_file in img_files:
#print(img_file)
img_name = os.path.basename(img_file)
# 保存inference后的bbox
dr_bbox_file = os.path.join(self.path_detection_results, 'labels',
img_name.replace('.png','.txt').replace('.jpg','.txt'))
# 保存inference后叠加bbox的图像
dr_img_file = os.path.join(self.path_detection_results, 'images', img_name)
img = cv2.imread(img_file)
detections = self.detect(img)
if detections[0] is not None:
img, bboxs = self.get_img_bboxs(img, detections, self.img_size)
cv2.imwrite(dr_img_file, img)
#np.savetxt(dr_bbox_file, bboxs, delimiter=' ', fmt='%d')
self.save_txt(dr_bbox_file, bboxs, ["{:d}","{:.2f}","{:d}","{:d}","{:d}","{:d}"])
if self.flag_show:
cv2.imshow('detecting result', img)
cv2.waitKey(100)
else:
#print(f"evaluate1-detections: {detections}")
pass
def evalute2(self, pic_path):
'''
使用dataloader加载图片,每次只加载一张进行识别。
'''
self.model.eval()
dataloader = DataLoader(
ImageFolder(pic_path, img_size=self.img_size),
batch_size=1,
shuffle=False,
num_workers=0,
)
#prev_time = time.time()
for batch_i, (img_paths, input_imgs) in enumerate(dataloader):
print(f'batch_i:{batch_i}')
#print(f"type:{type(input_imgs)},shape:{input_imgs.shape}")
print(f"img_path:{img_paths}")
#
img_name = os.path.basename(img_paths[0])
# 保存inference后的bbox
dr_bbox_file = os.path.join(self.path_detection_results, 'labels',
img_name.replace('.jpg','.txt').replace('.png','.txt'))
# 保存inference后叠加bbox的图像
dr_img_file = os.path.join(self.path_detection_results, 'images', img_name)
# debug
frame = cv2.imread(img_paths[0])
#cv2.imshow('cv read', frame)
#input_img = input_imgs[0].numpy().transpose((1,2,0)).copy()
#input_img = cv2.cvtColor(input_img, cv2.COLOR_RGB2BGR)
#cv2.imshow('dataload', input_img)
#cv2.waitKey(10000)
#return
# Configure input
input_imgs = Variable(input_imgs.type(self.Tensor))
# Get detections
detections = []
with torch.no_grad():
detections = self.model(input_imgs)
detections = non_max_suppression(detections, self.conf_thres, self.nms_thres)
#print(detections)
# Log progress
#current_time = time.time()
#fps = 1/(current_time-prev_time)
#prev_time = current_time
# Save image and detections
if detections[0] is not None:
img, bboxs = self.get_img_bboxs(frame, detections, self.img_size)
#img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
cv2.imwrite(dr_img_file, img)
#np.savetxt(dr_bbox_file, bboxs, delimiter=' ', fmt='%d')
self.save_txt(dr_bbox_file, bboxs, ["{:d}","{:.2f}","{:d}","{:d}","{:d}","{:d}"])
if self.flag_show:
cv2.imshow('res', img)
cv2.waitKey(10000)
return
path_weights = r"H:\deepLearning\dataset\COCO2014\yolov3\weights\yolov3.weights"
path_class_names = r"H:\deepLearning\dataset\COCO2014\yolov3\classes.names"
path_model_def = r"config\yolov3.cfg"
path_data_config = r"config\coco2014_val.data"
path_detection_results = r"H:\deepLearning\dataset\COCO2014\yolov3\map_yolov3\detection_results"
def test1():
if os.path.exists(path_detection_results):
shutil.rmtree(path_detection_results)
os.mkdir(path_detection_results)
os.mkdir(os.path.join(path_detection_results, 'labels'))
os.mkdir(os.path.join(path_detection_results, 'images'))
yolodetect = YoloDetect(model_def=path_model_def,
data_config=path_data_config,
path_detection_results=path_detection_results,
file_pretrained_weights=path_weights)
yolodetect.evalute1()
pic_path = r"H:\deepLearning\dataset\COCO2014\val2014\val2014"
def test2():
if os.path.exists(path_detection_results):
shutil.rmtree(path_detection_results)
os.mkdir(path_detection_results)
os.mkdir(os.path.join(path_detection_results, 'labels'))
os.mkdir(os.path.join(path_detection_results, 'images'))
yolodetect = YoloDetect(model_def=path_model_def,
data_config=path_data_config,
path_detection_results=path_detection_results,
file_pretrained_weights=path_weights)
yolodetect.evalute2(pic_path)
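# --- Illustrative sketch (not part of the original script) ---------------------------
# prepare_image in YoloDetect letterboxes the frame via the project's pad_to_square and
# resize helpers. The standalone function below sketches the same effect with plain
# torch.nn.functional calls, under the assumption that the shorter side is padded
# symmetrically with zeros before scaling; the helper name and padding mode are assumptions.
def _letterbox_sketch(img_chw, in_dim=416, pad_value=0.0):
    import torch.nn.functional as F
    c, h, w = img_chw.shape
    diff = abs(h - w)
    p1, p2 = diff // 2, diff - diff // 2
    # F.pad pads the last two dims as (left, right, top, bottom)
    pad = (0, 0, p1, p2) if h < w else (p1, p2, 0, 0)
    squared = F.pad(img_chw, pad, value=pad_value)
    return F.interpolate(squared.unsqueeze(0), size=(in_dim, in_dim), mode="nearest").squeeze(0)

# e.g. a 3x480x640 tensor becomes 3x640x640 after padding and 3x416x416 after resizing.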
if __name__ == "__main__":
test1()
| conditional_block |
||
evaluate_offline.py | a_config,
file_pretrained_weights,
path_detection_results,
img_size=416,
):
self.flag_show = False
self.frame = None
self.detections = [None]
#
data_config = parse_data_config(data_config)
self.valid_path = data_config["valid"]
self.class_names = load_classes(data_config["names"])
self.path_detection_results = path_detection_results
self.class_colors = generate_colors()
self.img_size = img_size
self.conf_thres = 0.8
self.nms_thres = 0.4
# choose GPU or CPU
self.Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
try:
self.model = Darknet(model_def).to(self.device)
self.model.apply(weights_init_normal)
except Exception as e:
pass
if file_pretrained_weights:
# load user pretrained weights(checkpoint)
if file_pretrained_weights.endswith(".pth"):
self.model.load_state_dict(torch.load(file_pretrained_weights))
# load others pretrained weights
else:
self.model.load_darknet_weights(file_pretrained_weights)
self.model.eval()
def prepare_image(self, img, in_dim):
'''
图片预处理:将原图的整幅图片进行padding后缩放,作为神经网络的输入
'''
original_img = img
img_t = img[:,:,::-1].transpose((2,0,1)).copy()
img_t = torch.from_numpy(img_t).float().div(255.0)
#print(img_t.dtype)
img_t,_ = pad_to_square(img_t,0)
img_t = resize(img_t, in_dim)
img_t = img_t.unsqueeze(0)
return img_t,original_img
def add_layer1(self, img, detections, in_dim):
'''
直接在原始图片上叠加目标识别和分类效果
img: 原始图片
detection: yolov3输出的bounding box
in_dim: yolov3输出图片大小
'''
h,w = img.shape[:2]
#print(h,w)
detections = detections[0].numpy()
#print(detections.shape)
diff_dim = int(np.abs(h - w)/2)
scaler = (h / in_dim) if h>w else (w / in_dim)
#print('scaler is: ',scaler)
#print(detections)
for i in range(detections.shape[0]):
detect = detections[i]
label = self.class_names[int(detect[-1])]
if h > w:
p1 = (int(detect[0] * scaler) - diff_dim, int(detect[1] * scaler))
p2 = (int(detect[2] * scaler) - diff_dim, int(detect[3] * scaler))
else:
p1 = (int(detect[0] * scaler), int(detect[1] * scaler) - diff_dim)
p2 = (int(detect[2] * scaler), int(detect[3] * scaler) - diff_dim)
cv2.rectangle(img, p1, p2, self.class_colors[int(detect[-1])],4)
cv2.putText(img, label, p1, cv2.FONT_HERSHEY_SIMPLEX, 2.0,
self.class_colors[int(detect[-1])], 2)
return img
def add_layer2(self, img, detections, in_dim):
'''
直接在网络输入图片上叠加目标识别和分类效果
'''
img = img.cpu().squeeze(0).numpy().transpose((1,2,0))
img = img[:,:,::-1].copy()
detections = detections[0].numpy()
for i in range(detections.shape[0]):
detect = detections[i]
label = dict_names_of_class[int(detect[-1])]
p1 = (int(detect[0]), int(detect[1]))
p2 = (int(detect[2]), int(detect[3]))
cv2.rectangle(img, p1, p2, dict_colors_of_class[int(detect[-1])],2)
cv2.putText(img, label, p1, cv2.FONT_HERSHEY_SIMPLEX, 1.0,
dict_colors_of_class[int(detect[-1])])
return img
def detect(self,frame):
'''
基础识别功能函数,输入frame,输出识别结果
'''
#print(f"detect.py,detect: {frame.shape}")
img, original_img = self.prepare_image(frame, self.img_size)
#cv2.imshow('prepare img', img)
#cv2.waitKey(200)
input_img = Variable(img.type(self.Tensor))
detections = None
with torch.no_grad():
detections = self.model(input_img)
#print(f"detect.py,detect: detections.size {detections.size()}")
#print(f"detect.py,detect: {detections[0]}")
detections = non_max_suppression(detections, self.conf_thres, self.nms_thres)
return detections
def get_img_bboxs(self, img, detections, in_dim):
'''
获取在原始图片上对应的bbox,并且叠加检测效果
Args:
img: 原始图片
detection: yolov3输出的bounding box
in_dim: yolov3输出图片大小
Returns:
img: 叠加bbox后的图片
bbox: 检测结果
'''
h,w = img.shape[:2]
#print(h,w)
detections = detections[0].numpy()
#print(detections.shape)
diff_dim = int(np.abs(h - w)/2)
scaler = (h / in_dim) if h>w else (w / in_dim)
#print('scaler is: ',scaler)
#print(detections)
obj_numbers = detections.shape[0]
bboxs = np.zeros((obj_numbers, 6))
for i in range(obj_numbers):
detect = detections[i]
label = self.class_names[int(detect[-1])]
if h > w:
p1 = (int(detect[0] * scaler) - diff_dim, int(detect[1] * scaler))
p2 = (int(detect[2] * scaler) - diff_dim, int(detect[3] * scaler))
else:
p1 = (int(detect[0] * scaler), int(detect[1] * scaler) - diff_dim)
p2 = (int(detect[2] * scaler), int(detect[3] * scaler) - diff_dim)
cv2.rectangle(img, p1, p2, self.class_colors[int(detect[-1])],2)
cv2.putText(img, label, p1, cv2.FONT_HERSHEY_PLAIN, 1.0,
self.class_colors[int(detect[-1])], 2)
bboxs[i] = np.array([int(detect[-1]), detect[-2], p1[0], p1[1], p2[0], p2[1]])
return img, bboxs
def save_txt(self, f_name, datas, fmts):
with open(f_name, 'w') as f:
for x in datas:
line = "{} {:.2f} {} {} {} {}\n".format(int(x[0]),x[1], int(x[2]), int(x[3]), int(x[4]), int(x[5]))
f.write(line)
'''
for x in datas:
line = ""
for i,fmt in enumerate(fmts):
line += fmt.format(x[i])
line += "\n"
f.write(line)
'''
def evalute1(self):
"""
对文件夹中的一张张图片进行检测(推理inference),
并保存结果至detect_results文件夹中
"""
img_files = []
with open(self.valid_path) as f:
img_files = f.readlines()
img_files = [x.rstrip() for x in img_files]
#print(img_files)
for img_file in img_files:
#print(img_file)
img_name = os.path.basename(img_file)
# 保存inference后的bbox
dr_bbox_file = os.path.join(self.path_detection_results, 'labels',
img_name.replace('.png','.txt').replace('.jpg','.txt'))
# 保存inference后叠加bbox的图像
dr_img_file = os.path.join(self.path_detection_results, 'images', img_name)
img = cv2.imread(img_file)
detections = self.detect(img)
if detections[0] is not None:
img, bboxs = self.get_img_bboxs(img, detections, self.img_size)
cv2.imwrite(dr_img_file, img)
#np.savetxt(dr_bbox_file, bboxs, delimiter=' ', fmt='%d')
self.save_txt(dr_bbox_file, bboxs, ["{:d}","{:.2f}","{:d}","{:d}","{:d}","{:d}"])
if self.flag_show:
cv2.imshow('detecting result', img)
cv2.waitKey(100)
else:
#print(f"evaluate1-detections: {detections}")
pass
def eval | dat | identifier_name |
|
PMP.py | 224), torchvision.transforms.ToTensor()])),
# transform=transforms.Compose([
    # transforms.Resize(224), # ResNet's default input size is 224*224
# transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
# ])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST(
root='./data',
train=False,
download=True,
transform=transforms.Compose([transforms.Resize(224), torchvision.transforms.ToTensor()])),
batch_size=args.batch_size, shuffle=False)
# test_x = torch.unsqueeze(test_dataset.data, dim=1).type(torch.Tensor)
# test_y = test_dataset.targets
class LeNet(nn.Module): # Using this module need to delete resize in dataloader
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Sequential(nn.Conv2d(1, 6, 3, 1, 2), nn.ReLU(),
nn.MaxPool2d(2, 2))
self.conv2 = nn.Sequential(nn.Conv2d(6, 16, 5), nn.ReLU(),
nn.MaxPool2d(2, 2))
self.fc1 = nn.Sequential(nn.Linear(16 * 5 * 5, 120),
nn.BatchNorm1d(120), nn.ReLU())
self.fc2 = nn.Sequential(
nn.Linear(120, 84),
nn.BatchNorm1d(84),
nn.ReLU(),
            nn.Linear(84, 10))  # the final output size must be 10, since the digit classes are 0 ~ 9
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = x.view(x.size()[0], -1)
x = self.fc1(x)
x = self.fc2(x)
return x
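# --- Illustrative sketch (not part of the original script) ---------------------------
# Quick sanity check of the 16 * 5 * 5 flatten size assumed by fc1 for a raw 28x28 MNIST
# input (i.e. when the Resize(224) transform is removed, as the LeNet comment above says):
# conv1 (3x3, stride 1, padding 2) -> 30x30, pool -> 15x15; conv2 (5x5) -> 11x11, pool -> 5x5.
def _check_lenet_shapes():
    import torch
    net = LeNet()
    net.eval()                      # eval mode so BatchNorm uses running statistics
    with torch.no_grad():
        out = net(torch.zeros(2, 1, 28, 28))
    assert out.shape == (2, 10)     # one logit per digit class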
class ModelParallelResNet50(ResNet):
def __init__(self, *args, **kwargs): # Bottleneck and [3, 4, 6, 3] refers to ResNet50
super(ModelParallelResNet50, self).__init__(
Bottleneck, [3, 4, 6, 3], num_classes=1000, *args, **kwargs)
self.conv = nn.Conv2d(1, 3, kernel_size=1) # add for MNIST
self.seq1 = nn.Sequential(
self.conv,
self.conv1,
self.bn1,
self.relu,
self.maxpool,
self.layer1,
self.layer2
).to('cuda:0')
self.seq2 = nn.Sequential(
self.layer3,
self.layer4,
self.avgpool,
).to('cuda:1')
self.fc.to('cuda:1')
def forward(self, x):
x = self.seq2(self.seq1(x).to('cuda:1'))
return self.fc(x.view(x.size(0), -1))
class PipelineParallelResNet50(ModelParallelResNet50):
def __init__(self, split_size=32, *args, **kwargs):
super(PipelineParallelResNet50, self).__init__(*args, **kwargs)
self.split_size = split_size
def forward(self, x):
splits = iter(x.split(self.split_size, dim=0))
s_next = next(splits)
s_prev = self.seq1(s_next).to('cuda:1')
ret = []
for s_next in splits:
# A. s_prev r | cuda:1
s_prev = self.seq2(s_prev)
ret.append(self.fc(s_prev.view(s_prev.size(0), -1)))
# B. s_next runs on cuda:0, which can run concurrently with A
s_prev = self.seq1(s_next).to('cuda:1')
s_prev = self.seq2(s_prev)
ret.append(self.fc(s_prev.view(s_prev.size(0), -1)))
return torch.cat(ret)
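# --- Illustrative sketch (not part of the original script) ---------------------------
# The same micro-batch pipelining pattern as PipelineParallelResNet50.forward, shown with
# two toy CPU stages so it runs without two GPUs: while stage 2 finishes the previous
# split ("A"), stage 1 can already start on the next split ("B") once the stages live on
# different devices. Stage sizes and names here are made up for illustration.
def _pipeline_sketch():
    import torch
    import torch.nn as nn
    stage1 = nn.Sequential(nn.Linear(8, 16), nn.ReLU())    # stands in for seq1 on cuda:0
    stage2 = nn.Sequential(nn.Linear(16, 4))                # stands in for seq2 + fc on cuda:1
    x = torch.randn(64, 8)
    splits = iter(x.split(16, dim=0))
    s_prev = stage1(next(splits))
    outputs = []
    for s_next in splits:
        outputs.append(stage2(s_prev))   # "A": finish the previous micro-batch
        s_prev = stage1(s_next)          # "B": start the next micro-batch
    outputs.append(stage2(s_prev))
    return torch.cat(outputs)            # shape (64, 4), same order as the input batch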
model = PipelineParallelResNet50()
if not args.no_tensorboard:
writer = SummaryWriter('./runs/PMP')
data_iter = iter(train_loader)
    images, labels = next(data_iter)
images = images.cuda()
writer.add_graph(model, images)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
train_acc_i = []
train_acc_list = []
a = []
ac_list = []
def train(epoch):  # training procedure for one epoch
    model.train()  # switch to training mode
running_loss = 0.0
running_accuracy = 0.0
for batch_idx, (data, target) in enumerate(train_loader):
        if use_gpu:  # move the data to the GPU when GPU mode is enabled
data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)  # wrap the data in Variables
        optimizer.zero_grad()  # zero the optimizer's gradients
        # For MP use: feed the input from cuda:0
        output = model(data.to('cuda:0'))  # forward pass: feed the data through the network and get the output
train_output = torch.max(output, dim=1)[1]
        # move the target to output's device for the backward pass
target = target.to(output.device)
        loss = criterion(output, target)  # compute the loss
        loss.backward()  # backpropagate the gradients
running_accuracy += torch.sum(torch.eq(target, train_output)).item() / target.cpu().numpy().size
running_loss += loss.item()
        optimizer.step()  # after one forward + backward pass, update the optimizer's parameters
        if batch_idx % args.log_interval == 0:  # print progress; args.log_interval is the parameter set at the very top
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t Accuracy: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item(), running_accuracy / args.log_interval))
# ...log the running loss
if not args.no_tensorboard:
writer.add_scalar('training loss',
running_loss / args.log_interval,
epoch * len(train_loader.dataset) + batch_idx * args.batch_size)
writer.add_scalar('training accuracy', running_accuracy / args.log_interval,
epoch * len(train_loader.dataset) + batch_idx * args.batch_size)
running_loss = 0.0
running_accuracy = 0.0
def test(epoch):
    model.eval()  # switch to evaluation mode
    test_loss = 0  # initialize the test loss to 0
    correct = 0  # initialize the count of correct predictions to 0
running_accuracy = 0.0
total = 0
count = 0
for data, target in test_loader:
if use_gpu:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
output = model(data.to('cuda:0'))
target = target.to(output.device)
        test_loss += criterion(output, target).item()  # sum up batch loss: accumulate the loss over all batches
test_output = torch.max(output, dim=1)[1] # get the index of the max log-probability
# pred = output.data.max(1, keepdim=True)[1]
correct += torch.sum(torch.eq(test_output, target)).item()
total += target.size(0)
count += 1
        # correct += test_output.eq(target.data.view_as(test_output)).cpu().sum()  # accumulate the number of correct predictions
        # running_accuracy += torch.sum(torch.eq(target, test_output)).item() / target.cpu().numpy().size
    test_loss /= count  # the loss was accumulated over all batches, so divide by the number of batches to get the average loss
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}% )\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset), ))
if not args.no_tensorboard:
writer.add_scalar('testing loss', test_loss, epoch)
writer.add_scalar('testing accuracy', 100. * correct / len(test_loader.dataset), epoch)
def profile(dir_name='./runs/benchmark/', batch_size=args.batch_size):
    for batch_idx, (train_x, train_y) in enumerate(train_loader):  # fetching the data inside the profiler block raises an error, so it is done out here
with profiler.profile() as prof:
with profiler.record_function("model_training"):
train_x = train_x.to('cuda:0')
train_y = train_y.to('cuda:1')
model.train()
optimizer.zero_grad()
loss = criterion(model(train_x), train_y)
loss.backward()
optimizer.step()
if use_gpu:
print(prof.key_averages | uns on | identifier_name |
PMP.py | , 1, 2), nn.ReLU(),
nn.MaxPool2d(2, 2))
self.conv2 = nn.Sequential(nn.Conv2d(6, 16, 5), nn.ReLU(),
nn.MaxPool2d(2, 2))
self.fc1 = nn.Sequential(nn.Linear(16 * 5 * 5, 120),
nn.BatchNorm1d(120), nn.ReLU())
self.fc2 = nn.Sequential(
nn.Linear(120, 84),
nn.BatchNorm1d(84),
nn.ReLU(),
nn.Linear(84, 10)) # 最后的结果一定要变为 10,因为数字的选项是 0 ~ 9
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = x.view(x.size()[0], -1)
x = self.fc1(x)
x = self.fc2(x)
return x
class ModelParallelResNet50(ResNet):
def __init__(self, *args, **kwargs): # Bottleneck and [3, 4, 6, 3] refers to ResNet50
super(ModelParallelResNet50, self).__init__(
Bottleneck, [3, 4, 6, 3], num_classes=1000, *args, **kwargs)
self.conv = nn.Conv2d(1, 3, kernel_size=1) # add for MNIST
self.seq1 = nn.Sequential(
self.conv,
self.conv1,
self.bn1,
self.relu,
self.maxpool,
self.layer1,
self.layer2
).to('cuda:0')
self.seq2 = nn.Sequential(
self.layer3,
self.layer4,
self.avgpool,
).to('cuda:1')
self.fc.to('cuda:1')
def forward(self, x):
x = self.seq2(self.seq1(x).to('cuda:1'))
return self.fc(x.view(x.size(0), -1))
class PipelineParallelResNet50(ModelParallelResNet50):
def __init__(self, split_size=32, *args, **kwargs):
super(PipelineParallelResNet50, self).__init__(*args, **kwargs)
self.split_size = split_size
def forward(self, x):
splits = iter(x.split(self.split_size, dim=0))
s_next = next(splits)
s_prev = self.seq1(s_next).to('cuda:1')
ret = []
for s_next in splits:
# A. s_prev runs on cuda:1
s_prev = self.seq2(s_prev)
ret.append(self.fc(s_prev.view(s_prev.size(0), -1)))
# B. s_next runs on cuda:0, which can run concurrently with A
s_prev = self.seq1(s_next).to('cuda:1')
s_prev = self.seq2(s_prev)
ret.append(self.fc(s_prev.view(s_prev.size(0), -1)))
return torch.cat(ret)
model = PipelineParallelResNet50()
if not args.no_tensorboard:
writer = SummaryWriter('./runs/PMP')
data_iter = iter(train_loader)
images, labels = data_iter.next()
images = images.cuda()
writer.add_graph(model, images)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
train_acc_i = []
train_acc_list = []
a = []
ac_list = []
def train(epoch): # 定义每个epoch的训练细节
model.train() # 设置为trainning模式
running_loss = 0.0
running_accuracy = 0.0
for batch_idx, (data, target) in enumerate(train_loader):
if use_gpu: # 如果要调用GPU模式,就把数据转存到GPU
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target) # 把数据转换成Variable
optimizer.zero_grad() # 优化器梯度初始化为零
# For MP use 从cuda:0输入数据
output = model(data.to('cuda:0')) # 把数据输入网络并得到输出,即进行前向传播
train_output = torch.max(output, dim=1)[1]
# 从output的device输入target进行反向传播
target = target.to(output.device)
loss = criterion(output, target) # 计算损失函数
loss.backward() # 反向传播梯度
running_accuracy += torch.sum(torch.eq(target, train_output)).item() / target.cpu().numpy().size
running_loss += loss.item()
optimizer.step() # 结束一次前传+反传之后,更新优化器参数
if batch_idx % args.log_interval == 0: # 准备打印相关信息,args.log_interval是最开头设置的好了的参数
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t Accuracy: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item(), running_accuracy / args.log_interval))
# ...log the running loss
if not args.no_tensorboard:
writer.add_scalar('training loss',
running_loss / args.log_interval,
epoch * len(train_loader.dataset) + batch_idx * args.batch_size)
writer.add_scalar('training accuracy', running_accuracy / args.log_interval,
epoch * len(train_loader.dataset) + batch_idx * args.batch_size)
running_loss = 0.0
running_accuracy = 0.0
def test(epoch):
model.eval() # 设置为test模式
test_loss = 0 # 初始化测试损失值为0
correct = 0 # 初始化预测正确的数据个数为0
running_accuracy = 0.0
total = 0
count = 0
for data, target in test_loader:
if use_gpu:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
output = model(data.to('cuda:0'))
target = target.to(output.device)
test_loss += criterion(output, target).item() # sum up batch loss 把所有loss值进行累加
test_output = torch.max(output, dim=1)[1] # get the index of the max log-probability
# pred = output.data.max(1, keepdim=True)[1]
correct += torch.sum(torch.eq(test_output, target)).item()
total += target.size(0)
count += 1
# correct += test_output.eq(target.data.view_as(test_output)).cpu().sum() # 对预测正确的数据个数进行累加
# running_accuracy += torch.sum(torch.eq(target, test_output)).item() / target.cpu().numpy().size
test_loss /= count # 因为把所有loss值进行过累加,所以最后要除以总得数据长度才得平均loss
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}% )\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset), ))
if not args.no_tensorboard:
writer.add_scalar('testing loss', test_loss, epoch)
writer.add_scalar('testing accuracy', 100. * correct / len(test_loader.dataset), epoch)
def profile(dir_name='./runs/benchmark/', batch_size=args.batch_size):
for batch_idx, (train_x, train_y) in enumerate(train_loader): # 把取数据放在profile里会报错,所以放在外面
with profiler.profile() as prof:
with profiler.record_function("model_training"):
train_x = train_x.to('cuda:0')
train_y = train_y.to('cuda:1')
model.train()
optimizer.zero_grad()
loss = criterion(model(train_x), train_y)
loss.backward()
optimizer.step()
if use_gpu:
print(prof.key_averages(group_by_input_shape=True).table(sort_by="cuda_time_total", row_limit=10))
prof.export_chrome_trace(
dir_name + "profiler/PMP_training_profiler_cuda_{}gpus_{}.json".format(torch.cuda.device_count(),
batch_size))
else:
print(prof.key_averages(group_by_input_shape=True).table(sort_by="cpu_time_total", row_limit=10))
prof.export_chrome_trace(dir_name + "profiler/PMP_training_profiler_cpu.json")
for batch_idx, (test_x, test_y) in enumerate(test_loader):
with profiler.profile(use_cuda=use_gpu) as prof:
with profiler.record_function("model_inference"):
model.eval()
output = model(test_x.to('cuda:0'))
break
if use_gpu:
print(prof.key_averages(group_by_input_shape=True).table(sort_by="cuda_time_total", row_limit=10))
prof.export_chrome_trace(
dir_name + "profiler/PMP_inference_profiler_cuda_{}gpus_{}.json".format(torch.cuda.device_count(), | batch_size))
else: | random_line_split |
|
PMP.py | 224), torchvision.transforms.ToTensor()])),
# transform=transforms.Compose([
# transforms.Resize(224), # resnet默认图片输入大小224*224
# transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
# ])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST(
root='./data',
train=False,
download=True,
transform=transforms.Compose([transforms.Resize(224), torchvision.transforms.ToTensor()])),
batch_size=args.batch_size, shuffle=False)
# test_x = torch.unsqueeze(test_dataset.data, dim=1).type(torch.Tensor)
# test_y = test_dataset.targets
class LeNet(nn.Module): # Using this module need to delete resize in dataloader
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Sequential(nn.Conv2d(1, 6, 3, 1, 2), nn.ReLU(),
nn.MaxPool2d(2, 2))
self.conv2 = nn.Sequential(nn.Conv2d(6, 16, 5), nn.ReLU(),
nn.MaxPool2d(2, 2))
self.fc1 = nn.Sequential(nn.Linear(16 * 5 * 5, 120),
nn.BatchNorm1d(120), nn.ReLU())
self.fc2 = nn.Sequential(
nn.Linear(120, 84),
nn.BatchNorm1d(84),
nn.ReLU(),
nn.Linear(84, 10)) # 最后的结果一定要变为 10,因为数字的选项是 0 ~ 9
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = x.view(x.size()[0], -1)
x = self.fc1(x)
x = self.fc2(x)
return x
class ModelParallelResNet50(ResNet):
def __init__(self, *args, **kwargs): # Bottleneck and [3, 4, 6, 3] refers to ResNet50
super(ModelParallelResNet50, self).__init__(
Bottleneck, [3, 4, 6, 3], num_classes=1000, *args, **kwargs)
self.conv = nn.Conv2d(1, 3, kernel_size=1) # add for MNIST
self.seq1 = nn.Sequential(
self.conv,
self.conv1,
self.bn1,
self.relu,
self.maxpool,
self.layer1,
self.layer2
).to('cuda:0')
self.seq2 = nn.Sequential(
self.layer3,
self.layer4,
self.avgpool,
).to('cuda:1')
self.fc.to('cuda:1')
def forward(self, x):
x = self.seq2(self.seq1(x).to('cuda:1'))
return self.fc(x.view(x.size(0), -1))
class PipelineParallelResNet50(ModelParallelResNet50):
def __init__(self, split_size=32, *args, **kwargs):
super(PipelineParallelResNet50, self).__init__(*args, **kwargs)
self.split_size = split_size
def forward(self, x):
splits = iter(x.split(self.split_size, dim=0))
s_next = next(splits)
s_prev = self.seq1(s_next).to('cuda:1')
ret = []
for s_next in splits:
# A. s_prev runs on cuda:1
s_prev = self.seq2(s_prev)
ret.append(self.fc(s_prev.view(s_prev.size(0), -1)))
# B. s_next runs on cuda:0, which can run concurrently with A
s_prev = self.seq1(s_next).to('cuda:1')
| /runs/PMP')
data_iter = iter(train_loader)
images, labels = data_iter.next()
images = images.cuda()
writer.add_graph(model, images)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
train_acc_i = []
train_acc_list = []
a = []
ac_list = []
def train(epoch): # 定义每个epoch的训练细节
model.train() # 设置为trainning模式
running_loss = 0.0
running_accuracy = 0.0
for batch_idx, (data, target) in enumerate(train_loader):
if use_gpu: # 如果要调用GPU模式,就把数据转存到GPU
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target) # 把数据转换成Variable
optimizer.zero_grad() # 优化器梯度初始化为零
# For MP use 从cuda:0输入数据
output = model(data.to('cuda:0')) # 把数据输入网络并得到输出,即进行前向传播
train_output = torch.max(output, dim=1)[1]
# 从output的device输入target进行反向传播
target = target.to(output.device)
loss = criterion(output, target) # 计算损失函数
loss.backward() # 反向传播梯度
running_accuracy += torch.sum(torch.eq(target, train_output)).item() / target.cpu().numpy().size
running_loss += loss.item()
optimizer.step() # 结束一次前传+反传之后,更新优化器参数
if batch_idx % args.log_interval == 0: # 准备打印相关信息,args.log_interval是最开头设置的好了的参数
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t Accuracy: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item(), running_accuracy / args.log_interval))
# ...log the running loss
if not args.no_tensorboard:
writer.add_scalar('training loss',
running_loss / args.log_interval,
epoch * len(train_loader.dataset) + batch_idx * args.batch_size)
writer.add_scalar('training accuracy', running_accuracy / args.log_interval,
epoch * len(train_loader.dataset) + batch_idx * args.batch_size)
running_loss = 0.0
running_accuracy = 0.0
def test(epoch):
model.eval() # 设置为test模式
test_loss = 0 # 初始化测试损失值为0
correct = 0 # 初始化预测正确的数据个数为0
running_accuracy = 0.0
total = 0
count = 0
for data, target in test_loader:
if use_gpu:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
output = model(data.to('cuda:0'))
target = target.to(output.device)
test_loss += criterion(output, target).item() # sum up batch loss 把所有loss值进行累加
test_output = torch.max(output, dim=1)[1] # get the index of the max log-probability
# pred = output.data.max(1, keepdim=True)[1]
correct += torch.sum(torch.eq(test_output, target)).item()
total += target.size(0)
count += 1
# correct += test_output.eq(target.data.view_as(test_output)).cpu().sum() # 对预测正确的数据个数进行累加
# running_accuracy += torch.sum(torch.eq(target, test_output)).item() / target.cpu().numpy().size
test_loss /= count # 因为把所有loss值进行过累加,所以最后要除以总得数据长度才得平均loss
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}% )\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset), ))
if not args.no_tensorboard:
writer.add_scalar('testing loss', test_loss, epoch)
writer.add_scalar('testing accuracy', 100. * correct / len(test_loader.dataset), epoch)
def profile(dir_name='./runs/benchmark/', batch_size=args.batch_size):
for batch_idx, (train_x, train_y) in enumerate(train_loader): # 把取数据放在profile里会报错,所以放在外面
with profiler.profile() as prof:
with profiler.record_function("model_training"):
train_x = train_x.to('cuda:0')
train_y = train_y.to('cuda:1')
model.train()
optimizer.zero_grad()
loss = criterion(model(train_x), train_y)
loss.backward()
optimizer.step()
if use_gpu:
print(prof.key_averages | s_prev = self.seq2(s_prev)
ret.append(self.fc(s_prev.view(s_prev.size(0), -1)))
return torch.cat(ret)
model = PipelineParallelResNet50()
if not args.no_tensorboard:
writer = SummaryWriter('. | conditional_block |
PMP.py | 224), torchvision.transforms.ToTensor()])),
# transform=transforms.Compose([
# transforms.Resize(224), # resnet默认图片输入大小224*224
# transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
# ])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST(
root='./data',
train=False,
download=True,
transform=transforms.Compose([transforms.Resize(224), torchvision.transforms.ToTensor()])),
batch_size=args.batch_size, shuffle=False)
# test_x = torch.unsqueeze(test_dataset.data, dim=1).type(torch.Tensor)
# test_y = test_dataset.targets
class LeNet(nn.Module): # Using this module need to delete resize in dataloader
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Sequential(nn.Conv2d(1, 6, 3, 1, 2), nn.ReLU(),
nn.MaxPool2d(2, 2))
self.conv2 = nn.Sequential(nn.Conv2d(6, 16, 5), nn.ReLU(),
nn.MaxPool2d(2, 2))
self.fc1 = nn.Sequential(nn.Linear(16 * 5 * 5, 120),
nn.BatchNorm1d(120), nn.ReLU())
self.fc2 = nn.Sequential(
nn.Linear(120, 84),
nn.BatchNorm1d(84),
nn.ReLU(),
nn.Linear(84, 10)) # 最后的结果一定要变为 10,因为数字的选项是 0 ~ 9
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = x.view(x.size()[0], -1)
x = self.fc1(x)
x = self.fc2(x)
return x
class ModelParallelResNet50(ResNet):
def __init__(self, *args, **kwargs): # Bottleneck and [3, 4, 6, 3] refers to ResNet50
super(ModelParallelResNet50, self).__init__(
Bottleneck, [3, 4, 6, 3], num_classes=1000, *args, **kwargs)
self.conv = nn.Conv2d(1, 3, kernel_size=1) # add for MNIST
self.seq1 = nn.Sequential(
self.conv,
self.conv1,
self.bn1,
self.relu,
self.maxpool,
self.layer1,
self.layer2
).to('cuda:0')
self.seq2 = nn.Sequential(
self.layer3,
self.layer4,
self.avgpool,
).to('cuda:1')
self.fc.to('cuda:1')
def forward(self, x):
x = self.seq2(self.seq1(x).to('cuda:1'))
return self.fc(x.view(x.size(0), -1))
class PipelineParallelResNet50(ModelParallelResNet50):
def __init__(self, split_size=32, *args, **kwargs):
super(PipelineParallelResNet50, self).__init__(*args, **kwargs)
self.split_size = split_size
def forward(self, x):
splits = iter(x.split(self.split_size, dim=0))
s_next = next(splits)
s_p | . s_prev runs on cuda:1
s_prev = self.seq2(s_prev)
ret.append(self.fc(s_prev.view(s_prev.size(0), -1)))
# B. s_next runs on cuda:0, which can run concurrently with A
s_prev = self.seq1(s_next).to('cuda:1')
s_prev = self.seq2(s_prev)
ret.append(self.fc(s_prev.view(s_prev.size(0), -1)))
return torch.cat(ret)
model = PipelineParallelResNet50()
if not args.no_tensorboard:
writer = SummaryWriter('./runs/PMP')
data_iter = iter(train_loader)
images, labels = data_iter.next()
images = images.cuda()
writer.add_graph(model, images)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
train_acc_i = []
train_acc_list = []
a = []
ac_list = []
def train(epoch): # 定义每个epoch的训练细节
model.train() # 设置为trainning模式
running_loss = 0.0
running_accuracy = 0.0
for batch_idx, (data, target) in enumerate(train_loader):
if use_gpu: # 如果要调用GPU模式,就把数据转存到GPU
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target) # 把数据转换成Variable
optimizer.zero_grad() # 优化器梯度初始化为零
# For MP use 从cuda:0输入数据
output = model(data.to('cuda:0')) # 把数据输入网络并得到输出,即进行前向传播
train_output = torch.max(output, dim=1)[1]
# 从output的device输入target进行反向传播
target = target.to(output.device)
loss = criterion(output, target) # 计算损失函数
loss.backward() # 反向传播梯度
running_accuracy += torch.sum(torch.eq(target, train_output)).item() / target.cpu().numpy().size
running_loss += loss.item()
optimizer.step() # 结束一次前传+反传之后,更新优化器参数
if batch_idx % args.log_interval == 0: # 准备打印相关信息,args.log_interval是最开头设置的好了的参数
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t Accuracy: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item(), running_accuracy / args.log_interval))
# ...log the running loss
if not args.no_tensorboard:
writer.add_scalar('training loss',
running_loss / args.log_interval,
epoch * len(train_loader.dataset) + batch_idx * args.batch_size)
writer.add_scalar('training accuracy', running_accuracy / args.log_interval,
epoch * len(train_loader.dataset) + batch_idx * args.batch_size)
running_loss = 0.0
running_accuracy = 0.0
def test(epoch):
model.eval() # 设置为test模式
test_loss = 0 # 初始化测试损失值为0
correct = 0 # 初始化预测正确的数据个数为0
running_accuracy = 0.0
total = 0
count = 0
for data, target in test_loader:
if use_gpu:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
output = model(data.to('cuda:0'))
target = target.to(output.device)
test_loss += criterion(output, target).item() # sum up batch loss 把所有loss值进行累加
test_output = torch.max(output, dim=1)[1] # get the index of the max log-probability
# pred = output.data.max(1, keepdim=True)[1]
correct += torch.sum(torch.eq(test_output, target)).item()
total += target.size(0)
count += 1
# correct += test_output.eq(target.data.view_as(test_output)).cpu().sum() # 对预测正确的数据个数进行累加
# running_accuracy += torch.sum(torch.eq(target, test_output)).item() / target.cpu().numpy().size
test_loss /= count # 因为把所有loss值进行过累加,所以最后要除以总得数据长度才得平均loss
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}% )\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset), ))
if not args.no_tensorboard:
writer.add_scalar('testing loss', test_loss, epoch)
writer.add_scalar('testing accuracy', 100. * correct / len(test_loader.dataset), epoch)
def profile(dir_name='./runs/benchmark/', batch_size=args.batch_size):
for batch_idx, (train_x, train_y) in enumerate(train_loader): # 把取数据放在profile里会报错,所以放在外面
with profiler.profile() as prof:
with profiler.record_function("model_training"):
train_x = train_x.to('cuda:0')
train_y = train_y.to('cuda:1')
model.train()
optimizer.zero_grad()
loss = criterion(model(train_x), train_y)
loss.backward()
optimizer.step()
if use_gpu:
print(prof.key_averages(group | rev = self.seq1(s_next).to('cuda:1')
ret = []
for s_next in splits:
# A | identifier_body |
box.py | .un_set_glues)
@property
def natural_length(self):
# The natural width, x, of the box contents is determined by adding up
# the widths of the boxes and kerns inside, together with the natural
# widths of all the glue inside.
# I'm assuming this also applies to VBoxes, but adding heights instead
# of widths. Might not be true, considering depths exist.
w = 0
for item in self.contents:
if isinstance(item, Glue):
w += item.natural_length
elif isinstance(item, Kern):
w += item.length
else:
w += self.get_length(item)
return w
@property
def min_length(self):
"""
Non-Knuthian concept, used to decide if a box is over-full: the length
even if all glue is maximally shrunk.
"""
w = 0
for item in self.contents:
if isinstance(item, Glue):
w += item.min_length
elif isinstance(item, Kern):
w += item.length
else:
w += self.get_length(item)
return w
@property
def is_over_full(self):
return self.min_length > self.desired_length
@property
def desired_length(self):
if self.to is not None:
return self.to
w = self.natural_length
if self.spread is not None:
w += self.spread
return w
def append(self, *args, **kwargs):
self.contents.append(*args, **kwargs)
def extend(self, *args, **kwargs):
self.contents.extend(*args, **kwargs)
def copy(self, *args, **kwargs):
# If glue is set, need to tell the constructor that set_glue should be
# True, but that the glue is already set.
if self.set_glue:
raise NotImplementedError('Can only copy un-set boxes at the '
'moment, because that is all that is '
'needed')
return self.__class__(contents=self.contents[:],
to=self.to, spread=self.spread, set_glue=False)
def glue_set_ratio(self):
return glue_set_ratio(self.natural_length, self.desired_length,
tuple(self.stretch), tuple(self.shrink))
def scale_and_set(self):
line_state, glue_ratio, glue_set_order = self.glue_set_ratio()
# I undo the disobeyance I did in the glue set ratio logic, to align
# with the TeXbook from now on.
if glue_ratio in (GlueRatio.no_shrinkability,
GlueRatio.no_stretchability):
glue_ratio = 0.0
# Note I've quoted this from the TeXbook, talking about setting glue in
# an H Box. But it later says that this all applies to V Boxes, so I've
# changed 'width' to 'length'.
# Every glob of glue in the list being boxed is modified. Suppose the
# glue has natural length u, stretchability y, and shrinkability z,
# where y is a jth order infinity and z is a kth order infinity.
for i, item in enumerate(self.contents):
if (not isinstance(item, Glue)) or item.is_set:
continue
g = item
if line_state == LineState.naturally_good:
glue_diff = 0
elif line_state == LineState.should_stretch:
glue_order, glue_factor = extract_dimen(g.stretch)
# [Each] glue takes the new length u + ry if j=i;
# it keeps its natural length u if j != i.
if glue_order == glue_set_order:
glue_diff = glue_ratio * glue_factor
else:
glue_diff = 0
elif line_state == LineState.should_shrink:
glue_order, glue_factor = extract_dimen(g.shrink)
# [Each] glue takes the new length u-rz if k = i; it
# keeps its natural length u if k != i.
if glue_order == glue_set_order:
glue_diff = -glue_ratio * glue_factor
else:
glue_diff = 0
else:
raise ValueError(f'Unknown line state: {line_state}')
# Notice that stretching or shrinking occurs only when the glue
# has the highest order of infinity that doesn't cancel out.
self.contents[i].set(round(g.natural_length + glue_diff))
self.set_glue = True
def badness(self):
"""
Compute how bad this box would look if placed on a line. This is
high if the line is much shorter or longer than the page width.
"""
# Page 97 of TeXbook.
# "The badness of a line is an integer that is approximately 100 times
# the cube of the ratio by which the glue inside the line must stretch
# or shrink to make an hbox of the required size. For example, if the
# line has a total shrinkability of 10 points, and if the glue is being
# compressed by a total of 9 points, the badness is computed to be 73
# (since 100 * (9/10)^3 = 72.9); similarly, a line that stretches by
# twice its total stretchability has a badness of 800. But if the
# badness obtained by this method turns out to be more than 10000, the
# value 10000 is used. (See the discussion of glue set ratio and glue
# set order in Chapter 12; if i != 0, there is infinite stretchability
# or shrinkability, so the badness is zero, otherwise the badness is
# approximately min(100r^3, 10000).) Overfull boxes are considered to
# be infinitely bad; they are avoided whenever possible."
# Page 111 of TeXbook.
# "Vertical badness is computed by the same rules as horizontal
# badness; it is an integer between 0 and 10000, inclusive, except when
# the box is overfull, when it is infinity."
if self.is_over_full:
return math.inf
line_state, glue_ratio, glue_order = self.glue_set_ratio()
if glue_order > 0:
return 0
# I can't find this stated anywhere, but it seems intuitively correct:
# a single word on a line has no flexibility, but it is probably bad.
elif glue_ratio in (GlueRatio.no_stretchability,
GlueRatio.no_shrinkability):
return 10000
else:
return min(round(100 * glue_ratio ** 3), 10000)
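# Worked example of the badness formula above (standalone sketch, not part of the original
# module): badness is approximately min(round(100 * r**3), 10000) for glue set ratio r,
# which reproduces the TeXbook numbers quoted in the comment: r = 9/10 gives 73 and a line
# stretching by twice its stretchability (r = 2) gives 800.
def _badness_from_ratio(glue_ratio):
    return min(round(100 * glue_ratio ** 3), 10000)

assert _badness_from_ratio(9 / 10) == 73
assert _badness_from_ratio(2) == 800
assert _badness_from_ratio(10) == 10000   # capped, just like the method above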
class HBox(AbstractBox):
def get_length(self, item):
if isinstance(item, (Glue, Kern)):
return item.length
else:
return item.width
@property
def widths(self):
return [self.get_length(e) for e in self.contents]
@property
def heights(self):
return [0 if isinstance(e, (Glue, Kern)) else e.height
for e in self.contents]
@property
def depths(self):
return [0 if isinstance(e, (Glue, Kern)) else e.depth
for e in self.contents]
@property
def width(self):
if not self.set_glue:
raise AttributeError('HBox is not set yet, does not have a width')
return self.desired_length
# TODO: I'm not sure the height and depth definitions are correct.
@property
def height(self):
return max(self.heights, default=0)
@property
def depth(self):
return max(self.depths, default=0)
def demerit(self, break_item, line_penalty):
ten_k = 10000
el = line_penalty
b = self.badness()
p = get_penalty(self.contents, break_item)
d = (el + b)**2
if 0 <= p < ten_k:
d += p**2
elif -ten_k < p < 0:
d -= p**2
elif p <= -ten_k:
pass
else:
raise LogicError('Undefined condition state when computing '
'demerit')
return d
def considerable_as_line(self, tolerance, break_item):
return (get_penalty(self.contents, break_item) < 10000
and (self.badness() <= tolerance))
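# Worked example of HBox.demerit above (standalone arithmetic sketch, not part of the
# original module): with line penalty el, badness b and break penalty p, the demerit is
# (el + b)**2, plus p**2 for 0 <= p < 10000, minus p**2 for -10000 < p < 0, and unchanged
# for forced breaks with p <= -10000; penalties of 10000 or more are not considered here.
def _demerit(el, b, p, ten_k=10000):
    d = (el + b) ** 2
    if 0 <= p < ten_k:
        d += p ** 2
    elif -ten_k < p < 0:
        d -= p ** 2
    return d

assert _demerit(el=10, b=73, p=0) == 83 ** 2
assert _demerit(el=10, b=73, p=50) == 83 ** 2 + 50 ** 2
assert _demerit(el=10, b=73, p=-10000) == 83 ** 2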
class VBox(AbstractBox):
def get_length(self, item):
if isinstance(item, (Glue, Kern)):
return item.length
else:
return item.height
@property
def widths(self):
return [0 if isinstance(e, (Glue, Kern)) else e.width
for e in self.contents]
@property
def heights(self):
return [self.get_length(e) for e in self.contents]
@property
def depths(self):
        return [0 if isinstance(e, (Glue, Kern)) else e.depth
for e in self.contents]
@property
def width(self):
| return max(self.widths) | identifier_body |