content (stringlengths 0-894k) | type (stringclasses 2 values)
---|---
import math
import random
def print_n_whitespaces(n: int):
print(" " * n, end="")
def print_n_newlines(n: int):
for _ in range(n):
print()
def subroutine_1610():
B = 3 / A * random.random()
if B < 0.37:
C = 0.5
elif B < 0.5:
C = 0.4
elif B < 0.63:
C = 0.3
elif B < 0.87:
C = 0.2
else:
C = 0.1
T = math.floor(10 * C + 0.2)
print(f"THE {AS}{BS} DID A {LS[T]} JOB.")
if T >= 4:
if T == 5:
# 1800 & 1810 are unreachable, so they're not presented here
K = random.randint(1, 2)
if K == 1:
print(f"ONE OF THE {AS}{BS} WAS KILLED.")
elif K == 2:
print(f"NO {AS}{BS} WERE KILLED.")
else:
if AS != "TOREAD":
K = random.randint(1, 2)
print(f"{K} OF THE HORSES OF THE {AS}{BS} KILLED.")
K = random.randint(1, 2)
print(f"{K} OF THE {AS}{BS} KILLED.")
print()
return C
def FNC():
Q = (
4.5 + L / 6 - (D[1] + D[2]) * 2.5 + 4 * D[4] + 2 * D[5] - (D[3] ** 2) / 120 - A
) * random.random()
return Q
print_n_whitespaces(34)
print("BULL")
print_n_whitespaces(15)
print("CREATIVE COMPUTING MORRISTOWN, NEW JERSEY")
print_n_newlines(2)
L = 1
Z = input("DO YOU WANT INSTRUCTIONS? ")
if Z != "NO":
print("HELLO, ALL YOU BLOODLOVERS AND AFICIONADOS.")
print("HERE IS YOUR BIG CHANCE TO KILL A BULL.")
print()
print("ON EACH PASS OF THE BULL, YOU MAY TRY")
print("0 - VERONICA (DANGEROUS INSIDE MOVE OF THE CAPE)")
print("1 - LESS DANGEROUS OUTSIDE MOVE OF THE CAPE")
print("2 - ORDINARY SWIRL OF THE CAPE.")
print()
print("INSTEAD OF THE ABOVE, YOU MAY TRY TO KILL THE BULL")
print("ON ANY TURN: 4 (OVER THE HORNS), 5 (IN THE CHEST).")
print("BUT IF I WERE YOU,")
print("I WOULDN'T TRY IT BEFORE THE SEVENTH PASS.")
print()
print("THE CROWD WILL DETERMINE WHAT AWARD YOU DESERVE")
print("(POSTHUMOUSLY IF NECESSARY).")
print("THE BRAVER YOU ARE, THE BETTER THE AWARD YOU RECEIVE.")
print()
print("THE BETTER THE JOB THE PICADORES AND TOREADORES DO,")
print("THE BETTER YOUR CHANCES ARE.")
print_n_newlines(2)
D = {}
D[5] = 1
D[4] = 1
LS = ["", "SUPERB", "GOOD", "FAIR", "POOR", "AWFUL"]
A = random.randint(1, 5)
print(f"YOU HAVE DRAWN A {LS[A]} BULL.")
if A > 4:
print("YOU'RE LUCKY.")
elif A < 2:
print("GOOD LUCK. YOU'LL NEED IT.")
print()
print()
AS = "PICADO"
BS = "RES"
C = subroutine_1610()
D[1] = C
AS = "TOREAD"
BS = "ORES"
C = subroutine_1610()
D[2] = C
print_n_newlines(2)
D[3] = 0
while True:
D[3] = D[3] + 1 # 660
print(f"PASS NUMBER {D[3]}")
if D[3] >= 3:
while True: # 1930
AS = input("HERE COMES THE BULL. TRY FOR A KILL? ")
if AS not in ["YES", "NO"]:
print("INCORRECT ANSWER - - PLEASE TYPE 'YES' OR 'NO'.")
else:
break
Z1 = 1 if AS == "YES" else 2
if Z1 != 1:
print("CAPE MOVE? ", end="")
else:
pass
# goto 1130
else:
print("THE BULL IS CHARGING AT YOU! YOU ARE THE MATADOR--")
while True: # 1930
AS = input("DO YOU WANT TO KILL THE BULL? ")
if AS not in ["YES", "NO"]:
print("INCORRECT ANSWER - - PLEASE TYPE 'YES' OR 'NO'.")
else:
break
Z1 = 1 if AS == "YES" else 2
if Z1 != 1:
print("WHAT MOVE DO YOU MAKE WITH THE CAPE? ", end="")
else:
# goto 1130
pass
gore = 0
if Z1 != 1: # NO
while True:
E = float(input())
if E != float(int(abs(E))):
print("DON'T PANIC, YOU IDIOT! PUT DOWN A CORRECT NUMBER")
elif E < 3:
break
if E == 0:
M = 3
elif E == 1:
M = 2
else:
M = 0.5
L = L + M
F = (6 - A + M / 10) * random.random() / ((D[1] + D[2] + D[3] / 10) * 5)
if F < 0.51:
continue
gore = 1
else: # YES
print()
print("IT IS THE MOMENT OF TRUTH.")
print()
H = int(input("HOW DO YOU TRY TO KILL THE BULL? "))
if H not in [4, 5]:
print("YOU PANICKED. THE BULL GORED YOU.")
gore = 2
# goto 970
else:
K = (6 - A) * 10 * random.random() / ((D[1] + D[2]) * 5 * D[3])
if H == 4:
if K > 0.8:
gore = 1
else:
if K > 0.2:
gore = 1
if gore == 0:
print("YOU KILLED THE BULL!")
D[5] = 2
break
if gore > 0:
if gore == 1:
print("THE BULL HAS GORED YOU!")
death = False
while True:
_ = random.randint(1, 2) # 970
if _ == 1:
print("YOU ARE DEAD.")
D[4] = 1.5
# goto 1320
death = True
break
else:
print("YOU ARE STILL ALIVE.")
print()
print("DO YOU RUN FROM THE RING? ", end="")
while True: # 1930
AS = input()
if AS not in ["YES", "NO"]:
print("INCORRECT ANSWER - - PLEASE TYPE 'YES' OR 'NO'.")
else:
break
Z1 = 1 if AS == "YES" else 2
if Z1 == 2:
print("YOU ARE BRAVE. STUPID, BUT BRAVE.")
_ = random.randint(1, 2)
if _ == 1:
D[4] = 2
# goto 660, outer while loop
death = True
break
else:
print("YOU ARE GORED AGAIN!")
# goto 970
else:
print("COWARD")
D[4] = 0
# goto 1310, break outer while loop
death = True
break
if death:
break
# 1310
print_n_newlines(3)
if D[4] == 0:
print("THE CROWD BOOS FOR TEN MINUTES. IF YOU EVER DARE TO SHOW")
print("YOUR FACE IN A RING AGAIN, THEY SWEAR THEY WILL KILL YOU--")
print("UNLESS THE BULL DOES FIRST.")
else:
if D[4] == 2:
print("THE CROWD CHEERS WILDLY!")
elif D[5] == 2:
print("THE CROWD CHEERS!")
print()
print("THE CROWD AWARDS YOU")
if FNC() < 2.4:
print("NOTHING AT ALL.")
elif FNC() < 4.9:
print("ONE EAR OF THE BULL.")
elif FNC() < 7.4:
print("BOTH EARS OF THE BULL!")
print("OLE!")
else:
print("OLE! YOU ARE 'MUY HOMBRE'!! OLE! OLE!")
print()
print("ADIOS")
print_n_newlines(3)
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created by Chouayakh Mahdi
25/06/2010
The package contains functions to analyse all sentences of an utterance
Functions:
dispatching : to distribute the sentence
separ_sentence : to process the beginning of the sentence
exclama_sentence : to process exclamative sentences
w_quest_where : to process many different types of where question
w_quest_class : to process what questions about classification
w_quest_what : to process many different types of what question
w_quest_quant : to process many different types of quantity (how much/many) question
w_quest_how : to process many different types of how question
w_quest_which : to process which questions
stc_start_subsentence : to process a subsentence at the beginning of the sentence
w_quest_whose : to process many different types of whose question
w_quest_whom : to process whom questions
y_n_ques : to process the yes or no question form of a sentence
other_sentence : to process the other forms of a sentence
sentences_analyzer : the basic parsing function
"""
from dialogs.sentence import *
from dialogs.sentence_types import *
from dialogs.resources_manager import ResourcePool
from . import analyse_nominal_group
from . import analyse_nominal_structure
from . import analyse_verb
from . import analyse_verbal_structure
from . import other_functions
from . import preprocessing
def dispatching(sentence):
"""
distributes the sentence according to:
Their functionality and their type
Input=sentence, beginning sentence list Output=class Sentence
"""
if len(sentence) > 0:
#For ending dialogue
if sentence[0].endswith('bye'):
return [Sentence(END, '', [], [])]
#When others
for x in ResourcePool().sentence_starts:
#If we find a knowing case
if sentence[0] == x[0]:
#For
if x[1] == '1':
return [Sentence(START, '', [], [])]
#It's a w_question or subsentence
if x[1] == '2':
#If there is which or no nominal group it is a question
if sentence[0] != 'which' and analyse_nominal_group.find_sn_pos(sentence, 1) != []:
#Here we have the condition of the subsentences
return [stc_start_subsentence(sentence)]
#For 'when'
if x[2] == '1':
#If we remove the first word => it becomes like y_n_question
return [y_n_ques(W_QUESTION, 'date', sentence[1:])]
#For 'where'
elif x[2] == '2':
return [w_quest_where(W_QUESTION, 'place', sentence)]
#For 'what'
elif x[2] == '3':
#Here we have to use a specific processing for 'type' and 'kind'
if sentence[1] == 'type' or sentence[1] == 'kind':
#We start by processing the end of the sentence like a y_n_question
return [w_quest_class(sentence)]
#For other type of 'what' question
else:
return [w_quest_what(W_QUESTION, sentence)]
#For 'how'
elif x[2] == '4':
if sentence[1] == 'many' or sentence[1] == 'much':
return [w_quest_quant(W_QUESTION, 'quantity', sentence)]
elif sentence[1] == 'about':
#We replace 'about' by 'is' to have a y_n_question
sentence[1] = 'is'
return [y_n_ques(W_QUESTION, 'invitation', sentence[1:])]
#For other type of 'how' question
else:
return [w_quest_how(W_QUESTION, sentence)]
#For 'why'
elif x[2] == '5':
return [y_n_ques(W_QUESTION, 'reason', sentence[1:])]
#For 'whose'
elif x[2] == '6':
return [w_quest_whose(W_QUESTION, 'owner', sentence)]
#For 'who'
elif x[2] == '7':
return [y_n_ques(W_QUESTION, 'people', sentence[1:])]
#For 'which'
elif x[2] == '8':
return [w_quest_which(W_QUESTION, 'choice', sentence[1:])]
#For 'to whom'
elif x[2] == '9':
return [w_quest_whom(W_QUESTION, 'people', sentence[1:])]
#It's a y_n_question
elif x[1] == '3':
return [y_n_ques(YES_NO_QUESTION, '', sentence)]
#It's a conditional sentence
elif x[1] == '4':
return [stc_start_subsentence(sentence)]
#Agree
elif x[1] == '5':
return separ_sentence(sentence, AGREEMENT)
#Disagree
elif x[1] == '6':
return separ_sentence(sentence, DISAGREEMENT)
#Gratulation
elif x[1] == '7':
return separ_sentence(sentence, GRATULATION)
#Interjection
elif x[1] == '8':
return [exclama_sentence(sentence)]
#For exclamative sentences
if sentence[len(sentence) - 1] == '!':
return [exclama_sentence(sentence)]
#It's a statement or an imperative sentence
return [other_sentence('', '', sentence)]
#Default case
return []
def separ_sentence(sentence, data_type):
"""
process the beginning of the sentence
Input=the sentence Output=class Sentence
"""
#If 'good' is followed by 'morning', 'evening' or 'afternoon', it is a greeting (START)
if data_type == AGREEMENT and len(sentence) > 1 and (
sentence[1] == 'morning' or sentence[1] == 'evening' or sentence[1] == 'afternoon'):
sentences = [Sentence(START, '', [], [])]
else:
#init
sentences = [Sentence(data_type, '', [], [])]
for i in sentence:
if i == ';':
#We put the first sentence in the aim
sentences[0].aim = " ".join(sentence[:sentence.index(i)]).rstrip('; ') + '.'
sentence = sentence[sentence.index(i) + 1:]
#We process the end of the sentence as a complete sentence
sentence = preprocessing.process_and_beginning_sentence(sentence)
sentences = sentences + dispatching(sentence)
break
else:
#In this case, it is the end of the sentence
sentences[0].aim = " ".join(sentence).rstrip('. ') + '.'
return sentences
def exclama_sentence(sentence):
"""
process an exclamative sentence
Input=the sentence Output=class Sentence
"""
for i in ResourcePool().sentence_starts:
if i[0] == sentence[0]:
if i[1] == '0':
analysis = Sentence(INTERJECTION, '', [], [])
#We recover the subject
sentence = analyse_nominal_structure.recover_ns(sentence, analysis, 1)
return analysis
elif i[1] == '2':
#It is an exclamation sentence
analysis = Sentence(EXCLAMATION, '', [], [])
#We recover the subject
sentence = analyse_nominal_structure.recover_ns(sentence, analysis, 0)
return analysis
#If we have an imperative it can be forced
analysis = other_sentence(INTERJECTION, '', sentence)
if analysis.data_type == INTERJECTION and not analysis.sv:
pass
else:
analysis.data_type = IMPERATIVE
return analysis
def w_quest_where(type, request, stc):
"""
process many different type of where question
Input=type and requesting of sentence, the sentence Output=class Sentence
"""
#If there is 'from' at the end => question about the origin
if stc[len(stc) - 1] == 'from' or (stc[len(stc) - 1] == '?' and stc[len(stc) - 2] == 'from'):
#If we remove the first word => it becomes like y_n_question
return y_n_ques(type, 'origin', stc[1:])
else:
#If we remove the first word => it becomes like y_n_question
return y_n_ques(type, request, stc[1:])
def w_quest_class(sentence):
"""
process what question about classification
Input=sentence Output=class Sentence
"""
analysis = y_n_ques(W_QUESTION, 'classification' + '+' + sentence[4], sentence[5:])
if analysis.sn:
#The direct object must be empty
if analysis.sv[0].d_obj:
analysis.sv[0].i_cmpl = analysis.sv[0].i_cmpl + [IndirectComplement([], analysis.sv[0].d_obj)]
analysis.sv[0].d_obj = []
return analysis
def w_quest_what(type, sentence):
"""
process many different type of what question
Input=type of sentence, the sentence and position of subject
Output=class Sentence
"""
aux_list = other_functions.recover_aux_list()
if sentence[1] in aux_list:
#We start with a processing with the function of y_n_question's case
analysis = y_n_ques(type, 'thing', sentence[1:])
vg = analysis.sv[0]
#The case when we have 'happen'
if analysis.sv[0].vrb_main[0].endswith('happen'):
analysis.aim = 'situation'
#The case when we have 'think'
elif analysis.sv[0].vrb_main[0].endswith('think+of') or analysis.sv[0].vrb_main[0].endswith('think+about'):
analysis.aim = 'opinion'
#The case when we have 'like' + conditional
elif analysis.sv[0].vrb_main[0].endswith('like') and not (analysis.sv[0].vrb_tense.endswith('conditional')):
analysis.aim = 'description'
#The case when we have 'do' + ing form
elif vg.vrb_main[0].endswith('do') and \
vg.i_cmpl != [] and \
vg.i_cmpl[0].gn[0].adj != [] and \
vg.i_cmpl[0].gn[0].adj[0][0].endswith('ing'):
analysis.aim = 'explication'
#There is a noun before the auxiliary
else:
#We will use the same code as the which questions
sentence = ['the'] + sentence[1:]
#We need to have a nominal group at the beginning
analysis = w_quest_which(type, 'thing', sentence)
return analysis
def w_quest_quant(type, request, sentence):
"""
process many different type of quantity question
Input=type and requesting of sentence, the sentence and beginning sentence list
Output=class Sentence
"""
for j in ResourcePool().sentence_starts:
if sentence[2] == j[0]:
if j[1] == '3':
#This case is the same with y_n_question
return y_n_ques(type, request, sentence[2:])
analysis = y_n_ques(type, request, sentence[3:])
#There is not sn in the sentence
if not analysis.sn:
analysis.sn = [NominalGroup(['a'], [sentence[2]], [], [], [])]
else:
#There is not direct object in the sentence
analysis.sv[0].d_obj = [NominalGroup(['a'], [sentence[2]], [], [], [])]
return analysis
def w_quest_how(type, sentence):
"""
process many different type of how question
Input=type of sentence, the sentence Output=class Sentence
"""
aux_list = other_functions.recover_aux_list()
if sentence[1] in aux_list:
analysis = y_n_ques(type, 'manner', sentence[1:])
#The case when we have 'do' + ing form
if analysis.sv[0].vrb_main[0].endswith('like'):
analysis.aim = 'opinion'
return analysis
analysis = y_n_ques(type, sentence[1], sentence[2:])
return analysis
def w_quest_which(type, request, sentence):
"""
process which question
Input=type of sentence, the sentence Output=class Sentence
"""
#We start by finding the nominal group
gr = preprocessing.determination_nominal_group(sentence, 0, 'of')
#If the nominal group contains just 2 elements
if len(gr) == 2:
return y_n_ques(type, sentence[1], sentence[2:])
else:
#After the first gr if there is no nominal group
if not analyse_nominal_group.find_sn_pos(sentence, len(gr)):
for i in ResourcePool().sentence_starts:
#If just after we have an auxiliary
if sentence[len(gr)] == i[0] and i[1] == '3':
#With subject => it is a yes or no question form
if analyse_nominal_group.find_sn_pos(sentence, len(gr) + 1):
analysis = y_n_ques(type, request, sentence[len(gr):])
nominal_gr = other_sentence(type, request, gr)
analysis.sv[0].d_obj = nominal_gr.sn
return analysis
#Else it is like a statement
return other_sentence(type, request, sentence)
#Else if not, the first nominal group is the subject
else:
analysis = other_sentence(type, request, sentence[len(gr):])
nominal_gr = other_sentence(type, request, gr)
analysis.sv[0].d_obj = nominal_gr.sn
return analysis
def stc_start_subsentence(sentence):
"""
process the subsentence at the beginning of the sentence
Input=sentence Output=class Sentence
"""
#We have to add punctuation if there is not
if sentence[len(sentence) - 1] != '.' and sentence[len(sentence) - 1] != '?' and sentence[len(sentence) - 1] != '!':
sentence = sentence + ['.']
#We recover the subsentence
for i in sentence:
if i == ';' or i == '.' or i == '?' or i == '!':
subsentence = sentence[1:sentence.index(i)]
#We perform the 2 processing
if sentence.index(i) != len(sentence) - 1:
analysis = other_sentence(STATEMENT, '', sentence[sentence.index(i) + 1:])
else:
vg = VerbalGroup([], [], '', [], [], [], [], VerbalGroup.affirmative, [])
analysis = Sentence('', '', [], [vg])
break
#We process the subsentence
analysis.sv[0].vrb_sub_sentence = analysis.sv[0].vrb_sub_sentence + dispatching(subsentence)
if analysis.sv[0].vrb_sub_sentence:
analysis.sv[0].vrb_sub_sentence[len(analysis.sv[0].vrb_sub_sentence) - 1].data_type = SUBSENTENCE + '+' + \
analysis.sv[
0].vrb_sub_sentence[
len(analysis.sv[
0].vrb_sub_sentence) - 1].data_type
if sentence[0][0] == ':':
analysis.sv[0].vrb_sub_sentence[len(analysis.sv[0].vrb_sub_sentence) - 1].aim = sentence[0][1:]
else:
analysis.sv[0].vrb_sub_sentence[len(analysis.sv[0].vrb_sub_sentence) - 1].aim = sentence[0]
return analysis
def w_quest_whose(type, request, sentence):
"""
process many different type of whose question
Input=type and requesting of sentence and the sentence
Output=class Sentence
"""
#init
vg = VerbalGroup(['be'], [], '', [], [], [], [], VerbalGroup.affirmative, [])
analysis = Sentence(type, request, [], [])
#We replace 'whose' by 'that' to have a nominal group
sentence[0] = 'that'
#We recover the subject
sentence = analyse_nominal_structure.recover_ns(sentence, analysis, 0)
if sentence[1] == 'not':
vg.state = 'negative'
analysis.sv = [vg]
return analysis
def w_quest_whom(type, request, sentence):
"""
process whom question
Input=type and requesting of sentence and the sentence
Output=class Sentence
"""
#It is the same with yes or no question
analysis = y_n_ques(type, request, sentence)
#We have to add 'to' to the verb
analysis.sv[0].vrb_main[0] += '+to'
return analysis
def y_n_ques(type, request, sentence):
"""
process the yes or no question form of a sentence
Input=type and requesting of sentence and the sentence
Output=class Sentence
"""
#init
vg = VerbalGroup([], [], '', [], [], [], [], VerbalGroup.affirmative, [])
analysis = Sentence(type, request, [], [])
modal = []
stc = sentence
#We start by determining a probable second verb in the subsentence
sentence = other_functions.find_scd_verb_sub(sentence)
#We have to add punctuation if there is not
if sentence == [] or sentence[0] == '.' or sentence[0] == '?' or sentence[0] == '!':
#We have probably the aim as an adverb
analyse_verbal_structure.find_adv([request], vg)
analysis.aim = 'thing'
analysis.sv = [vg]
return analysis
#We recover the auxiliary
aux = sentence[0]
#We have to know if there is a modal
if aux in ResourcePool().modal:
modal = aux
#If we have a negative form
if sentence[1] == 'not':
vg.state = VerbalGroup.negative
#We remove 'not'
sentence = sentence[:1] + sentence[2:]
#Wrong is a noun but not followed by the determinant
if sentence[1] == 'wrong' and request == 'thing':
analysis.sn = [NominalGroup([], [], ['wrong'], [], [])]
sentence = [sentence[0]] + sentence[2:]
#In this case we have an imperative sentence
elif analyse_nominal_group.find_sn_pos(sentence, 1) == [] and type != W_QUESTION:
#We have to reput the 'not'
if vg.state == VerbalGroup.negative:
sentence = sentence[:1] + ['not'] + sentence[1:]
return other_sentence(type, request, sentence)
#We delete the auxiliary
sentence = sentence[1:]
#We have to separate the case using these, this or there
if sentence[0] in ResourcePool().demonstrative_det and analyse_verb.infinitive([aux], 'present simple') == ['be']:
#If we have a verb or an adverb just after (if not, we have a noun)
if sentence[0].endswith('ed') or sentence[0].endswith('ing') or sentence[0].endswith('ly') or sentence[
0] in ResourcePool().adverbs:
#We recover this information and remove it
analysis.sn = [NominalGroup([sentence[0]], [], [], [], [])]
if sentence[0] == 'there' and aux == 'are':
analysis.sn[0]._quantifier = 'SOME'
sentence = sentence[1:]
if not analysis.sn:
#We recover the subject
sentence = analyse_nominal_structure.recover_ns(sentence, analysis, 0)
if aux == 'do' and not analyse_verbal_structure.can_be_imperative(sentence):
return other_sentence('', '', stc)
#If there is one element => it is an auxiliary => verb 'be'
if len(sentence) == 0:
vg.vrb_tense = analyse_verb.find_tense_statement(aux)
vg.vrb_main = ['be']
else:
sentence = analyse_verbal_structure.delete_unusable_word(sentence)
sentence = analyse_verbal_structure.find_vrb_adv(sentence, vg)
vg.vrb_tense = analyse_verb.find_tense_question(sentence, aux)
#We process the verb
verb = analyse_verb.find_verb_question(sentence, aux, vg.vrb_tense)
verb_main = analyse_verb.return_verb(sentence, verb, vg.vrb_tense)
vg.vrb_main = [other_functions.convert_to_string(verb_main)]
#We delete the verb if the aux is not the verb 'be'
if vg.vrb_main != ['be']:
sentence = sentence[sentence.index(verb[0]) + len(verb_main):]
elif sentence[0] == 'be':
sentence = sentence[1:]
#Here we have special processing for different cases
if sentence:
#For the 'what' description case
if sentence[0] == 'like' and aux != 'would':
vg.vrb_main = ['like']
sentence = sentence[1:]
#For 'how' questions with often
elif sentence[0].endswith('ing') and not (sentence[0].endswith('thing')):
vg.vrb_main[0] = vg.vrb_main[0] + '+' + sentence[0]
sentence = sentence[1:]
#We recover the conjunctive subsentence
sentence = analyse_verbal_structure.process_conjunctive_sub(sentence, vg)
#It verifies if there is a secondary verb
sec_vrb = analyse_verbal_structure.find_scd_vrb(sentence)
if sec_vrb:
sentence = analyse_verbal_structure.process_scd_sentence(sentence, vg, sec_vrb)
#We recover the subsentence
sentence = analyse_verbal_structure.process_subsentence(sentence, vg)
#Process relative changes
sentence = analyse_verbal_structure.correct_i_compl(sentence, vg.vrb_main[0])
sentence = analyse_verbal_structure.process_compare(sentence, vg)
sentence = analyse_nominal_group.find_plural(sentence)
#We recover the direct, indirect complement and the adverbial
sentence = analyse_verbal_structure.recover_obj_iobj(sentence, vg)
#We have to take off adverbs from the sentence
sentence = analyse_verbal_structure.find_adv(sentence, vg)
#We perform the processing with the modal
if modal:
vg.vrb_main = [modal + '+' + vg.vrb_main[0]]
#If there is a forgotten
sentence = analyse_verbal_structure.find_vrb_adv(sentence, vg)
#In case there is a state verb followed by an adjective
sentence = analyse_verbal_structure.state_adjective(sentence, vg)
#We have to correct the mistake of the subject
for p in ResourcePool().demonstrative_det:
if analysis.sn and analysis.sn[0].det == [p] and analysis.sn[0].noun == []:
if sentence != [] and sentence[0] != '.' and sentence[0] != '?' and sentence[0] != '!':
if sentence[0] in ResourcePool().proposals:
pass
else:
analysis.sn[0].noun = [sentence[0]]
sentence = sentence[1:]
sentence = analyse_verbal_structure.state_adjective(sentence, vg)
vg = analyse_verbal_structure.DOC_to_IOC(vg)
while len(sentence) > 1:
stc = analyse_verbal_structure.create_nom_gr(sentence, request)
#We recover the direct, indirect complement and the adverbial
stc = analyse_verbal_structure.recover_obj_iobj(stc, vg)
if stc == sentence:
#We leave the loop
break
else:
sentence = stc
vg = analyse_verbal_structure.refine_indirect_complement(vg)
vg = analyse_verbal_structure.refine_subsentence(vg)
vg = analyse_verbal_structure.DOC_to_IOC(vg)
analysis.sv = [vg]
return analysis
def other_sentence(type, request, sentence):
"""
process the other forms of a sentence
Input=type and requesting of sentence and the sentence
Output=class Sentence
"""
#init
vg = VerbalGroup([], [], '', [], [], [], [], VerbalGroup.affirmative, [])
analysis = Sentence(type, request, [], [])
modal = []
if not sentence:
return []
#We have to add punctuation if there is not
if sentence[len(sentence) - 1] not in ['.', '?', '!']:
sentence = sentence + ['.']
#We start by determining a probable second verb in the subsentence
sentence = other_functions.find_scd_verb_sub(sentence)
#We search the subject
sbj = analyse_nominal_group.find_sn_pos(sentence, 0)
if sbj != [] or type == RELATIVE:
#If we haven't a data type => it is a statement
if type == '':
analysis.data_type = STATEMENT
#We have to separate the case using these, this or there
if sentence[0] in ResourcePool().demonstrative_det and analyse_verb.infinitive([sentence[1]],
'present simple') == ['be']:
#We recover this information and remove it
analysis.sn = [NominalGroup([sentence[0]], [], [], [], [])]
if sentence[0] == 'there' and sentence[1] == 'are':
analysis.sn[0]._quantifier = 'SOME'
sentence = sentence[1:]
if not analysis.sn:
#We recover the subject
sentence = analyse_nominal_structure.recover_ns(sentence, analysis, 0)
#End of the sentence? -> nominal sentence
if sentence == [] or sentence[0] in ['.', '!', '?']:
analysis.sv = []
return analysis
#We have to know if there is a modal
if sentence[0] in ResourcePool().modal:
modal = sentence[0]
if modal == 'can' or modal == 'must' or modal == 'shall' or modal == 'may':
sentence = sentence[1:]
#We must take into account all possible cases to recover the sentence's tense
if len(sentence) > 1 and sentence[1] == 'not':
vg.state = VerbalGroup.negative
#Before the negative form we have an auxiliary for the negation
if sentence[0] == 'do' or sentence[0] == 'does' or sentence[0] == 'did':
vg.vrb_tense = analyse_verb.find_tense_statement([sentence[0]])
sentence = sentence[2:]
sentence = analyse_verbal_structure.delete_unusable_word(sentence)
sentence = analyse_verbal_structure.find_vrb_adv(sentence, vg)
#There is a modal
elif modal:
sentence = [sentence[0]] + sentence[2:]
sentence = analyse_verbal_structure.delete_unusable_word(sentence)
sentence = analyse_verbal_structure.find_vrb_adv(sentence, vg)
vg.vrb_tense = analyse_verb.find_tense_statement(sentence)
else:
#We remove 'not' and find the tense
sentence = sentence[:1] + sentence[2:]
sentence = analyse_verbal_structure.delete_unusable_word(sentence)
sentence = analyse_verbal_structure.find_vrb_adv(sentence, vg)
vg.vrb_tense = analyse_verb.find_tense_statement(sentence)
#For the affirmative processing
else:
if sentence[0] == 'not':
vg.state = VerbalGroup.negative
sentence = sentence[1:]
sentence = analyse_verbal_structure.delete_unusable_word(sentence)
sentence = analyse_verbal_structure.find_vrb_adv(sentence, vg)
vg.vrb_tense = analyse_verb.find_tense_statement(sentence)
verb = analyse_verb.find_verb_statement(sentence, vg.vrb_tense)
verb_main = analyse_verb.return_verb(sentence, verb, vg.vrb_tense)
vg.vrb_main = [other_functions.convert_to_string(verb_main)]
#We delete the verb
sentence = sentence[sentence.index(verb[0]) + len(verb_main):]
#We perform the processing with the modal
if modal:
vg.vrb_main = [modal + '+' + vg.vrb_main[0]]
#This is an imperative form
else:
#re-init
analysis.data_type = IMPERATIVE
vg.vrb_tense = 'present simple'
if sentence[0] in ResourcePool().proposals:
sentence = ['.'] + sentence
#Negative form
if sentence[1] == 'not':
sentence = sentence[sentence.index('not') + 1:]
sentence = analyse_verbal_structure.delete_unusable_word(sentence)
sentence = analyse_verbal_structure.find_vrb_adv(sentence, vg)
vg.state = VerbalGroup.negative
else:
sentence = analyse_verbal_structure.delete_unusable_word(sentence)
sentence = analyse_verbal_structure.find_vrb_adv(sentence, vg)
#We process the verb
verb = [sentence[0]]
verb_main = analyse_verb.return_verb(sentence, verb, vg.vrb_tense)
vg.vrb_main = [other_functions.convert_to_string(verb_main)]
#We delete the verb
sentence = sentence[sentence.index(verb[0]) + len(verb_main):]
if sentence and sentence[-1] == '?':
analysis.data_type = YES_NO_QUESTION
#We recover the conjunctive subsentence
sentence = analyse_verbal_structure.process_conjunctive_sub(sentence, vg)
#It verifies if there is a secondary verb
sec_vrb = analyse_verbal_structure.find_scd_vrb(sentence)
if sec_vrb:
sentence = analyse_verbal_structure.process_scd_sentence(sentence, vg, sec_vrb)
#We recover the subsentence
sentence = analyse_verbal_structure.process_subsentence(sentence, vg)
if sentence != [] and vg.vrb_main != []:
#Process relative changes
sentence = analyse_verbal_structure.correct_i_compl(sentence, vg.vrb_main[0])
sentence = analyse_verbal_structure.process_compare(sentence, vg)
sentence = analyse_nominal_group.find_plural(sentence)
#We recover the direct, indirect complement and the adverbial
sentence = analyse_verbal_structure.recover_obj_iobj(sentence, vg)
#We have to take off adverbs from the sentence
sentence = analyse_verbal_structure.find_adv(sentence, vg)
#In case there is a state verb followed by an adjective
sentence = analyse_verbal_structure.state_adjective(sentence, vg)
#If there is a forgotten
sentence = analyse_verbal_structure.find_vrb_adv(sentence, vg)
vg = analyse_verbal_structure.DOC_to_IOC(vg)
while len(sentence) > 1:
stc = analyse_verbal_structure.create_nom_gr(sentence, request)
#We recover the direct, indirect complement and the adverbial
stc = analyse_verbal_structure.recover_obj_iobj(stc, vg)
if stc == sentence:
#We leave the loop
break
else:
sentence = stc
vg = analyse_verbal_structure.refine_indirect_complement(vg)
vg = analyse_verbal_structure.refine_subsentence(vg)
vg = analyse_verbal_structure.DOC_to_IOC(vg)
analysis.sv = [vg]
return analysis
def sentences_analyzer(sentences):
"""
This function is the basic function of parsing
Input=list of sentences and beginning sentence list
Output=list of class Sentence
"""
#init
class_sentence_list = []
nom_gr = []
y = 0
#We process all sentences of the list
for i in sentences:
if i:
#We have to add punctuation if there is not
if i[-1] not in ['.', '?', '!']:
i = i + ['.']
class_sentence_list = class_sentence_list + dispatching(i)
#Add some information if there is an interjection
for s in class_sentence_list:
#If there is an interjection we have to take the nominal group
if s.data_type == INTERJECTION:
nom_gr = s.sn
#If there is an imperative sentence, we put the nominal group of interjection in the subject
if nom_gr != [] and s.data_type == IMPERATIVE:
s.sn = s.sn + nom_gr
#To simplify the interpretation, we have to perform some changes
for k in class_sentence_list:
#If subject is 'there', we change it by the object
if k.sn != [] and k.sn[0].det == ['there']:
k.sn = k.sv[0].d_obj
k.sv[0].d_obj = []
#If sentence is empty, we take off the verb
if k.sv != [] and (k.sv[0].vrb_main == ['.'] or k.sv[0].vrb_main == ['?'] or k.sv[0].vrb_main == ['!']):
k.sv[0].vrb_main = []
if k.data_type == IMPERATIVE:
k.data_type = STATEMENT
#If we have imperative with verb 'see' => end
if k.data_type == IMPERATIVE and \
k.sv[0].vrb_main == ['see'] and \
len(k.sv[0].d_obj) > 0 and \
k.sv[0].d_obj[0].noun == ['you']:
k.data_type = END
k.aim = ''
k.sv = []
k.sn = []
return class_sentence_list
|
python
|
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
#
#
# @param head ListNode类
# @param k int整型
# @return ListNode类
#
class Solution:
def reverseKGroup(self, head, k):
def reverse(a,b):
pre = None
cur = a
while cur != b:
nex = cur.next
cur.next = pre
pre = cur
cur = nex
return pre
a,b = head,head
for i in range(k):
if not b: return head
b = b.next
newHead = reverse(a,b)
a.next = self.reverseKGroup(b,k)
return newHead
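# Hedged usage sketch (added for illustration): the judge normally supplies
# ListNode, so a matching minimal definition is included here just to make the
# example self-contained.
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None
def build_list(values):
    # Build a singly linked list from a Python list and return its head.
    dummy = ListNode(0)
    cur = dummy
    for v in values:
        cur.next = ListNode(v)
        cur = cur.next
    return dummy.next
def to_list(head):
    # Collect node values back into a Python list for easy checking.
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out
if __name__ == '__main__':
    head = build_list([1, 2, 3, 4, 5])
    # Reverse in groups of 2: expected [2, 1, 4, 3, 5] (a trailing group of
    # fewer than k nodes is left as-is).
    print(to_list(Solution().reverseKGroup(head, 2)))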
|
python
|
from .bignet import BigHouseModel
from .goal import BigGoalHouseModel, AuxiliaryBigGoalHouseModel
|
python
|
from modules import util
from modules.util import Failed
logger = util.logger
builders = ["stevenlu_popular"]
base_url = "https://s3.amazonaws.com/popular-movies/movies.json"
class StevenLu:
def __init__(self, config):
self.config = config
def get_stevenlu_ids(self, method):
if method == "stevenlu_popular":
logger.info(f"Processing StevenLu Popular Movies")
return [(i["imdb_id"], "imdb") for i in self.config.get_json(base_url)]
else:
raise Failed(f"StevenLu Error: Method {method} not supported")
|
python
|
# used as reference version, for comparison/correctness
import numpy as np
from timecheck import inittime, timecheck
from neoncl.util import math_helper
def calcU(W):
Ci = W.shape[0]
kH = W.shape[1]
kW = W.shape[2]
Co = W.shape[3]
G = np.array([[1/4,0,0],
[-1/6,-1/6,-1/6],
[-1/6,1/6,-1/6],
[1/24,1/12,1/6],
[1/24,-1/12,1/6],
[0,0,1]], dtype=np.float32)
Wfull = W
U2 = np.zeros((6, 6, Co, Ci), dtype=np.float32)
Utmp = np.zeros((6, 3), dtype=np.float32)
U = np.zeros((6, 6), dtype=np.float32) # transformed filter
timecheck('allocated U')
for co in range(Co):
for ci in range(Ci):
W = Wfull[ci,:,:,co].reshape(3,3)
#for i in range(3):
#Utmp[0][i] = 1/4 * W[0][i]
#Utmp[1][i] = - 1/6 * (W[0][i] + W[1][i] + W[2][i])
#Utmp[2][i] = - 1/6 *W[0][i] + 1/6 * W[1][i] - 1/6 * W[2][i]
#Utmp[3][i] = 1/24 * W[0][i] + 1/12 * W[1][i] + 1/6 * W[2][i]
#Utmp[4][i] = 1/24 * W[0][i] - 1/12 * W[1][i] + 1/6 * W[2][i]
#Utmp[5][i] = W[2][i]
Utmp = G.dot(W)
#for i in range(6):
#U[i][0] = 1/4 * Utmp[i][0]
#U[i][1] = - 1/6 * Utmp[i][0] - 1/6 * Utmp[i][1] - 1/6 * Utmp[i][2]
#U[i][2] = - 1/6 * Utmp[i][0] + 1/ 6 * Utmp[i][1] - 1 / 6 * Utmp[i][2]
#U[i][3] = 1/24 * Utmp[i][0] + 1/12 * Utmp[i][1] + 1/6 * Utmp[i][2]
#U[i][4] = 1/24 * Utmp[i][0] - 1/12 * Utmp[i][1] + 1/6 * Utmp[i][2]
#U[i][5] = Utmp[i][2]
U = Utmp.dot(G.T)
U2[:,:,co,ci] = U
timecheck('calced U2')
# layout:
# [xi, nu, co, ci]
return U2
def calcV(I):
Ifull = I
Ci = I.shape[0]
iH = I.shape[1]
iW = I.shape[2]
N = I.shape[3]
tiles = iW // 4
oH = iH
oW = iW
padH = 1
padW = 1
BT = np.array([[4,0,-5,0,1,0],
[0,-4,-4,1,1,0],
[0,4,-4,-1,1,0],
[0,-2,-1,2,1,0],
[0,2,-1,-2,1,0],
[0,4,0,-5,0,1]], dtype=np.float32)
V2 = np.zeros((N, 6, 6, Ci, tiles, tiles), dtype=np.float32)
timecheck('allocated V2')
for n in range(N):
V = np.zeros((6, 6), dtype=np.float32) # transformed image
Vtmp = np.zeros((6,6), dtype=np.float32)
for th in range(tiles):
hstart = -1 + 4 * th
hend = hstart + 6 - 1
hstarttrunc = max(0, hstart)
hendtrunc = min(hend, iH - 1)
hstartoffset = hstarttrunc - hstart
hendoffset = hendtrunc - hstart
for tw in range(tiles):
wstart = -1 + 4 * tw
wend = wstart + 6 - 1
wstarttrunc = max(0, wstart)
wendtrunc = min(wend, iW - 1)
wstartoffset = wstarttrunc - wstart
wendoffset = wendtrunc - wstart
Ipadded = np.zeros((6, 6), dtype=np.float32)
for ci in range(Ci):
Ipadded[hstartoffset:hendoffset + 1,wstartoffset:wendoffset + 1] = Ifull[ci,hstarttrunc:hendtrunc+1,wstarttrunc:wendtrunc+1,n]
I = Ipadded
#for i in range(6):
#Vtmp[0][i] = + 4 * I[0][i] - 5 * I[2][i] + I[4][i]
#Vtmp[1][i] = - 4 * I[1][i] - 4 * I[2][i] + I[3][i] + I[4][i]
#Vtmp[2][i] = + 4 * I[1][i] - 4 * I[2][i] - I[3][i] + I[4][i]
#Vtmp[3][i] = - 2 * I[1][i] - I[2][i] + 2 * I[3][i] + I[4][i]
#Vtmp[4][i] = + 2 * I[1][i] - I[2][i] - 2 * I[3][i] + I[4][i]
#Vtmp[5][i] = + 4 * I[1][i] - 5 * I[3][i] + I[5][i]
Vtmp = BT.dot(I)
# each i is a row of V
#for i in range(6):
#V[i][0] = + 4 * Vtmp[i][0] - 5 * Vtmp[i][2] + Vtmp[i][4]
#V[i][1] = - 4 * Vtmp[i][1] - 4 * Vtmp[i][2] + Vtmp[i][3] + Vtmp[i][4]
#V[i][2] = + 4 * Vtmp[i][1] - 4 * Vtmp[i][2] - Vtmp[i][3] + Vtmp[i][4]
#V[i][3] = - 2 * Vtmp[i][1] - Vtmp[i][2] + 2 * Vtmp[i][3] + Vtmp[i][4]
#V[i][4] = + 2 * Vtmp[i][1] - Vtmp[i][2] - 2 * Vtmp[i][3] + Vtmp[i][4]
#V[i][5] = + 4 * Vtmp[i][1] - 5 * Vtmp[i][3] + Vtmp[i][5]
V2[n, :,:,ci,th,tw] = Vtmp.dot(BT.T)
timecheck('calced V')
return V2
def calcM(N, Co, U, V):
GK = U.shape[2]
Ci = U.shape[3]
tiles = V.shape[3]
GN = V.shape[2]
print('calcM cpu GN', GN, 'N', N)
U = U.transpose(0,1,2,4,3).reshape(6,6,GK * 32,Ci)[:,:,:Co,:]
V = V.transpose(
2,6,0,1,5,3,4).reshape(
GN * 32, 6, 6, Ci, tiles, tiles)[:N]
M = np.zeros((N, Co, tiles, tiles, 6, 6), dtype=np.float32)
for n in range(N):
for xi in range(6):
for nu in range(6):
M[n,:, :, :, xi, nu] = np.tensordot(U[xi,nu], V[n,xi,nu], 1)
timecheck('calced M')
return M
def calcM_blocked_l2(U, V, axes):
R1 = np.tensordot(U, V, axes)
return R1
def calcM_blocked_l1(N, Co, U, V):
GK = U.shape[2]
Ci = U.shape[3]
tiles = V.shape[3]
GN = V.shape[2]
M = np.zeros((GN, 32, GK, 32, tiles, tiles, 6, 6), dtype=np.float32)
# new layouts:
# U
# [xi, nu, co // 32, ci, co % 32]
# V
# [xi, nu, n // 32, th, tw, ci, n % 32]
# each block:
# U [ci, co % 32]
# V [ci, ni % 32]
N_blocksize = 32
ci_blocksize = 32
Co_blocksize = 32
printed_size = False
for Co_block in range(GK):
U_block = U[:,:,Co_block]
for N_block in range(GN):
for th in range(tiles):
for tw in range(tiles):
V_block = V[:, :, N_block, th, tw]
M_block = M[N_block, :, Co_block, :, th, tw]
for mh in range(6):
for mw in range(6):
left = U_block[mh,mw]
right = V_block[mh,mw]
if not printed_size:
printed_size = True
print('left.shape', left.shape, 'right.shape', right.shape)
src = calcM_blocked_l2(left, right, ([0], [0]))
dst = M_block[:, :, mh, mw]
dst[:] = src.T
M = M.reshape(GN * 32, GK * 32, tiles, tiles, 6, 6)
M = M[:N, :Co]
timecheck('calced M')
return M
def calcO(M):
N = M.shape[0]
Co = M.shape[1]
tiles = M.shape[2]
oH = tiles * 4 # is this always true? anyway, it's true for now...
oW = tiles * 4
O = np.zeros((Co, oH, oW, N), dtype=np.float32)
Mfull = M
Ofull = O
AT = np.array([[1,1,1,1,1,0],
[0,1,-1,2,-2,0],
[0,1,1,4,4,0],
[0,1,-1,8,-8,1]], dtype=np.float32)
timecheck('allocated AT')
# inverse transform
Otmp = np.zeros((4, 6), dtype=np.float32)
for n in range(N):
for co in range(Co):
for th in range(tiles):
for tw in range(tiles):
O = Ofull[co,th * 4:(th+1)*4,tw*4:(tw+1)*4,n]
M = Mfull[n, co, th, tw]
#for i in range(6):
#Otmp[0][i] = M[0][i] + M[1][i] + M[2][i] + M[3][i] + M[4][i]
#Otmp[1][i] = + M[1][i] - M[2][i] + 2 * M[3][i] - 2 * M[4][i]
#Otmp[2][i] = + M[1][i] + M[2][i] + 4 * M[3][i] + 4 * M[4][i]
#Otmp[3][i] = + M[1][i] - M[2][i] + 8 * M[3][i] - 8 * M[4][i] + M[5][i]
#print('AT.shape', AT.shape, 'M.shape', M.shape)
Otmp = AT.dot(M)
#for i in range(4):
#O[i][0] = Otmp[i][0] + Otmp[i][1] + Otmp[i][2] + Otmp[i][3] + Otmp[i][4]
#O[i][1] = + Otmp[i][1] - Otmp[i][2] + 2 * Otmp[i][3] - 2 * Otmp[i][4]
#O[i][2] = + Otmp[i][1] + Otmp[i][2] + 4 * Otmp[i][3] + 4 * Otmp[i][4]
#O[i][3] = + Otmp[i][1] - Otmp[i][2] + 8 * Otmp[i][3] - 8 * Otmp[i][4] + Otmp[i][5]
#print('O.shape', O.shape, 'Otmp.shape', Otmp.shape, 'AT.T.shape', AT.T.shape)
O[:] = Otmp.dot(AT.T)
timecheck('calced O')
return Ofull
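# Hedged self-check sketch (added for illustration, not part of the original
# reference code): for one 6x6 input tile d and one 3x3 filter g, the
# F(4x4, 3x3) Winograd transform used above should reproduce the plain "valid"
# correlation output, i.e. AT @ ((G g G^T) * (BT d BT^T)) @ AT^T.
# G, BT and AT below are copied verbatim from calcU, calcV and calcO.
def check_single_tile(seed=0):
    rng = np.random.RandomState(seed)
    g = rng.randn(3, 3).astype(np.float32)   # one 3x3 filter
    d = rng.randn(6, 6).astype(np.float32)   # one 6x6 input tile
    G = np.array([[1/4, 0, 0],
                  [-1/6, -1/6, -1/6],
                  [-1/6, 1/6, -1/6],
                  [1/24, 1/12, 1/6],
                  [1/24, -1/12, 1/6],
                  [0, 0, 1]], dtype=np.float32)
    BT = np.array([[4, 0, -5, 0, 1, 0],
                   [0, -4, -4, 1, 1, 0],
                   [0, 4, -4, -1, 1, 0],
                   [0, -2, -1, 2, 1, 0],
                   [0, 2, -1, -2, 1, 0],
                   [0, 4, 0, -5, 0, 1]], dtype=np.float32)
    AT = np.array([[1, 1, 1, 1, 1, 0],
                   [0, 1, -1, 2, -2, 0],
                   [0, 1, 1, 4, 4, 0],
                   [0, 1, -1, 8, -8, 1]], dtype=np.float32)
    # Direct 4x4 "valid" correlation of the tile with the filter.
    direct = np.zeros((4, 4), dtype=np.float32)
    for i in range(4):
        for j in range(4):
            direct[i, j] = np.sum(d[i:i + 3, j:j + 3] * g)
    # Same result computed in the transformed domain.
    winograd = AT.dot(G.dot(g).dot(G.T) * BT.dot(d).dot(BT.T)).dot(AT.T)
    assert np.allclose(direct, winograd, atol=1e-3), (direct, winograd)
    return direct, winograd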
|
python
|
from nose.tools import eq_
from amo.tests import app_factory
class DynamicBoolFieldsTestMixin():
def setUp(self):
"""
Create an instance of the DynamicBoolFields model and call super
on the inheriting setUp.
(e.g. RatingDescriptors.objects.create(addon=self.app))
"""
self.app = app_factory()
self.model = None
self.related_name = '' # Related name of the bool table on the Webapp.
self.BOOL_DICT = []
self.flags = [] # Flag names.
self.expected = [] # Translation names.
def _get_related_bool_obj(self):
return getattr(self.app, self.related_name)
def _flag(self):
"""Flag app with a handful of flags for testing."""
self._get_related_bool_obj().update(
**dict(('has_%s' % f.lower(), True) for f in self.flags))
def _check(self, obj=None):
if not obj:
obj = self._get_related_bool_obj()
for bool_name in self.BOOL_DICT:
field = 'has_%s' % bool_name.lower()
value = bool_name in self.flags
if isinstance(obj, dict):
eq_(obj[field], value,
u'Unexpected value for field: %s' % field)
else:
eq_(getattr(obj, field), value,
u'Unexpected value for field: %s' % field)
def to_unicode(self, items):
"""
Force unicode evaluation of lazy items in the passed list, for set
comparison to a list of already-evaluated unicode strings.
"""
return [unicode(i) for i in items]
def test_bools_set(self):
self._flag()
self._check()
def test_to_dict(self):
self._flag()
self._check(self._get_related_bool_obj().to_dict())
def test_default_false(self):
obj = self.model(addon=self.app)
eq_(getattr(obj, 'has_%s' % self.flags[0].lower()), False)
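# Hedged usage sketch (illustrative only): a concrete test case is expected to
# mix this class in, call the parent setUp, then fill in the attributes listed
# there. `RatingDescriptors`, its import path, the related name and the flag
# values below are placeholders inferred from the setUp docstring, not verified.
#
# from mkt.webapps.models import RatingDescriptors  # assumed location
#
# class TestRatingDescriptors(DynamicBoolFieldsTestMixin, amo.tests.TestCase):
#     def setUp(self):
#         super(TestRatingDescriptors, self).setUp()
#         RatingDescriptors.objects.create(addon=self.app)
#         self.model = RatingDescriptors
#         self.related_name = 'rating_descriptors'
#         self.BOOL_DICT = ['USK_VIOLENCE', 'ESRB_DRUGS']
#         self.flags = ['USK_VIOLENCE']
#         self.expected = [u'Violence']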
|
python
|
import sys, json, os
BOARD_ID='Os1ByyJc'
def read_auth_info():
script_path = os.path.dirname(os.path.realpath(__file__))
auth_file = script_path + "/trello-auth.json"
if not os.path.exists(auth_file):
sys.stderr.write("Cannot access Trello: Missing {}\n".format(auth_file))
sys.exit(1)
with open(auth_file) as f:
auth = json.loads(f.read())
if not auth.get('key', '').strip():
sys.stderr.write("Cannot access Trello: Missing 'key' from {}\n".format(auth_file))
sys.exit(1)
if not auth.get('token', '').strip():
sys.stderr.write("Cannot access Trello: Missing 'token' from {}\n".format(auth_file))
sys.exit(1)
return auth['key'], auth['token']
def set_up():
if len(sys.argv) != 2:
sys.stderr.write("Usage: {} <episode-number>\n".format(sys.argv[0]))
sys.exit(1)
episode_number = sys.argv[1]
key, token = read_auth_info()
return episode_number, key, token
def get_cards(key, token, episode_number):
list_id = find_episode_list_id(key, token, episode_number)
if not list_id:
sys.stderr.write("Could not find Trello list for episode {}\n".format(episode_number))
sys.exit(1)
return get_json('https://api.trello.com/1/lists/{}/cards?key={}&token={}'.format(list_id, key, token))
def get_cards_by_label(key, token, episode_number, label_to_find):
cards = get_cards(key, token, episode_number)
return [card for card in cards if any(label['name'] == label_to_find for label in card['labels'])]
def get_question_cards(key, token, episode_number):
return get_cards_by_label(key, token, episode_number, label_to_find='question')
# assuming there is only a single note card if it exists
def get_show_notes(key, token, episode_number):
show_notes = get_cards_by_label(key, token, episode_number, label_to_find='notes')
return show_notes[0]['desc'] if len(show_notes) > 0 else ''
def find_episode_list_id(key, token, episode_number):
lists = get_json('https://api.trello.com/1/boards/{}/lists?key={}&token={}'.format(BOARD_ID, key, token))
for lst in lists:
# FIXME Using `startswith` will break at episode 1000 since it will match episode 100.
# At current weekly cadence, this will break in the year 2034.
if lst['name'].lower().startswith("episode {}".format(episode_number)):
return lst['id']
def get_json(url):
import requests
response = requests.get(url)
if response.status_code == 200:
try:
return json.loads(response.content)
except json.decoder.JSONDecodeError as e:
sys.stderr("Invalid JSON returned from Trello API: {}, JSON: {}".format(e, response.content))
sys.exit(1)
else:
sys.stderr.write("Got error from Trello API. HTTP status code: {}, response content: {}\n".format(response.status_code, response.content))
sys.exit(1)
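# Hedged usage sketch (added for illustration, not part of the original script):
# the helpers above compose roughly like this for a caller that wants one
# episode's question cards and show notes.
def example_usage():
    episode_number, key, token = set_up()
    for card in get_question_cards(key, token, episode_number):
        print(card['name'])
    print(get_show_notes(key, token, episode_number))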
|
python
|
import os
'''
user = os.environ['POSTGRES_USER']
password = os.environ['POSTGRES_PASSWORD']
host = os.environ['POSTGRES_HOST']
database = os.environ['POSTGRES_DB']
port = os.environ['POSTGRES_PORT']
'''
user = 'test'
password = 'password'
host = 'localhost'
database = 'example'
port = '5432'
DATABASE_CONNECTION_URI = f'postgresql+psycopg2://{user}:{password}@{host}:{port}/{database}'
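# Hedged usage sketch (added for illustration, not part of the original config):
# a typical consumer would hand DATABASE_CONNECTION_URI to SQLAlchemy or
# Flask-SQLAlchemy; the snippet below assumes SQLAlchemy and psycopg2 are
# installed and runs only when this module is executed directly.
if __name__ == '__main__':
    from sqlalchemy import create_engine
    engine = create_engine(DATABASE_CONNECTION_URI)
    print(engine.url)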
|
python
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
from collections import OrderedDict
from datetime import timedelta
from itertools import chain
import datetime
from django.urls import reverse
from django.template.loader import render_to_string
from django.utils.timesince import timesince
from math import ceil
from casexml.apps.stock.models import StockTransaction
from corehq.apps.es import UserES
from corehq.apps.domain.models import Domain
from corehq.apps.commtrack.models import StockState
from corehq.apps.reports.commtrack.const import STOCK_SECTION_TYPE
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.graph_models import Axis
from corehq.apps.users.models import WebUser
from custom.common import ALL_OPTION
from custom.ewsghana.filters import ProductByProgramFilter, EWSDateFilter, EWSRestrictionLocationFilter
from custom.ewsghana.models import FacilityInCharge, EWSExtension
from custom.ewsghana.reports import EWSData, MultiReport, EWSLineChart, ProductSelectionPane
from custom.ewsghana.utils import has_input_stock_permissions, ews_date_format
from dimagi.utils.couch.database import iter_docs
from memoized import memoized
from django.utils.translation import ugettext as _
from corehq.apps.locations.dbaccessors import get_users_by_location_id
from corehq.apps.locations.models import get_location, SQLLocation
from six.moves import range
import six
class StockLevelsLegend(EWSData):
title = 'Legend'
slug = 'legend'
show_table = True
@property
def headers(self):
return DataTablesHeader(*[
DataTablesColumn(_('Icon')),
DataTablesColumn(_('Stock status')),
])
@property
def rows(self):
return [['<span class="fa fa-arrow-up" style="color:purple"/>', 'Overstock'],
['<span class="fa fa-check" style="color:green"/>', 'Adequate'],
['<span class="fa fa-exclamation-triangle" style="color:orange"/>', 'Low'],
['<span class="fa fa-remove" style="color:red"/>', 'Stockout']]
class FacilityReportData(EWSData):
slug = 'facility_report'
show_table = True
use_datatables = True
@property
def title(self):
return 'Facility Report - %s' % SQLLocation.objects.get(location_id=self.config['location_id']).name
@property
def headers(self):
return DataTablesHeader(*[
DataTablesColumn(_('Commodity')),
DataTablesColumn(_('Months of Stock')),
DataTablesColumn(_('Stockout Duration')),
DataTablesColumn(_('Current Stock')),
DataTablesColumn(_('Monthly Consumption')),
DataTablesColumn(_('Reorder Level')),
DataTablesColumn(_('Maximum Level')),
DataTablesColumn(_('Date of Last Report'))
])
def get_prod_data(self):
def get_months_until_stockout_icon(value, loc):
if float(value) == 0.0:
return '%s <span class="fa fa-remove" style="color:red"/>' % value
elif float(value) <= loc.location_type.understock_threshold:
return '%s <span class="fa fa-exclamation-triangle" style="color:orange"/>' % value
elif loc.location_type.understock_threshold < float(value) < loc.location_type.overstock_threshold:
return '%s <span class="fa fa-check" style="color:green"/>' % value
elif float(value) >= loc.location_type.overstock_threshold:
return '%s <span class="fa fa-arrow-up" style="color:purple"/>' % value
state_grouping = {}
loc = SQLLocation.objects.get(location_id=self.config['location_id'])
stock_states = StockState.objects.filter(
case_id=loc.supply_point_id,
section_id=STOCK_SECTION_TYPE,
sql_product__in=self.unique_products(SQLLocation.objects.filter(pk=loc.pk))
).order_by('-last_modified_date')
for state in stock_states:
monthly_consumption = state.get_monthly_consumption()
max_level = 0
if monthly_consumption:
monthly_consumption = round(monthly_consumption)
max_level = round(monthly_consumption * float(loc.location_type.overstock_threshold))
state_grouping[state.product_id] = {
'commodity': state.sql_product.name,
'months_until_stockout': "%.1f" % (float(state.stock_on_hand) / monthly_consumption)
if state.stock_on_hand and monthly_consumption else 0,
'stockout_duration': '',
'stockout_duration_helper': True,
'current_stock': state.stock_on_hand,
'monthly_consumption': monthly_consumption,
'reorder_level': round(max_level / 2.0),
'maximum_level': max_level,
'last_report': ews_date_format(state.last_modified_date)
}
if state.stock_on_hand == 0:
try:
st = StockTransaction.objects.filter(
case_id=loc.supply_point_id,
product_id=state.product_id,
stock_on_hand__gt=0
).latest('report__date')
state_grouping[state.product_id]['stockout_duration'] = timesince(
st.report.date, now=datetime.datetime.now()
)
except StockTransaction.DoesNotExist:
state_grouping[state.product_id]['stockout_duration'] = 'Always'
else:
state_grouping[state.product_id]['stockout_duration_helper'] = False
for values in state_grouping.values():
if values['monthly_consumption'] is not None or values['current_stock'] == 0:
months_until_stockout = get_months_until_stockout_icon(
values['months_until_stockout'] if values['months_until_stockout'] else 0.0, loc
)
else:
months_until_stockout = '-'
if values['monthly_consumption'] and values['monthly_consumption'] != 0.00:
monthly_consumption = int(values['monthly_consumption'])
else:
monthly_consumption = 'not enough data'
if values['maximum_level'] and values['maximum_level'] != 0.00:
maximum_level = int(values['maximum_level'])
else:
maximum_level = 'unknown'
if values['reorder_level'] and values['reorder_level'] != 0.00:
reorder_level = int(values['reorder_level'])
else:
reorder_level = 'unknown'
yield {
'commodity': values['commodity'],
'current_stock': int(values['current_stock']) if values['current_stock'] is not None else '--',
'monthly_consumption': monthly_consumption,
'months_until_stockout': months_until_stockout,
'stockout_duration': values['stockout_duration'],
'last_report': values['last_report'],
'reorder_level': reorder_level,
'maximum_level': maximum_level}
@property
def rows(self):
for row in self.get_prod_data():
yield [row['commodity'],
row['months_until_stockout'],
row['stockout_duration'],
row['current_stock'],
row['monthly_consumption'],
row['reorder_level'],
row['maximum_level'],
row['last_report']]
class InventoryManagementData(EWSData):
title = ''
slug = 'inventory_management'
show_table = False
show_chart = True
chart_x_label = 'Weeks'
chart_y_label = 'MOS'
@property
def rows(self):
return []
@property
def chart_data(self):
def calculate_weeks_remaining(state, daily_consumption, date):
if not daily_consumption:
return 0
consumption = round(float(daily_consumption) * 30.0)
quantity = float(state.stock_on_hand) - ((date - state.report.date).days // 7) * consumption
if consumption and consumption > 0 and quantity > 0:
return quantity / consumption
return 0
enddate = self.config['enddate']
startdate = self.config['startdate'] if 'custom_date' in self.config else enddate - timedelta(days=30)
loc = SQLLocation.objects.get(location_id=self.config['location_id'])
stoke_states = StockState.objects.filter(
case_id=loc.supply_point_id,
section_id=STOCK_SECTION_TYPE,
sql_product__in=loc.products,
)
consumptions = {ss.product_id: ss.get_daily_consumption() for ss in stoke_states}
st = StockTransaction.objects.filter(
case_id=loc.supply_point_id,
sql_product__in=loc.products,
type='stockonhand',
report__date__lte=enddate
).select_related('report', 'sql_product').order_by('report__date')
rows = OrderedDict()
weeks = ceil((enddate - startdate).days / 7)
for state in st:
product_name = '{0} ({1})'.format(state.sql_product.name, state.sql_product.code)
if product_name not in rows:
rows[product_name] = {}
for i in range(1, int(weeks + 1)):
date = startdate + timedelta(weeks=i)
if state.report.date < date:
rows[product_name][i] = calculate_weeks_remaining(
state, consumptions.get(state.product_id, None), date)
for k, v in six.iteritems(rows):
rows[k] = [{'x': key, 'y': value} for key, value in six.iteritems(v)]
rows['Understock'] = []
rows['Overstock'] = []
for i in range(1, int(weeks + 1)):
rows['Understock'].append({'x': i, 'y': float(loc.location_type.understock_threshold)})
rows['Overstock'].append({'x': i, 'y': float(loc.location_type.overstock_threshold)})
return rows
@property
def charts(self):
if self.show_chart:
loc = SQLLocation.objects.get(location_id=self.config['location_id'])
chart = EWSLineChart("Inventory Management Trends", x_axis=Axis(self.chart_x_label, 'd'),
y_axis=Axis(self.chart_y_label, '.1f'))
chart.height = 600
values = []
for product, value in six.iteritems(self.chart_data):
values.extend([a['y'] for a in value])
chart.add_dataset(product, value,
color='black' if product in ['Understock', 'Overstock'] else None)
chart.forceY = [0, loc.location_type.understock_threshold + loc.location_type.overstock_threshold]
chart.is_rendered_as_email = self.config.get('is_rendered_as_email', False)
return [chart]
return []
class InputStock(EWSData):
slug = 'input_stock'
show_table = True
@property
def rows(self):
link = reverse('input_stock', args=[self.domain, self.location.site_code])
rows = []
if has_input_stock_permissions(self.config['user'],
SQLLocation.objects.get(location_id=self.config['location_id']),
self.domain):
rows.append(["<a href='{}'>INPUT STOCK for {}</a>".format(link, self.location.name)])
try:
rows.append(
[
'The last report received was at <b>{}.</b>'.format(
StockState.objects.filter(case_id=self.location.supply_point_id)
.values('last_modified_date')
.latest('last_modified_date')['last_modified_date']
.strftime("%X on %b %d, %Y")
)
]
)
except StockState.DoesNotExist:
pass
return rows
class UsersData(EWSData):
custom_table = True
@property
def rendered_content(self):
from corehq.apps.users.views.mobile.users import EditCommCareUserView
users = get_users_by_location_id(self.config['domain'],
self.config['location_id'])
in_charges = FacilityInCharge.objects.filter(
location=self.location
).values_list('user_id', flat=True)
if self.location.parent.location_type.name == 'district':
children = self.location.parent.get_descendants()
availaible_in_charges = list(chain.from_iterable([
[u for u in get_users_by_location_id(self.config['domain'], child.location_id) if 'In Charge' in u.user_data.get('role', [])]
for child in children
]))
else:
availaible_in_charges = [u for u in get_users_by_location_id(self.domain, self.location_id) if 'In Charge' in u.user_data.get('role', [])]
user_to_dict = lambda sms_user: {
'id': sms_user.get_id,
'full_name': sms_user.full_name,
'phone_numbers': sms_user.phone_numbers,
'in_charge': sms_user.get_id in in_charges,
'location_name': sms_user.location.sql_location.name,
'url': reverse(EditCommCareUserView.urlname, args=[self.config['domain'], sms_user.get_id])
}
web_users_from_extension = list(iter_docs(
WebUser.get_db(),
EWSExtension.objects.filter(domain=self.domain,
location_id=self.location_id).values_list('user_id', flat=True)
))
WebUserInfo = collections.namedtuple('WebUserInfo', 'id first_name last_name email')
web_users = {
WebUserInfo(
id=web_user['_id'],
first_name=web_user['first_name'],
last_name=web_user['last_name'],
email=web_user['email']
)
for web_user in (UserES().web_users().domain(self.config['domain']).term(
"domain_memberships.location_id", self.location_id
).run().hits + web_users_from_extension)
}
return render_to_string('ewsghana/partials/users_tables.html', {
'users': [user_to_dict(user) for user in users],
'domain': self.domain,
'location_id': self.location_id,
'web_users': web_users,
'district_in_charges': [user_to_dict(user) for user in availaible_in_charges]
})
class StockLevelsReport(MultiReport):
title = "Aggregate Stock Report"
fields = [EWSRestrictionLocationFilter, ProductByProgramFilter, EWSDateFilter]
name = "Stock Levels Report"
slug = 'ews_stock_levels_report'
exportable = True
is_exportable = True
@property
def report_config(self):
report_config = super(StockLevelsReport, self).report_config
program = self.request.GET.get('filter_by_program')
products = self.request.GET.getlist('filter_by_product')
report_config.update(dict(
startdate=self.datespan.startdate_utc,
enddate=self.datespan.enddate_utc,
program=program if program != ALL_OPTION else None,
products=products if products and products[0] != ALL_OPTION else []
))
return report_config
@property
@memoized
def data_providers(self):
config = self.report_config
location_types = [loc_type.name for loc_type in [loc_type for loc_type in Domain.get_by_name(self.domain).location_types if not loc_type.administrative]]
if not self.needs_filters and get_location(config['location_id']).location_type_name in location_types:
if self.is_rendered_as_email:
return [FacilityReportData(config)]
else:
return [FacilityReportData(config),
StockLevelsLegend(config),
InputStock(config),
UsersData(config),
InventoryManagementData(config),
ProductSelectionPane(config, hide_columns=False)]
@classmethod
def show_in_navigation(cls, domain=None, project=None, user=None):
return False
|
python
|
from collections import namedtuple
import contextlib
import meshcat
import meshcat.geometry as meshcat_geom
import meshcat.transformations as meshcat_tf
import matplotlib.pyplot as plt
import logging
import numpy as np
import networkx as nx
import os
import yaml
import torch
import pydrake
from pydrake.common.cpp_param import List as DrakeBindingList
from pydrake.all import (
AddMultibodyPlantSceneGraph,
AngleAxis,
BasicVector,
BodyIndex,
ConnectMeshcatVisualizer,
CoulombFriction,
DiagramBuilder,
ExternallyAppliedSpatialForce,
LeafSystem,
InverseKinematics,
MeshcatVisualizer,
MinimumDistanceConstraint,
ModelInstanceIndex,
MultibodyPlant,
SpatialInertia,
Parser,
RigidTransform,
RotationMatrix,
SpatialForce,
Simulator,
SnoptSolver,
Solve,
SolverOptions,
UnitInertia,
Value
)
import pydrake.geometry as pydrake_geom
def torch_tf_to_drake_tf(tf):
return RigidTransform(tf.cpu().detach().numpy())
def drake_tf_to_torch_tf(tf):
return torch.tensor(tf.GetAsMatrix4())
default_spatial_inertia = SpatialInertia(
mass=1.0,
p_PScm_E=np.zeros(3), G_SP_E=UnitInertia(0.01, 0.01, 0.01)
)
default_friction = CoulombFriction(0.9, 0.8)
class PhysicsGeometryInfo():
'''
Container for physics and geometry info, providing simulator and
visualization interoperation.
Args:
- fixed: Whether this geometry is welded to the world (otherwise,
it will be mobilized by a 6DOF floating base).
- spatial_inertia: Spatial inertia of the body. If None,
will adopt a default mass of 1.0kg and 0.01x0.01x0.01 diagonal
rotational inertia.
- is_container: Flag whether this object will function as a
container for other objects for the purpose of collision
and stability checks. If so, then objects below this one
will be isolated from collision and clearance checks for
objects above this one, and instead only be checked against
this object's collision geometry and this object's
childrens' geometry. Valid for e.g. a cabinet full of
stuff that does not interact with anything outside of
the cabinet.
To construct a PhysicsGeometricInfo object, initialize the object
with the desired arguments above, and then use registration calls
to populate the model geometry of the following types:
- Model files (urdf/sdf), paired with a transform from the object
local origin, the name of the root body (which gets put at that
transform -- required if there's more than one body in the URDF),
and optionally, the initial joint configuration of
the model (as a dict of joint names to joint states). These
are added to the simulated scene with the specified link
welded (or translated, if not fixed) to the node transform.
- Visual and collision geometry (Drake Shape types), paired with
transforms from the object local origin and relevant color
and friction information.
- Clearance geometry (Drake Shape types), paired with transforms
from the object local origin. This represents the region around
this object that should not intersect with any other node's
clearance geometry: e.g., the space in front of a cabinet should
be clear so the doors can open.
'''
def __init__(self, fixed=True, spatial_inertia=None, is_container=False):
self.fixed = fixed
self.is_container = is_container
self.model_paths = []
self.spatial_inertia = spatial_inertia or default_spatial_inertia
self.visual_geometry = []
self.collision_geometry = []
self.clearance_geometry = []
def register_model_file(self, tf, model_path, root_body_name=None,
q0_dict={}):
self.model_paths.append((tf, model_path, root_body_name, q0_dict))
def register_geometry(self, tf, geometry, color=np.ones(4), friction=default_friction):
# Shorthand for registering the same geometry as collision + visual.
self.register_visual_geometry(tf, geometry, color)
self.register_collision_geometry(tf, geometry, friction)
def register_visual_geometry(self, tf, geometry, color=np.ones(4)):
assert isinstance(tf, torch.Tensor) and tf.shape == (4, 4)
assert isinstance(geometry, pydrake.geometry.Shape)
self.visual_geometry.append((tf, geometry, color))
def register_collision_geometry(self, tf, geometry, friction=default_friction):
assert isinstance(tf, torch.Tensor) and tf.shape == (4, 4)
assert isinstance(geometry, pydrake.geometry.Shape)
assert isinstance(friction, CoulombFriction)
self.collision_geometry.append((tf, geometry, friction))
def register_clearance_geometry(self, tf, geometry):
assert isinstance(tf, torch.Tensor) and tf.shape == (4, 4)
assert isinstance(geometry, pydrake.geometry.Shape)
self.clearance_geometry.append((tf, geometry))
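# Hedged usage sketch of the registration calls described in the docstring
# above; the box dimensions, pose, and color below are illustrative values,
# not defaults used anywhere in this module.
def _example_physics_geometry_info():
    geom_info = PhysicsGeometryInfo(fixed=False)
    tf = torch.eye(4)  # identity pose in the node's local frame
    box = pydrake_geom.Box(0.1, 0.1, 0.1)
    # Register the same shape as both visual and collision geometry.
    geom_info.register_geometry(tf, box, color=np.array([0.8, 0.2, 0.2, 1.0]))
    # Reserve a larger clearance region around the object.
    geom_info.register_clearance_geometry(tf, pydrake_geom.Box(0.2, 0.2, 0.2))
    return geom_info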
def sanity_check_node_tf_and_physics_geom_info(node):
assert isinstance(node.tf, torch.Tensor), type(node.tf)
assert node.tf.shape == (4, 4), node.tf.shape
assert isinstance(node.physics_geometry_info, PhysicsGeometryInfo), type(node.physics_geometry_info)
class DecayingForceToDesiredConfigSystem(LeafSystem):
''' Connect to a MBP to apply ghost forces (that decay over time)
to encourage the scene to settle near the desired configuration. '''
def __init__(self, mbp, q_des):
LeafSystem.__init__(self)
self.set_name('DecayingForceToDesiredConfigSystem')
self.robot_state_input_port = self.DeclareVectorInputPort(
"robot_state", BasicVector(mbp.num_positions() + mbp.num_velocities()))
forces_cls = Value[DrakeBindingList[ExternallyAppliedSpatialForce]]
self.spatial_forces_output_port = self.DeclareAbstractOutputPort(
"spatial_forces_vector",
lambda: forces_cls(),
self.DoCalcAbstractOutput)
self.mbp = mbp
self.q_des = q_des
self.mbp_current_context = mbp.CreateDefaultContext()
self.mbp_des_context = mbp.CreateDefaultContext()
self.mbp.SetPositions(self.mbp_des_context, self.q_des)
def DoCalcAbstractOutput(self, context, y_data):
t = context.get_time()
# Annealing schedule
force_multiplier = 10.0*np.exp(-0.5*t)*np.abs(np.cos(t*np.pi/2.))
x_in = self.EvalVectorInput(context, 0).get_value()
self.mbp.SetPositionsAndVelocities(self.mbp_current_context, x_in)
forces = []
for k in self.mbp.GetFloatingBaseBodies():
body = self.mbp.get_body(BodyIndex(k))
# Get pose of body in world frame
body_tf = self.mbp.GetFreeBodyPose(self.mbp_current_context, body)
body_r = body_tf.rotation().matrix()
body_tfd = self.mbp.EvalBodySpatialVelocityInWorld(self.mbp_current_context, body)
des_tf = self.mbp.GetFreeBodyPose(self.mbp_des_context, body)
delta_xyz = des_tf.translation() - body_tf.translation()
delta_r = des_tf.rotation().matrix().dot(body_tf.rotation().matrix().T)
# Get mass info so we can calc correct forces
si = body.CalcSpatialInertiaInBodyFrame(self.mbp_current_context)
m = si.get_mass()
I = si.CalcRotationalInertia().CopyToFullMatrix3()
I_w = body_tf.rotation().matrix().dot(I)
# Multiply out
aa = AngleAxis(delta_r)
tau = aa.axis()*aa.angle() - 0.1*body_tfd.rotational()
f = (delta_xyz*10. - 0.1*body_tfd.translational() + np.array([0., 0., 9.81])/max(1., force_multiplier))*m
max_force = 100.
max_torque = 100.
force = SpatialForce(
tau=np.clip(tau*force_multiplier, -max_torque, max_torque),
f=np.clip(f*force_multiplier, -max_force, max_force)
)
out = ExternallyAppliedSpatialForce()
out.F_Bq_W = force
out.body_index = body.index()
forces.append(out)
y_data.set_value(forces)
class StochasticLangevinForceSource(LeafSystem):
''' Connect to a MBP to apply ghost forces. The forces are:
1) Random noise whose magnitude decays with sim time.
2) A force proportional to the gradient of the log-prob of the
object poses w.r.t the log-prob of the scene tree.
This probably doesn't work for systems with tough inter-node
constraints like planar-ity. Need to do some reparameterization
of the system under sim for that to work?
MBP + scene_tree should be corresponded to each other through
the mbp construction method in this file.
'''
def __init__(self, mbp, scene_tree, node_to_free_body_ids_map, body_id_to_node_map):
LeafSystem.__init__(self)
self.set_name('StochasticLangevinForceSystem')
self.robot_state_input_port = self.DeclareVectorInputPort(
"robot_state", BasicVector(mbp.num_positions() + mbp.num_velocities()))
forces_cls = Value[DrakeBindingList[ExternallyAppliedSpatialForce]]
self.spatial_forces_output_port = self.DeclareAbstractOutputPort(
"spatial_forces_vector",
lambda: forces_cls(),
self.DoCalcAbstractOutput)
self.scene_tree = scene_tree
self.node_to_free_body_ids_map = node_to_free_body_ids_map
self.body_id_to_node_map = body_id_to_node_map
self.mbp = mbp
self.mbp_current_context = mbp.CreateDefaultContext()
for node, body_ids in self.node_to_free_body_ids_map.items():
for body_id in body_ids:
self.mbp.SetFreeBodyPose(self.mbp_current_context, self.mbp.get_body(body_id), torch_tf_to_drake_tf(node.tf))
def DoCalcAbstractOutput(self, context, y_data):
t = context.get_time()
# TODO: Hook up random input on the right kind of random port.
noise_scale = 0.25 * 0.25**t
ll_scale = 2.0 * 0.25**t
x_in = self.EvalVectorInput(context, 0).get_value()
self.mbp.SetPositionsAndVelocities(self.mbp_current_context, x_in)
# Copy state over to scene tree.
free_bodies = self.mbp.GetFloatingBaseBodies()
body_tf_vars = {}
for body_id, node in self.body_id_to_node_map.items():
if body_id not in free_bodies:
continue
tf_dec_var = drake_tf_to_torch_tf(self.mbp.GetFreeBodyPose(self.mbp_current_context, self.mbp.get_body(body_id)))
tf_dec_var.requires_grad = True
body_tf_vars[body_id] = tf_dec_var
node.tf = tf_dec_var
# Compute log prob and backprop.
score = self.scene_tree.score(include_discrete=False, include_continuous=True)
score.backward()
forces = []
for body_id in free_bodies:
body = self.mbp.get_body(body_id)
# Get pose of body in world frame
body_tf = self.mbp.GetFreeBodyPose(self.mbp_current_context, body)
body_tfd = self.mbp.EvalBodySpatialVelocityInWorld(self.mbp_current_context, body)
# Get mass info so we can calc correct force scaling
si = body.CalcSpatialInertiaInBodyFrame(self.mbp_current_context)
m = si.get_mass()
I = si.CalcRotationalInertia().CopyToFullMatrix3()
I_w = body_tf.rotation().matrix().dot(I)
# Calculate total wrench
# Noise term
f_noise = np.random.normal(0., noise_scale, size=3)
tau_noise = np.random.normal(0., noise_scale, size=3)
# Force maximizing log prob
t_grad = body_tf_vars[body_id].grad[:3, 3].numpy()
f_ll = t_grad*m
force = SpatialForce(
tau=tau_noise - 0.01*body_tfd.rotational(),
f=f_noise + f_ll*ll_scale - body_tfd.translational()*0.5
)
out = ExternallyAppliedSpatialForce()
out.F_Bq_W = force
out.body_index = body.index()
forces.append(out)
y_data.set_value(forces)
def resolve_catkin_package_path(package_map, input_str):
if "://" in input_str:
elements = input_str.split("://")
assert len(elements) == 2, "Malformed path " + input_str
package_name, path_in_package = elements
assert package_map.Contains(package_name), "%s not in package map" % package_name
return os.path.join(
package_map.GetPath(package_name),
path_in_package
)
else:
return input_str
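# Hedged usage sketch for the helper above; the package name "my_models" is
# hypothetical and would have to exist in the ROS_PACKAGE_PATH-populated
# package map for the first call to resolve.
def _example_resolve_paths(package_map):
    in_package = resolve_catkin_package_path(package_map, "my_models://sdf/table.sdf")
    plain = resolve_catkin_package_path(package_map, "/tmp/table.sdf")
    return in_package, plain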
def compile_scene_tree_clearance_geometry_to_mbp_and_sg(scene_tree, timestep=0.001, alpha=0.25):
builder = DiagramBuilder()
mbp, scene_graph = AddMultibodyPlantSceneGraph(
builder, MultibodyPlant(time_step=timestep))
parser = Parser(mbp)
parser.package_map().PopulateFromEnvironment("ROS_PACKAGE_PATH")
world_body = mbp.world_body()
free_body_poses = []
# For generating colors.
node_class_to_color_dict = {}
cmap = plt.cm.get_cmap('jet')
cmap_counter = 0.
for node in scene_tree.nodes:
if node.tf is not None and node.physics_geometry_info is not None:
# Don't have to do anything if this does not introduce geometry.
sanity_check_node_tf_and_physics_geom_info(node)
phys_geom_info = node.physics_geometry_info
has_clearance_geometry = len(phys_geom_info.clearance_geometry) > 0
if not has_clearance_geometry:
continue
# Add a body for this node and register the clearance geometry.
# TODO(gizatt) This tree body index is built in to disambiguate names.
            # But I foresee a name-to-stuff resolution crisis when inference time comes...
# this might get resolved by the solution to that.
body = mbp.AddRigidBody(name=node.name,
M_BBo_B=phys_geom_info.spatial_inertia)
tf = torch_tf_to_drake_tf(node.tf)
mbp.SetDefaultFreeBodyPose(body, tf)
# Pick out a color for this class.
node_type_string = node.__class__.__name__
if node_type_string in node_class_to_color_dict.keys():
color = node_class_to_color_dict[node_type_string]
else:
color = list(cmap(cmap_counter))
color[3] = alpha
node_class_to_color_dict[node_type_string] = color
cmap_counter = np.fmod(cmap_counter + np.pi*2., 1.)
# Handle adding primitive geometry by adding it all to one
# mbp.
if len(phys_geom_info.clearance_geometry) > 0:
for k, (tf, geometry) in enumerate(phys_geom_info.clearance_geometry):
mbp.RegisterCollisionGeometry(
body=body,
X_BG=torch_tf_to_drake_tf(tf),
shape=geometry,
name=node.name + "_col_%03d" % k,
coulomb_friction=default_friction)
mbp.RegisterVisualGeometry(
body=body,
X_BG=torch_tf_to_drake_tf(tf),
shape=geometry,
name=node.name + "_vis_%03d" % k,
diffuse_color=color)
return builder, mbp, scene_graph
def build_nonpenetration_constraint(mbp, mbp_context_in_diagram, signed_distance_threshold):
''' Given an MBP/SG pair and a signed distance threshold, returns a constraint
function that takes a context and returns whether the MBP/SG in that configuration
has all bodies farther than the given threshold. '''
return MinimumDistanceConstraint(mbp, signed_distance_threshold, mbp_context_in_diagram)
def get_collisions(mbp, mbp_context_in_diagram):
# Essentially the same logic as in ik/MinimumDistanceConstraint's distances evaluation.
query_port = mbp.get_geometry_query_input_port()
assert query_port.HasValue(mbp_context_in_diagram), \
"Either the plant geometry_query_input_port() is not properly " \
"connected to the SceneGraph's output port, or the plant_context_ is " \
"incorrect. Please refer to AddMultibodyPlantSceneGraph on connecting " \
"MultibodyPlant to SceneGraph."
query_object = query_port.Eval(mbp_context_in_diagram)
return query_object.ComputePointPairPenetration()
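# Hedged sketch tying the two helpers above together; the plant and context
# are assumed to come from a finalized MultibodyPlant inside a built diagram.
def _example_nonpenetration_check(mbp, mbp_context_in_diagram):
    # Constraint satisfied when all signed distances exceed 1 cm.
    constraint = build_nonpenetration_constraint(mbp, mbp_context_in_diagram, 0.01)
    # Point-pair penetrations for bodies currently in contact.
    collisions = get_collisions(mbp, mbp_context_in_diagram)
    return constraint, collisions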
def resolve_sg_proximity_id_to_mbp_id(sg, mbp, geometry_id):
for model_k in range(mbp.num_model_instances()):
model_k = ModelInstanceIndex(model_k)
for body_k in mbp.GetBodyIndices(model_k):
if geometry_id in mbp.GetCollisionGeometriesForBody(mbp.get_body(body_k)):
return model_k, body_k
raise ValueError("Geometry ID not registered by this MBP.")
def expand_container_tree(full_tree, new_tree, current_node):
# Given the original tree for reference and a new tree
# that contains the current node, gets the current node's
# children, adds them all (with approp connections) to the
# new tree, and recurses on the children.
# Does not recurse on children that are containers,
# but will still add them to the tree. (Containers should
# appear in the tree above *and* below them.)
for child in full_tree.successors(current_node):
new_tree.add_node(child)
new_tree.add_edge(current_node, child)
if (child.physics_geometry_info is not None and
child.physics_geometry_info.is_container):
continue
new_tree = expand_container_tree(full_tree, new_tree, child)
return new_tree
def split_tree_into_containers(scene_tree):
# The roots will be each container + the root
# of the overall tree.
roots = [node for node in scene_tree.nodes if
(len(list(scene_tree.predecessors(node))) == 0 or
(node.physics_geometry_info is not None and
node.physics_geometry_info.is_container))]
# Build the subtree from each root until it hits a terminal or
# or a container.
trees = []
for root in roots:
# Manually add the first
new_tree = nx.DiGraph()
new_tree.add_node(root)
trees.append(expand_container_tree(scene_tree, new_tree, root))
return trees
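# Hedged sketch of how the container split might be consumed: compile the
# clearance geometry of each container subtree into its own plant. The scene
# tree is assumed to come from the generative model defined elsewhere.
def _example_compile_per_container(scene_tree):
    compiled = []
    for subtree in split_tree_into_containers(scene_tree):
        compiled.append(
            compile_scene_tree_clearance_geometry_to_mbp_and_sg(subtree))
    return compiled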
def compile_scene_tree_to_mbp_and_sg(scene_tree, timestep=0.001):
builder = DiagramBuilder()
mbp, scene_graph = AddMultibodyPlantSceneGraph(
builder, MultibodyPlant(time_step=timestep))
parser = Parser(mbp)
parser.package_map().PopulateFromEnvironment("ROS_PACKAGE_PATH")
world_body = mbp.world_body()
node_to_free_body_ids_map = {}
body_id_to_node_map = {}
free_body_poses = []
for node in scene_tree.nodes:
node_to_free_body_ids_map[node] = []
if node.tf is not None and node.physics_geometry_info is not None:
# Don't have to do anything if this does not introduce geometry.
sanity_check_node_tf_and_physics_geom_info(node)
phys_geom_info = node.physics_geometry_info
# Don't have to do anything if this does not introduce geometry.
has_models = len(phys_geom_info.model_paths) > 0
has_prim_geometry = (len(phys_geom_info.visual_geometry)
+ len(phys_geom_info.collision_geometry)) > 0
if not has_models and not has_prim_geometry:
continue
node_model_ids = []
# Handle adding primitive geometry by adding it all to one
# mbp.
if has_prim_geometry:
# Contain this primitive geometry in a model instance.
model_id = mbp.AddModelInstance(
node.name + "::model_%d" % len(node_model_ids))
# Add a body for this node, and register any of the
# visual and collision geometry available.
# TODO(gizatt) This tree body index is built in to disambiguate names.
                # But I foresee a name-to-stuff resolution crisis when inference time comes...
# this might get resolved by the solution to that.
body = mbp.AddRigidBody(name=node.name, model_instance=model_id,
M_BBo_B=phys_geom_info.spatial_inertia)
body_id_to_node_map[body.index()] = node
tf = torch_tf_to_drake_tf(node.tf)
if phys_geom_info.fixed:
weld = mbp.WeldFrames(world_body.body_frame(),
body.body_frame(),
tf)
else:
node_to_free_body_ids_map[node].append(body.index())
mbp.SetDefaultFreeBodyPose(body, tf)
for k, (tf, geometry, color) in enumerate(phys_geom_info.visual_geometry):
mbp.RegisterVisualGeometry(
body=body,
X_BG=torch_tf_to_drake_tf(tf),
shape=geometry,
name=node.name + "_vis_%03d" % k,
diffuse_color=color)
for k, (tf, geometry, friction) in enumerate(phys_geom_info.collision_geometry):
mbp.RegisterCollisionGeometry(
body=body,
X_BG=torch_tf_to_drake_tf(tf),
shape=geometry,
name=node.name + "_col_%03d" % k,
coulomb_friction=friction)
# Handle adding each model from sdf/urdf.
if has_models:
for local_tf, model_path, root_body_name, q0_dict in phys_geom_info.model_paths:
model_id = parser.AddModelFromFile(
resolve_catkin_package_path(parser.package_map(), model_path),
node.name + "::" "model_%d" % len(node_model_ids))
if root_body_name is None:
root_body_ind_possibilities = mbp.GetBodyIndices(model_id)
assert len(root_body_ind_possibilities) == 1, \
"Please supply root_body_name for model with path %s" % model_path
root_body = mbp.get_body(root_body_ind_possibilities[0])
else:
root_body = mbp.GetBodyByName(
name=root_body_name,
model_instance=model_id)
body_id_to_node_map[root_body.index()] = node
node_tf = torch_tf_to_drake_tf(node.tf)
full_model_tf = node_tf.multiply(torch_tf_to_drake_tf(local_tf))
if phys_geom_info.fixed:
mbp.WeldFrames(world_body.body_frame(),
root_body.body_frame(),
full_model_tf)
else:
node_to_free_body_ids_map[node].append(root_body.index())
mbp.SetDefaultFreeBodyPose(root_body, full_model_tf)
# Handle initial joint state
if q0_dict is not None:
for joint_name in list(q0_dict.keys()):
q0_this = q0_dict[joint_name]
joint = mbp.GetMutableJointByName(
joint_name, model_instance=model_id)
# Reshape to make Drake happy.
q0_this = q0_this.reshape(joint.num_positions(), 1)
joint.set_default_positions(q0_this)
return builder, mbp, scene_graph, node_to_free_body_ids_map, body_id_to_node_map
def project_tree_to_feasibility(tree, constraints=[], jitter_q=None, do_forward_sim=False, zmq_url=None, prefix="projection", timestep=0.001, T=1.):
# Mutates tree into tree with bodies in closest
# nonpenetrating configuration.
builder, mbp, sg, node_to_free_body_ids_map, body_id_to_node_map = \
compile_scene_tree_to_mbp_and_sg(tree, timestep=timestep)
mbp.Finalize()
# Connect visualizer if requested. Wrap carefully to keep it
# from spamming the console.
if zmq_url is not None:
with open(os.devnull, 'w') as devnull:
with contextlib.redirect_stdout(devnull):
visualizer = ConnectMeshcatVisualizer(builder, sg, zmq_url=zmq_url, prefix=prefix)
diagram = builder.Build()
diagram_context = diagram.CreateDefaultContext()
mbp_context = diagram.GetMutableSubsystemContext(mbp, diagram_context)
q0 = mbp.GetPositions(mbp_context)
nq = len(q0)
if nq == 0:
        logging.warning("Generated MBP had no positions.")
return tree
# Set up projection NLP.
ik = InverseKinematics(mbp, mbp_context)
q_dec = ik.q()
prog = ik.prog()
# It's always a projection, so we always have this
# Euclidean norm error between the optimized q and
# q0.
prog.AddQuadraticErrorCost(np.eye(nq), q0, q_dec)
# Nonpenetration constraint.
ik.AddMinimumDistanceConstraint(0.01)
# Other requested constraints.
for constraint in constraints:
constraint.add_to_ik_prog(tree, ik, mbp, mbp_context, node_to_free_body_ids_map)
# Initial guess, which can be slightly randomized by request.
q_guess = q0
if jitter_q:
q_guess = q_guess + np.random.normal(0., jitter_q, size=q_guess.size)
prog.SetInitialGuess(q_dec, q_guess)
# Solve.
solver = SnoptSolver()
options = SolverOptions()
logfile = "/tmp/snopt.log"
os.system("rm %s" % logfile)
options.SetOption(solver.id(), "Print file", logfile)
options.SetOption(solver.id(), "Major feasibility tolerance", 1E-3)
options.SetOption(solver.id(), "Major optimality tolerance", 1E-3)
options.SetOption(solver.id(), "Major iterations limit", 300)
result = solver.Solve(prog, None, options)
if not result.is_success():
        logging.warning("Projection failed.")
print("Logfile: ")
with open(logfile) as f:
print(f.read())
qf = result.GetSolution(q_dec)
mbp.SetPositions(mbp_context, qf)
# If forward sim is requested, do a quick forward sim to get to
# a statically stable config.
if do_forward_sim:
sim = Simulator(diagram, diagram_context)
sim.set_target_realtime_rate(1000.)
sim.AdvanceTo(T)
# Reload poses back into tree
free_bodies = mbp.GetFloatingBaseBodies()
for body_id, node in body_id_to_node_map.items():
if body_id not in free_bodies:
continue
node.tf = drake_tf_to_torch_tf(mbp.GetFreeBodyPose(mbp_context, mbp.get_body(body_id)))
return tree
def project_tree_to_feasibility_via_sim(tree, constraints=[], zmq_url=None, prefix="projection", timestep=0.0005, T=10.):
# Mutates tree into tree with bodies in closest
# nonpenetrating configuration.
builder, mbp, sg, node_to_free_body_ids_map, body_id_to_node_map = \
compile_scene_tree_to_mbp_and_sg(tree, timestep=timestep)
mbp.Finalize()
# Connect visualizer if requested. Wrap carefully to keep it
# from spamming the console.
if zmq_url is not None:
with open(os.devnull, 'w') as devnull:
with contextlib.redirect_stdout(devnull):
visualizer = ConnectMeshcatVisualizer(builder, sg, zmq_url=zmq_url, prefix=prefix)
# Forward sim under langevin forces
force_source = builder.AddSystem(
DecayingForceToDesiredConfigSystem(mbp, mbp.GetPositions(mbp.CreateDefaultContext()))
)
builder.Connect(mbp.get_state_output_port(),
force_source.get_input_port(0))
builder.Connect(force_source.get_output_port(0),
mbp.get_applied_spatial_force_input_port())
diagram = builder.Build()
diagram_context = diagram.CreateDefaultContext()
mbp_context = diagram.GetMutableSubsystemContext(mbp, diagram_context)
q0 = mbp.GetPositions(mbp_context)
nq = len(q0)
if nq == 0:
        logging.warning("Generated MBP had no positions.")
return tree
# Make 'safe' initial configuration that randomly arranges objects vertically
k = 0
all_pos = []
for node in tree:
for body_id in node_to_free_body_ids_map[node]:
body = mbp.get_body(body_id)
tf = mbp.GetFreeBodyPose(mbp_context, body)
tf = RigidTransform(p=tf.translation() + np.array([0., 0., 1. + k*0.5]), R=tf.rotation())
mbp.SetFreeBodyPose(mbp_context, body, tf)
k += 1
sim = Simulator(diagram, diagram_context)
sim.set_target_realtime_rate(1000)
sim.AdvanceTo(T)
# Reload poses back into tree
free_bodies = mbp.GetFloatingBaseBodies()
for body_id, node in body_id_to_node_map.items():
if body_id not in free_bodies:
continue
node.tf = drake_tf_to_torch_tf(mbp.GetFreeBodyPose(mbp_context, mbp.get_body(body_id)))
return tree
def rejection_sample_structure_to_feasibility(
tree, constraints=[], max_n_iters=100,
do_forward_sim=False, timestep=0.001, T=1.):
# Pre-build prog to check ourselves against
builder, mbp, sg, node_to_free_body_ids_map, body_id_to_node_map = \
compile_scene_tree_to_mbp_and_sg(tree, timestep=timestep)
mbp.Finalize()
floating_base_bodies = mbp.GetFloatingBaseBodies()
diagram = builder.Build()
diagram_context = diagram.CreateDefaultContext()
mbp_context = diagram.GetMutableSubsystemContext(mbp, diagram_context)
q0 = mbp.GetPositions(mbp_context)
nq = len(q0)
# Set up projection NLP.
ik = InverseKinematics(mbp, mbp_context)
q_dec = ik.q()
prog = ik.prog()
# Nonpenetration constraint.
ik.AddMinimumDistanceConstraint(0.001)
# Other requested constraints.
for constraint in constraints:
constraint.add_to_ik_prog(tree, ik, mbp, mbp_context, node_to_free_body_ids_map)
from pyro.contrib.autoname import scope
best_q = q0
best_violation = np.inf
for k in range(max_n_iters):
node_queue = [tree.get_root()]
while len(node_queue) > 0:
parent = node_queue.pop(0)
children, rules = tree.get_children_and_rules(parent)
for child, rule in zip(children, rules):
with scope(prefix=parent.name):
rule.sample_child(parent, child)
node_queue.append(child)
for node, body_ids in node_to_free_body_ids_map.items():
for body_id in body_ids:
mbp.SetFreeBodyPose(mbp_context, mbp.get_body(body_id), torch_tf_to_drake_tf(node.tf))
q = mbp.GetPositions(mbp_context)
all_bindings = prog.GetAllConstraints()
satisfied = prog.CheckSatisfied(all_bindings, q)
if satisfied:
return tree, True
# Otherwise compute violation
evals = np.concatenate([binding.evaluator().Eval(q).flatten() for binding in all_bindings])
lbs = np.concatenate([binding.evaluator().lower_bound().flatten() for binding in all_bindings])
ubs = np.concatenate([binding.evaluator().upper_bound().flatten() for binding in all_bindings])
viols = np.maximum(np.clip(lbs - evals, 0., np.inf), np.clip(evals-ubs, 0., np.inf))
total_violation = np.sum(viols)
if total_violation < best_violation:
print("Updating best viol to ", best_violation)
best_violation = total_violation
best_q = q
# Load best q into tree
    mbp.SetPositions(mbp_context, best_q)
for body_id, node in body_id_to_node_map.items():
if body_id in floating_base_bodies:
node.tf = drake_tf_to_torch_tf(mbp.GetFreeBodyPose(mbp_context, mbp.get_body(body_id)))
return tree, False
def simulate_scene_tree(scene_tree, T, timestep=0.001, target_realtime_rate=1.0, meshcat=None):
builder, mbp, scene_graph, _, _ = compile_scene_tree_to_mbp_and_sg(
scene_tree, timestep=timestep)
mbp.Finalize()
if meshcat:
visualizer = ConnectMeshcatVisualizer(builder, scene_graph,
zmq_url=meshcat)
diagram = builder.Build()
diag_context = diagram.CreateDefaultContext()
    sim = Simulator(diagram, diag_context)
sim.set_target_realtime_rate(target_realtime_rate)
sim.AdvanceTo(T)
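# Hedged end-to-end sketch: project a sampled scene tree to a non-penetrating
# configuration, then forward-simulate it briefly. The tree and the optional
# meshcat server URL are assumptions supplied by the caller.
def _example_project_and_simulate(tree, zmq_url=None):
    tree = project_tree_to_feasibility(tree, do_forward_sim=True, T=2., zmq_url=zmq_url)
    simulate_scene_tree(tree, T=5., timestep=0.001, meshcat=zmq_url)
    return tree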
|
python
|
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class TestModel(models.Model):
field1 = models.CharField(max_length=255)
field2 = models.IntegerField()
def __str__(self):
return '%s%d' % (self.field1, self.field2)
@python_2_unicode_compatible
class RelatedToTestModel(models.Model):
field = models.ForeignKey(TestModel, on_delete=models.CASCADE)
def __str__(self):
        return str(self.field)
@python_2_unicode_compatible
class SearchableTestModel(models.Model):
field1 = models.CharField(max_length=255)
field2 = models.IntegerField()
def __str__(self):
return '%s%d' % (self.field1, self.field2)
@staticmethod
def autocomplete_search_fields():
return 'field1'
|
python
|
# -*- coding: utf-8 -*-
import torch.nn as nn
import torchvision
def densenet(n_classes, pretrained=False, n_layers=121, **kwargs):
    '''
    Creates a DenseNet based on the parameters
    Arguments:
        n_layers: The number of hidden layers
        n_classes: The number of classes/labels
        pretrained: Boolean value indicating whether the pretrained densenet should be used
    Returns:
        A DenseNet model
    '''
dnet = None
    if n_layers == 121:
        dnet = torchvision.models.densenet121(pretrained=pretrained, **kwargs)
    elif n_layers == 161:
        dnet = torchvision.models.densenet161(pretrained=pretrained, **kwargs)
    elif n_layers == 201:
        dnet = torchvision.models.densenet201(pretrained=pretrained, **kwargs)
    else:
        raise ValueError("Unsupported number of DenseNet layers: %d" % n_layers)
num_features = dnet.classifier.in_features
dnet.classifier = nn.Sequential(
nn.Linear(num_features, num_features),
nn.Dropout(p=0.1),
nn.Linear(num_features, n_classes),
nn.Sigmoid(),
)
return dnet
def resnet(num_classes, pretrained=False, n_layers=50, **kwargs):
    '''
    Creates a ResNet based on the parameters
    Arguments:
        n_layers: The number of hidden layers
        num_classes: The number of classes/labels
        pretrained: Boolean value indicating whether the pretrained resnet should be used
    Returns:
        A ResNet model
    '''
    rnet = torchvision.models.resnet50(pretrained=pretrained)
    if n_layers == 50:
        rnet = torchvision.models.resnet50(pretrained=pretrained, **kwargs)
    elif n_layers == 101:
        rnet = torchvision.models.resnet101(pretrained=pretrained, **kwargs)
    elif n_layers == 152:
        rnet = torchvision.models.resnet152(pretrained=pretrained, **kwargs)
num_features = rnet.fc.in_features
rnet.fc = nn.Sequential(
nn.Linear(num_features, num_features),
nn.ReLU(inplace=True),
nn.Dropout(p=0.1),
nn.Linear(num_features, num_classes),
nn.Sigmoid(),
)
return rnet
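# Minimal usage sketch; the class counts are illustrative and pretrained=False
# avoids downloading torchvision weights.
if __name__ == '__main__':
    example_densenet = densenet(n_classes=14, pretrained=False, n_layers=121)
    example_resnet = resnet(num_classes=14, pretrained=False, n_layers=50)
    print(example_densenet.classifier)
    print(example_resnet.fc)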
|
python
|
#hwIo.py
#A part of NonVisual Desktop Access (NVDA)
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
#Copyright (C) 2015-2018 NV Access Limited, Babbage B.V.
"""Raw input/output for braille displays via serial and HID.
See the L{Serial} and L{Hid} classes.
Braille display drivers must be thread-safe to use this, as it utilises a background thread.
See L{braille.BrailleDisplayDriver.isThreadSafe}.
"""
import sys
import ctypes
from ctypes import byref
from ctypes.wintypes import DWORD, USHORT
from typing import Optional, Any, Union, Tuple, Callable
import serial
from serial.win32 import OVERLAPPED, FILE_FLAG_OVERLAPPED, INVALID_HANDLE_VALUE, ERROR_IO_PENDING, COMMTIMEOUTS, CreateFile, SetCommTimeouts
import winKernel
import braille
from logHandler import log
import config
import time
LPOVERLAPPED_COMPLETION_ROUTINE = ctypes.WINFUNCTYPE(None, DWORD, DWORD, serial.win32.LPOVERLAPPED)
def _isDebug():
return config.conf["debugLog"]["hwIo"]
class IoBase(object):
"""Base class for raw I/O.
This watches for data of a specified size and calls a callback when it is received.
"""
def __init__(
self,
fileHandle: Union[ctypes.wintypes.HANDLE],
onReceive: Callable[[bytes], None],
writeFileHandle: Optional[ctypes.wintypes.HANDLE] = None,
onReceiveSize: int = 1
):
"""Constructor.
@param fileHandle: A handle to an open I/O device opened for overlapped I/O.
If L{writeFileHandle} is specified, this is only for input.
The serial implementation uses a _port_handle member for this argument.
@param onReceive: A callable taking the received data as its only argument.
@param writeFileHandle: A handle to an open output device opened for overlapped I/O.
@param onReceiveSize: The size (in bytes) of the data with which to call C{onReceive}.
"""
self._file = fileHandle
self._writeFile = writeFileHandle if writeFileHandle is not None else fileHandle
self._onReceive = onReceive
self._readSize = onReceiveSize
self._readBuf = ctypes.create_string_buffer(onReceiveSize)
self._readOl = OVERLAPPED()
self._recvEvt = winKernel.createEvent()
self._ioDoneInst = LPOVERLAPPED_COMPLETION_ROUTINE(self._ioDone)
self._writeOl = OVERLAPPED()
# Do the initial read.
@winKernel.PAPCFUNC
def init(param):
self._initApc = None
self._asyncRead()
# Ensure the APC stays alive until it runs.
self._initApc = init
braille._BgThread.queueApc(init)
def waitForRead(self, timeout:Union[int, float]) -> bool:
"""Wait for a chunk of data to be received and processed.
This will return after L{onReceive} has been called or when the timeout elapses.
@param timeout: The maximum time to wait in seconds.
@return: C{True} if received data was processed before the timeout,
C{False} if not.
"""
timeout= int(timeout*1000)
while True:
curTime = time.time()
res = winKernel.waitForSingleObjectEx(self._recvEvt, timeout, True)
if res==winKernel.WAIT_OBJECT_0:
return True
elif res==winKernel.WAIT_TIMEOUT:
if _isDebug():
log.debug("Wait timed out")
return False
elif res==winKernel.WAIT_IO_COMPLETION:
if _isDebug():
log.debug("Waiting interrupted by completed i/o")
timeout -= int((time.time()-curTime)*1000)
def _prepareWriteBuffer(self, data: bytes) -> Tuple[int, ctypes.c_char_p]:
""" Private helper method to allow derived classes to prepare buffers in different ways"""
size = len(data)
return (
size,
ctypes.create_string_buffer(data) # this will append a null char, which is intentional
)
def write(self, data: bytes):
if not isinstance(data, bytes):
raise TypeError("Expected argument 'data' to be of type 'bytes'")
if _isDebug():
log.debug("Write: %r" % data)
size, data = self._prepareWriteBuffer(data)
if not ctypes.windll.kernel32.WriteFile(self._writeFile, data, size, None, byref(self._writeOl)):
if ctypes.GetLastError() != ERROR_IO_PENDING:
if _isDebug():
log.debug("Write failed: %s" % ctypes.WinError())
raise ctypes.WinError()
byteData = DWORD()
ctypes.windll.kernel32.GetOverlappedResult(self._writeFile, byref(self._writeOl), byref(byteData), True)
def close(self):
if _isDebug():
log.debug("Closing")
self._onReceive = None
if hasattr(self, "_file") and self._file is not INVALID_HANDLE_VALUE:
ctypes.windll.kernel32.CancelIoEx(self._file, byref(self._readOl))
if hasattr(self, "_writeFile") and self._writeFile not in (self._file, INVALID_HANDLE_VALUE):
ctypes.windll.kernel32.CancelIoEx(self._writeFile, byref(self._readOl))
winKernel.closeHandle(self._recvEvt)
def __del__(self):
try:
self.close()
except AttributeError:
if _isDebug():
log.debugWarning("Couldn't delete object gracefully", exc_info=True)
def _asyncRead(self):
# Wait for _readSize bytes of data.
# _ioDone will call onReceive once it is received.
# onReceive can then optionally read additional bytes if it knows these are coming.
ctypes.windll.kernel32.ReadFileEx(self._file, self._readBuf, self._readSize, byref(self._readOl), self._ioDoneInst)
def _ioDone(self, error, numberOfBytes: int, overlapped):
if not self._onReceive:
# close has been called.
self._ioDone = None
return
elif error != 0:
raise ctypes.WinError(error)
self._notifyReceive(self._readBuf[:numberOfBytes])
winKernel.kernel32.SetEvent(self._recvEvt)
self._asyncRead()
def _notifyReceive(self, data: bytes):
"""Called when data is received.
The base implementation just calls the onReceive callback provided to the constructor.
This can be extended to perform tasks before/after the callback.
@type data: bytes
"""
if not isinstance(data, bytes):
raise TypeError("Expected argument 'data' to be of type 'bytes'")
if _isDebug():
log.debug("Read: %r" % data)
try:
self._onReceive(data)
except:
log.error("", exc_info=True)
class Serial(IoBase):
"""Raw I/O for serial devices.
This extends pyserial to call a callback when data is received.
"""
def __init__(
self,
*args,
onReceive: Callable[[bytes], None],
**kwargs):
"""Constructor.
Pass the arguments you would normally pass to L{serial.Serial}.
There is also one additional required keyword argument.
@param onReceive: A callable taking a byte of received data as its only argument.
This callable can then call C{read} to get additional data if desired.
"""
self._ser = None
self.port = args[0] if len(args) >= 1 else kwargs["port"]
if _isDebug():
log.debug("Opening port %s" % self.port)
try:
self._ser = serial.Serial(*args, **kwargs)
except Exception as e:
if _isDebug():
log.debug("Open failed: %s" % e)
raise
self._origTimeout = self._ser.timeout
# We don't want a timeout while we're waiting for data.
self._setTimeout(None)
super(Serial, self).__init__(self._ser._port_handle, onReceive)
def read(self, size=1) -> bytes:
data = self._ser.read(size)
if _isDebug():
log.debug("Read: %r" % data)
return data
def write(self, data: bytes):
if _isDebug():
log.debug("Write: %r" % data)
self._ser.write(data)
def close(self):
if not self._ser:
return
super(Serial, self).close()
self._ser.close()
def _notifyReceive(self, data: bytes):
# Set the timeout for onReceive in case it does a sync read.
self._setTimeout(self._origTimeout)
super(Serial, self)._notifyReceive(data)
self._setTimeout(None)
def _setTimeout(self, timeout: Optional[int]):
# #6035: pyserial reconfigures all settings of the port when setting a timeout.
# This can cause error 'Cannot configure port, some setting was wrong.'
# Therefore, manually set the timeouts using the Win32 API.
# Adapted from pyserial 3.4.
timeouts = COMMTIMEOUTS()
if timeout is not None:
if timeout == 0:
timeouts.ReadIntervalTimeout = serial.win32.MAXDWORD
else:
timeouts.ReadTotalTimeoutConstant = max(int(timeout * 1000), 1)
if timeout != 0 and self._ser._inter_byte_timeout is not None:
timeouts.ReadIntervalTimeout = max(int(self._ser._inter_byte_timeout * 1000), 1)
if self._ser._write_timeout is not None:
if self._ser._write_timeout == 0:
timeouts.WriteTotalTimeoutConstant = serial.win32.MAXDWORD
else:
timeouts.WriteTotalTimeoutConstant = max(int(self._ser._write_timeout * 1000), 1)
SetCommTimeouts(self._ser._port_handle, ctypes.byref(timeouts))
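# Hedged usage sketch; the port name and baud rate are illustrative only, and
# a real driver would parse the received bytes instead of just logging them.
def _exampleOpenSerial(port="COM3"):
	def _onReceive(data: bytes):
		log.debug("Example driver received %r" % data)
	return Serial(port, baudrate=19200, timeout=0.2, onReceive=_onReceive)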
class HIDP_CAPS (ctypes.Structure):
_fields_ = (
("Usage", USHORT),
("UsagePage", USHORT),
("InputReportByteLength", USHORT),
("OutputReportByteLength", USHORT),
("FeatureReportByteLength", USHORT),
("Reserved", USHORT * 17),
("NumberLinkCollectionNodes", USHORT),
("NumberInputButtonCaps", USHORT),
("NumberInputValueCaps", USHORT),
("NumberInputDataIndices", USHORT),
("NumberOutputButtonCaps", USHORT),
("NumberOutputValueCaps", USHORT),
("NumberOutputDataIndices", USHORT),
("NumberFeatureButtonCaps", USHORT),
("NumberFeatureValueCaps", USHORT),
("NumberFeatureDataIndices", USHORT)
)
class Hid(IoBase):
"""Raw I/O for HID devices.
"""
_featureSize: int
def __init__(self, path: str, onReceive: Callable[[bytes], None], exclusive: bool = True):
"""Constructor.
@param path: The device path.
This can be retrieved using L{hwPortUtils.listHidDevices}.
@param onReceive: A callable taking a received input report as its only argument.
@param exclusive: Whether to block other application's access to this device.
"""
if _isDebug():
log.debug("Opening device %s" % path)
handle = CreateFile(
path,
winKernel.GENERIC_READ | winKernel.GENERIC_WRITE,
0 if exclusive else winKernel.FILE_SHARE_READ|winKernel.FILE_SHARE_WRITE,
None,
winKernel.OPEN_EXISTING,
FILE_FLAG_OVERLAPPED,
None
)
if handle == INVALID_HANDLE_VALUE:
if _isDebug():
log.debug("Open failed: %s" % ctypes.WinError())
raise ctypes.WinError()
pd = ctypes.c_void_p()
if not ctypes.windll.hid.HidD_GetPreparsedData(handle, byref(pd)):
raise ctypes.WinError()
caps = HIDP_CAPS()
ctypes.windll.hid.HidP_GetCaps(pd, byref(caps))
ctypes.windll.hid.HidD_FreePreparsedData(pd)
if _isDebug():
log.debug("Report byte lengths: input %d, output %d, feature %d"
% (caps.InputReportByteLength, caps.OutputReportByteLength,
caps.FeatureReportByteLength))
self._featureSize = caps.FeatureReportByteLength
self._writeSize = caps.OutputReportByteLength
# Reading any less than caps.InputReportByteLength is an error.
super(Hid, self).__init__(handle, onReceive,
onReceiveSize=caps.InputReportByteLength
)
def _prepareWriteBuffer(self, data: bytes) -> Tuple[int, ctypes.c_char_p]:
""" For HID devices, the buffer to be written must match the
OutputReportByteLength fetched from HIDP_CAPS, to ensure this is the case
we create a buffer of that size. We also check that data is not bigger than
the write size, which we do not currently support. If it becomes necessary to
support this, we could split the data and send it several chunks.
"""
# On Windows 7, writing any less than caps.OutputReportByteLength is also an error.
# See also: http://www.onarm.com/forum/20152/
if len(data) > self._writeSize:
log.error(u"Attempting to send a buffer larger than supported.")
			raise RuntimeError("Unable to send buffer of length %d" % len(data))
return (
self._writeSize,
ctypes.create_string_buffer(data, self._writeSize)
)
def getFeature(self, reportId: bytes) -> bytes:
"""Get a feature report from this device.
@param reportId: The report id.
@return: The report, including the report id.
"""
buf = ctypes.create_string_buffer(reportId, size=self._featureSize)
if not ctypes.windll.hid.HidD_GetFeature(self._file, buf, self._featureSize):
if _isDebug():
log.debug("Get feature %r failed: %s"
% (reportId, ctypes.WinError()))
raise ctypes.WinError()
if _isDebug():
log.debug("Get feature: %r" % buf.raw)
return buf.raw
def setFeature(self, report: bytes) -> None:
"""Send a feature report to this device.
@param report: The report, including its id.
"""
buf = ctypes.create_string_buffer(report, size=len(report))
bufSize = ctypes.sizeof(buf)
if _isDebug():
log.debug("Set feature: %r" % report)
result = ctypes.windll.hid.HidD_SetFeature(
self._file,
buf,
bufSize
)
if not result:
if _isDebug():
log.debug("Set feature failed: %s" % ctypes.WinError())
raise ctypes.WinError()
def setOutputReport(self, report: bytes) -> None:
"""
Write the given report to the device using HidD_SetOutputReport.
This is instead of using the standard WriteFile which may freeze with some USB HID implementations.
@param report: The report, including its id.
"""
buf = ctypes.create_string_buffer(report, size=len(report))
bufSize = ctypes.sizeof(buf)
if _isDebug():
log.debug("Set output report: %r" % report)
result = ctypes.windll.hid.HidD_SetOutputReport(
self._writeFile,
buf,
bufSize
)
if not result:
if _isDebug():
log.debug("Set output report failed: %s" % ctypes.WinError())
raise ctypes.WinError()
def close(self):
super(Hid, self).close()
winKernel.closeHandle(self._file)
self._file = None
class Bulk(IoBase):
"""Raw I/O for bulk USB devices.
This implementation assumes that the used Bulk device has two separate end points for input and output.
"""
def __init__(
self, path: str, epIn: int, epOut: int,
onReceive: Callable[[bytes], None],
onReceiveSize: int = 1
):
"""Constructor.
@param path: The device path.
@param epIn: The endpoint to read data from.
@param epOut: The endpoint to write data to.
@param onReceive: A callable taking a received input report as its only argument.
"""
if _isDebug():
log.debug("Opening device %s" % path)
readPath="{path}\\{endpoint}".format(path=path,endpoint=epIn)
writePath="{path}\\{endpoint}".format(path=path,endpoint=epOut)
readHandle = CreateFile(readPath, winKernel.GENERIC_READ,
0, None, winKernel.OPEN_EXISTING, FILE_FLAG_OVERLAPPED, None)
if readHandle == INVALID_HANDLE_VALUE:
if _isDebug():
log.debug("Open read handle failed: %s" % ctypes.WinError())
raise ctypes.WinError()
writeHandle = CreateFile(writePath, winKernel.GENERIC_WRITE,
0, None, winKernel.OPEN_EXISTING, FILE_FLAG_OVERLAPPED, None)
if writeHandle == INVALID_HANDLE_VALUE:
if _isDebug():
log.debug("Open write handle failed: %s" % ctypes.WinError())
raise ctypes.WinError()
super(Bulk, self).__init__(readHandle, onReceive,
writeFileHandle=writeHandle, onReceiveSize=onReceiveSize)
def close(self):
super(Bulk, self).close()
if hasattr(self, "_file") and self._file is not INVALID_HANDLE_VALUE:
winKernel.closeHandle(self._file)
if hasattr(self, "_writeFile") and self._writeFile is not INVALID_HANDLE_VALUE:
winKernel.closeHandle(self._writeFile)
def boolToByte(arg: bool) -> bytes:
return arg.to_bytes(
length=1,
byteorder=sys.byteorder, # for a single byte big/little endian does not matter.
signed=False # Since this represents length, it makes no sense to send a negative value.
)
def intToByte(arg: int) -> bytes:
""" Convert an int (value < 256) to a single byte bytes object
"""
return arg.to_bytes(
length=1, # Will raise if value overflows, eg arg > 255
byteorder=sys.byteorder, # for a single byte big/little endian does not matter.
signed=False # Since this represents length, it makes no sense to send a negative value.
)
def getByte(arg: bytes, index: int) -> bytes:
""" Return the single byte at index"""
return arg[index:index+1]
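# Illustrative sketch of the byte helpers above; the values are arbitrary.
def _exampleByteHelpers():
	assert boolToByte(True) == b"\x01"
	assert intToByte(3) == b"\x03"
	assert getByte(b"\x01\x02\x03", 1) == b"\x02"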
|
python
|
from finta import TA
import scipy as sp
from scipy.signal import argrelextrema
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import yfinance as yf
from collections import defaultdict
class Loss(object):
def __init__(self,method, sup, res):
self.method = method
self.sup = sup
self.res = res
def __repr__(self):
return f"{self.method} sup:{self.sup} res:{self.res}"
class Kline(object):
def __init__(self):
self.buy = [self.double_bottom, self.hammer_head]
self.sell = []
def double_bottom(self, row):
pass
def hammer_head(self, row):
#when decreasing
pass
class MA(object): #using different combinations
def __init__(self,yahoo_tick):
self.days = [5,10,20,50,80,120,180,200]
self.fibdays = [8,21,34,55,89,144,233]
self.madict = {} #key:{day:pddf} #everything in here is shown in the plot
self.plotma = [] #ma lines to be plotted
self.maxdaylength = max(max(self.days), max(self.fibdays))
self.expected_num = 20
self.shiftAmount = 7 #how many max to look back
self.chosen_sup ={}
self.chosen_res = {}
        self.yahoo_tick = yahoo_tick  # keep a reference so refresh() can re-download data
        self.half_hist = yahoo_tick.history(period=f"60d", interval="30m")
self.hour_hist = yahoo_tick.history(period=f"60d", interval="1h")
self.__hist = yahoo_tick.history(period=f"{self.maxdaylength*2}d", interval="1d")
self.week_hist = yahoo_tick.history(period=f"{self.maxdaylength*2}d", interval="5d")
self.month_hist = yahoo_tick.history(period=f"{self.maxdaylength*2}d", interval="1mo")
self.get_MA(self.hist)
    def refresh(self, choice=None):
        if choice == "hour":
            self.hour_hist = self.yahoo_tick.history(period=f"60d", interval="1h")
        elif choice == "half":
            self.half_hist = self.yahoo_tick.history(period=f"60d", interval="30m")
        elif choice == "week":
            self.week_hist = self.yahoo_tick.history(period=f"{self.maxdaylength*2}d", interval="5d")
        elif choice == "month":
            self.month_hist = self.yahoo_tick.history(period=f"{self.maxdaylength*2}d", interval="1mo")
        elif choice == "day":
            self.__hist = self.yahoo_tick.history(period=f"{self.maxdaylength*2}d", interval="1d")
        else:
            self.half_hist = self.yahoo_tick.history(period=f"60d", interval="30m")
            self.hour_hist = self.yahoo_tick.history(period=f"60d", interval="1h")
            self.__hist = self.yahoo_tick.history(period=f"{self.maxdaylength*2}d", interval="1d")
            self.week_hist = self.yahoo_tick.history(period=f"{self.maxdaylength*2}d", interval="5d")
            self.month_hist = self.yahoo_tick.history(period=f"{self.maxdaylength*2}d", interval="1mo")
@property
def hist(self):
return self.__hist
@hist.setter
def hist(self, value):
self.__hist = value
self.get_MA(value)
def get_MA(self, hist):
new_cols = [x.lower() for x in hist.columns]
hist.columns = new_cols
for day in self.days:
self.madict[f"SMA_{day}"] = TA.SMA(hist, day)
self.madict[f"SMA_{day}"].name = f"SMA_{day}"
for day in self.days:
self.madict[f"WMA_{day}"] = TA.WMA(hist, day)
for day in self.days:
self.madict[f"HMA_{day}"] = TA.HMA(hist, day)
for day in self.days:
self.madict[f"EMA_{day}"] = TA.EMA(hist, day)
for day in self.fibdays:
self.madict[f"SMA_{day}F"] = TA.SMA(hist, day)
for day in self.fibdays:
self.madict[f"WMA_{day}F"] = TA.WMA(hist, day)
for day in self.fibdays:
self.madict[f"HMA_{day}F"] = TA.HMA(hist, day)
for day in self.fibdays:
self.madict[f"EMA_{day}F"] = TA.EMA(hist, day)
mavalue = self.madict[f"SMA_50"]
sma50 = np.argwhere(np.isnan(mavalue.values)).flatten()
curve_point = np.setxor1d(np.argwhere(np.diff(np.sign(self.madict[f"SMA_20"].values - mavalue.values))).flatten(), sma50)
indexs = mavalue.iloc[curve_point]
dff = self.madict[f"SMA_50"].to_frame('50dfvalue')
dff["inter"] = np.nan
dff.loc[indexs.index,"inter"] = mavalue.loc[indexs.index]
self.overlap_sma2050 = dff["inter"]
#print(dff)
'''
plt.plot(self.madict[f"SMA_50"].index, self.madict[f"SMA_50"] , "-o")
plt.plot(self.madict[f"SMA_20"].index, self.madict[f"SMA_20"] , "-o")
plt.plot(self.overlap_sma2050.index, self.overlap_sma2050, "o")
plt.show()
exit()
'''
def findATR(self, hist):
result= TA.ATR(hist)
return result.dropna()
def findRSI(self, hist):
result = TA.RSI(hist, 14)
return result.dropna()
def update_hists(self, row_data):
#update half, hour, day, week, month
pass
def find_max_min(self, hist): #find all the max points and all the min points
arr_size = len(hist.Bottom)
expected_width = arr_size // self.expected_num // 2
print('expected width of peaks: ', expected_width)
maximaIdxs = sp.signal.find_peaks_cwt(hist.Bottom, np.linspace(2, expected_width, 10))
minimaIdxs = sp.signal.find_peaks_cwt(-1*hist.Bottom, np.linspace(2, expected_width, 10))
hist["critical"] = ""
old_index = hist.index.name
hist = hist.reset_index()
hist.loc[minimaIdxs, "critical"] = "min"
hist.loc[maximaIdxs, "critical"] = "max"
hist = hist.set_index(old_index)
hist = self.findSupandRes(hist)
return hist
def findSupandRes(self, hist):
lossvalue = {}
for method, series in self.madict.items():
series.name = method
old_index = hist.index
if hist.index.name == "Datetime":
hist.index = hist.index.date
hist = pd.merge(hist, series,left_index=True, right_index=True)
hist.index = old_index
min_df = hist[hist["critical"] == "min"]
min_df_shift = pd.DataFrame(index=min_df.index)
for i in range(1, self.shiftAmount+1):
min_df_shift[[f'{method}_{i}', f'Bottom_{i}']] = min_df[[method, "Bottom"]].shift(i)
#min_df_shift[[f'{method}_{-i}', f'Bottom_{-i}']] = min_df[[method, "Bottom"]].shift(-i, fill_value=0)
max_df = hist[hist["critical"] == "max"]
max_df_shift = pd.DataFrame(index=max_df.index)
for i in range(1, self.shiftAmount+1):
max_df_shift[[f'{method}_{i}', f'Bottom_{i}']] = max_df[[method, "Bottom"]].shift(i)
#max_df_shift[[f'{method}_{-i}', f'Bottom_{-i}']] = max_df[[method, "Bottom"]].shift(-i, fill_value=0)
sup_cond, res_cond = self.findcondition(hist, method, min_df_shift, max_df_shift)
hist.loc[min_df.index, f'{method}valuemin'] = sup_cond
hist.loc[max_df.index, f'{method}valuemax'] = res_cond
sup_count = hist.loc[min_df.index, f'{method}valuemin'].sum()
res_count = hist.loc[max_df.index, f'{method}valuemax'].sum()
self.chosen_sup[method] = sup_cond
self.chosen_res[method] = res_cond
lossvalue[method] = Loss(method, sup_count, res_count)
print(lossvalue)
key_min_sup = max(lossvalue, key=lambda k: lossvalue[k].sup)
key_min_res = max(lossvalue, key=lambda k: lossvalue[k].res)
supMethod = lossvalue[key_min_sup].method
resMethod = lossvalue[key_min_res].method
sup_cond = self.chosen_sup[supMethod]
index_v = sup_cond[sup_cond==1].index
plt.plot(sup_cond[sup_cond==1].index, hist.loc[index_v, "close"], "o", markersize=12, label="sup")
res_cond = self.chosen_res[resMethod]
index_v = res_cond[res_cond==1].index
plt.plot(res_cond[res_cond==1].index, hist.loc[index_v, "close"], "o", markersize=12, label="res")
print(f"{lossvalue[key_min_sup].method} sup:{lossvalue[key_min_sup].sup}")
print(f"{lossvalue[key_min_res].method} res:{lossvalue[key_min_res].res}")
if lossvalue[key_min_sup].sup != 0:
self.plotma.append(lossvalue[key_min_sup].method)
if lossvalue[key_min_res].res != 0:
self.plotma.append(lossvalue[key_min_res].method)
#self.plotma.append("HMA_80")
return hist
def findcondition(self, hist, method, min_df, max_df):
col_names = min_df.columns
min_df["all_met"] = min_df.apply(lambda row : self.filter_condition(row, "min", col_names), axis = 1)
max_df["all_met"] = max_df.apply(lambda row : self.filter_condition(row, "max", col_names), axis = 1)
sup_cond = (hist.loc[min_df.index, method] <= hist.loc[min_df.index, 'Bottom']) & \
(hist.loc[min_df.index, method] >= hist.loc[min_df.index, 'low'])& \
min_df["all_met"]
#(hist.loc[min_df.index, method] >= hist.loc[min_df.index, 'Bottom'] - hist.loc[min_df.index, 'Bottom']*self.threshold) & \
res_cond = (hist.loc[max_df.index, method] >= hist.loc[max_df.index, 'Bottom']) & \
(hist.loc[max_df.index, method] <= hist.loc[max_df.index, 'high']) & \
max_df["all_met"]
#(hist.loc[max_df.index, method] <= hist.loc[max_df.index, 'Bottom'] + hist.loc[max_df.index, 'Bottom']*self.threshold) & \
return sup_cond.astype(int), res_cond.astype(int)
def filter_condition(self, row, target, col_names):
results = []
if target == "min":
for i, value in enumerate(row):
if i % 2 == 0:
results.append(value <= row[col_names[i+1]])
if target == "max":
for i, value in enumerate(row):
if i % 2 == 0:
results.append(value >= row[col_names[i+1]])
return np.all(results)
if __name__ == "__main__":
smoothing = 3
window = 10
yahoo_tick = yf.Ticker("SENS")
myMA = MA(yahoo_tick)
ticks = ["SENS", "GIK", "NNDM", "SPY"]
ema_list = [5]
window_list = [5]
results = myMA.screener(ticks, ema_list, window_list, plot=True, results=True)
print(results)
plt.show()
#minmax = myMA.get_max_min(smoothing, window)
#print(minmax)
|
python
|
# This file is Copyright (c) 2010 by the GPSD project
# BSD terms apply: see the file COPYING in the distribution root for details.
#
# Creates build/lib.linux-${arch}-${pyvers}/gpspacket.so,
# where ${arch} is an architecture and ${pyvers} is a Python version.
from distutils.core import setup, Extension
import os
import sys
# For VPATH builds, this script must be run from $(srcdir) with the
# abs_builddir environment variable set to the location of the build
# directory. This is necessary because Python's distutils package
# does not have built-in support for VPATH builds.
# These dependencies are enforced here and not in the Makefile to make
# it easier to build the Python parts without building everything else
# (the user can run 'python setup.py' without having to run 'make').
needed_files = ['gpsd.h', 'packet_names.h']
created_files = []
manpages = []
try:
where = sys.argv.index('--mangenerator')
# Doesn't matter what it is, just that we have one
if sys.argv[where+1]:
manpages=[('share/man/man1', ['gpscat.1', 'gpsfake.1','gpsprof.1',
'xgps.1', 'xgpsspeed.1'])]
print("Installing manual pages, generator is %s" %( sys.argv[where+1]))
sys.argv = sys.argv[:where] + sys.argv[where+2:]
except ValueError:
pass
if not manpages:
print("No XML processor, omitting manual-page installation.")
MAKE = ("MAKE" in os.environ) and os.environ["MAKE"] or "make"
if 'clean' not in sys.argv:
abs_builddir = ("abs_builddir" in os.environ) and os.environ["abs_builddir"] or ""
if not os.path.exists(os.path.join(abs_builddir, 'gpsd_config.h')):
sys.stderr.write('\nPlease run configure first!\n')
sys.exit(1)
cdcmd = abs_builddir and ("cd '" + abs_builddir + "' && ") or ""
for f_name in needed_files:
# TODO: Shouldn't make be run unconditionally in case a
# dependency of f_name has been updated?
if not os.path.exists(os.path.join(abs_builddir, f_name)):
cmd = cdcmd + MAKE + " '" + f_name + "'"
print(cmd)
make_out = os.popen(cmd)
print(make_out.read())
if make_out.close():
sys.exit(1)
created_files.append(f_name)
gpspacket_sources = ["gpspacket.c", "packet.c", "isgps.c",
"driver_rtcm2.c", "strl.c", "hex.c", "crc24q.c"]
include_dirs = [ os.path.realpath(os.path.dirname(__file__)) ]
version_out = os.popen(MAKE + " -s version")
version = version_out.read()
print(version)
if version_out.close():
sys.exit(1)
version = version.split('\n')[-2]
version = version.strip()
setup( name="gps",
version=version,
description='Python libraries for the gpsd service daemon',
url="http://gpsd.berlios.de/",
author='the GPSD project',
author_email="[email protected]",
license="BSD",
ext_modules=[
Extension("gps.packet", gpspacket_sources, include_dirs=include_dirs),
Extension("gps.clienthelpers", ["gpsclient.c", "geoid.c", "gpsdclient.c", "strl.c"], include_dirs=include_dirs)
],
packages = ['gps'],
scripts = ['gpscat','gpsfake','gpsprof', 'xgps', 'xgpsspeed'],
data_files= manpages
)
|
python
|
"""
The :class:`Signature` object and associated functionality. This
provides a way to represent rich callable objects and type check
calls.
"""
from collections import defaultdict
from .error_code import ErrorCode
from .stacked_scopes import (
AndConstraint,
Composite,
Constraint,
ConstraintType,
NULL_CONSTRAINT,
AbstractConstraint,
Varname,
)
from .value import (
AnnotatedValue,
AsyncTaskIncompleteValue,
CanAssignContext,
GenericValue,
HasAttrExtension,
HasAttrGuardExtension,
KnownValue,
ParameterTypeGuardExtension,
SequenceIncompleteValue,
DictIncompleteValue,
TypeGuardExtension,
TypeVarValue,
TypedDictValue,
UNRESOLVED_VALUE,
Value,
TypeVarMap,
CanAssign,
CanAssignError,
extract_typevars,
stringify_object,
unify_typevar_maps,
unite_values,
)
import ast
import asynq
import collections.abc
from dataclasses import dataclass, field
from functools import reduce
from types import MethodType, FunctionType
import inspect
import qcore
from qcore.helpers import safe_str
from typing import (
Any,
Iterable,
NamedTuple,
Optional,
ClassVar,
Union,
Callable,
Dict,
List,
Set,
TypeVar,
Tuple,
TYPE_CHECKING,
)
from typing_extensions import Literal
if TYPE_CHECKING:
from .name_check_visitor import NameCheckVisitor
EMPTY = inspect.Parameter.empty
ARGS = qcore.MarkerObject("*args")
KWARGS = qcore.MarkerObject("**kwargs")
# Representation of a single argument to a call. Second member is
# None for positional args, str for keyword args, ARGS for *args, KWARGS
# for **kwargs.
Argument = Tuple[Composite, Union[None, str, Literal[ARGS], Literal[KWARGS]]]
class ImplReturn(NamedTuple):
"""Return value of :term:`impl` functions.
These functions return either a single :class:`pyanalyze.value.Value`
object, indicating what the function returns, or an instance of this class.
"""
return_value: Value
"""The return value of the function."""
constraint: AbstractConstraint = NULL_CONSTRAINT
"""A :class:`pyanalyze.stacked_scopes.Constraint` indicating things that are true
if the function returns a truthy value."""
no_return_unless: AbstractConstraint = NULL_CONSTRAINT
"""A :class:`pyanalyze.stacked_scopes.Constraint` indicating things that are true
unless the function does not return."""
@dataclass
class CallContext:
"""The context passed to an :term:`impl` function."""
vars: Dict[str, Value]
"""Dictionary of variable names passed to the function."""
visitor: "NameCheckVisitor"
"""Using the visitor can allow various kinds of advanced logic
in impl functions."""
bound_args: inspect.BoundArguments
node: ast.AST
"""AST node corresponding to the function call. Useful for
showing errors."""
def ast_for_arg(self, arg: str) -> Optional[ast.AST]:
composite = self.composite_for_arg(arg)
if composite is not None:
return composite.node
return None
def varname_for_arg(self, arg: str) -> Optional[Varname]:
"""Return a :term:`varname` corresponding to the given function argument.
This is useful for creating a :class:`pyanalyze.stacked_scopes.Constraint`
referencing the argument.
"""
composite = self.composite_for_arg(arg)
if composite is not None:
return composite.varname
return None
def composite_for_arg(self, arg: str) -> Optional[Composite]:
composite = self.bound_args.arguments.get(arg)
if isinstance(composite, Composite):
return composite
return None
def show_error(
self,
message: str,
error_code: ErrorCode = ErrorCode.incompatible_call,
*,
arg: Optional[str] = None,
node: Optional[ast.AST] = None,
detail: Optional[str] = None,
) -> None:
"""Show an error.
If the `arg` parameter is given, we attempt to find the
AST for that argument to the function and point the error
to it.
"""
        if node is None and arg is not None:
node = self.ast_for_arg(arg)
if node is None:
node = self.node
self.visitor.show_error(node, message, error_code=error_code, detail=detail)
Impl = Callable[[CallContext], Union[Value, ImplReturn]]
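# Illustrative sketch (not part of the original module): a minimal function that
# satisfies the Impl alias above. The function name and the "value" parameter it
# inspects are hypothetical; a real impl would typically compute a more precise
# return value or attach a Constraint.
def _example_impl(ctx: CallContext) -> ImplReturn:
    # Impl functions can inspect the inferred arguments through ctx.vars and
    # report problems via ctx.show_error(); this sketch merely echoes the
    # argument's inferred value as the return value.
    value = ctx.vars.get("value", UNRESOLVED_VALUE)
    return ImplReturn(value)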
class SigParameter(inspect.Parameter):
"""Wrapper around :class:`inspect.Parameter` that stores annotations
as :class:`pyanalyze.value.Value` objects."""
__slots__ = ()
def __init__(
self,
name: str,
kind: inspect._ParameterKind = inspect.Parameter.POSITIONAL_OR_KEYWORD,
*,
default: Union[None, Value, Literal[EMPTY]] = None,
annotation: Union[None, Value, Literal[EMPTY]] = None,
) -> None:
if default is None:
default_composite = EMPTY
elif isinstance(default, Value):
default_composite = Composite(default, None, None)
else:
default_composite = default
if annotation is None:
annotation = EMPTY
super().__init__(name, kind, default=default_composite, annotation=annotation)
def substitute_typevars(self, typevars: TypeVarMap) -> "SigParameter":
if self._annotation is EMPTY:
annotation = self._annotation
else:
annotation = self._annotation.substitute_typevars(typevars)
return SigParameter(
name=self._name,
kind=self._kind,
default=self._default,
annotation=annotation,
)
def get_annotation(self) -> Value:
if self.annotation is EMPTY:
return UNRESOLVED_VALUE
return self.annotation
def __str__(self) -> str:
# Adapted from Parameter.__str__
kind = self.kind
formatted = self._name
if self._annotation is not EMPTY:
formatted = f"{formatted}: {self._annotation}"
if self._default is not EMPTY:
if self._annotation is not EMPTY:
formatted = f"{formatted} = {self._default.value}"
else:
formatted = f"{formatted}={self._default.value}"
if kind is SigParameter.VAR_POSITIONAL:
formatted = "*" + formatted
elif kind is SigParameter.VAR_KEYWORD:
formatted = "**" + formatted
return formatted
@dataclass
class Signature:
"""Represents the signature of a Python callable.
This is used to type check function calls and it powers the
:class:`pyanalyze.value.CallableValue` class.
"""
_return_key: ClassVar[str] = "%return"
signature: inspect.Signature
"""The underlying :class:`inspect.Signature`, storing the parameters
and the return annotation."""
impl: Optional[Impl] = field(default=None, compare=False)
""":term:`impl` function for this signature."""
callable: Optional[Callable[..., Any]] = field(default=None, compare=False)
"""The callable that this signature represents."""
is_asynq: bool = False
"""Whether this signature represents an asynq function."""
has_return_annotation: bool = True
is_ellipsis_args: bool = False
"""Whether this signature represents a ``Callable[..., T]`` callable. Such
a callable is compatible with any other callable with a compatible return type."""
allow_call: bool = False
"""Whether type checking can call the actual function to retrieve a precise return value."""
typevars_of_params: Dict[str, List["TypeVar"]] = field(
init=False, default_factory=dict, repr=False, compare=False
)
all_typevars: Set["TypeVar"] = field(
init=False, default_factory=set, repr=False, compare=False
)
def __post_init__(self) -> None:
for param_name, param in self.signature.parameters.items():
if param.annotation is EMPTY:
continue
typevars = list(extract_typevars(param.annotation))
if typevars:
self.typevars_of_params[param_name] = typevars
if self.signature.return_annotation is not EMPTY:
return_typevars = list(extract_typevars(self.signature.return_annotation))
if return_typevars:
self.typevars_of_params[self._return_key] = return_typevars
self.all_typevars = {
typevar
for tv_list in self.typevars_of_params.values()
for typevar in tv_list
}
def _check_param_type_compatibility(
self,
param: SigParameter,
var_value: Value,
visitor: "NameCheckVisitor",
node: ast.AST,
typevar_map: TypeVarMap,
) -> bool:
if param.annotation is not EMPTY and not (
isinstance(param.default, Composite) and var_value is param.default.value
):
if typevar_map:
param_typ = param.annotation.substitute_typevars(typevar_map)
else:
param_typ = param.annotation
tv_map = param_typ.can_assign(var_value, visitor)
if isinstance(tv_map, CanAssignError):
visitor.show_error(
node,
f"Incompatible argument type for {param.name}: expected {param_typ}"
f" but got {var_value}",
ErrorCode.incompatible_argument,
detail=str(tv_map),
)
return False
return True
def _translate_bound_arg(self, argument: Any) -> Value:
if argument is EMPTY:
return UNRESOLVED_VALUE
elif isinstance(argument, Composite):
return argument.value
elif isinstance(argument, tuple):
return SequenceIncompleteValue(
tuple, [composite.value for composite in argument]
)
elif isinstance(argument, dict):
return DictIncompleteValue(
[
(KnownValue(key), composite.value)
for key, composite in argument.items()
]
)
else:
raise TypeError(repr(argument))
def _apply_annotated_constraints(
self, raw_return: Union[Value, ImplReturn], bound_args: inspect.BoundArguments
) -> ImplReturn:
if isinstance(raw_return, Value):
ret = ImplReturn(raw_return)
else:
ret = raw_return
constraints = []
if ret.constraint is not NULL_CONSTRAINT:
constraints.append(ret.constraint)
if isinstance(ret.return_value, AnnotatedValue):
for guard in ret.return_value.get_metadata_of_type(
ParameterTypeGuardExtension
):
if guard.varname in bound_args.arguments:
composite = bound_args.arguments[guard.varname]
if (
isinstance(composite, Composite)
and composite.varname is not None
):
constraint = Constraint(
composite.varname,
ConstraintType.is_value_object,
True,
guard.guarded_type,
)
constraints.append(constraint)
for guard in ret.return_value.get_metadata_of_type(TypeGuardExtension):
# This might miss some cases where we should use the second argument instead. We'll
# have to come up with additional heuristics if that comes up.
if isinstance(self.callable, MethodType) or (
isinstance(self.callable, FunctionType)
and self.callable.__name__ != self.callable.__qualname__
):
index = 1
else:
index = 0
composite = bound_args.args[index]
if isinstance(composite, Composite) and composite.varname is not None:
constraint = Constraint(
composite.varname,
ConstraintType.is_value_object,
True,
guard.guarded_type,
)
constraints.append(constraint)
for guard in ret.return_value.get_metadata_of_type(HasAttrGuardExtension):
if guard.varname in bound_args.arguments:
composite = bound_args.arguments[guard.varname]
if (
isinstance(composite, Composite)
and composite.varname is not None
):
constraint = Constraint(
composite.varname,
ConstraintType.add_annotation,
True,
HasAttrExtension(
guard.attribute_name, guard.attribute_type
),
)
constraints.append(constraint)
if constraints:
constraint = reduce(AndConstraint, constraints)
else:
constraint = NULL_CONSTRAINT
return ImplReturn(ret.return_value, constraint, ret.no_return_unless)
def check_call(
self, args: Iterable[Argument], visitor: "NameCheckVisitor", node: ast.AST
) -> ImplReturn:
"""Type check a call to this Signature with the given arguments.
This may call the :term:`impl` function or the underlying callable,
but normally just uses :meth:`inspect.Signature.bind`.
"""
call_args = []
call_kwargs = {}
for composite, label in args:
if label is None:
call_args.append(composite)
elif isinstance(label, str):
call_kwargs[label] = composite
elif label is ARGS or label is KWARGS:
# TODO handle these:
# - type check that they are iterables/mappings
# - if it's a KnownValue or SequenceIncompleteValue, just add to call_args
# - else do something smart to still typecheck the call
return ImplReturn(UNRESOLVED_VALUE)
if self.is_ellipsis_args:
if self.allow_call:
runtime_return = self._maybe_perform_call(
call_args, call_kwargs, visitor, node
)
if runtime_return is not None:
return ImplReturn(runtime_return)
return_value = self.signature.return_annotation
if return_value is EMPTY:
return ImplReturn(UNRESOLVED_VALUE)
return ImplReturn(return_value)
try:
bound_args = self.signature.bind(*call_args, **call_kwargs)
except TypeError as e:
if self.callable is not None:
message = f"In call to {stringify_object(self.callable)}: {e}"
else:
message = str(e)
visitor.show_error(node, message, ErrorCode.incompatible_call)
return ImplReturn(UNRESOLVED_VALUE)
bound_args.apply_defaults()
variables = {
name: self._translate_bound_arg(value)
for name, value in bound_args.arguments.items()
}
return_value = self.signature.return_annotation
typevar_values: Dict[TypeVar, Value] = {}
if self.all_typevars:
tv_possible_values: Dict[TypeVar, List[Value]] = defaultdict(list)
for param_name in self.typevars_of_params:
if param_name == self._return_key:
continue
var_value = variables[param_name]
param = self.signature.parameters[param_name]
if param.annotation is EMPTY:
continue
tv_map = param.annotation.can_assign(var_value, visitor)
if not isinstance(tv_map, CanAssignError):
                    # Ignore errors here; incompatibilities are reported by the
                    # per-parameter compatibility check below.
for typevar, value in tv_map.items():
tv_possible_values[typevar].append(value)
typevar_values = {
typevar: unite_values(
*tv_possible_values.get(typevar, [UNRESOLVED_VALUE])
)
for typevar in self.all_typevars
}
if self._return_key in self.typevars_of_params:
return_value = return_value.substitute_typevars(typevar_values)
had_error = False
for name, var_value in variables.items():
param = self.signature.parameters[name]
if not self._check_param_type_compatibility(
param, var_value, visitor, node, typevar_values
):
had_error = True
# don't call the implementation function if we had an error, so that
# the implementation function doesn't have to worry about basic
# type checking
if not had_error and self.impl is not None:
ctx = CallContext(
vars=variables, visitor=visitor, bound_args=bound_args, node=node
)
return_value = self.impl(ctx)
if self.allow_call:
runtime_return = self._maybe_perform_call(
call_args, call_kwargs, visitor, node
)
if runtime_return is not None:
if isinstance(return_value, ImplReturn):
return_value = ImplReturn(
runtime_return,
return_value.constraint,
return_value.no_return_unless,
)
else:
return_value = runtime_return
if return_value is EMPTY:
return ImplReturn(UNRESOLVED_VALUE)
else:
return self._apply_annotated_constraints(return_value, bound_args)
def _maybe_perform_call(
self,
call_args: List[Composite],
call_kwargs: Dict[str, Composite],
visitor: "NameCheckVisitor",
node: ast.AST,
) -> Optional[Value]:
if self.callable is None:
return None
args = []
for composite in call_args:
if isinstance(composite.value, KnownValue):
args.append(composite.value.val)
else:
return None
kwargs = {}
for key, composite in call_kwargs.items():
if isinstance(composite.value, KnownValue):
kwargs[key] = composite.value.val
else:
return None
try:
value = self.callable(*args, **kwargs)
except Exception as e:
message = f"Error calling {self}: {safe_str(e)}"
visitor._show_error_if_checking(node, message, ErrorCode.incompatible_call)
return None
else:
return KnownValue(value)
def can_assign(self, other: "Signature", ctx: CanAssignContext) -> CanAssign:
"""Equivalent of :meth:`pyanalyze.value.Value.can_assign`. Checks
whether another ``Signature`` is compatible with this ``Signature``.
"""
if self.is_asynq and not other.is_asynq:
return CanAssignError("callable is not asynq")
their_return = other.signature.return_annotation
my_return = self.signature.return_annotation
return_tv_map = my_return.can_assign(their_return, ctx)
if isinstance(return_tv_map, CanAssignError):
return CanAssignError(
"return annotation is not compatible", [return_tv_map]
)
if self.is_ellipsis_args or other.is_ellipsis_args:
return {}
tv_maps = [return_tv_map]
their_params = list(other.signature.parameters.values())
their_args = other.get_param_of_kind(SigParameter.VAR_POSITIONAL)
if their_args is not None:
their_args_index = their_params.index(their_args)
args_annotation = their_args.get_annotation()
else:
their_args_index = -1
args_annotation = None
their_kwargs = other.get_param_of_kind(SigParameter.VAR_KEYWORD)
if their_kwargs is not None:
kwargs_annotation = their_kwargs.get_annotation()
else:
kwargs_annotation = None
consumed_positional = set()
consumed_keyword = set()
for i, my_param in enumerate(self.signature.parameters.values()):
my_annotation = my_param.get_annotation()
if my_param.kind is SigParameter.POSITIONAL_ONLY:
if i < len(their_params) and their_params[i].kind in (
SigParameter.POSITIONAL_ONLY,
SigParameter.POSITIONAL_OR_KEYWORD,
):
if (
my_param.default is not EMPTY
and their_params[i].default is EMPTY
):
return CanAssignError(
f"positional-only param {my_param.name!r} has no default"
)
their_annotation = their_params[i].get_annotation()
tv_map = their_annotation.can_assign(my_annotation, ctx)
if isinstance(tv_map, CanAssignError):
return CanAssignError(
f"type of positional-only parameter {my_param.name!r} is"
" incompatible",
[tv_map],
)
tv_maps.append(tv_map)
consumed_positional.add(their_params[i].name)
elif args_annotation is not None:
new_tv_maps = can_assign_var_positional(
my_param, args_annotation, i - their_args_index, ctx
)
if isinstance(new_tv_maps, CanAssignError):
return new_tv_maps
tv_maps += new_tv_maps
else:
return CanAssignError(
f"positional-only parameter {i} is not accepted"
)
elif my_param.kind is SigParameter.POSITIONAL_OR_KEYWORD:
if (
i < len(their_params)
and their_params[i].kind is SigParameter.POSITIONAL_OR_KEYWORD
):
if my_param.name != their_params[i].name:
return CanAssignError(
f"param name {their_params[i].name!r} does not match"
f" {my_param.name!r}"
)
if (
my_param.default is not EMPTY
and their_params[i].default is EMPTY
):
return CanAssignError(f"param {my_param.name!r} has no default")
their_annotation = their_params[i].get_annotation()
tv_map = their_annotation.can_assign(my_annotation, ctx)
if isinstance(tv_map, CanAssignError):
return CanAssignError(
f"type of parameter {my_param.name!r} is incompatible",
[tv_map],
)
tv_maps.append(tv_map)
consumed_positional.add(their_params[i].name)
consumed_keyword.add(their_params[i].name)
elif (
i < len(their_params)
and their_params[i].kind is SigParameter.POSITIONAL_ONLY
):
return CanAssignError(
f"parameter {my_param.name!r} is not accepted as a keyword"
" argument"
)
elif args_annotation is not None and kwargs_annotation is not None:
new_tv_maps = can_assign_var_positional(
my_param, args_annotation, i - their_args_index, ctx
)
if isinstance(new_tv_maps, CanAssignError):
return new_tv_maps
tv_maps += new_tv_maps
new_tv_maps = can_assign_var_keyword(
my_param, kwargs_annotation, ctx
)
if isinstance(new_tv_maps, CanAssignError):
return new_tv_maps
tv_maps += new_tv_maps
else:
return CanAssignError(
f"parameter {my_param.name!r} is not accepted"
)
elif my_param.kind is SigParameter.KEYWORD_ONLY:
their_param = other.signature.parameters.get(my_param.name)
if their_param is not None and their_param.kind in (
SigParameter.POSITIONAL_OR_KEYWORD,
SigParameter.KEYWORD_ONLY,
):
if my_param.default is not EMPTY and their_param.default is EMPTY:
return CanAssignError(
f"keyword-only param {my_param.name!r} has no default"
)
their_annotation = their_param.get_annotation()
tv_map = their_annotation.can_assign(my_annotation, ctx)
if isinstance(tv_map, CanAssignError):
return CanAssignError(
f"type of parameter {my_param.name!r} is incompatible",
[tv_map],
)
tv_maps.append(tv_map)
consumed_keyword.add(their_param.name)
elif kwargs_annotation is not None:
new_tv_maps = can_assign_var_keyword(
my_param, kwargs_annotation, ctx
)
if isinstance(new_tv_maps, CanAssignError):
return new_tv_maps
tv_maps += new_tv_maps
else:
return CanAssignError(
f"parameter {my_param.name!r} is not accepted"
)
elif my_param.kind is SigParameter.VAR_POSITIONAL:
if args_annotation is None:
return CanAssignError("*args are not accepted")
tv_map = args_annotation.can_assign(my_annotation, ctx)
if isinstance(tv_map, CanAssignError):
return CanAssignError("type of *args is incompatible", [tv_map])
tv_maps.append(tv_map)
extra_positional = [
param
for param in their_params
if param.name not in consumed_positional
and param.kind
in (
SigParameter.POSITIONAL_ONLY,
SigParameter.POSITIONAL_OR_KEYWORD,
)
]
for extra_param in extra_positional:
tv_map = extra_param.get_annotation().can_assign(my_annotation, ctx)
if isinstance(tv_map, CanAssignError):
return CanAssignError(
f"type of param {extra_param.name!r} is incompatible with "
"*args type",
[tv_map],
)
tv_maps.append(tv_map)
elif my_param.kind is SigParameter.VAR_KEYWORD:
if kwargs_annotation is None:
return CanAssignError("**kwargs are not accepted")
tv_map = kwargs_annotation.can_assign(my_annotation, ctx)
if isinstance(tv_map, CanAssignError):
return CanAssignError("type of **kwargs is incompatible", [tv_map])
tv_maps.append(tv_map)
extra_keyword = [
param
for param in their_params
if param.name not in consumed_keyword
and param.kind
in (SigParameter.KEYWORD_ONLY, SigParameter.POSITIONAL_OR_KEYWORD)
]
for extra_param in extra_keyword:
tv_map = extra_param.get_annotation().can_assign(my_annotation, ctx)
if isinstance(tv_map, CanAssignError):
return CanAssignError(
f"type of param {extra_param.name!r} is incompatible with "
"**kwargs type",
[tv_map],
)
tv_maps.append(tv_map)
return unify_typevar_maps(tv_maps)
def get_param_of_kind(self, kind: inspect._ParameterKind) -> Optional[SigParameter]:
for param in self.signature.parameters.values():
if param.kind is kind:
return param
return None
def substitute_typevars(self, typevars: TypeVarMap) -> "Signature":
return Signature(
signature=inspect.Signature(
[
param.substitute_typevars(typevars)
for param in self.signature.parameters.values()
],
return_annotation=self.signature.return_annotation.substitute_typevars(
typevars
),
),
impl=self.impl,
callable=self.callable,
is_asynq=self.is_asynq,
has_return_annotation=self.has_return_annotation,
is_ellipsis_args=self.is_ellipsis_args,
allow_call=self.allow_call,
)
def walk_values(self) -> Iterable[Value]:
yield from self.signature.return_annotation.walk_values()
for param in self.signature.parameters.values():
if param.annotation is not EMPTY:
yield from param.annotation.walk_values()
def get_asynq_value(self) -> "Signature":
"""Return the :class:`Signature` for the `.asynq` attribute of an
:class:`pyanalyze.extensions.AsynqCallable`."""
if not self.is_asynq:
raise TypeError("get_asynq_value() is only supported for AsynqCallable")
return_annotation = AsyncTaskIncompleteValue(
asynq.AsyncTask, self.signature.return_annotation
)
return Signature.make(
self.signature.parameters.values(),
return_annotation,
impl=self.impl,
callable=self.callable,
has_return_annotation=self.has_return_annotation,
is_ellipsis_args=self.is_ellipsis_args,
is_asynq=False,
allow_call=self.allow_call,
)
@classmethod
def make(
cls,
parameters: Iterable[SigParameter],
return_annotation: Optional[Value] = None,
*,
impl: Optional[Impl] = None,
callable: Optional[object] = None,
has_return_annotation: bool = True,
is_ellipsis_args: bool = False,
is_asynq: bool = False,
allow_call: bool = False,
) -> "Signature":
"""Create a :class:`Signature` object.
This is more convenient to use than the constructor
because it abstracts away the creation of the underlying
:class:`inspect.Signature`.
"""
if return_annotation is None:
return_annotation = UNRESOLVED_VALUE
has_return_annotation = False
return cls(
signature=inspect.Signature(
parameters, return_annotation=return_annotation
),
impl=impl,
callable=callable,
has_return_annotation=has_return_annotation,
is_ellipsis_args=is_ellipsis_args,
is_asynq=is_asynq,
allow_call=allow_call,
)
def __str__(self) -> str:
param_str = ", ".join(self._render_parameters())
asynq_str = "@asynq " if self.is_asynq else ""
rendered = f"{asynq_str}({param_str})"
if self.signature.return_annotation is not EMPTY:
rendered += f" -> {self.signature.return_annotation}"
return rendered
def _render_parameters(self) -> Iterable[str]:
# Adapted from Signature's own __str__
if self.is_ellipsis_args:
yield "..."
return
render_pos_only_separator = False
render_kw_only_separator = True
for param in self.signature.parameters.values():
formatted = str(param)
kind = param.kind
if kind == SigParameter.POSITIONAL_ONLY:
render_pos_only_separator = True
elif render_pos_only_separator:
yield "/"
render_pos_only_separator = False
if kind == SigParameter.VAR_POSITIONAL:
render_kw_only_separator = False
elif kind == SigParameter.KEYWORD_ONLY and render_kw_only_separator:
yield "*"
render_kw_only_separator = False
yield formatted
if render_pos_only_separator:
yield "/"
# TODO: do we need these?
def has_return_value(self) -> bool:
return self.has_return_annotation
@property
def return_value(self) -> Value:
return self.signature.return_annotation
ANY_SIGNATURE = Signature.make(
[], UNRESOLVED_VALUE, is_ellipsis_args=True, is_asynq=True
)
""":class:`Signature` that should be compatible with any other
:class:`Signature`."""
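# Illustrative sketch (not part of the original module): building a Signature for a
# one-argument callable with Signature.make(). UNRESOLVED_VALUE is used for the
# annotations purely to keep the example self-contained; a real caller would pass
# proper pyanalyze Value annotations.
_EXAMPLE_SIGNATURE = Signature.make(
    [SigParameter("x", annotation=UNRESOLVED_VALUE)],
    UNRESOLVED_VALUE,
)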
@dataclass
class BoundMethodSignature:
"""Signature for a method bound to a particular value."""
signature: Signature
self_composite: Composite
return_override: Optional[Value] = None
def check_call(
self, args: Iterable[Argument], visitor: "NameCheckVisitor", node: ast.AST
) -> ImplReturn:
ret = self.signature.check_call(
[(self.self_composite, None), *args], visitor, node
)
if self.return_override is not None and not self.signature.has_return_value():
return ImplReturn(
self.return_override, ret.constraint, ret.no_return_unless
)
return ret
def get_signature(self, *, preserve_impl: bool = False) -> Optional[Signature]:
if self.signature.is_ellipsis_args:
return ANY_SIGNATURE
params = list(self.signature.signature.parameters.values())
if not params or params[0].kind not in (
SigParameter.POSITIONAL_ONLY,
SigParameter.POSITIONAL_OR_KEYWORD,
):
return None
return Signature(
signature=inspect.Signature(
params[1:], return_annotation=self.return_value
),
# We don't carry over the implementation function by default, because it
# may not work when passed different arguments.
impl=self.signature.impl if preserve_impl else None,
callable=self.signature.callable,
is_asynq=self.signature.is_asynq,
has_return_annotation=self.has_return_value(),
is_ellipsis_args=self.signature.is_ellipsis_args,
allow_call=self.signature.allow_call,
)
def has_return_value(self) -> bool:
if self.return_override is not None:
return True
return self.signature.has_return_value()
@property
def return_value(self) -> Value:
if self.signature.has_return_value():
return self.signature.return_value
if self.return_override is not None:
return self.return_override
return UNRESOLVED_VALUE
def substitute_typevars(self, typevars: TypeVarMap) -> "BoundMethodSignature":
return BoundMethodSignature(
self.signature.substitute_typevars(typevars),
self.self_composite.substitute_typevars(typevars),
self.return_override.substitute_typevars(typevars)
if self.return_override is not None
else None,
)
@dataclass
class PropertyArgSpec:
"""Pseudo-argspec for properties."""
obj: object
return_value: Value = UNRESOLVED_VALUE
def check_call(
self, args: Iterable[Argument], visitor: "NameCheckVisitor", node: ast.AST
) -> ImplReturn:
raise TypeError("property object is not callable")
def has_return_value(self) -> bool:
return self.return_value is not UNRESOLVED_VALUE
def substitute_typevars(self, typevars: TypeVarMap) -> "PropertyArgSpec":
return PropertyArgSpec(
self.obj, self.return_value.substitute_typevars(typevars)
)
MaybeSignature = Union[None, Signature, BoundMethodSignature, PropertyArgSpec]
def make_bound_method(
argspec: MaybeSignature,
self_composite: Composite,
return_override: Optional[Value] = None,
) -> Optional[BoundMethodSignature]:
if argspec is None:
return None
if isinstance(argspec, Signature):
return BoundMethodSignature(argspec, self_composite, return_override)
elif isinstance(argspec, BoundMethodSignature):
if return_override is None:
return_override = argspec.return_override
return BoundMethodSignature(argspec.signature, self_composite, return_override)
else:
assert False, f"invalid argspec {argspec}"
T = TypeVar("T")
IterableValue = GenericValue(collections.abc.Iterable, [TypeVarValue(T)])
K = TypeVar("K")
V = TypeVar("V")
MappingValue = GenericValue(collections.abc.Mapping, [TypeVarValue(K), TypeVarValue(V)])
def can_assign_var_positional(
my_param: SigParameter, args_annotation: Value, idx: int, ctx: CanAssignContext
) -> Union[List[TypeVarMap], CanAssignError]:
tv_maps = []
my_annotation = my_param.get_annotation()
if isinstance(args_annotation, SequenceIncompleteValue):
length = len(args_annotation.members)
if idx >= length:
return CanAssignError(
f"parameter {my_param.name!r} is not accepted; {args_annotation} only"
f" accepts {length} values"
)
their_annotation = args_annotation.members[idx]
tv_map = their_annotation.can_assign(my_annotation, ctx)
if isinstance(tv_map, CanAssignError):
return CanAssignError(
f"type of parameter {my_param.name!r} is incompatible: *args[{idx}]"
" type is incompatible",
[tv_map],
)
tv_maps.append(tv_map)
else:
tv_map = IterableValue.can_assign(args_annotation, ctx)
if isinstance(tv_map, CanAssignError):
return CanAssignError(
f"{args_annotation} is not an iterable type", [tv_map]
)
iterable_arg = tv_map.get(T, UNRESOLVED_VALUE)
tv_map = iterable_arg.can_assign(my_annotation, ctx)
if isinstance(tv_map, CanAssignError):
return CanAssignError(
f"type of parameter {my_param.name!r} is incompatible: "
"*args type is incompatible",
[tv_map],
)
tv_maps.append(tv_map)
return tv_maps
def can_assign_var_keyword(
my_param: SigParameter, kwargs_annotation: Value, ctx: CanAssignContext
) -> Union[List[TypeVarMap], CanAssignError]:
my_annotation = my_param.get_annotation()
tv_maps = []
if isinstance(kwargs_annotation, TypedDictValue):
if my_param.name not in kwargs_annotation.items:
return CanAssignError(
f"parameter {my_param.name!r} is not accepted by {kwargs_annotation}"
)
their_annotation = kwargs_annotation.items[my_param.name]
tv_map = their_annotation.can_assign(my_annotation, ctx)
if isinstance(tv_map, CanAssignError):
return CanAssignError(
f"type of parameter {my_param.name!r} is incompatible:"
f" *kwargs[{my_param.name!r}] type is incompatible",
[tv_map],
)
tv_maps.append(tv_map)
else:
mapping_tv_map = MappingValue.can_assign(kwargs_annotation, ctx)
if isinstance(mapping_tv_map, CanAssignError):
return CanAssignError(
f"{kwargs_annotation} is not a mapping type", [mapping_tv_map]
)
key_arg = mapping_tv_map.get(K, UNRESOLVED_VALUE)
tv_map = key_arg.can_assign(KnownValue(my_param.name), ctx)
if isinstance(tv_map, CanAssignError):
return CanAssignError(
f"parameter {my_param.name!r} is not accepted by **kwargs type",
[tv_map],
)
tv_maps.append(tv_map)
value_arg = mapping_tv_map.get(V, UNRESOLVED_VALUE)
tv_map = value_arg.can_assign(my_annotation, ctx)
if isinstance(tv_map, CanAssignError):
return CanAssignError(
f"type of parameter {my_param.name!r} is incompatible: **kwargs type"
" is incompatible",
[tv_map],
)
tv_maps.append(tv_map)
return tv_maps
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015-2016 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FIWARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with [email protected]
"""PhoneHome server listening to requests from deployed instances to test E2E network connectivity.
Usage:
{prog}
Environment:
SANITY_CHECKS_SETTINGS (Optional) Path to settings file
TEST_PHONEHOME_LOGGING (Optional) Path to logging configuration file
TEST_PHONEHOME_ENDPOINT (Optional) PhoneHome service endpoint
Files:
etc/settings.json Default settings file
etc/logging_phonehome.conf Default logging configuration file
"""
from commons.constants import PROPERTIES_CONFIG_TEST, PROPERTIES_CONFIG_TEST_PHONEHOME_ENDPOINT, \
PHONEHOME_DBUS_OBJECT_PATH, PHONEHOME_DBUS_OBJECT_METADATA_PATH, PHONEHOME_TX_ID_HEADER, \
DEFAULT_PHONEHOME_LOGGING_CONF, DEFAULT_SETTINGS_FILE
from os import environ
from dbus_phonehome_service import DbusPhoneHomeServer
from cherrypy import _cperror
import cherrypy
import httplib
import logging
import json
import sys
import urlparse
import logging.config
import os.path
import uuid
# Global DBus server instance
dbus_server = None
# Global logger
logger = None
class PhoneHome:
exposed = True
@cherrypy.tools.accept(media='text/plain')
def POST(self):
"""Manages a POST request. Phonehome service.
Emits a new DBus signal to the PhoneHome object published.
The request always will return 200OK if some content is received. This content will be emitted in the signal.
:return: None
"""
global dbus_server
content_length = int(cherrypy.request.headers['Content-Length'])
content = cherrypy.request.body.read(content_length)
logger.info("%s: %s - POST: ", PHONEHOME_TX_ID_HEADER, cherrypy.request.transaction_id)
path = cherrypy.request.path_info
# Get data from body
if content:
if path == PHONEHOME_DBUS_OBJECT_METADATA_PATH:
if "Hostname" in cherrypy.request.headers:
hostname = cherrypy.request.headers['Hostname']
dbus_server.logdebug("{0}: {1} - Sending signal to hostname: {2}".format(
PHONEHOME_TX_ID_HEADER, cherrypy.request.transaction_id, hostname))
dbus_server.emit_phonehome_signal(str(content), PHONEHOME_DBUS_OBJECT_METADATA_PATH,
hostname, cherrypy.request.transaction_id)
cherrypy.response.status = httplib.OK
return
else:
cherrypy.response.status = httplib.BAD_REQUEST
return "{0}: {1} - Hostname header is not present in HTTP PhoneHome request".format(
PHONEHOME_TX_ID_HEADER, cherrypy.request.transaction_id)
elif path == PHONEHOME_DBUS_OBJECT_PATH:
dbus_server.logdebug("{0}: {1} - Sending signal".format(PHONEHOME_TX_ID_HEADER,
cherrypy.request.transaction_id))
dbus_server.emit_phonehome_signal(str(content), PHONEHOME_DBUS_OBJECT_PATH, None,
cherrypy.request.transaction_id)
cherrypy.response.status = httplib.OK
return
else:
cherrypy.response.status = httplib.NOT_FOUND
return "{0}: {1} - Path not found for HTTP PhoneHome request".format(
PHONEHOME_TX_ID_HEADER, cherrypy.request.transaction_id)
else:
# Bad Request
cherrypy.response.status = httplib.BAD_REQUEST
return "{0}: {1} - Invalid data received in HTTP PhoneHome request".\
format(PHONEHOME_TX_ID_HEADER, cherrypy.request.transaction_id)
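# Illustrative sketch (not part of the original module): how a deployed instance
# might exercise this endpoint with the Python 2 standard library. The host, port
# and path below are hypothetical; the real path is whatever
# PHONEHOME_DBUS_OBJECT_PATH maps to in the CherryPy dispatcher.
#
#   import httplib
#   body = "instance phonehome payload"
#   conn = httplib.HTTPConnection("phonehome.example.com", 8081)
#   conn.request("POST", "/phonehome", body,
#                {"Content-Type": "text/plain", "Content-Length": str(len(body))})
#   print conn.getresponse().status  # 200 is expected when content is accepted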
def handle_error():
cherrypy.response.status = httplib.INTERNAL_SERVER_ERROR
cherrypy.response.body = "Internal Server Error"
print(_cperror.format_exc())
class Root(object):
_cp_config = {'request.error_response': handle_error}
pass
def before_request_body():
"""
Add a Tool to our new Toolbox.
"""
logger.info("before_request_body: %s ", cherrypy.request.params)
if PHONEHOME_TX_ID_HEADER in cherrypy.request.headers:
transaction_id = cherrypy.request.headers[PHONEHOME_TX_ID_HEADER]
elif PHONEHOME_TX_ID_HEADER in cherrypy.request.params:
transaction_id = cherrypy.request.params[PHONEHOME_TX_ID_HEADER]
cherrypy.request.params = {}
else:
transaction_id = str(uuid.uuid1())
cherrypy.request.transaction_id = transaction_id
logger.info("%s: %s - before_request_body, path: %s", PHONEHOME_TX_ID_HEADER, cherrypy.request.transaction_id,
cherrypy.request.path_info)
request = cherrypy.serving.request
def processor(entity):
"""Important! Do nothing with body"""
if not entity.headers.get("Content-Length", ""):
raise cherrypy.HTTPError(411)
try:
content_length = int(cherrypy.request.headers['Content-Length'])
logger.info("%s: %s - body - content_length: %s ", PHONEHOME_TX_ID_HEADER, cherrypy.request.transaction_id,
content_length)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid Content-Length')
request.body.processors['application/x-www-form-urlencoded'] = processor
def on_end_request():
"""
After each request
"""
logger.info("%s: %s - on_end_request", PHONEHOME_TX_ID_HEADER, cherrypy.request.transaction_id)
print 'end'
class HttpPhoneHomeServer:
"""
    This server waits for POST requests. When a request is received at the '/' resource (root), it is
    processed: the POST body is forwarded through the DBus PhoneHome service and 200 OK is always returned.
"""
def __init__(self, port, timeout=None):
"""Creates a PhoneHome server
:param port: Listen port
        :param timeout: Timeout to wait for a request. Only used when a 'single request server' is configured.
:return: None
"""
logger.debug("Creating PhoneHome Server. Port %d; Timeout: %s", port, str(timeout))
self.timeout = timeout
self.port = port
def start_forever(self):
"""Starts the server. Forever...
:return: None
"""
logger.debug("Waiting for calls...")
conf = {
'global': {
'server.socket_host': '0.0.0.0',
'server.socket_port': self.port,
'tools.newprocessor_open.on': True,
'tools.newprocessor_close.on': True,
},
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'response.timeout': self.timeout,
'tools.sessions.on': True,
'tools.response_headers.on': True,
'tools.response_headers.headers': [('Content-Type', 'text/plain')],
}
}
root = Root()
root.phonehome = PhoneHome()
root.metadata = PhoneHome()
cherrypy.tools.newprocessor_open = cherrypy.Tool('before_request_body', before_request_body, priority=100)
cherrypy.tools.newprocessor_close = cherrypy.Tool('on_end_request', on_end_request)
cherrypy.log.error_log.propagate = False
cherrypy.log.access_log.propagate = False
cherrypy.log.screen = None
cherrypy.quickstart(root, '/', conf)
if __name__ == '__main__':
global logger
# Configuration files
parentdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
settings_file = os.environ.get('SANITY_CHECKS_SETTINGS', os.path.join(parentdir, DEFAULT_SETTINGS_FILE))
logging_conf = os.environ.get('TEST_PHONEHOME_LOGGING', os.path.join(parentdir, DEFAULT_PHONEHOME_LOGGING_CONF))
# Configure logger
logging.config.fileConfig(logging_conf)
logger = logging.getLogger("HttpPhoneHomeServer")
# Load properties
logger.info("Loading test settings...")
conf = dict()
with open(settings_file) as settings:
try:
conf = json.load(settings)
except Exception as e:
print "Error parsing config file '{}': {}".format(settings_file, e)
sys.exit(-1)
    # Check and load PhoneHome configuration (settings or environment variables)
default_phonehome_endpoint = conf[PROPERTIES_CONFIG_TEST][PROPERTIES_CONFIG_TEST_PHONEHOME_ENDPOINT]
phonehome_endpoint = environ.get('TEST_PHONEHOME_ENDPOINT', default_phonehome_endpoint)
env_conf = {
PROPERTIES_CONFIG_TEST_PHONEHOME_ENDPOINT: phonehome_endpoint
}
conf[PROPERTIES_CONFIG_TEST].update(env_conf)
if not phonehome_endpoint:
logger.error("No value found for '%s.%s' setting. PhoneHome server will NOT be launched",
PROPERTIES_CONFIG_TEST, PROPERTIES_CONFIG_TEST_PHONEHOME_ENDPOINT)
sys.exit(1)
phonehome_port = urlparse.urlsplit(phonehome_endpoint).port
logger.info("PhoneHome port to be used by server: %d", phonehome_port)
# Create global DBus server
logger.info("Creating DBus PhoneHome service with object: %s", PHONEHOME_DBUS_OBJECT_PATH)
logger.info("Creating DBus PhoneHome service with object: %s", PHONEHOME_DBUS_OBJECT_METADATA_PATH)
dbus_server = DbusPhoneHomeServer(logger)
dbus_server.register_phonehome_object(PHONEHOME_DBUS_OBJECT_PATH)
dbus_server.register_phonehome_object(PHONEHOME_DBUS_OBJECT_METADATA_PATH)
# Create and start server
logger.info("Creating and starting PhoneHome Server")
server = HttpPhoneHomeServer(phonehome_port)
server.start_forever()
|
python
|
import math, statistics, random, time, sys
import numpy as np
import pandas as pd
import ray
import holoviews as hv
from holoviews import opts
from holoviews.streams import Counter, Tap
from bokeh_util import square_circle_plot, two_lines_plot, means_stddevs_plot
hv.extension('bokeh')
from bokeh.layouts import gridplot, layout
from bokeh.models import Slider, Button
from bokeh.plotting import figure, output_file, show
from pi_calc import MonteCarloPi, compute_pi_for
DEFAULT_NS = [1000, 10000, 100000]
DEFAULT_RADIUS = 1.0
DEFAULT_BOUNDS = (-DEFAULT_RADIUS, -DEFAULT_RADIUS, DEFAULT_RADIUS, DEFAULT_RADIUS)
DEFAULT_MIN_N = 100
DEFAULT_MAX_N = 100000
DEFAULT_N_PER_PI_CALC = DEFAULT_MIN_N
DEFAULT_PLOT_SIZE = 1200
DEFAULT_IMAGE_SIZE = round(DEFAULT_PLOT_SIZE/2)
DEFAULT_CMAP = 'Spectral'
DEFAULT_IMAGE_COLOR_IDX = 2
DEFAULT_POINT_COLOR_IDX = 125
DEFAULT_PI_UPDATE_FORMAT = 'Pi ~= {:8.7f}\nerror = {:6.3f}%\nn = {:d}\n(N ~ {:d})'
img_opts = opts.Image(cmap=DEFAULT_CMAP, toolbar=None,
height=DEFAULT_PLOT_SIZE, width=DEFAULT_PLOT_SIZE,
xaxis=None, yaxis=None)
def make_circle(radius=DEFAULT_RADIUS):
def circle(t):
return (radius*np.sin(t), radius*np.cos(t), t)
lin = np.linspace(-np.pi, np.pi, 200)
return hv.Path([circle(lin)]).opts(img_opts).opts(line_width=2, color='red')
def make_rect(bounds=DEFAULT_BOUNDS, color='blue'):
minX, minY, maxX, maxY = bounds
return hv.Path([(minX, minY), (maxX, minY), (maxX, maxY), (minX, maxY), (minX, minY)]).opts(
        img_opts).opts(line_width=2, color=color)
def make_text(content):
return hv.Text(0, 0, content).opts(img_opts).opts(
toolbar=None, height=100, width=150, xaxis=None, yaxis=None,
text_alpha=1.0, bgcolor='lightgrey')
def make_image(data=None, image_size=DEFAULT_IMAGE_SIZE, bounds=DEFAULT_BOUNDS, color_idx=DEFAULT_IMAGE_COLOR_IDX, label='Pi:'):
    if data is None:
data = np.full((image_size, image_size), color_idx, dtype=np.uint8)
return hv.Image(data, label=label, bounds=bounds).opts(img_opts)
def to_pixel(array, image_size=DEFAULT_IMAGE_SIZE):
"""
NumPy array input for real coordinates. Returns image pixel index.
To keep indices between 0, inclusize, and image_size, exclusive, we set the upper bound to image_size - 1
"""
    array2 = (array+1.0)/2.0                           # Shift from the [-1, 1] range to [0, 1]
    return np.rint((image_size-1)*array2).astype(int)  # Scale to pixel indices
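# Quick sanity check (illustrative, not part of the original script): with
# image_size=600, to_pixel(np.array([-1.0, 0.0, 1.0]), 600) yields [0, 300, 599].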
def make_overlay(items, width=DEFAULT_PLOT_SIZE, height=DEFAULT_PLOT_SIZE):
return hv.Overlay(items=items).opts(width=width, height=height)
def make_update(k, N, counter_instance,
n_per_pi_calc=DEFAULT_N_PER_PI_CALC, pi_update_format=DEFAULT_PI_UPDATE_FORMAT):
"""Returns a closure used as the update function for a dmap."""
pi_calc = MonteCarloPi()
image = make_image()
rect = make_rect()
circle = make_circle()
text = make_text('Pi calculation')
def update(counter):
"""
        Due to an apparent bug in HoloViews' ``periodic`` class for
DynamicMaps, the update gets called far more than the specified
``count`` value in ``run_simulations`` below. Unfortunately, we
can't just "ignore" extra invocations (if we've already computed
N values), because we have to return an overlay and there
appears to be no reliable way to save the last one(?). That's
why we call ``counter_instance.clear()``, which removes the
dmap as a subscriber.
"""
def updated_image(value, xys, img):
xs, ys = xys[:,0], xys[:,1]
pxs, pys = to_pixel(xs), to_pixel(ys)
for i in range(pxs.size):
img.data[pxs[i]][pys[i]] = value
return img
pi, count_inside, count_total, xys_in, xys_out = pi_calc.sample(n_per_pi_calc)
error = 100.0*abs(pi - math.pi)/math.pi
label = pi_update_format.format(pi, error, count_total, N)
img1 = updated_image(1, xys_in, image)
img2 = updated_image(0, xys_out, img1)
img3 = hv.Image(img2, label=label)
text = make_text(label)
overlay = make_overlay(items=[img3, rect, circle, text])
if count_total >= N:
counter_instance.clear() # basically stop further updates.
return overlay
return update
def make_dmaps(Ns = DEFAULT_NS):
dmaps = []
for k in range(len(Ns)):
N = Ns[k]
counter = Counter(transient=True)
psize = int(DEFAULT_PLOT_SIZE/len(Ns))
dmap_update = make_update(k, N, counter)
dmap = hv.DynamicMap(dmap_update, streams=[counter]).opts(height=psize, width=psize)
# We fetch DEFAULT_N_PER_PI_CALC points each pass through "update", so only count up to N/...
dmaps.append(dmap)
return dmaps
def run_simulations(dmaps, Ns = DEFAULT_NS, n_per_pi_calc=DEFAULT_N_PER_PI_CALC):
for i in range(len(dmaps)):
dmaps[i].periodic(0.01, count=int(Ns[i]/n_per_pi_calc)-1, block=False)
def stop_simulations(dmaps):
[dmap.periodic.stop() for dmap in dmaps]
if __name__ == '__main__':
dmaps = make_dmaps(DEFAULT_NS)
show(dmaps[0] + dmaps[1] + dmaps[2])
run_simulations(dmaps)
|
python
|
# coding: utf-8
"""
OrderCloud
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 1.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class PaymentApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
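    # Illustrative usage sketch (not part of the generated client); the order and
    # payment identifiers below are hypothetical:
    #
    #   api = PaymentApi()
    #   payment = api.create("incoming", "order001", my_payment)
    #   api.delete("incoming", "order001", "payment001")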
def create(self, direction, order_id, payment, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create(direction, order_id, payment, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
:param str order_id: ID of the order. (required)
:param Payment payment: (required)
:return: Payment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_with_http_info(direction, order_id, payment, **kwargs)
else:
(data) = self.create_with_http_info(direction, order_id, payment, **kwargs)
return data
def create_with_http_info(self, direction, order_id, payment, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_with_http_info(direction, order_id, payment, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
:param str order_id: ID of the order. (required)
:param Payment payment: (required)
:return: Payment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['direction', 'order_id', 'payment']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'direction' is set
if ('direction' not in params) or (params['direction'] is None):
raise ValueError("Missing the required parameter `direction` when calling `create`")
# verify the required parameter 'order_id' is set
if ('order_id' not in params) or (params['order_id'] is None):
raise ValueError("Missing the required parameter `order_id` when calling `create`")
# verify the required parameter 'payment' is set
if ('payment' not in params) or (params['payment'] is None):
raise ValueError("Missing the required parameter `payment` when calling `create`")
resource_path = '/orders/{direction}/{orderID}/payments'.replace('{format}', 'json')
path_params = {}
if 'direction' in params:
path_params['direction'] = params['direction']
if 'order_id' in params:
path_params['orderID'] = params['order_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'payment' in params:
body_params = params['payment']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'text/plain; charset=utf-8'])
# Authentication setting
auth_settings = ['oauth2']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Payment',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def create_transaction(self, direction, order_id, payment_id, payment_transaction, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_transaction(direction, order_id, payment_id, payment_transaction, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
:param str order_id: ID of the order. (required)
:param str payment_id: ID of the payment. (required)
:param PaymentTransaction payment_transaction: (required)
:return: Payment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_transaction_with_http_info(direction, order_id, payment_id, payment_transaction, **kwargs)
else:
(data) = self.create_transaction_with_http_info(direction, order_id, payment_id, payment_transaction, **kwargs)
return data
def create_transaction_with_http_info(self, direction, order_id, payment_id, payment_transaction, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_transaction_with_http_info(direction, order_id, payment_id, payment_transaction, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
:param str order_id: ID of the order. (required)
:param str payment_id: ID of the payment. (required)
:param PaymentTransaction payment_transaction: (required)
:return: Payment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['direction', 'order_id', 'payment_id', 'payment_transaction']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_transaction" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'direction' is set
if ('direction' not in params) or (params['direction'] is None):
raise ValueError("Missing the required parameter `direction` when calling `create_transaction`")
# verify the required parameter 'order_id' is set
if ('order_id' not in params) or (params['order_id'] is None):
raise ValueError("Missing the required parameter `order_id` when calling `create_transaction`")
# verify the required parameter 'payment_id' is set
if ('payment_id' not in params) or (params['payment_id'] is None):
raise ValueError("Missing the required parameter `payment_id` when calling `create_transaction`")
# verify the required parameter 'payment_transaction' is set
if ('payment_transaction' not in params) or (params['payment_transaction'] is None):
raise ValueError("Missing the required parameter `payment_transaction` when calling `create_transaction`")
resource_path = '/orders/{direction}/{orderID}/payments/{paymentID}/transactions'.replace('{format}', 'json')
path_params = {}
if 'direction' in params:
path_params['direction'] = params['direction']
if 'order_id' in params:
path_params['orderID'] = params['order_id']
if 'payment_id' in params:
path_params['paymentID'] = params['payment_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'payment_transaction' in params:
body_params = params['payment_transaction']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'text/plain; charset=utf-8'])
# Authentication setting
auth_settings = ['oauth2']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Payment',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def delete(self, direction, order_id, payment_id, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete(direction, order_id, payment_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
:param str order_id: ID of the order. (required)
:param str payment_id: ID of the payment. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_with_http_info(direction, order_id, payment_id, **kwargs)
else:
(data) = self.delete_with_http_info(direction, order_id, payment_id, **kwargs)
return data
def delete_with_http_info(self, direction, order_id, payment_id, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_with_http_info(direction, order_id, payment_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
:param str order_id: ID of the order. (required)
:param str payment_id: ID of the payment. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['direction', 'order_id', 'payment_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'direction' is set
if ('direction' not in params) or (params['direction'] is None):
raise ValueError("Missing the required parameter `direction` when calling `delete`")
# verify the required parameter 'order_id' is set
if ('order_id' not in params) or (params['order_id'] is None):
raise ValueError("Missing the required parameter `order_id` when calling `delete`")
# verify the required parameter 'payment_id' is set
if ('payment_id' not in params) or (params['payment_id'] is None):
raise ValueError("Missing the required parameter `payment_id` when calling `delete`")
resource_path = '/orders/{direction}/{orderID}/payments/{paymentID}'.replace('{format}', 'json')
path_params = {}
if 'direction' in params:
path_params['direction'] = params['direction']
if 'order_id' in params:
path_params['orderID'] = params['order_id']
if 'payment_id' in params:
path_params['paymentID'] = params['payment_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'text/plain; charset=utf-8'])
# Authentication setting
auth_settings = ['oauth2']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def delete_transaction(self, direction, order_id, payment_id, transaction_id, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_transaction(direction, order_id, payment_id, transaction_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
:param str order_id: ID of the order. (required)
:param str payment_id: ID of the payment. (required)
:param str transaction_id: ID of the transaction. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_transaction_with_http_info(direction, order_id, payment_id, transaction_id, **kwargs)
else:
(data) = self.delete_transaction_with_http_info(direction, order_id, payment_id, transaction_id, **kwargs)
return data
def delete_transaction_with_http_info(self, direction, order_id, payment_id, transaction_id, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_transaction_with_http_info(direction, order_id, payment_id, transaction_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
:param str order_id: ID of the order. (required)
:param str payment_id: ID of the payment. (required)
:param str transaction_id: ID of the transaction. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['direction', 'order_id', 'payment_id', 'transaction_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_transaction" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'direction' is set
if ('direction' not in params) or (params['direction'] is None):
raise ValueError("Missing the required parameter `direction` when calling `delete_transaction`")
# verify the required parameter 'order_id' is set
if ('order_id' not in params) or (params['order_id'] is None):
raise ValueError("Missing the required parameter `order_id` when calling `delete_transaction`")
# verify the required parameter 'payment_id' is set
if ('payment_id' not in params) or (params['payment_id'] is None):
raise ValueError("Missing the required parameter `payment_id` when calling `delete_transaction`")
# verify the required parameter 'transaction_id' is set
if ('transaction_id' not in params) or (params['transaction_id'] is None):
raise ValueError("Missing the required parameter `transaction_id` when calling `delete_transaction`")
resource_path = '/orders/{direction}/{orderID}/payments/{paymentID}/transactions/{transactionID}'.replace('{format}', 'json')
path_params = {}
if 'direction' in params:
path_params['direction'] = params['direction']
if 'order_id' in params:
path_params['orderID'] = params['order_id']
if 'payment_id' in params:
path_params['paymentID'] = params['payment_id']
if 'transaction_id' in params:
path_params['transactionID'] = params['transaction_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'text/plain; charset=utf-8'])
# Authentication setting
auth_settings = ['oauth2']
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get(self, direction, order_id, payment_id, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get(direction, order_id, payment_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
:param str order_id: ID of the order. (required)
:param str payment_id: ID of the payment. (required)
:return: Payment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_with_http_info(direction, order_id, payment_id, **kwargs)
else:
(data) = self.get_with_http_info(direction, order_id, payment_id, **kwargs)
return data
def get_with_http_info(self, direction, order_id, payment_id, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_with_http_info(direction, order_id, payment_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
:param str order_id: ID of the order. (required)
:param str payment_id: ID of the payment. (required)
:return: Payment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['direction', 'order_id', 'payment_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'direction' is set
if ('direction' not in params) or (params['direction'] is None):
raise ValueError("Missing the required parameter `direction` when calling `get`")
# verify the required parameter 'order_id' is set
if ('order_id' not in params) or (params['order_id'] is None):
raise ValueError("Missing the required parameter `order_id` when calling `get`")
# verify the required parameter 'payment_id' is set
if ('payment_id' not in params) or (params['payment_id'] is None):
raise ValueError("Missing the required parameter `payment_id` when calling `get`")
resource_path = '/orders/{direction}/{orderID}/payments/{paymentID}'.replace('{format}', 'json')
path_params = {}
if 'direction' in params:
path_params['direction'] = params['direction']
if 'order_id' in params:
path_params['orderID'] = params['order_id']
if 'payment_id' in params:
path_params['paymentID'] = params['payment_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'text/plain; charset=utf-8'])
# Authentication setting
auth_settings = ['oauth2']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Payment',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def list(self, direction, order_id, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list(direction, order_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
:param str order_id: ID of the order. (required)
:param str search: Word or phrase to search for.
:param str search_on: Comma-delimited list of fields to search on.
:param str sort_by: Comma-delimited list of fields to sort by.
:param int page: Page of results to return. Default: 1
:param int page_size: Number of results to return per page. Default: 20, max: 100.
        :param dict(str, str) filters: Any additional key/value pairs passed in the query string are interpreted as filters. Valid keys are top-level properties of the returned model or 'xp.???'
:return: ListPayment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_with_http_info(direction, order_id, **kwargs)
else:
(data) = self.list_with_http_info(direction, order_id, **kwargs)
return data
def list_with_http_info(self, direction, order_id, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_with_http_info(direction, order_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
:param str order_id: ID of the order. (required)
:param str search: Word or phrase to search for.
:param str search_on: Comma-delimited list of fields to search on.
:param str sort_by: Comma-delimited list of fields to sort by.
:param int page: Page of results to return. Default: 1
:param int page_size: Number of results to return per page. Default: 20, max: 100.
        :param dict(str, str) filters: Any additional key/value pairs passed in the query string are interpreted as filters. Valid keys are top-level properties of the returned model or 'xp.???'
:return: ListPayment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['direction', 'order_id', 'search', 'search_on', 'sort_by', 'page', 'page_size', 'filters']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'direction' is set
if ('direction' not in params) or (params['direction'] is None):
raise ValueError("Missing the required parameter `direction` when calling `list`")
# verify the required parameter 'order_id' is set
if ('order_id' not in params) or (params['order_id'] is None):
raise ValueError("Missing the required parameter `order_id` when calling `list`")
resource_path = '/orders/{direction}/{orderID}/payments'.replace('{format}', 'json')
path_params = {}
if 'direction' in params:
path_params['direction'] = params['direction']
if 'order_id' in params:
path_params['orderID'] = params['order_id']
query_params = {}
if 'search' in params:
query_params['search'] = params['search']
if 'search_on' in params:
query_params['searchOn'] = params['search_on']
if 'sort_by' in params:
query_params['sortBy'] = params['sort_by']
if 'page' in params:
query_params['page'] = params['page']
if 'page_size' in params:
query_params['pageSize'] = params['page_size']
if 'filters' in params:
query_params['filters'] = params['filters']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'text/plain; charset=utf-8'])
# Authentication setting
auth_settings = ['oauth2']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ListPayment',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def patch(self, direction, order_id, payment_id, partial_payment, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.patch(direction, order_id, payment_id, partial_payment, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
:param str order_id: ID of the order. (required)
:param str payment_id: ID of the payment. (required)
:param Payment partial_payment: (required)
:return: Payment
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.patch_with_http_info(direction, order_id, payment_id, partial_payment, **kwargs)
else:
(data) = self.patch_with_http_info(direction, order_id, payment_id, partial_payment, **kwargs)
return data
def patch_with_http_info(self, direction, order_id, payment_id, partial_payment, **kwargs):
"""
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.patch_with_http_info(direction, order_id, payment_id, partial_payment, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str direction: Direction of the order, from the current user's perspective. Possible values: incoming, outgoing. (required)
:param str order_id: ID of the order. (required)
:param str payment_id: ID of the payment. (required)
:param Payment partial_payment: (required)
:return: Payment
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['direction', 'order_id', 'payment_id', 'partial_payment']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'direction' is set
if ('direction' not in params) or (params['direction'] is None):
raise ValueError("Missing the required parameter `direction` when calling `patch`")
# verify the required parameter 'order_id' is set
if ('order_id' not in params) or (params['order_id'] is None):
raise ValueError("Missing the required parameter `order_id` when calling `patch`")
# verify the required parameter 'payment_id' is set
if ('payment_id' not in params) or (params['payment_id'] is None):
raise ValueError("Missing the required parameter `payment_id` when calling `patch`")
# verify the required parameter 'partial_payment' is set
if ('partial_payment' not in params) or (params['partial_payment'] is None):
raise ValueError("Missing the required parameter `partial_payment` when calling `patch`")
resource_path = '/orders/{direction}/{orderID}/payments/{paymentID}'.replace('{format}', 'json')
path_params = {}
if 'direction' in params:
path_params['direction'] = params['direction']
if 'order_id' in params:
path_params['orderID'] = params['order_id']
if 'payment_id' in params:
path_params['paymentID'] = params['payment_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'partial_payment' in params:
body_params = params['partial_payment']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'text/plain; charset=utf-8'])
# Authentication setting
auth_settings = ['oauth2']
return self.api_client.call_api(resource_path, 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Payment',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
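# A hedged usage sketch for the payment operations above (kept as comments; it
# is not part of the generated client). `payment_api` stands for an
# already-configured instance of this API class; how it is constructed and
# authenticated depends on the SDK's api_client/configuration objects, and the
# IDs below are placeholders.
#
#   payments = payment_api.list('incoming', 'order123', page=1, page_size=20)
#   payment = payment_api.get('incoming', 'order123', 'payment456')
#   payment_api.delete_transaction('incoming', 'order123', 'payment456', 'txn789')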
|
python
|
import calendar
import datetime as dt
import time
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.000Z'
DATE_FORMAT = '%Y-%m-%dT00:00:00.000Z'
DPLUS_FORMAT = '%Y-%m-%dT00:01:00.000Z'
def valid_rfcformat(potential):
try:
dt.datetime.strptime(potential, DATETIME_FORMAT)
return True
    except (TypeError, ValueError):
return False
def to_rfc3339(unknown):
if hasattr(unknown, 'timetuple'):
if hasattr(unknown, 'tzinfo') and unknown.tzinfo is not None:
utc_timestamp = calendar.timegm(unknown.utctimetuple())
else:
utc_timestamp = time.mktime(unknown.timetuple())
utc_datetime = dt.datetime.utcfromtimestamp(utc_timestamp)
return utc_datetime.strftime(DATETIME_FORMAT)
elif type(unknown) in (float, int):
utc_datetime = dt.datetime.utcfromtimestamp(unknown)
return utc_datetime.strftime(DATETIME_FORMAT)
elif valid_rfcformat(unknown):
return unknown
else:
raise RFC3339ConversionError(unknown)
def from_rfc3339(rfc3339):
time_tuple = time.strptime(rfc3339, DATETIME_FORMAT)
utc_timestamp = calendar.timegm(time_tuple)
return dt.datetime.fromtimestamp(utc_timestamp)
def to_date_rfc3339(unknown, plus_a_min=False):
if plus_a_min:
rfc_format = DPLUS_FORMAT
else:
rfc_format = DATE_FORMAT
if hasattr(unknown, 'strftime'):
return unknown.strftime(rfc_format)
elif type(unknown) in (float, int):
return dt.date.fromtimestamp(unknown).strftime(rfc_format)
elif valid_rfcformat(unknown):
return to_date_rfc3339(from_date_rfc3339(unknown), plus_a_min)
else:
raise RFC3339ConversionError(unknown)
def from_date_rfc3339(rfc3339):
return dt.datetime.strptime(rfc3339, DATE_FORMAT).date()
class RFC3339ConversionError(Exception):
    def __init__(self, culprit):
        super().__init__('Could not convert {} to RFC 3339 timestamp.'.format(culprit))
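# A small self-check of the helpers above (a sketch; it only runs when this
# module is executed directly). Naive datetimes round-trip through
# to_rfc3339/from_rfc3339, and date objects format via to_date_rfc3339.
if __name__ == '__main__':
    now = dt.datetime(2020, 4, 24, 12, 30, 0)
    stamp = to_rfc3339(now)
    assert valid_rfcformat(stamp)
    assert from_rfc3339(stamp) == now  # local-naive datetimes survive the round trip
    print(to_date_rfc3339(dt.date(2020, 4, 24)))                   # 2020-04-24T00:00:00.000Z
    print(to_date_rfc3339(dt.date(2020, 4, 24), plus_a_min=True))  # 2020-04-24T00:01:00.000Z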
|
python
|
"""Test the cli module."""
|
python
|
import asyncio, json, os, re, sys
from smsgateway.sources.sms import command_list
from smsgateway.config import *
from smsgateway.sources.utils import *
from smsgateway import sink_sms
from telethon import TelegramClient, utils
from telethon.tl.types import Chat, User, Channel, \
PeerUser, PeerChat, PeerChannel, \
MessageMediaGeo, MessageMediaContact, MessageMediaPhoto, \
MessageMediaDocument, MessageMediaWebPage, \
Document, DocumentAttributeFilename, DocumentAttributeSticker
def init():
global app_log, IDENTIFIER, command_regex, api_id, api_hash, session_path
app_log = setup_logging("telegram-send")
IDENTIFIER = "TG"
command_regex = re.compile('^(?P<command>[a-zA-Z ]+)$')
api_id = 242101
api_hash = "80cbc97ce425aae38c1e0291ef2ab2a4"
session_path = os.path.join(CONFIG_DIR, 'telegram-send')
def check(cmd, multiline):
init()
# print("Checking %s" % cmd)
if cmd.lower() == IDENTIFIER.lower() and multiline:
return True
else:
return False
def get_display_name(entity):
# app_log.debug("Looking up entity " + entity.stringify())
if isinstance(entity, User):
return ' '.join([x for x in [entity.first_name, entity.last_name] if x])
elif isinstance(entity, Chat) or isinstance(entity, Channel):
return entity.title
else:
return None
async def send_message(message, to_matched):
app_log.info("Starting client..")
client = TelegramClient(session_path, api_id, api_hash)
try:
await client.start()
except Exception as e:
ret = "Could not connect! Run python3 -m smsgateway.sources.commands.send_telegram to authorize!\nError: %s" % e
app_log.error(ret)
return (False, ret)
to = None
async for x in client.iter_dialogs():
name = get_display_name(x.entity)
if not to and name and name == to_matched:
to = x.entity.id
app_log.info("Found it via display_name: %s" % x.entity.stringify())
break
if not to:
app_log.warning(f"Couldn't find {to}! Trying directly..")
to = name = to_matched
app_log.info("Sending Telegram msg:\n%s" % message)
try:
import getpass
app_log.info("I am: %s" % getpass.getuser())
except:
pass
await client.send_message(to, message)
await client.disconnect()
msg = format_sms(IDENTIFIER, message, {
'to': name,
'status': 'Processed'
})
app_log.info(msg)
# ret = '\n'.join([
# IDENTIFIER,
# f"To: {name}",
# "",
# message
# ])
return True, msg
def run(lines):
init()
app_log.info("Forwarding Telegram Message")
messageStarted = False
to_matched = None
message = ""
for line in lines[1:]: # skip IDENTIFIER
if messageStarted:
if message:
message += "\n"
message += f"{line}"
elif not line.strip(): # empty line
messageStarted = True
else:
mTo = re.match("^To: (.*)$", line)
if mTo:
to_matched = mTo.group(1).strip()
else:
app_log.warning(f"Unkown header: {line}!")
if to_matched and message:
loop = asyncio.get_event_loop()
(success, ret) = loop.run_until_complete(send_message(message, to_matched))
if success:
ret = None
loop.close()
else:
ret = f"Couldn't match To: {to_matched} or message {message}"
app_log.error(ret)
return ret
if __name__ == '__main__':
init()
client = TelegramClient(session_path, api_id, api_hash)
if not client.start():
app_log.error(
"Could not connect to Telegram!\nIf you haven't authorized this client, run python3 -m smsgateway.sources.commands.send_telegram!")
sys.exit(1)
command_list.append({
'name': 'TG-Forwarder',
'check': check,
'run': run
})
|
python
|
from environs import Env
from lektor.pluginsystem import Plugin
__version__ = "18.6.12.3"
DEFAULT_PREFIX = "LEKTOR_"
class LektorEnv:
def __init__(self, config=None):
self.env = Env()
if not config:
self.prefix = DEFAULT_PREFIX
else:
self.prefix = config.get("envvar.prefix", DEFAULT_PREFIX)
def envvars(self, name, var_type=None, no_prefix=False):
prefix = "" if no_prefix else self.prefix
with self.env.prefixed(prefix):
if var_type:
return getattr(self.env, var_type)(name)
else:
return self.env(name)
class EnvvarsPlugin(Plugin):
name = "Environment Variables"
description = "A plugin making environment variables available in templates."
def on_setup_env(self, **extra):
config = self.get_config()
self.env.jinja_env.globals.update({"envvars": LektorEnv(config).envvars})
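# A hedged usage sketch: once the plugin is installed, `envvars` is available
# as a Jinja global in Lektor templates, so (assuming LEKTOR_SITE_NAME and
# LEKTOR_DEBUG are set in the environment) a template could contain:
#
#   {{ envvars('SITE_NAME') }}              {# reads LEKTOR_SITE_NAME #}
#   {{ envvars('DEBUG', var_type='bool') }} {# parsed with environs' bool reader #}
#   {{ envvars('PATH', no_prefix=True) }}   {# reads PATH, ignoring the prefix #}
#
# The same calls work on a LektorEnv instance directly from Python.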
|
python
|
import sys
from PyQt5.QtWidgets import *
from PyQt5.QAxContainer import *
from PyQt5.QtCore import *
import logging.handlers
import time
from pandas import DataFrame
is_64bits = sys.maxsize > 2**32
if is_64bits:
    print('Running in a 64-bit environment.')
else:
    print('Running in a 32-bit environment.')
formatter = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s > %(message)s')
logger = logging.getLogger("crumbs")
logger.setLevel(logging.DEBUG)
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
TR_REQ_TIME_INTERVAL = 0.2
class Openapi(QAxWidget):
def __init__(self):
print("openapi __name__:", __name__)
super().__init__()
self._create_open_api_instance()
self._set_signal_slots()
self.comm_connect()
self.account_info()
def _opt10081(self, rqname, trcode):
        # Get how many rows of data were returned (repeat count)
ohlcv_cnt = self._get_repeat_cnt(trcode, rqname)
        # Append the data one row at a time
for i in range(ohlcv_cnt):
date = self._get_comm_data(trcode, rqname, i, "일자")
open = self._get_comm_data(trcode, rqname, i, "시가")
high = self._get_comm_data(trcode, rqname, i, "고가")
low = self._get_comm_data(trcode, rqname, i, "저가")
close = self._get_comm_data(trcode, rqname, i, "현재가")
volume = self._get_comm_data(trcode, rqname, i, "거래량")
self.ohlcv['date'].append(date)
self.ohlcv['open'].append(int(open))
self.ohlcv['high'].append(int(high))
self.ohlcv['low'].append(int(low))
self.ohlcv['close'].append(int(close))
self.ohlcv['volume'].append(int(volume))
def _receive_tr_data(self, screen_no, rqname, trcode, record_name, next, unused1, unused2, unused3, unused4):
# print("_receive_tr_data!!!")
# print(rqname, trcode, next)
if next == '2':
self.remained_data = True
else:
self.remained_data = False
if rqname == "opt10081_req":
self._opt10081(rqname, trcode)
elif rqname == "opw00001_req":
# print("opw00001_req!!!")
# print("Get an de_deposit!!!")
self._opw00001(rqname, trcode)
elif rqname == "opw00018_req":
# print("opw00018_req!!!")
# print("Get the possessed item !!!!")
self._opw00018(rqname, trcode)
elif rqname == "opt10074_req":
# print("opt10074_req!!!")
# print("Get the profit")
self._opt10074(rqname, trcode)
elif rqname == "opw00015_req":
# print("opw00015_req!!!")
# print("deal list!!!!")
self._opw00015(rqname, trcode)
elif rqname == "opt10076_req":
# print("opt10076_req")
# print("chegyul list!!!!")
self._opt10076(rqname, trcode)
elif rqname == "opt10073_req":
# print("opt10073_req")
# print("Get today profit !!!!")
self._opt10073(rqname, trcode)
elif rqname == "opt10080_req":
# print("opt10080_req!!!")
# print("Get an de_deposit!!!")
self._opt10080(rqname, trcode)
try:
self.tr_event_loop.exit()
except AttributeError:
pass
    # get_total_data: fetches the full daily trading history (OHLCV) for a given stock.
    # Usage:
    #   code:  stock code (e.g. '005930')
    #   start: reference date (e.g. '20200424') => returns all open, high, low, close, volume data up to that date
def get_total_data(self, code, start):
self.ohlcv = {'date': [], 'open': [], 'high': [], 'low': [], 'close': [], 'volume': []}
self.set_input_value("종목코드", code)
self.set_input_value("기준일자", start)
self.set_input_value("수정주가구분", 1)
self.comm_rq_data("opt10081_req", "opt10081", 0, "0101")
        # The loop below does not stop after a single request; it keeps fetching until all remaining data has been retrieved.
while self.remained_data == True:
# time.sleep(TR_REQ_TIME_INTERVAL)
self.set_input_value("종목코드", code)
self.set_input_value("기준일자", start)
self.set_input_value("수정주가구분", 1)
self.comm_rq_data("opt10081_req", "opt10081", 2, "0101")
time.sleep(0.2)
        # Return an empty list if no data came back
        if not self.ohlcv['date']:
            return []
df = DataFrame(self.ohlcv, columns=['open', 'high', 'low', 'close', 'volume'], index=self.ohlcv['date'])
return df
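    # A hedged usage sketch (not executed here): with a logged-in Openapi
    # instance, the call below would return the full daily OHLCV history for
    # Samsung Electronics ('005930') up to 2020-04-24 as a pandas DataFrame.
    #
    #   api = Openapi()
    #   df = api.get_total_data('005930', '20200424')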
    # get_one_day_option_data: fetches a single value (open, high, low, close or volume) for a given stock on a given day.
    # Usage:
    #   code:   stock code
    #   start:  date to query
    #   option: one of 'open', 'high', 'low', 'close', 'volume'
def get_one_day_option_data(self, code, start, option):
self.ohlcv = {'date': [], 'open': [], 'high': [], 'low': [], 'close': [], 'volume': []}
self.set_input_value("종목코드", code)
self.set_input_value("기준일자", start)
self.set_input_value("수정주가구분", 1)
self.comm_rq_data("opt10081_req", "opt10081", 0, "0101")
        if not self.ohlcv['date']:
            return False
df = DataFrame(self.ohlcv, columns=['open', 'high', 'low', 'close', 'volume'], index=self.ohlcv['date'])
if option == 'open':
return df.iloc[0, 0]
elif option == 'high':
return df.iloc[0, 1]
elif option == 'low':
return df.iloc[0, 2]
elif option == 'close':
return df.iloc[0, 3]
elif option == 'volume':
return df.iloc[0, 4]
else:
return False
def multi_601_get_ohlcv_daliy_craw(self, code, code_name, start):
self.ohlcv = {'index': [], 'date': [], 'open': [], 'high': [], 'low': [], 'close': [], 'volume': []}
self.set_input_value("종목코드", code)
self.set_input_value("기준일자", start)
self.set_input_value("수정주가구분", 1)
self.comm_rq_data("opt10081_req", "opt10081", 0, "0101")
time.sleep(0.2)
        if not self.ohlcv['date'] or self.ohlcv['date'][0] == '':
            return []
df = DataFrame(self.ohlcv, columns=['date', 'open', 'high', 'low', 'close', 'volume'])
return df
def account_info(self):
account_number = self.get_login_info("ACCNO")
self.account_number = account_number.split(';')[0]
logger.debug("계좌번호: " + self.account_number)
def get_login_info(self, tag):
try:
ret = self.dynamicCall("GetLoginInfo(QString)", tag)
time.sleep(TR_REQ_TIME_INTERVAL)
return ret
except Exception as e:
logger.critical(e)
def _create_open_api_instance(self):
try:
self.setControl("KHOPENAPI.KHOpenAPICtrl.1")
except Exception as e:
logger.critical(e)
def _set_signal_slots(self):
try:
self.OnEventConnect.connect(self._event_connect)
self.OnReceiveTrData.connect(self._receive_tr_data)
self.OnReceiveMsg.connect(self._receive_msg)
            # Handle the OnReceiveChejanData event that the Kiwoom server fires when an order is executed
self.OnReceiveChejanData.connect(self._receive_chejan_data)
except Exception as e:
is_64bits = sys.maxsize > 2**32
if is_64bits:
                logger.critical('The current Anaconda environment is 64-bit. Please run it in a 32-bit environment.')
else:
logger.critical(e)
def _receive_chejan_data(self, gubun, item_cnt, fid_list):
print("_receive_chejan_data!!!")
print("gubun!!!")
print(gubun)
        # Execution (fill) data
        if gubun == "0":
            print("Execution data received!")
            order_num = self.get_chejan_data(9203)
            code_name_temp = self.get_chejan_data(302)
            code_name = self.change_format3(code_name_temp)
            code = self.codename_to_code(code_name)
            chegyul_fail_amount_temp = self.get_chejan_data(902)
            order_gubun = self.get_chejan_data(905)
            purchase_price = self.get_chejan_data(10)
            if code != False and code != "" and code != 0 and code != "0":
                if chegyul_fail_amount_temp != "":
                    if self.is_all_item_db_check(code) == False:
                        print("No unsold entry for this stock in all_item_db - it is a new position!")
                        if chegyul_fail_amount_temp == "0":
                            print("The order was filled completely!")
                            self.db_to_all_item(order_num, code, code_name, 0, purchase_price)
                        else:
                            print("The order was filled, but only partially!")
                            self.db_to_all_item(order_num, code, code_name, 1, purchase_price)
                    elif order_gubun == "+매수":
                        if chegyul_fail_amount_temp != "0" and self.stock_chegyul_check(code) == True:
                            print("Buy (or re-buy) order with unfilled quantity remaining and stock_chegyul_check True - keep buying this stock!")
                            pass
                        elif chegyul_fail_amount_temp == "0" and self.stock_chegyul_check(code) == True:
                            print("Buy (or re-buy) order with nothing left unfilled and stock_chegyul_check True - buying this stock is finished!")
                            self.end_invest_count_check(code)
                        elif self.stock_chegyul_check(code) == False:
                            print("Stock already exists in the all db with execution check 0 - this is a re-buy!")
                        else:
                            pass
                    elif order_gubun == "-매도":
                        if chegyul_fail_amount_temp == "0":
                            print("Stock is in the all db and the entire position is being sold!")
                            self.sell_final_check(code)
                        else:
                            print("Stock is in the all db and only part of the position is being sold!")
                            self.sell_chegyul_fail_check(code)
                    else:
                        pass
                else:
                    print("_receive_chejan_data: code is not invalid, but the executed item field is empty!")
            else:
                print("_receive_chejan_data: the code is invalid!")
        elif gubun == "1":
            print("Balance data!")
            chegyul_fail_amount_temp = self.get_chejan_data(902)
            print(chegyul_fail_amount_temp)
        else:
            pass
def comm_connect(self):
try:
self.dynamicCall("CommConnect()")
time.sleep(TR_REQ_TIME_INTERVAL)
self.login_event_loop = QEventLoop()
self.login_event_loop.exec_()
except Exception as e:
logger.critical(e)
def _receive_msg(self, sScrNo, sRQName, sTrCode, sMsg):
print(sMsg)
def _event_connect(self, err_code):
try:
if err_code == 0:
logger.debug("connected")
else:
logger.debug(f"disconnected. err_code : {err_code}")
self.login_event_loop.exit()
except Exception as e:
logger.critical(e)
def get_connect_state(self):
try:
ret = self.dynamicCall("GetConnectState()")
time.sleep(TR_REQ_TIME_INTERVAL)
return ret
except Exception as e:
logger.critical(e)
def set_input_value(self, id, value):
try:
self.dynamicCall("SetInputValue(QString, QString)", id, value)
except Exception as e:
logger.critical(e)
def comm_rq_data(self, rqname, trcode, next, screen_no):
self.dynamicCall("CommRqData(QString, QString, int, QString)", rqname, trcode, next, screen_no)
time.sleep(TR_REQ_TIME_INTERVAL)
self.tr_event_loop = QEventLoop()
self.tr_event_loop.exec_()
def _get_comm_data(self, code, field_name, index, item_name):
ret = self.dynamicCall("GetCommData(QString, QString, int, QString)", code, field_name, index, item_name)
return ret.strip()
def _get_repeat_cnt(self, trcode, rqname):
try:
ret = self.dynamicCall("GetRepeatCnt(QString, QString)", trcode, rqname)
return ret
except Exception as e:
logger.critical(e)
if __name__ == "__main__":
app = QApplication(sys.argv)
Openapi()
|
python
|
#!/usr/bin/env python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A sample script showing how to start and stop Google Compute Engine instances.
"""
from google.cloud import compute_v1
# [START compute_start_instance]
def start_instance(project_id: str, zone: str, instance_name: str):
"""
Starts a stopped Google Compute Engine instance (with unencrypted disks).
Args:
project_id: project ID or project number of the Cloud project your instance belongs to.
zone: name of the zone your instance belongs to.
        instance_name: name of the instance you want to start.
"""
instance_client = compute_v1.InstancesClient()
op_client = compute_v1.ZoneOperationsClient()
op = instance_client.start(project=project_id, zone=zone, instance=instance_name)
while op.status != compute_v1.Operation.Status.DONE:
op = op_client.wait(
operation=op.name, zone=zone, project=project_id
)
return
# [END compute_start_instance]
# [START compute_start_enc_instance]
def start_instance_with_encryption_key(project_id: str, zone: str, instance_name: str, key: bytes):
"""
Starts a stopped Google Compute Engine instance (with encrypted disks).
Args:
project_id: project ID or project number of the Cloud project your instance belongs to.
zone: name of the zone your instance belongs to.
        instance_name: name of the instance you want to start.
        key: bytes object representing a raw base64 encoded key to your machine's boot disk.
For more information about disk encryption see:
https://cloud.google.com/compute/docs/disks/customer-supplied-encryption#specifications
"""
instance_client = compute_v1.InstancesClient()
op_client = compute_v1.ZoneOperationsClient()
instance_data = instance_client.get(project=project_id, zone=zone, instance=instance_name)
# Prepare the information about disk encryption
disk_data = compute_v1.CustomerEncryptionKeyProtectedDisk()
disk_data.source = instance_data.disks[0].source
disk_data.disk_encryption_key = compute_v1.CustomerEncryptionKey()
# Use raw_key to send over the key to unlock the disk
# To use a key stored in KMS, you need to provide `kms_key_name` and `kms_key_service_account`
disk_data.disk_encryption_key.raw_key = key
enc_data = compute_v1.InstancesStartWithEncryptionKeyRequest()
enc_data.disks = [disk_data]
op = instance_client.start_with_encryption_key(project=project_id, zone=zone, instance=instance_name,
instances_start_with_encryption_key_request_resource=enc_data)
while op.status != compute_v1.Operation.Status.DONE:
op = op_client.wait(
operation=op.name, zone=zone, project=project_id
)
return
# [END compute_start_enc_instance]
# [START compute_stop_instance]
def stop_instance(project_id: str, zone: str, instance_name: str):
"""
    Stops a running Google Compute Engine instance.
Args:
project_id: project ID or project number of the Cloud project your instance belongs to.
zone: name of the zone your instance belongs to.
        instance_name: name of the instance you want to stop.
"""
instance_client = compute_v1.InstancesClient()
op_client = compute_v1.ZoneOperationsClient()
op = instance_client.stop(project=project_id, zone=zone, instance=instance_name)
while op.status != compute_v1.Operation.Status.DONE:
op = op_client.wait(
operation=op.name, zone=zone, project=project_id
)
return
# [END compute_stop_instance]
# [START compute_reset_instance]
def reset_instance(project_id: str, zone: str, instance_name: str):
"""
    Resets a running Google Compute Engine instance (with unencrypted disks).
Args:
project_id: project ID or project number of the Cloud project your instance belongs to.
zone: name of the zone your instance belongs to.
        instance_name: name of the instance you want to reset.
"""
instance_client = compute_v1.InstancesClient()
op_client = compute_v1.ZoneOperationsClient()
op = instance_client.reset(project=project_id, zone=zone, instance=instance_name)
while op.status != compute_v1.Operation.Status.DONE:
op = op_client.wait(
operation=op.name, zone=zone, project=project_id
)
return
# [END compute_reset_instance]
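# A hedged usage sketch: the project, zone and instance names below are
# placeholders, and running this issues real API calls with the ambient
# Google Cloud credentials.
if __name__ == "__main__":
    PROJECT_ID = "your-project-id"
    ZONE = "europe-central2-b"
    INSTANCE_NAME = "your-instance-name"
    stop_instance(PROJECT_ID, ZONE, INSTANCE_NAME)
    start_instance(PROJECT_ID, ZONE, INSTANCE_NAME)
    reset_instance(PROJECT_ID, ZONE, INSTANCE_NAME)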
|
python
|
""" Copyright (c) 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
"""
import neoml.PythonWrapper as PythonWrapper
from .Dnn import Layer
from neoml.Utils import check_input_layers
from .BatchNormalization import BatchNormalization
import neoml.Blob as Blob
class FullyConnected(Layer):
"""The fully connected layer.
It multiplies each of the input vectors by the weight matrix
and adds the free term vector to the result.
:param input_layers: The input layers to be connected.
The integer in each tuple specifies the number of the output.
If not set, the first output will be used.
:type input_layers: list of object, tuple(object, int)
:param element_count: The length of each vector in the output.
:type element_count: int, > 0
:param is_zero_free_term: If True, the free term vector is set to all zeros and not trained.
If False, the free term is trained together with the weights.
:type is_zero_free_term: bool, default=False
:param name: The layer name.
:type name: str, default=None
.. rubric:: Layer inputs:
The layer can have any number of inputs.
The dimensions:
- **BatchLength** * **BatchWidth** * **ListSize** is the number of vectors
- **Height** * **Width** * **Depth** * **Channels** is the vector size;
should be the same for all inputs
.. rubric:: Layer outputs:
The layer returns one output for each input.
The dimensions:
- **BatchLength**, **BatchWidth**, **ListSize** the same as for the input
- **Height**, **Width**, **Depth** are 1
- **Channels** is element_count
"""
def __init__(self, input_layers, element_count, is_zero_free_term=False, name=None):
if type(input_layers) is PythonWrapper.FullyConnected:
super().__init__(input_layers)
return
layers, outputs = check_input_layers(input_layers, 0)
if element_count < 1:
raise ValueError('The `element_count` must be > 0.')
internal = PythonWrapper.FullyConnected(str(name), layers, outputs, int(element_count), bool(is_zero_free_term))
super().__init__(internal)
@property
def element_count(self):
"""Gets the length of each vector in the output.
"""
return self._internal.get_element_count()
@property
def zero_free_term(self):
"""Sets the length of each vector in the output.
"""
return self._internal.get_zero_free_term()
@zero_free_term.setter
def zero_free_term(self, zero_free_term):
"""Checks if the free term is all zeros.
"""
self._internal.set_zero_free_term(bool(zero_free_term))
def apply_batch_normalization(self, layer):
"""Applies batch normalization to this layer.
Batch normalization must be deleted from the dnn afterwards
and layers which were connected to the batch norm must be connected to this layer.
:param neoml.Dnn.BatchNormalization layer: batch norm to be applied
"""
if type(layer) is not BatchNormalization:
raise ValueError('The `layer` must be neoml.Dnn.BatchNormalization.')
self._internal.apply_batch_normalization(layer._internal)
@property
def weights(self):
"""Gets the trained weights as a blob of the dimensions:
- **BatchLength** * **BatchWidth** * **ListSize** equal to element_count
- **Height**, **Width**, **Depth**, **Channels** the same as for the first input
"""
return Blob.Blob(self._internal.get_weights())
@weights.setter
def weights(self, blob):
"""Sets the trained weights as a blob of the dimensions:
- **BatchLength** * **BatchWidth** * **ListSize** equal to element_count
- **Height**, **Width**, **Depth**, **Channels** the same as for the first input
"""
if not type(blob) is Blob.Blob:
raise ValueError('The `blob` must be neoml.Blob.')
self._internal.set_weights(blob._internal)
@property
def free_term(self):
"""Gets the free term vector, of element_count length.
"""
return Blob.Blob(self._internal.get_free_term())
@free_term.setter
def free_term(self, blob):
"""Sets the free term vector, of element_count length.
"""
if not type(blob) is Blob.Blob:
raise ValueError('The `blob` must be neoml.Blob.')
self._internal.set_free_term(blob._internal)
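# A hedged usage sketch (comments only): building the surrounding network is
# library-specific and elided here; `source` stands for an already-created
# input layer of an existing neoml.Dnn network, and only the FullyConnected
# calls below come from this module.
#
#   fc = FullyConnected(source, element_count=16, is_zero_free_term=False, name="fc1")
#   fc.zero_free_term = True      # exclude the free term from training
#   print(fc.element_count)       # -> 16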
|
python
|
from __future__ import absolute_import, unicode_literals
import os
from celery import Celery
# Set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'stratahq.settings')
app = Celery('stratahq')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
|
python
|
from flask import session
import csv, pymssql, datetime
import threading
table_lock = threading.Lock()
def create_csv_rep(orgid, filename):
table_lock.acquire()
host = "197.189.232.50"
username = "FE-User"
password = "Fourier.01"
database = "PGAluminium"
conn = pymssql.connect(host, username, password, database)
cursor = conn.cursor()
def add_headers(columns):
        with open('static/reports/'+filename, mode='w', newline='') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerow(columns)
def add_data(data):
#for col, val in enumerate(data):
# if isinstance(val, datetime.datetime):
# lysie = list(data)
# lysie[col] = lysie[col].date()
# data = tuple(lysie)
        with open('static/reports/'+filename, mode='a', newline='') as csvFile:
            writer = csv.writer(csvFile, delimiter=",", quoting=csv.QUOTE_MINIMAL)
            #for col, val in enumerate(data):
            writer.writerow(data)
return
# Main --------------------------------------------------------------
org = orgid
forceUpdate = 1
sql1 = """EXEC [dbo].[my_sp] @orgid=?,@forceUpdate=?;"""
sql1 = "DECLARE @return_value int; \
EXEC @return_value = [dbo].[ReportContractItemsForOrg] \
@orgid = ?, \
@forceUpdate = ?; \
SELECT 'Return Value' = @return_value;"
sql1 = """DECLARE @RC int;
EXEC @RC = [dbo].[ReportContractItemsForOrg] {}, {};
SELECT @RC AS rc;""".format(orgid, forceUpdate)
report_table_name = "ReportContractItems_" + str(orgid)
# sql = "EXEC [dbo].[ReportContractItemsForOrg] {}, {};".format(orgid, forceUpdate)
sql = "EXEC [dbo].[GenerateDynamicReport] {}, {};".format(orgid, forceUpdate)
sql2 = "EXEC [dbo].[CreateReportTableForOrg] {}".format(session["OrgId"])
cursor.execute(sql)
# cursor2.execute(sql2)
columns = [column[0] for column in cursor.description]
add_headers(columns)
row = 1
data = cursor.fetchone()
while data is not None:
row += 1
add_data(data)
data = cursor.fetchone()
print(row)
# worksheet1.autofilter('B2:AG39')
conn.commit()
table_lock.release()
|
python
|
from granule_ingester.processors.EmptyTileFilter import EmptyTileFilter
from granule_ingester.processors.GenerateTileId import GenerateTileId
from granule_ingester.processors.TileProcessor import TileProcessor
from granule_ingester.processors.TileSummarizingProcessor import TileSummarizingProcessor
from granule_ingester.processors.kelvintocelsius import KelvinToCelsius
from granule_ingester.processors.Subtract180FromLongitude import Subtract180FromLongitude
from granule_ingester.processors.ForceAscendingLatitude import ForceAscendingLatitude
|
python
|
import sys
input = sys.stdin.readline
# importing accumulate (from itertools) took too much memory, so implement the prefix sum manually
def accumulate(A):
n = len(A)
P = [0] * (n + 1)
for k in range(1, n + 1):
P[k] = P[k - 1] + A[k - 1]
return P
count = 0
length = 0
lower_bound = 0
cups, fill = map(int, input().split())
cuplist = [0] * (cups + 2)
for i in range(fill):
lower, upper, chocs = map(int, input().split())
cuplist[lower] += chocs
cuplist[upper+1] -= chocs
totalcup = int(input())
cuplist = list(accumulate(cuplist))
cuplist.pop(0)
cuphead = list(accumulate(cuplist)) # Prefix sum array for cuplist
cuphead.pop(0)
# A brute-force approach would rescan from the lower bound for every upper bound, resulting in O(N^2).
# Although this looks like two nested loops, both pointers only move forward, so it runs in O(N) overall
# (see the worked example at the end of this solution).
for upper_bound in range(1, cups + 1):
while cuphead[upper_bound] - cuphead[lower_bound] > totalcup:
lower_bound += 1
count = max(count, upper_bound - lower_bound)
print(count)
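# Worked example of the double accumulate above: with 3 cups and one fill of
# 5 chocolates into cups 1..2, the difference array is [0, 5, 0, -5, 0]. The
# first accumulate (plus pop) turns it into per-cup amounts [0, 5, 5, 0, 0],
# and the second into prefix sums cuphead = [0, 5, 10, 10, 10], so the
# chocolates in cups lower_bound+1 .. upper_bound equal
# cuphead[upper_bound] - cuphead[lower_bound], e.g. cuphead[2] - cuphead[0] = 10
# for cups 1..2. The two-pointer loop then finds the longest such window whose
# total stays within totalcup.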
|
python
|
import PyPDF2
pdf1File = open('meetingminutes.pdf', 'rb')
pdf2File = open('meetingminutes2.pdf', 'rb')
pdf1Reader = PyPDF2.PdfFileReader(pdf1File)
pdf2Reader = PyPDF2.PdfFileReader(pdf2File)
pdfWriter = PyPDF2.PdfFileWriter()
for pageNum in range(pdf1Reader.numPages):
pageObj = pdf1Reader.getPage(pageNum)
pdfWriter.addPage(pageObj)
for pageNum in range(pdf2Reader.numPages):
pageObj = pdf2Reader.getPage(pageNum)
pdfWriter.addPage(pageObj)
pdfOutputFile = open('combinedminutes.pdf', 'wb')
pdfWriter.write(pdfOutputFile)
pdfOutputFile.close()
pdf1File.close()
pdf2File.close()
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Nov 7, 2015
Don't blink...
@author: Juan_Insuasti
'''
import sys
import datetime
import os.path
import json
class Logger:
def __init__(self, logName="Log", file="log.txt", enabled=True, printConsole=True, saveFile=False, saveCloud=False):
self.logName = logName
self.file = file
self.enabled = enabled
self.printConsole = printConsole
self.saveFile = saveFile
self.saveCloud = saveCloud
self.saveRecord('===== ' + datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + " +0000 " + '=====')
def log(self, action, data=None):
if (self.enabled):
record = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
record += " +0000 : "
record += self.logName + " >> "
if(data):
record += action % data
else:
record += action
self.printLog(record)
self.saveRecord(record)
def printLog(self, record):
if(self.printConsole):
print(record, file=sys.stderr)
def saveRecord(self, record):
if(self.saveFile):
fileData = record
fileData += "\n"
file = open(self.file,"a")
file.write(fileData)
file.close()
if __name__ == '__main__':
print('Starting Program')
console = Logger(logName='device0', file='test.log', enabled=True, printConsole=True, saveFile=True)
console.log('testing with data = %s',222)
console.log('testing without data')
pass
|
python
|
from setuptools import setup
# Load in babel support, if available.
try:
from babel.messages import frontend as babel
cmdclass = {"compile_catalog": babel.compile_catalog,
"extract_messages": babel.extract_messages,
"init_catalog": babel.init_catalog,
"update_catalog": babel.update_catalog, }
except ImportError:
cmdclass = {}
setup(name="django-nudge",
version="0.9.1",
description="Use Nudge to (gently) push content between Django servers",
author="Joshua Ruihley, Ross Karchner",
author_email="[email protected]",
url="https://github.com/CFPB/django-nudge",
zip_safe=False,
packages=["nudge", "nudge.demo", "nudge.management", "nudge.templatetags", "nudge.management.commands"],
package_data = {"nudge": ["templates/*.html",
"templates/admin/nudge/*.html",
"templates/admin/nudge/batch/*.html",
"templates/admin/nudge/setting/*.html"]},
package_dir={"": "src"},
install_requires=['django', 'django-reversion', 'pycrypto',],
cmdclass = cmdclass,
classifiers=["Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: Public Domain",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Framework :: Django",])
|
python
|
from typing import Union
from notion.models.annotations import Annotations
class RichText():
"""
https://developers.notion.com/reference/rich-text
"""
TYPES = ["text", "mention", "equation"]
def __init__(self, plain_text: str = None, href: str = None, annotations: Union[dict, Annotations] = None, type: str = None) -> None:
if plain_text is not None:
self.plain_text = plain_text
if href is not None:
self.href = href
if annotations is not None:
self.annotations = Annotations(**annotations) if isinstance(annotations, dict) else annotations
if type is not None:
self.type = type
            assert type in RichText.TYPES, f"`type` must be one of {', '.join(RichText.TYPES)}"
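# A hedged usage sketch (comments only): the annotation keys below are
# assumptions about the Annotations wrapper (the Notion API documents fields
# such as bold and color, but this wrapper's constructor is not shown here).
#
#   rt = RichText(plain_text="Hello", href=None,
#                 annotations={"bold": True, "color": "default"}, type="text")
#   print(rt.plain_text, rt.type)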
|
python
|
"""
Handles running extensions inside a sandbox, which runs outside the primary
Petronia memory space with OS specific constraints.
"""
from .module_loader import create_sandbox_module_loader
|
python
|
# -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: [email protected]
import torch
import torch.nn as nn
from torch.nn.modules.utils import _pair
class LocallyConnected2d(nn.Module):
def __init__(self,
in_channels,
out_channels,
output_size,
kernel_size,
stride,
bias=False):
super(LocallyConnected2d, self).__init__()
output_size = _pair(output_size)
self.weight = nn.Parameter(
torch.randn(1, out_channels, in_channels, output_size[0],
output_size[1], kernel_size**2),
requires_grad=True,
)
if bias:
self.bias = nn.Parameter(torch.randn(1, out_channels,
output_size[0],
output_size[1]),
requires_grad=True)
else:
self.register_parameter('bias', None)
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
def forward(self, x):
_, c, h, w = x.size()
kh, kw = self.kernel_size
dh, dw = self.stride
x = x.unfold(2, kh, dh).unfold(3, kw, dw)
x = x.contiguous().view(*x.size()[:-2], -1)
# Sum in in_channel and kernel_size dims
out = (x.unsqueeze(1) * self.weight).sum([2, -1])
if self.bias is not None:
out += self.bias
return out
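# A minimal shape check for the layer above: with an 8x8 input, a 3x3 kernel
# and stride 1, the unfolded grid is 6x6, so output_size must be 6.
if __name__ == "__main__":
    layer = LocallyConnected2d(in_channels=3, out_channels=8, output_size=6,
                               kernel_size=3, stride=1, bias=True)
    x = torch.randn(2, 3, 8, 8)  # (batch, channels, height, width)
    y = layer(x)
    print(y.shape)               # torch.Size([2, 8, 6, 6])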
|
python
|
from fastai.conv_learner import *
from fastai.dataset import *
from tensorboard_cb_old import *
import cv2
import pandas as pd
import numpy as np
import os
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
import scipy.optimize as opt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import warnings
warnings.filterwarnings("ignore")
#=======================================================================================================================
PATH = './'
TRAIN = '../input/train/'
TEST = '../input/test/'
LABELS = '../input/train.csv'
SAMPLE = '../input/sample_submission.csv'
name_label_dict = {
0: 'Nucleoplasm',
1: 'Nuclear membrane',
2: 'Nucleoli',
3: 'Nucleoli fibrillar center',
4: 'Nuclear speckles',
5: 'Nuclear bodies',
6: 'Endoplasmic reticulum',
7: 'Golgi apparatus',
8: 'Peroxisomes',
9: 'Endosomes',
10: 'Lysosomes',
11: 'Intermediate filaments',
12: 'Actin filaments',
13: 'Focal adhesion sites',
14: 'Microtubules',
15: 'Microtubule ends',
16: 'Cytokinetic bridge',
17: 'Mitotic spindle',
18: 'Microtubule organizing center',
19: 'Centrosome',
20: 'Lipid droplets',
21: 'Plasma membrane',
22: 'Cell junctions',
23: 'Mitochondria',
24: 'Aggresome',
25: 'Cytosol',
26: 'Cytoplasmic bodies',
27: 'Rods & rings' }
nw = 4 #number of workers for data loader
arch = inceptionresnet_2 #specify target architecture
#=======================================================================================================================
#=======================================================================================================================
# Data
#=======================================================================================================================
# faulty image : dc756dea-bbb4-11e8-b2ba-ac1f6b6435d0
#=================
TRAIN_IMAGES_PER_CATEGORY = 1000
image_df = pd.read_csv(LABELS)
image_df = image_df[(image_df.Id != 'dc756dea-bbb4-11e8-b2ba-ac1f6b6435d0') &
(image_df.Id != 'c861eb54-bb9f-11e8-b2b9-ac1f6b6435d0') &
(image_df.Id != '7a88f200-bbc3-11e8-b2bc-ac1f6b6435d0')]
image_df['target_list'] = image_df['Target'].map(lambda x: [int(a) for a in x.split(' ')])
all_labels = list(chain.from_iterable(image_df['target_list'].values))
c_val = Counter(all_labels)
n_keys = c_val.keys()
max_idx = max(n_keys)
#==================================================================================
# visualize train distribution
# fig, ax1 = plt.subplots(1,1, figsize = (10, 5))
# ax1.bar(n_keys, [c_val[k] for k in n_keys])
# ax1.set_xticks(range(max_idx))
# ax1.set_xticklabels([name_label_dict[k] for k in range(max_idx)], rotation=90)
# plt.show()
#==================================================================================
for k,v in c_val.items():
print(name_label_dict[k], 'count:', v)
# create a categorical vector
image_df['target_vec'] = image_df['target_list'].map(lambda ck: [i in ck for i in range(max_idx+1)])
raw_train_df, valid_df = train_test_split(image_df,
test_size = 0.15,
# hack to make stratification work
stratify = image_df['Target'].map(lambda x: x[:3] if '27' not in x else '0'),
random_state= 42)
print(raw_train_df.shape[0], 'training masks')
print(valid_df.shape[0], 'validation masks')
tr_n = raw_train_df['Id'].values.tolist()
val_n = valid_df['Id'].values.tolist()
tr_n = tr_n[:-2] # pytorch has problems if last batch has one sample
test_names = list({f[:36] for f in os.listdir(TEST)})
# #=================================================================================
# # # Balance data
# #================================================================================
# # keep labels with more then 50 objects
# out_df_list = []
# for k,v in c_val.items():
# if v>50:
# keep_rows = raw_train_df['target_list'].map(lambda x: k in x)
# out_df_list += [raw_train_df[keep_rows].sample(TRAIN_IMAGES_PER_CATEGORY,
# replace=True)]
# train_df = pd.concat(out_df_list, ignore_index=True)
#
# tr_n = train_df['Id'].values.tolist()
# val_n = valid_df['Id'].values.tolist()
# tr_n = tr_n[:-2] # pytorch has problems if last batch has one sample
#
# print(train_df.shape[0])
# print(len(tr_n))
# print('unique train:',len(train_df['Id'].unique().tolist()))
#
# #=========================================================================
# #show balanced class graph
fig, (ax1, ax2) = plt.subplots(1, 2, figsize = (10, 5))
train_sum_vec = np.sum(np.stack(raw_train_df['target_vec'].values, 0), 0)
valid_sum_vec = np.sum(np.stack(valid_df['target_vec'].values, 0), 0)
ax1.bar(n_keys, [train_sum_vec[k] for k in n_keys])
ax1.set_title('Training Distribution')
ax2.bar(n_keys, [valid_sum_vec[k] for k in n_keys])
ax2.set_title('Validation Distribution')
plt.show()
#=======================================================================================================================
# Dataset loading helpers
#=======================================================================================================================
def open_rgby(path,id): #a function that reads RGBY image
#print(id)
colors = ['red','green','blue','yellow']
flags = cv2.IMREAD_GRAYSCALE
img = [cv2.imread(os.path.join(path, id+'_'+color+'.png'), flags).astype(np.float32)/255
for color in colors]
img[0] = img[0] * 0.85
img[1] = img[1] * 1.0
img[2] = img[2] * 0.85
img[3] = img[3] * 0.85
img = np.stack(img, axis=-1)
#print('img loaded:', id)
return img
class pdFilesDataset(FilesDataset):
def __init__(self, fnames, path, transform):
self.labels = pd.read_csv(LABELS).set_index('Id')
self.labels['Target'] = [[int(i) for i in s.split()] for s in self.labels['Target']]
super().__init__(fnames, transform, path)
def get_x(self, i):
img = open_rgby(self.path, self.fnames[i])
if self.sz == 512:
return img
else:
return cv2.resize(img, (self.sz, self.sz), cv2.INTER_AREA)
def get_y(self, i):
if (self.path == TEST):
return np.zeros(len(name_label_dict), dtype=np.int)
else:
labels = self.labels.loc[self.fnames[i]]['Target']
return np.eye(len(name_label_dict), dtype=np.float)[labels].sum(axis=0)
@property
def is_multi(self):
return True
@property
def is_reg(self):
return True
# this flag is set to remove the output sigmoid that allows log(sigmoid) optimization
# of the numerical stability of the loss function
def get_c(self):
return len(name_label_dict) # number of classes
def get_data(sz,bs):
#data augmentation
aug_tfms = [RandomRotate(30, tfm_y=TfmType.NO),
RandomDihedral(tfm_y=TfmType.NO),
RandomLighting(0.05, 0.05, tfm_y=TfmType.NO)]
#mean and std in of each channel in the train set
stats = A([0.08069, 0.05258, 0.05487, 0.08282], [0.13704, 0.10145, 0.15313, 0.13814])
#stats = A([0.08069, 0.05258, 0.05487], [0.13704, 0.10145, 0.15313])
tfms = tfms_from_stats(stats, sz, crop_type=CropType.NO, tfm_y=TfmType.NO,
aug_tfms=aug_tfms)
ds = ImageData.get_ds(pdFilesDataset, (tr_n[:-(len(tr_n)%bs)],TRAIN),
(val_n,TRAIN), tfms, test=(test_names,TEST))
md = ImageData(PATH, ds, bs, num_workers=nw, classes=None)
return md
#=======================================================================================================================
bs = 16
sz = 256
md = get_data(sz,bs)
x,y = next(iter(md.trn_dl))
print(x.shape, y.shape)
#=======================================================================================================================
# Display images
#=======================================================================================================================
# def display_imgs(x):
# columns = 4
# bs = x.shape[0]
# rows = min((bs + 3) // 4, 4)
# fig = plt.figure(figsize=(columns * 4, rows * 4))
# for i in range(rows):
# for j in range(columns):
# idx = i + j * columns
# fig.add_subplot(rows, columns, idx + 1)
# plt.axis('off')
# plt.imshow((x[idx, :, :, :3] * 255).astype(np.int))
# plt.show()
#
#
# display_imgs(np.asarray(md.trn_ds.denorm(x)))
#=======================================================================================================================
# compute dataset stats
#=======================================================================================================================
# x_tot = np.zeros(4)
# x2_tot = np.zeros(4)
# for x,y in iter(md.trn_dl):
# tmp = md.trn_ds.denorm(x).reshape(16,-1)
# x = md.trn_ds.denorm(x).reshape(-1,4)
# x_tot += x.mean(axis=0)
# x2_tot += (x**2).mean(axis=0)
#
# channel_avr = x_tot/len(md.trn_dl)
# channel_std = np.sqrt(x2_tot/len(md.trn_dl) - channel_avr**2)
# print(channel_avr,channel_std)
#=======================================================================================================================
# Loss and metrics
#=======================================================================================================================
class FocalLoss(nn.Module):
def __init__(self, gamma=1):
super().__init__()
self.gamma = gamma
def forward(self, input, target):
if not (target.size() == input.size()):
raise ValueError("Target size ({}) must be the same as input size ({})"
.format(target.size(), input.size()))
max_val = (-input).clamp(min=0)
loss = input - input * target + max_val + \
((-max_val).exp() + (-input - max_val).exp()).log()
invprobs = F.logsigmoid(-input * (target * 2.0 - 1.0))
loss = (invprobs * self.gamma).exp() * loss
return loss.sum(dim=1).mean()
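# Hedged sanity check (not in the original kernel): the expression above is the standard
# numerically stable BCE-with-logits, weighted per element by (1 - p_correct)^gamma via the
# exp(gamma * log-sigmoid) trick. With gamma=0 the weight is 1, so the loss should reduce to
# plain BCE-with-logits summed over classes and averaged over the batch:
# _x = torch.randn(4, len(name_label_dict))
# _t = torch.randint(0, 2, _x.shape).float()
# _ref = F.binary_cross_entropy_with_logits(_x, _t, reduction='sum') / _x.shape[0]
# assert torch.allclose(FocalLoss(gamma=0)(_x, _t), _ref, atol=1e-5)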
def acc(preds,targs,th=0.0):
preds = (preds > th).int()
targs = targs.int()
return (preds==targs).float().mean()
def recall(preds, targs, thresh=0.5):
pred_pos = preds > thresh
tpos = torch.mul((targs.byte() == pred_pos), targs.byte())
tp = tpos.sum().item()
tr = targs.sum().item()
return float(tp+0.000001)/float( tr + 0.000001)
def precision(preds, targs, thresh=0.5):
pred_pos = preds > thresh
tpos = torch.mul((targs.byte() == pred_pos), targs.byte())
tp = tpos.sum().item()
pp = pred_pos.sum().item()
return float(tp+0.000001)/float(pp + 0.000001)
def fbeta(preds, targs, beta, thresh=0.5):
"""Calculates the F-beta score (the weighted harmonic mean of precision and recall).
This is the micro averaged version where the true positives, false negatives and
false positives are calculated globally (as opposed to on a per label basis).
    beta == 1 places equal weight on precision and recall, beta < 1 emphasizes precision and
beta > 1 favors recall.
"""
assert beta > 0, 'beta needs to be greater than 0'
beta2 = beta ** 2
rec = recall(preds, targs, thresh)
prec = precision(preds, targs, thresh)
return float((1 + beta2) * prec * rec) / float(beta2 * prec + rec + 0.00000001)
def f1(preds, targs, thresh=0.5): return float(fbeta(preds, targs, 1, thresh))
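# Hedged toy check of the metrics above (hand-picked values, not competition data):
# two samples x two classes with one false positive and one false negative at thresh=0.5,
# so tp=2, predicted positives=3, actual positives=3 and precision = recall = f1 = 2/3.
_toy_preds = torch.tensor([[0.9, 0.6], [0.2, 0.7]])
_toy_targs = torch.tensor([[1.0, 0.0], [1.0, 1.0]])
print('toy precision/recall/f1:', precision(_toy_preds, _toy_targs),
      recall(_toy_preds, _toy_targs), f1(_toy_preds, _toy_targs))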
########################################################################################################################
# Training
########################################################################################################################
class ConvnetBuilder_custom():
def __init__(self, f, c, is_multi, is_reg, ps=None, xtra_fc=None, xtra_cut=0,
custom_head=None, pretrained=True):
self.f, self.c, self.is_multi, self.is_reg, self.xtra_cut = f, c, is_multi, is_reg, xtra_cut
if xtra_fc is None: xtra_fc = [512]
if ps is None: ps = [0.25] * len(xtra_fc) + [0.5]
self.ps, self.xtra_fc = ps, xtra_fc
if f in model_meta:
cut, self.lr_cut = model_meta[f]
else:
cut, self.lr_cut = 0, 0
cut -= xtra_cut
layers = cut_model(f(pretrained), cut)
# replace first convolutional layer by 4->32 while keeping corresponding weights
# and initializing new weights with zeros
        w = layers[0].conv.weight
        layers[0].conv = nn.Conv2d(4, 32, kernel_size=(3, 3), stride=(2, 2), bias=False)
        layers[0].conv.weight = torch.nn.Parameter(torch.cat((w, torch.zeros(32, 1, 3, 3)), dim=1))
self.nf = model_features[f] if f in model_features else (num_features(layers) * 2)
if not custom_head: layers += [AdaptiveConcatPool2d(), Flatten()]
self.top_model = nn.Sequential(*layers)
n_fc = len(self.xtra_fc) + 1
if not isinstance(self.ps, list): self.ps = [self.ps] * n_fc
if custom_head:
fc_layers = [custom_head]
else:
fc_layers = self.get_fc_layers()
self.n_fc = len(fc_layers)
self.fc_model = to_gpu(nn.Sequential(*fc_layers))
if not custom_head: apply_init(self.fc_model, kaiming_normal)
self.model = to_gpu(nn.Sequential(*(layers + fc_layers)))
@property
def name(self):
return f'{self.f.__name__}_{self.xtra_cut}'
def create_fc_layer(self, ni, nf, p, actn=None):
res = [nn.BatchNorm1d(num_features=ni)]
if p: res.append(nn.Dropout(p=p))
res.append(nn.Linear(in_features=ni, out_features=nf))
if actn: res.append(actn)
return res
def get_fc_layers(self):
res = []
ni = self.nf
for i, nf in enumerate(self.xtra_fc):
res += self.create_fc_layer(ni, nf, p=self.ps[i], actn=nn.ReLU())
ni = nf
final_actn = nn.Sigmoid() if self.is_multi else nn.LogSoftmax()
if self.is_reg: final_actn = None
res += self.create_fc_layer(ni, self.c, p=self.ps[-1], actn=final_actn)
return res
def get_layer_groups(self, do_fc=False):
if do_fc:
return [self.fc_model]
idxs = [self.lr_cut]
c = children(self.top_model)
if len(c) == 3: c = children(c[0]) + c[1:]
lgs = list(split_by_idxs(c, idxs))
return lgs + [self.fc_model]
class ConvLearner(Learner):
def __init__(self, data, models, precompute=False, **kwargs):
self.precompute = False
super().__init__(data, models, **kwargs)
if hasattr(data, 'is_multi') and not data.is_reg and self.metrics is None:
self.metrics = [accuracy_thresh(0.5)] if self.data.is_multi else [accuracy]
if precompute: self.save_fc1()
self.freeze()
self.precompute = precompute
def _get_crit(self, data):
if not hasattr(data, 'is_multi'): return super()._get_crit(data)
return F.l1_loss if data.is_reg else F.binary_cross_entropy if data.is_multi else F.nll_loss
@classmethod
def pretrained(cls, f, data, ps=None, xtra_fc=None, xtra_cut=0, custom_head=None, precompute=False,
pretrained=True, **kwargs):
models = ConvnetBuilder_custom(f, data.c, data.is_multi, data.is_reg,
ps=ps, xtra_fc=xtra_fc, xtra_cut=xtra_cut, custom_head=custom_head,
pretrained=pretrained)
return cls(data, models, precompute, **kwargs)
@classmethod
def lsuv_learner(cls, f, data, ps=None, xtra_fc=None, xtra_cut=0, custom_head=None, precompute=False,
needed_std=1.0, std_tol=0.1, max_attempts=10, do_orthonorm=False, **kwargs):
models = ConvnetBuilder(f, data.c, data.is_multi, data.is_reg,
ps=ps, xtra_fc=xtra_fc, xtra_cut=xtra_cut, custom_head=custom_head, pretrained=False)
convlearn = cls(data, models, precompute, **kwargs)
convlearn.lsuv_init()
return convlearn
@property
def model(self):
return self.models.fc_model if self.precompute else self.models.model
def half(self):
if self.fp16: return
self.fp16 = True
if type(self.model) != FP16: self.models.model = FP16(self.model)
if not isinstance(self.models.fc_model, FP16): self.models.fc_model = FP16(self.models.fc_model)
def float(self):
if not self.fp16: return
self.fp16 = False
if type(self.models.model) == FP16: self.models.model = self.model.module.float()
if type(self.models.fc_model) == FP16: self.models.fc_model = self.models.fc_model.module.float()
@property
def data(self):
return self.fc_data if self.precompute else self.data_
def create_empty_bcolz(self, n, name):
return bcolz.carray(np.zeros((0, n), np.float32), chunklen=1, mode='w', rootdir=name)
def set_data(self, data, precompute=False):
super().set_data(data)
if precompute:
self.unfreeze()
self.save_fc1()
self.freeze()
self.precompute = True
else:
self.freeze()
def get_layer_groups(self):
return self.models.get_layer_groups(self.precompute)
def summary(self):
precompute = self.precompute
self.precompute = False
res = super().summary()
self.precompute = precompute
return res
def get_activations(self, force=False):
tmpl = f'_{self.models.name}_{self.data.sz}.bc'
# TODO: Somehow check that directory names haven't changed (e.g. added test set)
names = [os.path.join(self.tmp_path, p + tmpl) for p in ('x_act', 'x_act_val', 'x_act_test')]
if os.path.exists(names[0]) and not force:
self.activations = [bcolz.open(p) for p in names]
else:
self.activations = [self.create_empty_bcolz(self.models.nf, n) for n in names]
def save_fc1(self):
self.get_activations()
act, val_act, test_act = self.activations
m = self.models.top_model
if len(self.activations[0]) != len(self.data.trn_ds):
predict_to_bcolz(m, self.data.fix_dl, act)
if len(self.activations[1]) != len(self.data.val_ds):
predict_to_bcolz(m, self.data.val_dl, val_act)
if self.data.test_dl and (len(self.activations[2]) != len(self.data.test_ds)):
if self.data.test_dl: predict_to_bcolz(m, self.data.test_dl, test_act)
self.fc_data = ImageClassifierData.from_arrays(self.data.path,
(act, self.data.trn_y), (val_act, self.data.val_y), self.data.bs,
classes=self.data.classes,
test=test_act if self.data.test_dl else None, num_workers=8)
def freeze(self):
self.freeze_to(-1)
def unfreeze(self):
self.freeze_to(0)
self.precompute = False
def predict_array(self, arr):
precompute = self.precompute
self.precompute = False
pred = super().predict_array(arr)
self.precompute = precompute
return pred
#=======================================================================================================================
sz = 512 #image size
bs = 8 #batch size
md = get_data(sz,bs)
learner = ConvLearner.pretrained(arch, md, ps=0.2) #dropout 20%
learner.opt_fn = optim.Adam
learner.clip = 1.0 #gradient clipping
learner.crit = FocalLoss()
#learner.crit = f2_loss
learner.metrics = [precision, recall, f1]
print(learner.summary())
#learner.lr_find()
#learner.sched.plot()
#plt.show()
tb_logger = TensorboardLogger(learner.model, md, "inres_512_val3", metrics_names=["precision", 'recall', 'f1'])
lr = 1e-3
lrs=np.array([lr/10,lr/3,lr])
#learner.fit(lr,1, best_save_name='inres_512_0.3', callbacks=[tb_logger])
learner.unfreeze()
#learner.load('wrn_512_3.3')
#learner.fit(lrs/4,4,cycle_len=2,use_clr=(10,20),best_save_name='inres_512_1.3', callbacks=[tb_logger])
#learner.fit(lrs/4,2,cycle_len=4,use_clr=(10,20), best_save_name='inres_512_2.3', callbacks=[tb_logger])
#learner.fit(lrs/16,1,cycle_len=8,use_clr=(5,20), best_save_name='inres_512_3.3', callbacks=[tb_logger])
# learner.fit(lrs/16,1,cycle_len=8,use_clr=(5,20), best_save_name='wrn_512_4.3_best', callbacks=[tb_logger] )
# learner.fit(lrs/16,1,cycle_len=8,use_clr=(5,20), best_save_name='wrn_512_5.3_best', callbacks=[tb_logger])
#learner.save('inres_512_unbalanced_grn+')
learner.load('inres_512_unbalanced_grn+')
#learner.load('wrn_512_balanced')
learner.fit(lrs/16,1,cycle_len=8,use_clr=(5,20), best_save_name='inres_512_4.3_best_unbalanced_grn+', callbacks=[tb_logger] )
learner.fit(lrs/16,1,cycle_len=8,use_clr=(5,20), best_save_name='inres_512_5.3_best_unbalanced_grn+', callbacks=[tb_logger])
learner.save('inres_512_unbalanced_grn+_focalgamma1')
# swa
#learner.fit(lrs/160,1,cycle_len=8,use_clr=(5,20), best_save_name='wrn_512_4', callbacks=[tb_logger])
#learner.load('Res34_512_grn4-swa')
#learner.fit(lrs/16, n_cycle=4, cycle_len=4,use_clr=(5,20), best_save_name='Res34_512_grn4', use_swa=True, swa_start=1, swa_eval_freq=5,callbacks=[tb_logger])
#learner.load('Res34_512_grn4-swa')
#======================================================================================================================
# Validation
#=======================================================================================================================
def sigmoid_np(x):
return 1.0/(1.0 + np.exp(-x))
preds,y = learner.TTA(n_aug=16)
preds = np.stack(preds, axis=-1)
preds = sigmoid_np(preds)
pred = preds.max(axis=-1)
def F1_soft(preds,targs,th=0.5,d=50.0):
preds = sigmoid_np(d*(preds - th))
targs = targs.astype(np.float)
score = 2.0*(preds*targs).sum(axis=0)/((preds+targs).sum(axis=0) + 1e-6)
return score
def fit_val(x,y):
params = 0.5*np.ones(len(name_label_dict))
wd = 1e-5
error = lambda p: np.concatenate((F1_soft(x,y,p) - 1.0,
wd*(p - 0.5)), axis=None)
p, success = opt.leastsq(error, params)
return p
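# Hedged toy illustration (numbers made up): F1_soft replaces the hard (pred > th) step with a
# steep sigmoid of width 1/d, so the per-class F1 becomes a smooth function of the thresholds
# that scipy's leastsq can adjust; fit_val pushes each soft F1 towards 1 with a small
# weight-decay pull of the thresholds towards 0.5.
# _tp = np.array([[0.9], [0.6], [0.2]])
# _tt = np.array([[1.0], [0.0], [1.0]])
# print(F1_soft(_tp, _tt, th=0.5))   # ~0.5, matching the hard F1 at threshold 0.5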
th = fit_val(pred,y)
th[th<0.1] = 0.1
print('Thresholds: ',th)
print('F1 macro: ',f1_score(y, pred>th, average='macro'))
print('F1 macro (th = 0.5): ',f1_score(y, pred>0.5, average='macro'))
print('F1 micro: ',f1_score(y, pred>th, average='micro'))
print('Fractions: ',(pred > th).mean(axis=0))
print('Fractions (true): ',(y > th).mean(axis=0))
#=======================================================================================================================
# Submission
#=======================================================================================================================
preds_t,y_t = learner.TTA(n_aug=16,is_test=True)
preds_t = np.stack(preds_t, axis=-1)
preds_t = sigmoid_np(preds_t)
pred_t = preds_t.max(axis=-1) #max works better for F1 macro score
def save_pred(pred, th=0.5, fname='protein_classification.csv'):
pred_list = []
for line in pred:
s = ' '.join(list([str(i) for i in np.nonzero(line > th)[0]]))
pred_list.append(s)
sample_df = pd.read_csv(SAMPLE)
sample_list = list(sample_df.Id)
pred_dic = dict((key, value) for (key, value)
in zip(learner.data.test_ds.fnames, pred_list))
pred_list_cor = [pred_dic[id] for id in sample_list]
df = pd.DataFrame({'Id': sample_list, 'Predicted': pred_list_cor})
df.to_csv(fname, header=True, index=False)
# Manual thresholds
th_t = np.array([0.565,0.39,0.55,0.345,0.33,0.39,0.33,0.45,0.38,0.39,
0.34,0.42,0.31,0.38,0.49,0.50,0.38,0.43,0.46,0.40,
0.39,0.505,0.37,0.47,0.41,0.545,0.32,0.1])
print('Fractions: ',(pred_t > th_t).mean(axis=0))
save_pred(pred_t,th_t) # From manual threshold
# Automatically fit the thresholds based on the public LB statistics.
lb_prob = [
0.362397820,0.043841336,0.075268817,0.059322034,0.075268817,
0.075268817,0.043841336,0.075268817,0.010000000,0.010000000,
0.010000000,0.043841336,0.043841336,0.014198783,0.043841336,
0.010000000,0.028806584,0.014198783,0.028806584,0.059322034,
0.010000000,0.126126126,0.028806584,0.075268817,0.010000000,
0.222493880,0.028806584,0.010000000]
# I replaced 0 by 0.01 since there may be a rounding error leading to 0
def Count_soft(preds,th=0.5,d=50.0):
preds = sigmoid_np(d*(preds - th))
return preds.mean(axis=0)
def fit_test(x,y):
params = 0.5*np.ones(len(name_label_dict))
wd = 1e-5
error = lambda p: np.concatenate((Count_soft(x,p) - y,
wd*(p - 0.5)), axis=None)
p, success = opt.leastsq(error, params)
return p
th_t = fit_test(pred_t,lb_prob)
th_t[th_t<0.1] = 0.1
print('Thresholds: ',th_t)
print('Fractions: ',(pred_t > th_t).mean(axis=0))
print('Fractions (th = 0.5): ',(pred_t > 0.5).mean(axis=0))
save_pred(pred_t,th_t,'protein_classification_f.csv') # based on public lb stats
save_pred(pred_t,th,'protein_classification_v.csv') # based on validation
save_pred(pred_t,0.5,'protein_classification_05.csv') # based on fixed threshold 0.5
#=======================================================================================================================
# using the threshold from validation set for classes not present in the public LB:
class_list = [8,9,10,15,20,24,27]
for i in class_list:
th_t[i] = th[i]
save_pred(pred_t,th_t,'protein_classification_c.csv')
#=======================================================================================================================
# fitting thresholds based on the frequency of classes in the train dataset:
labels = pd.read_csv(LABELS).set_index('Id')
label_count = np.zeros(len(name_label_dict))
for label in labels['Target']:
l = [int(i) for i in label.split()]
label_count += np.eye(len(name_label_dict))[l].sum(axis=0)
label_fraction = label_count.astype(np.float)/len(labels)
print(label_count, label_fraction)
th_t = fit_test(pred_t,label_fraction)
th_t[th_t<0.05] = 0.05
print('Thresholds: ',th_t)
print('Fractions: ',(pred_t > th_t).mean(axis=0))
save_pred(pred_t,th_t,'protein_classification_t.csv') # based on frequency of classes in train
#=======================================================================================================================
# res34
# F1 macro: 0.7339006427813839
# F1 macro (th = 0.5): 0.6669998135148151
# F1 micro: 0.7723082957442635
#res101xt-m4
# Thresholds: [0.54491 0.75237 0.58362 0.55942 0.56169 0.52287 0.56564 0.58306 0.50261 0.52049 0.46712 0.5479 0.57008
# 0.71485 0.59936 0.1 0.66235 0.58874 0.51545 0.51548 0.52326 0.49656 0.65905 0.54701 0.68219 0.50362
# 0.48294 0.29036]
# F1 macro: 0.7011295048856508
# F1 macro (th = 0.5): 0.6415093521306193
# F1 micro: 0.7883417085427137
# # resnet101xt-swa
# [0.52095 0.67501 0.46876 0.62209 0.52894 0.55665 0.55442 0.48154 0.46129 0.75715 0.43572 0.586 0.64507
# 0.64826 0.55982 0.1 0.83022 0.90441 0.55107 0.51155 0.52846 0.4664 0.74345 0.52408 0.79122 0.46872
# 0.55224 0.1 ]
# F1 macro: 0.6819705865476475
# F1 macro (th = 0.5): 0.6223916910357309
# F1 micro: 0.7857503279846604
# sub_0.05 lb: 0.492
########################################################################################################################
# res34 grn+ 512 revised val
# Thresholds: [0.54664 0.5475 0.55301 0.50708 0.47384 0.5289 0.44358 0.5137 0.41192 0.4685 0.42144 0.54451 0.5089
# 0.53344 0.47533 0.12029 0.39405 0.40774 0.45618 0.46368 0.40192 0.47281 0.56206 0.49217 0.51224 0.49178
# 0.1 0.34105]
# F1 macro: 0.6304911952781457 # 0.451
# F1 macro (th = 0.5): 0.5779850745288859 # 0.438
# F1 micro: 0.6733807952769578
# res34 grn+0.5 swa 4x4 revised val
# Thresholds: [0.54103 0.56479 0.53996 0.55322 0.47398 0.54292 0.46768 0.53656 0.38274 0.48946 0.41035 0.4907 0.48226
# 0.5141 0.51645 0.1427 0.38583 0.42499 0.45048 0.4634 0.41046 0.45131 0.52466 0.51523 0.67688 0.48726
# 0.45562 0.34339]
# F1 macro: 0.666934899747442
# F1 macro (th = 0.5): 0.5910769947924901
# F1 micro: 0.7727588603196666
#grn34+0.9 swa 4x4 revised val
# Thresholds: [0.54138 0.56821 0.57645 0.49649 0.45076 0.5454 0.49167 0.52807 0.4007 0.43375 0.37413 0.52472 0.52156
# 0.44734 0.54172 0.1312 0.43421 0.42853 0.46424 0.48458 0.4138 0.45056 0.55984 0.50826 0.71608 0.48222
# 0.51216 0.35996]
# F1 macro: 0.6830337665787448
# F1 macro (th = 0.5): 0.6158043015502873
# F1 micro: 0.7779433681073026
#grn34+0.5 256 revised val
# Thresholds: [0.53031 0.61858 0.58287 0.50504 0.56897 0.6039 0.48341 0.57169 0.48902 0.61432 0.53577 0.60106 0.52176
# 0.49809 0.59424 0.1445 0.46618 0.50464 0.50754 0.53109 0.51841 0.51343 0.50707 0.58757 0.57555 0.49086
# 0.53558 0.52238]
# F1 macro: 0.6661630089375489
# F1 macro (th = 0.40): 0.5571912452206964
# F1 macro (th = 0.45): 0.6171187769631362
# F1 macro (th = 0.50): 0.6415479086760095 # 0.470
# F1 macro (th = 0.55): 0.6489516914324852
# F1 macro (th = 0.60): 0.6026006073694331
# F1 macro (th = 0.65): 0.553418550910492
# F1 micro: 0.7577577577577577
# Fractions: [0.4482 0.03153
# wrn - validation-stratified run 1
# Thresholds: [0.54149 0.55812 0.56723 0.58857 0.55518 0.59766 0.5105 0.59866 0.73868 0.69084 0.56855 0.66444 0.57103
# 0.73118 0.5686 0.63132 0.61924 0.57267 0.54284 0.48375 0.53864 0.47911 0.57397 0.56129 0.78236 0.1
# 0.55899 0.36679]
# F1 macro: 0.7199049787804883
# F1 macro (th = 0.5): 0.6730420723769626 # 0.
# F1 micro: 0.688504734639947
# wrn - validation-stratified run 2 -long -16 more epoch
# Thresholds: [0.56716 0.63661 0.59092 0.62034 0.52683 0.56752 0.50399 0.61647 0.65823 0.57482 0.52132 0.68148 0.60175
# 0.57967 0.5999 0.61468 0.48772 0.56341 0.5741 0.49707 0.5276 0.49113 0.57197 0.54825 0.62061 0.49563
# 0.624 0.27953]
# F1 macro: 0.7361542316545664
# F1 macro (th = 0.5): 0.6932206090395802
# F1 micro: 0.7887470695493618
# wrn - validation-stratified run 3 -long -balanced train - 16 more epoch --overfit
# 7 0.220173 0.761697 0.856499 0.610779 0.706326
# Thresholds: [0.55793 0.78152 0.66333 0.6825 0.65572 0.75289 0.74526 0.63238 0.53035 0.36548 0.42722 0.72039 0.71662
# 0.75241 0.66181 0.40304 0.78582 0.87507 0.72847 0.61643 0.81325 0.5994 0.64994 0.60596 0.8597 0.52862
# 0.84814 0.13508]
# F1 macro: 0.720740962219489
# F1 macro (th = 0.5): 0.6314560071256061
# F1 micro: 0.7730109204368174
# wrn - validation-stratified run 4 -long -unbalanced train - 16 more epoch --overfit Augmentation+
# 7 0.257098 0.746422 0.866529 0.583308 0.688742
# Thresholds: [0.5979 0.73537 0.634 0.71561 0.69005 0.69933 0.65443 0.63613 0.8177 0.42255 0.4162 0.7414 0.76869
# 0.78629 0.73087 0.45202 0.83717 0.77659 0.72559 0.63488 0.70433 0.60525 0.71827 0.61926 0.76373 0.52041
# 0.78393 0.16647]
# F1 macro: 0.7102068815363216
# F1 macro (th = 0.5): 0.6060142764385398
# F1 micro: 0.
#
# wrn - validation-stratified run 4 -long -unbalanced train - 16 more epoch -grn+
# 7 0.309054 0.610084 0.917235 0.591056 0.710322
#Thresholds: [0.56554 0.67176 0.63116 0.58562 0.54196 0.63124 0.54626 0.61112 0.60392 0.51268 0.4329 0.70066 0.63997
# 0.71733 0.63157 0.55963 0.64437 0.76405 0.69452 0.5257 0.57838 0.52668 0.55164 0.60304 0.77591 0.50272
#0.53615 0.41276]
#F1 macro: 0.747663269393283
#F1 macro (th = 0.5): 0.6990677584721688
#F1 micro: 0.7917927134026042
# Thresholds: [0.51905 0.63727 0.59806 0.61262 0.61216 0.60486 0.54142 0.60487 0.42234 0.58749 0.52914 0.6674 0.58175
# 0.69042 0.48655 0.1 0.56512 0.56398 0.53958 0.53107 0.45948 0.51422 0.57877 0.62183 0.51312 0.50313
# 0.54336 0.48171]
# F1 macro: 0.7065067795265598
# F1 macro (th = 0.5): 0.6705134909023859
# F1 micro: 0.7804733141895237
# incepres focal loss grn+
# 7 0.320347 0.61997 0.90574 0.5458 0.672016
#Thresholds: [0.57556 0.59779 0.62277 0.54471 0.49556 0.61897 0.57477 0.57172 0.6501 0.68403 0.46683 0.64066 0.58307
#0.64869 0.65064 0.63917 0.71538 0.78358 0.53561 0.57432 0.57465 0.51983 0.53168 0.61909 0.83321 0.52462
#0.73072 0.49235]
#F1 macro: 0.7212613994045415
#F1 macro (th = 0.5): 0.6801362915766137
#F1 micro: 0.7713873968295559
# incepres grn+ f1 loss
# 7 0.087745 0.224351 0.756631 0.7749 0.761466
# Thresholds: [0.89364 0.90076 0.91226 0.60601 0.9128 0.91639 0.90742 0.90624 0.58812 0.64672 0.60698 0.92495 0.91406
# 0.92322 0.8833 0.68635 0.77239 0.90479 0.92075 0.91763 0.75379 0.8955 0.92912 0.90515 0.79271 0.91636
# 0.94046 0.5 ]
# F1 macro: 0.6629372155000963
# F1 macro (th = 0.5): 0.6038595182923132
# F1 micro: 0.7442261289210618
|
python
|
"""Reverse-engineered client for the LG SmartThinQ API.
"""
from .core import * # noqa
from .client import * # noqa
from .ac import * # noqa
from .dishwasher import * # noqa
from .dryer import * # noqa
from .refrigerator import * # noqa
from .washer import * # noqa
__version__ = '1.3.0'
|
python
|
# dic = {'key': 'value', 'key2': 'value2'}
import json
#
# ret = json.dumps(dic) # serialize
# print(dic, type(dic))
# print(ret, type(ret))
#
# res = json.loads(ret) # deserialize
# print(res, type(res))
# Question 1
# dic = {1: 'value', 2: 'value2'}
# ret = json.dumps(dic) # serialize
# print(dic, type(dic))
# print(ret, type(ret))
#
# res = json.loads(ret) # deserialize
# print(res, type(res))
# Question 2
# dic = {1: [1, 2, 3], 2: (4, 5, 'aa')}
# ret = json.dumps(dic) # serialize
# print(dic, type(dic))
# print(ret, type(ret))
#
# res = json.loads(ret) # deserialize
# print(res, type(res))
# Question 3
# s = {1, 2, 'aaa'}
# json.dumps(s)
# Question 4 # TypeError: keys must be a string
# json.dumps({(1, 2, 3): 123})
# JSON is shared across languages: data serialized with JSON in Python can be deserialized in Java as well
# The data types it can handle are quite limited: strings, lists, dicts, numbers
# Dict keys can only be strings
# Back-end languages: java c c++ c#
# Front-end languages: for display in the web page
# Writing a dict to a file
import json
# dic = {'key': 'value', 'key2': 'value2'}
# ret = json.dumps(dic) # serialize
# with open('json_file', 'a') as f:
# f.write('\n')
# f.write(ret)
# Reading a dict back from a file
# with open('json_file', 'r') as f:
# str_dic = f.read()
# dic = json.loads(str_dic)
# print(dic.keys())
# dump/load operate on files directly
# dic = {'key1': 'value1', 'key2': 'value2'}
# with open('json_file', 'a') as f:
# json.dump(dic, f)
# with open('json_file', 'r') as f:
# dic = json.load(f)
# print(dic.keys())
# Question 5: consecutive writes/reads with dump/load are not supported
# dic = {'key1': 'value1', 'key2': 'value2'}
# with open('json_file', 'a') as f:
# json.dump(dic, f)
# json.dump(dic, f)
# json.dump(dic, f)
# with open('json_file', 'r') as f:
# dic = json.load(f)
# print(dic.keys())
# Requirement: write dicts to a file one at a time, then read them back one at a time
# dic = {'key1': 'value1', 'key2': 'value2'}
#
# with open('json_file', 'a') as f:
# str_dic = json.dumps(dic)
# f.write(str_dic + '\n')
# str_dic = json.dumps(dic)
# f.write(str_dic + '\n')
# str_dic = json.dumps(dic)
# f.write(str_dic + '\n')
#
# with open('json_file', 'r') as f:
# for line in f:
# dic = json.loads(line.strip())
# print(dic.keys())
# json
# dumps loads
# convert data in memory:
# dumps: data type -> string (serialization)
# loads: string -> data type (deserialization)
# dump load
# write a data type straight to a file / read a data type straight back from a file
# dump: data type -> file (serialization)
# load: file -> data type (deserialization)
# json is a serialization format shared by all languages
# it only supports lists, dicts, strings and numbers
# dict keys must be strings
# dic = {'key': '你好'}
# print(json.dumps(dic, ensure_ascii=False))
import json
data = {'username': ['李华', '二愣子'], 'sex': 'male', 'age': 16}
json_dic2 = json.dumps(data, sort_keys=True, indent=4, separators=(',', ':'), ensure_ascii=False)
print(json_dic2)
# save to a file / send over the network
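# Hedged follow-up example (the file name 'user_info.json' is arbitrary): persist the formatted
# string and load it back; ensure_ascii=False keeps the Chinese characters readable in the file.
# with open('user_info.json', 'w', encoding='utf-8') as f:
#     f.write(json_dic2)
# with open('user_info.json', 'r', encoding='utf-8') as f:
#     print(json.loads(f.read()))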
|
python
|
__author__ = 'xubinggui'
class Student(object):
def __init__(self, name, score):
self.name = name
self.score = score
def print_score(self):
print(self.score)
bart = Student('Bart Simpson', 59)
bart.print_score()
|
python
|
import unittest
from app.models import Pitch, User
from flask_login import current_user
from app import db
class TestPitch(unittest.TestCase):
def setUp(self):
self.user_joe = User(
username='jack', password='password', email='[email protected]')
        self.new_pitch = Pitch(title="Test", pitch='This is a test', user=self.user_joe)
def tearDown(self):
Pitch.query.delete()
User.query.delete()
def test_instance(self):
self.assertTrue(isinstance(self.new_pitch, Pitch))
def test_check_instance_variables(self):
        self.assertEqual(self.new_pitch.title, "Test")
        self.assertEqual(self.new_pitch.pitch, 'This is a test')
        self.assertEqual(self.new_pitch.user, self.user_joe)
|
python
|
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from .unique_identifiable import CloudioUniqueIdentifiable
class CloudioObjectContainer(CloudioUniqueIdentifiable):
"""Interface to be implemented by all classes that can hold cloud.iO objects."""
__metaclass__ = ABCMeta
@abstractmethod
def attribute_has_changed_by_endpoint(self, attribute):
"""
:param attribute: Attribute which has changed.
:type attribute: CloudioAttribute
"""
pass
@abstractmethod
def attribute_has_changed_by_cloud(self, attribute):
"""The attribute has changed from the cloud.
:param attribute Attribute which has changed.
:type attribute CloudioAttribute
"""
pass
@abstractmethod
def is_node_registered_within_endpoint(self):
"""Returns true if the node the attribute is part of is registered within an endpoint, false otherwise.
:return True if the node is registered within the endpoint, false if not.
:rtype: bool
"""
pass
@abstractmethod
def get_objects(self):
"""Returns the list of child object contained inside this container.
:return Child objects
:rtype: {CloudioObject}
"""
pass
@abstractmethod
def get_parent_object_container(self):
"""Returns the object container's parent object container. Note that if the actual
object container is not embedded into another object controller, the method returns null.
"""
pass
@abstractmethod
def set_parent_object_container(self, object_container):
"""Sets the parent object container of the object container. Note that object containers
can not be moved, so this method throws a runtime exception if someone tries to move the
object container to a new parent or in the case the actual container is a node, which can
not be part of another object container.
"""
pass
@abstractmethod
def get_parent_node_container(self):
"""Returns the object container's parent node container. Note that if the actual object
container is not a node, the method returns null.
"""
pass
@abstractmethod
def set_parent_node_container(self, node_container):
"""Sets the parent node container of the object container (node). Note that object
containers can not be moved, so this method throws a runtime exception if someone tries
to move the object container to a new parent or in the case the actual container is not
a node.
"""
pass
@abstractmethod
def find_attribute(self, location):
"""Finds the given attribute inside the child objects using the given location
path (stack). If an attribute was found at the given location, a reference to that
attribute is returned, otherwise null is returned.
"""
pass
@abstractmethod
def find_object(self, location):
"""Finds the given object inside the objects tree using the given location
path (stack). If the object was found at the given location, a reference to
that object is returned, otherwise null is returned.
"""
pass
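# --- Hedged illustration (not part of cloud.iO) ------------------------------------------------
# The find_object()/find_attribute() docstrings above describe a lookup along a location path
# given as a stack. The standalone sketch below (class name and stack ordering are assumptions,
# not the real cloud.iO behaviour) shows one way such a traversal could work: the name closest
# to the root sits on top of the stack and is popped first.
class _DemoContainer(object):
    def __init__(self, objects=None, attributes=None):
        self.objects = objects or {}        # name -> child container
        self.attributes = attributes or {}  # name -> attribute value

    def find_object(self, location):
        node = self
        while location and node is not None:
            node = node.objects.get(location.pop())
        return node

    def find_attribute(self, location):
        node = self
        while len(location) > 1 and node is not None:
            node = node.objects.get(location.pop())
        return node.attributes.get(location.pop()) if node is not None and location else None


# Example: root -> 'pump' -> attribute 'speed'; the stack is built deepest-first.
_demo_root = _DemoContainer(objects={'pump': _DemoContainer(attributes={'speed': 42})})
print(_demo_root.find_attribute(['speed', 'pump']))  # 42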
|
python
|
import os, sys; sys.path.append(os.path.join("..", "..", ".."))
from pattern.en import parse, Text
# The easiest way to analyze the output of the parser is to create a Text.
# A Text is a "parse tree" of linked Python objects.
# A Text is essentially a list of Sentence objects.
# Each Sentence is a list of Word objects.
# Each Word can be part of a Chunk object, accessible with Word.chunk.
s = "I eat pizza with a silver fork."
s = parse(s)
s = Text(s)
print s[0].words # A list of all the words in the first sentence.
print s[0].chunks # A list of all the chunks in the first sentence.
print s[0].chunks[-1].words
print
for sentence in s:
for word in sentence:
print word.string, \
word.type, \
word.chunk, \
word.pnp
# A Text can be exported as an XML-string (among other).
print
print s.xml
|
python
|
import re
from enum import Enum
from operator import attrgetter
from re import RegexFlag
from typing import List, Union, Match, Dict, Optional
from annotation.models.models import Citation
from annotation.models.models_enums import CitationSource
from library.log_utils import report_message
from ontology.models import OntologyService, OntologyTerm
class MatchType(Enum):
NUMERIC = 1
ALPHA_NUMERIC = 2
SIMPLE_NUMBERS = 3
ENTIRE_UNTIL_SPACE = 4
class DbRefRegex:
_all_db_ref_regexes: List['DbRefRegex'] = []
def __init__(self,
db: str,
prefixes: Union[str, List[str]],
link: str,
match_type: MatchType = MatchType.NUMERIC,
min_length: int = 3,
expected_length: Optional[int] = None):
"""
        Creates an instance of an external id/link detection and automatically registers it with the complete collection.
        The end result allows us to scan text for any number of kinds of links.
:param db: An identifier uniquely associated with the DB
:param prefixes: A single string or array of strings that will be scanned for in text - IMPORTANT - these will be interpreted in regex
:param link: The URL the link will go to with ${1} being replaced with the value found after the prefix
:param match_type: Determines if the link is a series of numbers, alpha-numeric etc - be specific to avoid false positives
:param min_length: How long the ID part must be after the prefix, helps avoid false positives such as the gene rs1 being mistaken for SNP
"""
if isinstance(prefixes, str):
prefixes = [prefixes]
self.db = db
self.prefixes = prefixes
self.link = link
self.match_type = match_type
self.min_length = min_length or 1
self.expected_length = expected_length
self._all_db_ref_regexes.append(self)
def link_for(self, idx: int) -> str:
id_str = self.fix_id(str(idx))
return self.link.replace("${1}", id_str)
def fix_id(self, id_str: str) -> str:
if self.expected_length:
id_str = id_str.rjust(self.expected_length, '0')
return id_str
def __eq__(self, other):
# db should be unique in DbRefRegex
return self.db == other.db
def __hash__(self):
return hash(self.db)
class DbRegexes:
CLINGEN = DbRefRegex(db="ClinGen", prefixes="CA", link="http://reg.clinicalgenome.org/redmine/projects/registry/genboree_registry/by_caid?caid=CA${1}", match_type=MatchType.SIMPLE_NUMBERS)
CLINVAR = DbRefRegex(db="Clinvar", prefixes="VariationID", link="https://www.ncbi.nlm.nih.gov/clinvar/variation/${1}")
COSMIC = DbRefRegex(db="COSMIC", prefixes="COSM", link="https://cancer.sanger.ac.uk/cosmic/mutation/overview?id=${1}")
DOID = DbRefRegex(db="DOID", prefixes="DOID", link=OntologyService.URLS[OntologyService.DOID], min_length=OntologyService.EXPECTED_LENGTHS[OntologyService.DOID], expected_length=OntologyService.EXPECTED_LENGTHS[OntologyService.DOID])
GTR = DbRefRegex(db="GTR", prefixes="GTR", link="https://www.ncbi.nlm.nih.gov/gtr/tests/${1}/overview/")
HP = DbRefRegex(db="HP", prefixes=["HPO", "HP"], link=OntologyService.URLS[OntologyService.HPO], expected_length=OntologyService.EXPECTED_LENGTHS[OntologyService.HPO])
HGNC = DbRefRegex(db="HGNC", prefixes="HGNC", link=OntologyService.URLS[OntologyService.HGNC], expected_length=OntologyService.EXPECTED_LENGTHS[OntologyService.HGNC])
MEDGEN = DbRefRegex(db="MedGen", prefixes="MedGen", link="https://www.ncbi.nlm.nih.gov/medgen/?term=${1}", match_type=MatchType.ALPHA_NUMERIC)
MONDO = DbRefRegex(db="MONDO", prefixes="MONDO", link=OntologyService.URLS[OntologyService.MONDO], expected_length=OntologyService.EXPECTED_LENGTHS[OntologyService.MONDO])
NCBIBookShelf = DbRefRegex(db="NCBIBookShelf", prefixes=["NCBIBookShelf"], link="https://www.ncbi.nlm.nih.gov/books/${1}", match_type=MatchType.ALPHA_NUMERIC)
NIHMS = DbRefRegex(db="NIHMS", prefixes="NIHMS", link="https://www.ncbi.nlm.nih.gov/pubmed/?term=NIHMS${1}")
# smallest OMIM starts with a 1, so there's no 0 padding there, expect min length
OMIM = DbRefRegex(db="OMIM", prefixes=["OMIM", "MIM"], link=OntologyService.URLS[OntologyService.OMIM], min_length=OntologyService.EXPECTED_LENGTHS[OntologyService.OMIM], expected_length=OntologyService.EXPECTED_LENGTHS[OntologyService.OMIM])
ORPHA = DbRefRegex(db="Orphanet", prefixes=["ORPHANET", "ORPHA"], link=OntologyService.URLS[OntologyService.ORPHANET], expected_length=OntologyService.EXPECTED_LENGTHS[OntologyService.ORPHANET])
PMC = DbRefRegex(db="PMC", prefixes="PMCID", link="https://www.ncbi.nlm.nih.gov/pubmed/?term=PMC${1}")
PUBMED = DbRefRegex(db="PubMed", prefixes=["PubMed", "PMID", "PubMedCentral"], link="https://www.ncbi.nlm.nih.gov/pubmed/?term=${1}")
SNP = DbRefRegex(db="SNP", prefixes="rs", link="https://www.ncbi.nlm.nih.gov/snp/${1}", match_type=MatchType.SIMPLE_NUMBERS)
SNOMEDCT = DbRefRegex(db="SNOMED-CT", prefixes=["SNOMED-CT", "SNOMEDCT"], link="https://snomedbrowser.com/Codes/Details/${1}")
UNIPROTKB = DbRefRegex(db="UniProtKB", prefixes="UniProtKB", link="https://www.uniprot.org/uniprot/${1}", match_type=MatchType.ALPHA_NUMERIC)
HTTP = DbRefRegex(db="HTTP", prefixes="http:", link="http:${1}", match_type=MatchType.ENTIRE_UNTIL_SPACE)
HTTPS = DbRefRegex(db="HTTPS", prefixes="https:", link="https:${1}", match_type=MatchType.ENTIRE_UNTIL_SPACE)
FTP = DbRefRegex(db="FTP", prefixes="ftp:", link="ftp:${1}", match_type=MatchType.ENTIRE_UNTIL_SPACE)
class DbRefRegexResult:
def __init__(self, cregx: DbRefRegex, idx: str, match: Match):
self.cregx = cregx
self.idx = cregx.fix_id(idx)
self.match = match
self.internal_id = None
self.summary = None
# this is where we check our database to see if we know what this reference is about
if self.db in OntologyService.LOCAL_ONTOLOGY_PREFIXES:
term_id = f"{self.db}:{self.idx}"
if term := OntologyTerm.objects.filter(id=term_id).first():
self.summary = term.name
try:
if source := CitationSource.CODES.get(self.db):
citation, _ = Citation.objects.get_or_create(citation_source=source, citation_id=idx)
self.internal_id = citation.pk
except:
report_message(message=f"Could not resolve external DB reference for {self.db}:{self.idx}")
@property
def id_fixed(self):
return f"{self.db}:{self.cregx.fix_id(self.idx)}"
@property
def url(self):
return self.cregx.link.replace('${1}', self.idx)
@property
def idx_num(self):
"""
Attempt to convert the id to a number, only use for sorting.
Some ids have a version suffix, so using float for the sake of decimals
"""
try:
return float(self.idx)
except:
return 0
@property
def db(self):
return self.cregx.db
def to_json(self):
jsonny = {'id': '%s: %s' % (self.db, self.idx), 'db': self.db, 'idx': self.idx, 'url': self.url}
if self.summary:
jsonny['summary'] = self.summary
if self.internal_id:
jsonny['internal_id'] = self.internal_id
return jsonny
def __str__(self):
return f'{self.cregx.db}:{self.idx}'
_simple_numbers = re.compile('([0-9]{3,})')
_num_regex = re.compile('[:#\\s]*([0-9]+)')
_num_repeat_regex = re.compile('\\s*,[:#\\s]*([0-9]+)')
_word_regex = re.compile('[:# ]*([A-Za-z0-9_-]+)') # no repeats for words, too risky
_entire_until_space = re.compile('(.*?)(?:[)]|\\s|$|[.] )')
class DbRefRegexes:
def __init__(self, regexes: List[DbRefRegex]):
self.regexes = regexes
self.prefix_map: Dict[str, DbRefRegex] = dict()
prefixes: List[str] = list()
for regex in self.regexes:
for prefix in regex.prefixes:
prefix = prefix.lower()
self.prefix_map[prefix] = regex
prefixes.append(prefix)
self.prefix_regex = re.compile('(' + '|'.join(prefixes) + ')', RegexFlag.IGNORECASE)
def link_html(self, text: str) -> str:
db_matches = reversed(self.search(text, sort=False))
for db_match in db_matches:
span = db_match.match.span()
if text[span[0]] in (':', ',', ' ', '#'):
span = [span[0]+1, span[1]]
before, middle, after = text[0:span[0]], text[span[0]:span[1]], text[span[1]:]
text = f"{before}<a href='{db_match.url}'>{middle}</a>{after}"
return text
def search(self, text: str, default_regex: DbRefRegex = None, sort: bool = True) -> List[DbRefRegexResult]:
"""
@param text The text to be searched for ID patterns
@param default_regex If the field is expected to be a specific kind of id
(e.g. db_rs_id should default to SNP). Only gets used if no match can be found
and will look for just the number part, e.g. if db_rs_id is "23432" instead of "rs23432"
it will still work).
@param sort If true sorts the results by database and id, otherwise leaves them in order of discovery
"""
results: List[DbRefRegexResult] = list()
def append_result_if_length(db_regex: DbRefRegex, match: Optional[Match]) -> bool:
"""
:param db_regex: The Database Regex we were searching for
:param match: The regex match
:return: True if the ID looked valid and was recorded, False otherwise
"""
nonlocal results
if match and len(match.group(1)) >= db_regex.min_length:
results.append(DbRefRegexResult(cregx=db_regex, idx=match.group(1), match=match))
return True
return False
for match in re.finditer(self.prefix_regex, text):
prefix = match.group(1).lower()
db_regex = self.prefix_map[prefix]
find_from = match.end(0)
if db_regex.match_type == MatchType.SIMPLE_NUMBERS:
match = _simple_numbers.match(text, find_from)
append_result_if_length(db_regex, match)
elif db_regex.match_type == MatchType.ALPHA_NUMERIC:
match = _word_regex.match(text, find_from)
append_result_if_length(db_regex, match)
elif db_regex.match_type == MatchType.ENTIRE_UNTIL_SPACE:
match = _entire_until_space.match(text, find_from)
append_result_if_length(db_regex, match)
else:
match = _num_regex.match(text, find_from)
if append_result_if_length(db_regex, match):
find_from = match.end(0)
while True:
match = _num_repeat_regex.match(text, find_from)
if append_result_if_length(db_regex, match):
find_from = match.end(0)
else:
break
if not results and default_regex:
match = None
if default_regex.match_type == MatchType.SIMPLE_NUMBERS:
match = _word_regex.match(text)
else:
match = _num_regex.match(text)
append_result_if_length(default_regex, match)
if sort:
results.sort(key=attrgetter('db', 'idx_num', 'idx'))
return results
db_ref_regexes = DbRefRegexes(DbRefRegex._all_db_ref_regexes)
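# Hedged usage sketch (kept commented: DbRefRegexResult touches the Django ORM via OntologyTerm
# and Citation, so this needs a configured database; the sample text and ids below are made up):
# for _result in db_ref_regexes.search("Reported in PMID: 12345, see also OMIM 203450"):
#     print(_result, _result.url)
# print(db_ref_regexes.link_html("Reported in PMID: 12345"))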
|
python
|
import pprint
from flask import Flask
from flask import request
app = Flask(__name__)
@app.route('/', methods=['POST'])
def hello_world():
content = request.get_json(silent=True)
pprint.pprint(content)
return content
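# Hedged local check (not part of the original snippet): Flask's test client posts a JSON body;
# request.get_json(silent=True) parses it and the returned dict is serialized back as JSON
# (returning a dict from a view requires Flask >= 1.1).
if __name__ == '__main__':
    with app.test_client() as client:
        resp = client.post('/', json={'device': 'sensor-1', 'value': 42})
        print(resp.get_json())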
|
python
|
#! /usr/bin/env python
from __future__ import print_function
import tensorflow as tf
import os, collections, sys, subprocess, io
from abc import abstractmethod
import numpy as np
def flattern(A):
'''
Flatten a list containing a combination of strings and lists.
Copied from https://stackoverflow.com/questions/17864466/flatten-a-list-of-strings-and-lists-of-strings-and-lists-in-python.
'''
rt = []
for i in A:
if isinstance(i,list): rt.extend(flattern(i))
else: rt.append(i)
return rt
def save_item_to_id(item_to_id, file, encoding):
'''
Saves a item_to_id mapping to file.
'''
out = io.open(file, 'w', encoding=encoding)
for item, id_ in item_to_id.iteritems():
if item == '':
print('EMPTY ELEMENT')
if item == ' ':
print('SPACE')
out.write(u'{0}\t{1}\n'.format(item, id_))
out.close()
def load_item_to_id(file, encoding):
'''
Loads an item_to_id mapping and corresponding id_to_item mapping from file.
'''
item_to_id = {}
id_to_item = {}
for line in io.open(file, 'r', encoding=encoding):
l = line.strip().split()
item_to_id[l[0]] = int(l[1])
id_to_item[int(l[1])] = l[0]
return item_to_id, id_to_item
class LMData(object):
'''
The input data: words, batches across sentence boundaries.
'''
def __init__(self, config, eval_config, TRAIN, VALID, TEST):
'''
Arguments:
config: configuration dictionary, specifying all parameters used for training
eval_config: configuration dictionary, specifying all parameters used for testing
TRAIN: boolean indicating whether we want to train or not
VALID: boolean indicating whether we want to validate or not
TEST: boolean indicating whether we want to test or not
'''
self.config = config
self.eval_config = eval_config
self.TRAIN = TRAIN
self.VALID = VALID
self.TEST = TEST
# if we want to train with a limited vocabulary, words not in the vocabulary
# should already be mapped to UNK
# data files should be of format train_50k-unk.txt etc. for a 50k vocabulary
if config['vocab']:
train_file = "train_" + str(config['vocab']) + "k-unk.txt"
valid_file = "valid_" + str(config['vocab']) + "k-unk.txt"
test_file = "test_" + str(config['vocab']) + "k-unk.txt"
self.train_path = os.path.join(config['data_path'], train_file)
self.valid_path = os.path.join(config['data_path'], valid_file)
self.test_path = os.path.join(config['data_path'], test_file)
else:
self.train_path = os.path.join(config['data_path'], "train.txt")
self.valid_path = os.path.join(config['data_path'], "valid.txt")
self.test_path = os.path.join(config['data_path'], "test.txt")
self.batch_size = config['batch_size']
self.num_steps = config['num_steps']
self.eval_batch_size = eval_config['batch_size']
self.eval_num_steps = eval_config['num_steps']
self.iterator = 0
self.end_reached = False
# default encoding = utf-8, specify in config file if otherwise
if 'encoding' in self.config:
self.encoding = self.config['encoding']
else:
self.encoding = "utf-8"
self.id_to_item = {}
self.item_to_id = {}
# by default, unknown words are represented with <unk>
# if this is not the case for a certain dataset, add it here
if 'CGN' in self.config['data_path'] or \
'WSJ/88' in self.config['data_path']:
self.unk = '<UNK>'
self.replace_unk = '<unk>'
else:
self.unk = '<unk>'
self.replace_unk = '<UNK>'
if 'rescore' in self.config and isinstance(self.config['rescore'], str):
self.test_path = self.config['rescore']
elif 'predict_next' in self.config and isinstance(self.config['predict_next'], str):
self.test_path = self.config['predict_next']
elif 'debug2' in self.config and isinstance(self.config['debug2'], str):
self.test_path = self.config['debug2']
elif 'other_test' in self.config:
self.test_path = self.config['other_test']
if 'valid_as_test' in self.config:
self.test_path = self.valid_path
self.PADDING_SYMBOL = '@'
def read_items(self, filename):
'''
Returns a list of all WORDS in filename.
'''
with tf.gfile.GFile(filename, "r") as f:
# Wikitext: more than 1 sentence per line, also introduce <eos> at ' . '
# add here other datasets that contain more than 1 sentence per line
if "WikiText" in self.config['data_path']:
data = f.read().decode(self.encoding).replace("\n", " <eos> ").replace(" . "," <eos> ").split()
elif 'no_eos' in self.config:
data = f.read().decode(self.encoding).replace("\n", " ").split()
else:
data = f.read().decode(self.encoding).replace("\n", " <eos> ").split()
# make sure there is only 1 symbol for unknown words
data = [self.unk if word==self.replace_unk else word for word in data]
return data
@abstractmethod
def calc_longest_sent(self, all_data):
raise NotImplementedError("Abstract class.")
@abstractmethod
def padding(self, dataset, total_length):
raise NotImplementedError("Abstract class.")
@abstractmethod
def pad_data(self, all_data, max_length):
raise NotImplementedError("Abstract class.")
def build_vocab(self, filename):
'''
Returns an item-to-id and id-to-item mapping for all words (or characters) in filename.
Arguments:
filename: name of file for which the mapping will be built
Returns:
item_to_id mapping and id_to_item mapping
'''
data = self.read_items(filename)
counter = collections.Counter(data)
# counter.items() = list of the words in data + their frequencies, then sorted according to decreasing frequency
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
# words = list of all the words (in decreasing frequency)
items, _ = list(zip(*count_pairs))
# make a dictionary with a mapping from each word to an id; word with highest frequency gets lowest id etc.
item_to_id = dict(zip(items, range(len(items))))
# remove empty element and space
if '' in item_to_id:
item_to_id.pop('')
if ' ' in item_to_id and not 'char' in self.config:
item_to_id.pop(' ')
# reverse dictionary
id_to_item = dict(zip(range(len(items)), items))
# make sure there is a special token for unknown words
if not self.unk in item_to_id:
item_to_id[self.unk] = len(item_to_id)
id_to_item[len(id_to_item)] = self.unk
# add <bos>: used for sentence-level batches, or
        # for discourse-level models that are used for e.g. rescoring
item_to_id['<bos>'] = len(item_to_id)
id_to_item[len(id_to_item)] = '<bos>'
return item_to_id, id_to_item
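    # Hedged illustration (toy tokens, not from any corpus) of the mapping built above: the most
    # frequent item gets the lowest id, ties broken alphabetically, e.g.
    #   sorted(collections.Counter(['the', 'cat', 'sat', 'on', 'the', 'mat']).items(),
    #          key=lambda x: (-x[1], x[0]))
    #   -> [('the', 2), ('cat', 1), ('mat', 1), ('on', 1), ('sat', 1)]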
def extend_vocab(self, filename):
'''
If there already is a vocabulary, this function extends the vocabulary with words
found in the data file 'filename'.
'''
data = self.read_items(filename)
vocab_curr = set(data)
for word in vocab_curr:
if word not in self.item_to_id:
print(u'word {0} not yet seen'.format(word).encode(self.encoding))
self.item_to_id[word] = len(self.item_to_id)
self.id_to_item[len(self.id_to_item)] = word
def add_padding_symbol(self):
'''
Add the correct padding symbol to the vocabulary
'''
if self.PADDING_SYMBOL not in self.item_to_id:
self.item_to_id[self.PADDING_SYMBOL] = len(self.item_to_id)
self.id_to_item[len(self.id_to_item)] = self.PADDING_SYMBOL
# if the default symbol for padding is already in the vocabulary
else:
# another symbol should be specified in the config file
if not 'padding_symbol' in self.config:
raise ValueError("{0} used as padding symbol but occurs in text. " \
"Specify another padding symbol with 'padding_symbol' in the config file.".format(
self.PADDING_SYMBOL))
else:
self.PADDING_SYMBOL = self.config['padding_symbol']
# check whether the padding symbol specified in the config file occurs in the data or not
if self.PADDING_SYMBOL not in self.item_to_id:
self.item_to_id[self.PADDING_SYMBOL] = len(self.item_to_id)
self.id_to_item[len(self.id_to_item)] = self.PADDING_SYMBOL
else:
raise ValueError("The padding symbol specified in the config file ({0}) " \
"already occurs in the text.".format(self.PADDING_SYMBOL))
@abstractmethod
def build_ngram_vocab(self, filename):
raise NotImplementedError("Abstract class.")
@abstractmethod
def build_skipgram_vocab(self, filename, skip):
raise NotImplementedError("Abstract class.")
def file_to_item_ids(self, filename, item_to_id=None):
'''
Returns list of all words/characters (mapped to their ids) in the file,
either one long list or a list of lists per sentence.
Arguments:
filename: name of file for which the words should be mapped to their ids
Optional:
item_to_id: dictionary that should be used for the mapping (otherwise self.item_to_id is used)
'''
if item_to_id == None:
item_to_id = self.item_to_id
data = self.read_items(filename)
tmp_l = []
for w in data:
if w in item_to_id:
tmp_l.append(item_to_id[w])
else:
print(u'{0} not in item_to_id'.format(w).encode('utf-8'))
return [item_to_id[item] if item in item_to_id else item_to_id[self.unk] for item in data]
@abstractmethod
def file_to_ngram_ids(self, filename):
raise NotImplementedError("Abstract class.")
@abstractmethod
def file_to_skipgram_ids(self, filename):
raise NotImplementedError("Abstract class.")
def read_data(self):
'''
Makes sure there is a vocabulary and reads all necessary data.
Returns:
all_data: tuple of three lists : train_data, valid_data and test_data
'''
if 'read_vocab_from_file' in self.config:
# read vocabulary mapping from file
self.item_to_id, self.id_to_item = load_item_to_id(self.config['read_vocab_from_file'], self.encoding)
# check whether the data file contains words that are not yet in the vocabulary mapping
self.extend_vocab(self.train_path)
if 'per_sentence' in self.config:
self.add_padding_symbol()
else:
# if the vocabulary mapping is not saved on disk, make one based on the training data
self.item_to_id, self.id_to_item = self.build_vocab(self.train_path)
# sentence-level model or model that will be used for rescoring: needs padding symbol in vocabulary
if 'rescore_later' in self.config or 'per_sentence' in self.config:
self.add_padding_symbol()
# save the item_to_id mapping such that it can be re-used
if 'save_dict' in self.config:
save_item_to_id(self.item_to_id, '{0}.dict'.format(self.config['save_dict']), self.encoding)
# make a label file to visualize the embeddings
# with the correct labels (= words instead of ids) in tensorboard
self.label_file = os.path.join(self.config['save_path'], "labels.tsv")
# write label file
with io.open(self.label_file, 'w', encoding=self.encoding) as f:
for i in range(len(self.id_to_item)):
f.write(u'{0}\n'.format(self.id_to_item[i]))
# list of all words in training data converted to their ids
if self.TRAIN:
train_data = self.file_to_item_ids(self.train_path)
else:
train_data = []
# list of all words in validation data converted to their ids
if self.VALID:
valid_data = self.file_to_item_ids(self.valid_path)
else:
valid_data = []
# list of all words in test data converted to their ids
if self.TEST:
test_data = self.file_to_item_ids(self.test_path)
else:
test_data = []
all_data = (train_data, valid_data, test_data)
return all_data
def get_data(self):
'''
Retrieve the necessary data and vocabulary size.
'''
all_data = self.read_data()
return all_data, len(self.id_to_item), 0
def init_batching(self, data, test=False):
'''
Prepare for batching.
'''
if test:
batch_size = self.eval_batch_size
self.num_steps = self.eval_num_steps
else:
batch_size = self.batch_size
# beginning of data set: set self.end_reached to False (was set to True if another data set is already processed)
if self.iterator == 0:
self.end_reached = False
data_len = len(data)
# to divide data in batch_size batches, each of length batch_len
batch_len = data_len // batch_size
# number of samples that can be taken from the batch_len slices
self.num_samples = (batch_len // self.num_steps) - 1
# remove last part of the data that doesn't fit in the batch_size x num_steps samples
data = data[:batch_size * batch_len]
# convert to numpy array: batch_size x batch_len
self.data_array = np.array(data).reshape(batch_size, batch_len)
def get_batch(self):
'''
Gets a single batch.
Returns:
x: input data
y: target data
end_reached: boolean marking whether the end of the data file has been reached or not
'''
# take slice of batch_size x num_steps
x = self.data_array[:, self.iterator * self.num_steps :
(self.iterator * self.num_steps) + self.num_steps]
# targets = same slice but shifted one step to the right
y = self.data_array[:, (self.iterator * self.num_steps) +1 :
(self.iterator * self.num_steps) + self.num_steps + 1]
# if iterated over the whole dataset, set iterator to 0 to start again
if self.iterator >= self.num_samples:
self.iterator = 0
self.end_reached = True
# otherwise, increase count
else:
self.iterator += 1
return x, y, self.end_reached
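# Hedged toy illustration (numbers made up) of the batching scheme above: the flat id stream is
# reshaped to (batch_size, batch_len) and consecutive num_steps-wide slices give the inputs,
# with the targets shifted one position to the right:
#   ids = list(range(20)); batch_size, num_steps = 2, 3
#   arr = np.array(ids).reshape(batch_size, len(ids) // batch_size)
#   x0, y0 = arr[:, 0:num_steps], arr[:, 1:num_steps + 1]
#   # x0 = [[0 1 2], [10 11 12]], y0 = [[1 2 3], [11 12 13]]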
class charData(LMData):
'''
Train on character level rather than word level.
'''
def __init__(self, config, eval_config, TRAIN, VALID, TEST):
super(charData, self).__init__(config, eval_config, TRAIN, VALID, TEST)
def read_items(self, filename):
'''
Returns a list of all CHARACTERS in filename.
'''
with tf.gfile.GFile(filename, "r") as f:
data = ['<eos>' if x == '\n' else x for x in f.read().decode(self.encoding)]
return data
class wordSentenceData(LMData):
'''
Feed sentence per sentence to the network,
each sentence padded until the length of the longest sentence.
'''
def __init__(self, config, eval_config, TRAIN, VALID, TEST):
super(wordSentenceData, self).__init__(config, eval_config, TRAIN, VALID, TEST)
self.sentence_iterator = 0
def read_sentences(self, filename):
'''
Returns a list with all sentences in filename, each sentence is split in words.
'''
with tf.gfile.GFile(filename, "r") as f:
if "WikiText" in self.config['data_path']:
all_sentences = f.read().decode(self.encoding).replace("\n", "<eos>").replace(" . "," <eos> ").split("<eos>")
# this assumes that all other datasets contain 1 sentence per line
else:
all_sentences = f.read().decode(self.encoding).replace("\n", "<eos>").split("<eos>")
# remove empty element at the end
if all_sentences[-1] == '':
all_sentences = all_sentences[:-1]
# split sentence in words
for i in xrange(len(all_sentences)):
all_sentences[i] = all_sentences[i].split()
return all_sentences
def calc_longest_sent(self, all_data):
'''
Returns length of longest sentence occurring in all_data.
'''
max_length = 0
for dataset in all_data:
for sentence in dataset:
if len(sentence) > max_length:
max_length = len(sentence)
return max_length
def padding(self, dataset, total_length):
'''
Add <bos> and <eos> to each sentence in dataset + pad until max_length.
'''
seq_lengths = []
for sentence in dataset:
#seq_lengths.append(len(sentence)+1) # +1 ONLY <eos>
seq_lengths.append(len(sentence)+2) # +2 <bos> + <eos>
if 'hyp_with_ids' in self.config:
sentence.insert(1, self.item_to_id['<bos>']) # CHANGED
else:
sentence.insert(0, self.item_to_id['<bos>']) # CHANGED
# end of sentence symbol
sentence.append(self.item_to_id['<eos>'])
# pad rest of sentence until maximum length
num_pads = total_length - len(sentence)
for pos in xrange(num_pads):
if 'not_trained_with_padding' in self.config:
sentence.append(self.item_to_id[self.unk])
else:
try:
sentence.append(self.item_to_id[self.PADDING_SYMBOL])
except KeyError:
print("No padding symbol ({0}) in the dictionary. Either add 'not_trained_with_padding' " \
"in the config file if the model is trained without padding or " \
"specify the correct symbol with 'padding_symbol' in the config.".format(
self.PADDING_SYMBOL))
sys.exit(1)
return dataset, seq_lengths
def pad_data(self, all_data, max_length):
'''
Pad each dataset in all_data.
'''
# + 2 because <bos> and <eos> should be added
# + 1 for extra padding symbol to avoid having target sequences
# which end on the beginning of the next sentence
#total_length = max_length + 2
total_length = max_length + 3
if isinstance(all_data, tuple):
padded_all = ()
seq_lengths_all = ()
for dataset in all_data:
padded_dataset, seq_length = self.padding(dataset, total_length)
padded_all += (padded_dataset,)
seq_lengths_all += (seq_length,)
else:
padded_all, seq_lengths_all = self.padding(all_data, total_length)
return padded_all, seq_lengths_all
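    # Hedged worked example (ids made up): with item_to_id giving <bos>=1, <eos>=2 and the padding
    # symbol id 0, padding() turns the sentence [7, 8, 9] with total_length=7 into
    # [1, 7, 8, 9, 2, 0, 0] and records a sequence length of 5 (<bos> + 3 words + <eos>).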
def file_to_item_ids(self, filename):
data = self.read_sentences(filename)
data_ids = []
for sentence in data:
if 'hyp_with_ids' in self.config:
# do not convert hypothesis id to integer
hyp = [self.item_to_id[item] if item in self.item_to_id else self.item_to_id[self.unk] for item in sentence[1:]]
data_ids.append([sentence[0]] + hyp)
else:
data_ids.append([self.item_to_id[item] if item in self.item_to_id else self.item_to_id[self.unk] for item in sentence])
return data_ids
def get_data(self):
all_data = self.read_data()
if not '<bos>' in self.item_to_id:
self.item_to_id['<bos>'] = len(self.item_to_id)
self.id_to_item[len(self.id_to_item)] = '<bos>'
if 'max_length' in self.config:
max_length = self.config['max_length']
else:
max_length = self.calc_longest_sent(all_data)
# + 2 for <eos> and extra padding symbol at the end
#self.num_steps = max_length + 2
self.num_steps = max_length + 3
padded_data, seq_lengths = self.pad_data(all_data, max_length)
# return max_length+1 and not +2 because the last padding symbol is only there
# to make sure that the target sequence does not end with the beginning of the next sequence
#return padded_data, len(self.id_to_item), max_length+1, seq_lengths
return padded_data, len(self.id_to_item), max_length+2, seq_lengths
def init_batching(self, data, test=False):
if test:
self.batch_size = self.eval_batch_size
self.num_steps = self.eval_num_steps
length_sentence = self.num_steps
if self.iterator == 0:
self.end_reached = False
self.test = test
words = data[0]
seq_lengths = data[1]
if not self.test:
data_len = len(words)*len(words[0])
# to divide data in batch_size batches, each of length batch_len
batch_len = data_len // self.batch_size
# number of sentences that fit in 1 batch_len
self.num_sentences_batch = batch_len // (length_sentence+1)
# we want batch_len to be a multiple of num_steps (=size of padded sentence)
batch_len = self.num_sentences_batch * (length_sentence+1)
# remove last part of the data that doesn't fit in the batch_size x num_steps samples
words = words[:self.batch_size * self.num_sentences_batch]
# convert to numpy array: batch_size x batch_len*num_steps
self.data_array = np.array(words).reshape(
self.batch_size, self.num_sentences_batch*length_sentence)
# convert seq_lengths to numpy array
self.seql_array = np.array(seq_lengths)
else:
# only for testing, this assumes that batch_size and num_steps are 1!
self.len_data = len(words)*len(words[0])
self.len_sentence = len(words[0])
self.data_array = np.array(words).reshape(len(words), len(words[0]))
def get_batch(self):
if not self.test:
# take slice of batch_size x num_steps
x = self.data_array[:, self.iterator * self.num_steps :
(self.iterator * self.num_steps) + self.num_steps - 1]
# targets = same slice but shifted one step to the right
y = self.data_array[:, (self.iterator * self.num_steps) +1 :
(self.iterator * self.num_steps) + self.num_steps ]
# take slice of sequence lengths for all elements in the batch
seql = self.seql_array[self.iterator * self.batch_size : (self.iterator+1) * self.batch_size]
# if iterated over the whole dataset, set iterator to 0 to start again
if self.iterator >= self.num_sentences_batch:
self.iterator = 0
self.end_reached = True
# otherwise, increase count
else:
self.iterator += 1
else:
x = self.data_array[self.sentence_iterator, self.iterator: self.iterator + 1]
y = self.data_array[self.sentence_iterator, self.iterator + 1 : self.iterator + 2]
# num_steps = 1 so no sequence length needed
seql = [1]
if self.sentence_iterator == self.len_data / self.len_sentence and self.iterator == self.len_sentence - 1:
self.end_reached = True
# otherwise, increase count
else:
self.iterator += 1
if self.iterator == self.len_sentence - 1:
# end of file reached
if self.sentence_iterator >= (self.len_data / self.len_sentence) - 1:
self.end_reached = True
# end of sentence reached
else:
self.iterator = 0
self.sentence_iterator += 1
x = [x]
y = [y]
return x, y, self.end_reached, seql
class charSentenceData(wordSentenceData):
'''
Same as wordSentenceData, except that the input unit is a character.
'''
def __init__(self, config, eval_config, TRAIN, VALID, TEST):
super(charSentenceData, self).__init__(config, eval_config, TRAIN, VALID, TEST)
def read_sentences(self, filename):
        '''Returns a list with all sentences in filename; each sentence is split into words.'''
with tf.gfile.GFile(filename, "r") as f:
if "WikiText" in self.config['data_path']:
all_sentences = [x for x in f.read().decode(self.encoding).replace("\n", "<eos>").replace(
" . "," <eos> ").split("<eos>")]
else:
all_sentences = f.read().decode(self.encoding).replace("\n", "<eos>").split("<eos>")
# remove empty element at the end
if all_sentences[-1] == '':
all_sentences = all_sentences[:-1]
# split sentence in words
for i in xrange(len(all_sentences)):
all_sentences[i] = [x for x in all_sentences[i]]
return all_sentences
def read_items(self, filename):
'''
Returns a list of all CHARACTERS in filename.
'''
with tf.gfile.GFile(filename, "r") as f:
data = ['<eos>' if x == '\n' else x for x in f.read().decode(self.encoding)]
return data
class wordSentenceDataStream(wordSentenceData):
'''
Same as wordSentenceData but reads the data batch per batch instead of all at once.
'''
def __init__(self, config, eval_config, TRAIN, VALID, TEST):
super(wordSentenceDataStream, self).__init__(config, eval_config, TRAIN, VALID, TEST)
def calc_longest_sent(self, list_files):
'''
Calculates longest sentence based on list of files instead of already read data.
'''
max_length = 0
for f in list_files:
if os.path.isfile(f):
for line in io.open(f, 'r', encoding=self.encoding):
curr_length = len(line.strip().split(' '))
if curr_length > max_length:
max_length = curr_length
return max_length
def get_batch(self, f, test=False):
if test:
self.batch_size = self.eval_batch_size
end_reached = False
curr_batch = []
seq_lengths = []
for i in xrange(self.batch_size):
curr_sentence = f.readline().replace('\n',' <eos>')
if not curr_sentence:
end_reached = True
break
# if end of file is reached
if curr_sentence == '':
end_reached = True
f.close()
return None, None, end_reached, None
# input batch: convert words to indices
curr_sentence_idx = [self.item_to_id['<bos>']]
for w in curr_sentence.split(' '):
# ignore blanks
if w == '':
continue
elif w in self.item_to_id:
curr_sentence_idx.append(self.item_to_id[w])
# map OOV words to UNK-symbol
else:
curr_sentence_idx.append(self.item_to_id[self.unk])
# length of sentence (for dynamic rnn)
seq_lengths.append(len(curr_sentence_idx))
number_pads = self.max_length - len(curr_sentence_idx) + 1
padding = [self.item_to_id[self.PADDING_SYMBOL]]*number_pads
curr_sentence_idx.extend(padding)
curr_batch.append(curr_sentence_idx)
if end_reached:
return None, None, end_reached, None
else:
curr_batch_array = np.array(curr_batch)
x = curr_batch_array[:,:-1]
y = curr_batch_array[:,1:]
seq_lengths_array = np.array(seq_lengths)
return x, y, False, seq_lengths_array
def prepare_data(self):
if 'read_vocab_from_file' in self.config:
# read vocabulary mapping and maximum sentence length from file
self.item_to_id, self.id_to_item = load_item_to_id(self.config['read_vocab_from_file'], self.encoding)
if len(self.item_to_id) != self.config['vocab_size']:
raise IOError("The vocabulary size specified by 'vocab_size' ({0}) does not correspond \
to the size of the vocabulary file given ({1}).".format(
self.config['vocab_size'], len(self.item_to_id)))
self.max_length = int(open(os.path.join(self.config['data_path'], "max_sentence_length")).readlines()[0].strip())
else:
# build input vocabulary
self.item_to_id, self.id_to_item = self.build_vocab(self.train_path)
# get maximum length of sentence in all files
self.max_length = self.calc_longest_sent([self.train_path, self.valid_path, self.test_path])
# padding symbol needed
self.add_padding_symbol()
return (self.train_path, self.valid_path, self.test_path), len(self.item_to_id), self.max_length
def init_batching(self, data_path):
self.end_reached = False
data_file = io.open(data_path,"r", encoding=self.encoding)
return data_file
class charWordData(wordSentenceData):
'''
Character-level data, but per word (padded until the maximum word length).
Used for lm_char_rnn.
'''
def __init__(self, config, eval_config, TRAIN, VALID, TEST):
super(charWordData, self).__init__(config, eval_config, TRAIN, VALID, TEST)
def read_items(self, filename):
'''
Returns a list of all CHARACTERS in filename.
'''
with tf.gfile.GFile(filename, "r") as f:
# Wikitext: more than 1 sentence per line, also introduce <eos> at ' . '
if "WikiText" in self.config['data_path']:
data = [list(x) if (x != '<eos>' and x != self.unk) else x for x in f.read().decode(
self.encoding).replace("\n", " <eos> ").replace(" . "," <eos> ").split(" ")]
else:
data = [list(x) if (x != '<eos>' and x != self.unk) else x for x in f.read().decode(
self.encoding).replace("\n", " <eos> ").split(" ")]
data = flattern(data)
return data # single list with all characters in the file
def read_sentences(self, filename):
'''
        Returns a list with all words in filename; each word is split into characters.
'''
with tf.gfile.GFile(filename, "r") as f:
if "WikiText" in self.config['data_path']:
all_words = [list(word) if (word != self.unk and word != '<eos>') else word for word in f.read().decode(
self.encoding).replace("\n", " <eos> ").replace(" . "," <eos> ").split(" ")]
else:
# split word in characters if it is not <unk> or <eos>
all_words = [list(word) if (word != self.unk and word != '<eos>') else word for word in f.read().decode(
self.encoding).replace("\n", " <eos> ").split(" ")]
# remove empty elements
all_words = [word for word in all_words if word != []]
return all_words
def padding(self, dataset, total_length):
'''
Pad until max_length without adding <eos> symbol first.
'''
seq_lengths = []
        # total_length = max_length + 2 (inherited from wordSentenceData);
        # we do not add an <eos> symbol here (-1) and no extra padding symbol is
        # needed to give the last padding symbol a target (-2 in total)
total_length = total_length - 2
for word in dataset:
seq_lengths.append(len(word))
# pad rest of word until maximum length
num_pads = total_length - len(word)
for pos in xrange(num_pads):
word.append(self.item_to_id[self.PADDING_SYMBOL])
return dataset, seq_lengths
def file_to_item_ids(self, filename):
data = self.read_sentences(filename)
data_ids = []
for word in data:
if word == '<eos>' or word == self.unk:
data_ids.append([self.item_to_id[word]])
else:
data_ids.append([self.item_to_id[char] for char in word if char in self.item_to_id])
return data_ids
def get_data(self):
all_data = self.read_data()
self.add_padding_symbol()
max_length = self.calc_longest_sent(all_data)
self.max_length = max_length
#self.num_steps = max_length
#self.eval_num_steps = max_length
padded_data, seq_lengths = self.pad_data(all_data, max_length)
# return max_length+1 and not +2 because the last padding symbol is only there
# to make sure that the target sequence does not end with the beginning of the next sequence
return padded_data, len(self.id_to_item), max_length, seq_lengths
def init_batching(self, data, test=False):
if test:
self.batch_size = self.eval_batch_size
self.num_steps = self.eval_num_steps
#else:
#batch_size = self.batch_size
#num_steps = self.num_steps
if self.iterator == 0:
self.end_reached = False
self.test = test
words = data[0]
seq_lengths = data[1]
data_len = len(words)*len(words[0])
# to divide data in batch_size batches, each of length batch_len
batch_len = data_len // self.batch_size
# number of items in 1 batch_len = self.max_length (length of word) * self.num_steps (number of words)
        # subtract one because there is no target for the last word
self.num_words_batch = batch_len // (self.max_length*self.num_steps) - 1
# we want batch_len to be a multiple of num_steps (=size of padded sentence)
#batch_len = self.num_words_batch * self.num_steps #v1
batch_len = self.num_words_batch * self.max_length * self.num_steps
# only batch_size x batch_len words fit,
# divide by self.max_length because 'words' = list of lists (each max.length long)
words = words[:(self.batch_size * batch_len)/self.max_length]
# convert to numpy array
#self.data_array = np.array(words).reshape(self.batch_size, self.num_words_batch*self.num_steps) #v1
self.data_array = np.array(words).reshape(self.batch_size, batch_len)
# convert seq_lengths to numpy array
seq_lengths = seq_lengths[:(self.batch_size * batch_len)/self.max_length]
self.seql_array = np.array(seq_lengths).reshape(self.batch_size, self.num_steps*self.num_words_batch)
def get_batch(self):
# take slice of batch_size x num_steps
x = self.data_array[:, self.iterator * self.num_steps : (self.iterator * self.num_steps) + (self.num_steps*self.max_length)]
x = x.reshape(self.batch_size, self.num_steps, self.max_length)
y = self.data_array[:, (self.iterator * self.num_steps)+1 : (self.iterator * self.num_steps) + (self.num_steps*self.max_length) +1]
y = y.reshape(self.batch_size, self.num_steps, self.max_length)
# !!! TO DO: last element of each word is first character of next word --> correct this
# take slice of sequence lengths for all elements in the batch
seql = self.seql_array[:, self.iterator * self.num_steps : (self.iterator+1) * self.num_steps]
# if iterated over the whole dataset, set iterator to 0 to start again
if self.iterator >= self.num_words_batch:
self.iterator = 0
self.end_reached = True
# otherwise, increase count
else:
self.iterator += 1
return x, y, self.end_reached, seql
class wordSentenceDataRescore(wordSentenceData):
'''
Rescore N-best lists with model trained across sentence boundaries.
'''
def __init__(self, config, eval_config, TRAIN, VALID, TEST):
super(wordSentenceDataRescore, self).__init__(config, eval_config, TRAIN, VALID, TEST)
def get_data(self):
all_data = self.read_data()
max_length = self.config['num_steps'] - 3
padded_data, _ = self.pad_data(all_data, max_length)
# return max_length+2 and not +3 because the last padding symbol is only there
# to make sure that the target sequence does not end with the beginning of the next sequence
return padded_data, len(self.id_to_item), max_length+2
class charSentenceDataRescore(charSentenceData, wordSentenceDataRescore):
'''
Same as wordSentenceDataRescore but on character level.
'''
def __init__(self, config, eval_config, TRAIN, VALID, TEST):
super(charSentenceDataRescore, self).__init__(config, eval_config, TRAIN, VALID, TEST)
def file_to_item_ids(self, filename):
return wordSentenceDataRescore.file_to_item_ids(self, filename)
def get_data(self):
return wordSentenceDataRescore.get_data(self)
class charNGramData(LMData):
'''
Feed character n-grams to the network (but still predict words).
'''
def __init__(self, config, eval_config, TRAIN, VALID, TEST):
super(charNGramData, self).__init__(config, eval_config, TRAIN, VALID, TEST)
if not isinstance(self.config['char_ngram'],int):
raise IOError("Specify what n should be used for the character n-grams.")
else:
self.n = self.config['char_ngram']
self.special_symbols = ['<UNK>', '<unk>', '<eos>']
self.ngram_to_id = {}
self.id_to_ngram = {}
#if 'add_word' in self.config and 'input_vocab' in self.config:
if 'add_word' in self.config:
if not 'word_size' in self.config:
raise IOError("Specify the size that should be assigned to the word input (word_size).")
if not 'input_vocab_size' in self.config:
raise IOError("Specify the size of the word input vocabulary (input_vocab_size).")
self.input_item_to_id = {}
self.input_id_to_item = {}
def find_ngrams(self, data):
'''
Finds all ngrams in data.
Arguments:
data: list of all words in the training file
Returns:
freq_ngrams: dictionary containing all n-grams found + their frequency
'''
freq_ngrams = dict()
for word in data:
            # count a special symbol as a single whole item (not split into n-grams)
if word in self.special_symbols:
if word in freq_ngrams:
freq_ngrams[word] += 1
else:
freq_ngrams[word] = 1
else:
                # first n-gram: prepend '<bow>' to the start of the word
first_ngram = '<bow>'+word[:self.n-1]
if 'capital' in self.config:
first_ngram = first_ngram.lower()
if first_ngram in freq_ngrams:
freq_ngrams[first_ngram] += 1
else:
freq_ngrams[first_ngram] = 1
# n-grams in the middle of the word
for pos in xrange(len(word)):
# only add the ngram if it is long enough (end of the word: not long enough)
if len(word[pos:pos+self.n]) == self.n:
curr_ngram = word[pos:pos+self.n]
# if special marker for capital: only use lower case n-grams
if 'capital' in self.config:
curr_ngram = curr_ngram.lower()
# add ngram if not yet in set
if curr_ngram in freq_ngrams:
freq_ngrams[curr_ngram] += 1
else:
freq_ngrams[curr_ngram] = 1
# last n-gram: append '<eow>' to end of word
last_ngram = word[-1-self.n+2:]+'<eow>'
if 'capital' in self.config:
last_ngram = last_ngram.lower()
if last_ngram in freq_ngrams:
freq_ngrams[last_ngram] += 1
else:
freq_ngrams[last_ngram] = 1
return freq_ngrams
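    # Illustrative note (added): with n=3, the word "cats" would contribute the
    # n-grams '<bow>ca', 'cat', 'ats' and 'ts<eow>' to the counts above
    # (assuming no 'capital' marker is set in the config).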
def find_skipgrams(self, data, skip):
'''
Finds all skipgrams in data.
Arguments:
data: list of all words in the training file
skip: number of characters that should be skipped
Returns:
freq_ngrams: dictionary containing all skipgrams found + their frequency
'''
freq_skipgrams = dict()
for word in data:
            # count a special symbol as a single whole item (not split into skipgrams)
if word in self.special_symbols:
if word in freq_skipgrams:
freq_skipgrams[word] += 1
else:
freq_skipgrams[word] = 1
elif len(word) > 1:
                # first skipgram: prepend '<bow>' to the start of the word
first_skipgram = '<bow>'+word[skip]
if 'capital' in self.config:
first_skipgram = first_skipgram.lower()
if first_skipgram in freq_skipgrams:
freq_skipgrams[first_skipgram] += 1
else:
freq_skipgrams[first_skipgram] = 1
for pos in xrange(len(word)):
# only add the skipgram if it is long enough (end of the word: not long enough)
if len(word[pos:]) >= skip+2:
curr_skipgram = word[pos] + word[pos+1+skip]
# if special marker for capital: only use lower case n-grams
if 'capital' in self.config:
curr_skipgram = curr_skipgram.lower()
if curr_skipgram in freq_skipgrams:
freq_skipgrams[curr_skipgram] += 1
else:
freq_skipgrams[curr_skipgram] = 1
# append '<eow>' to end of word
last_skipgram = word[-1-skip]+'<eow>'
if 'capital' in self.config:
last_skipgram = last_skipgram.lower()
if last_skipgram in freq_skipgrams:
freq_skipgrams[last_skipgram] += 1
else:
freq_skipgrams[last_skipgram] = 1
return freq_skipgrams
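    # Illustrative note (added): with skip=1, the word "cat" would contribute the
    # skipgrams '<bow>a', 'ct' and 'a<eow>' to the counts above
    # (assuming no 'capital' marker is set in the config).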
def build_ngram_vocab(self, filename, skip=None):
'''
Reads the data and builds ngram-to-id mapping and id-to-ngram mapping.
Arguments:
            filename: data file from which the vocabulary is read
skip: if None, n-grams are read; if not None, 'skip' characters are skipped
'''
data = self.read_items(filename)
# find all n-grams/skipgram + their frequency
if skip != None:
freq_ngrams = self.find_skipgrams(data, skip)
else:
freq_ngrams = self.find_ngrams(data)
# for words that consist of only 1 character: add unigrams
# possible TO DO: if n > 2, what to do with words of 2 characters?
all_chars = set(''.join(data))
for word in data:
if word in all_chars:
if word in freq_ngrams:
freq_ngrams[word] += 1
else:
freq_ngrams[word] = 1
# if only ngrams with a frequency > ngram_cutoff have to be kept
if 'ngram_cutoff' in self.config:
if not isinstance(self.config['ngram_cutoff'],int):
raise ValueError("Specify what cutoff frequency should be used for the character n-grams.")
else:
freq_ngrams = {ngram:freq for ngram, freq in freq_ngrams.items() if freq > self.config['ngram_cutoff']}
ngrams = freq_ngrams.keys()
if 'capital' in self.config:
# special symbol to indicate whether the word contains (a) capital(s) or not
ngrams.append('<cap>')
# remove ngrams with capitals from vocabulary
ngrams = [gram for gram in ngrams if gram.islower()]
# unknown n-gram symbol
ngrams.append('<UNKngram>')
self.ngram_to_id = dict(zip(ngrams, range(len(ngrams))))
self.id_to_ngram = dict(zip(range(len(ngrams)), ngrams))
print('Size of n-gram vocabulary: {0}'.format(len(self.ngram_to_id)))
def map_ngrams_to_ids(self, ngram_repr, word):
'''
Maps all n-grams in the word to a count on the input vector.
Arguments:
ngram_repr: input vector
word: word that should be mapped to n-grams
Returns:
ngram_repr: input vector, with counts for all n-grams in 'word' added
'''
# first ngram
first_ngram = '<bow>'+word[:self.n-1]
if 'capital' in self.config:
first_ngram = first_ngram.lower()
# increase count at index of character ngram
if first_ngram in self.ngram_to_id:
ngram_repr[self.ngram_to_id[first_ngram]] += 1
else:
ngram_repr[self.ngram_to_id['<UNKngram>']] += 1
for pos in xrange(len(word)):
# not yet at the end of the word (otherwise the subword might be shorter than n)
if len(word[pos:pos+self.n]) == self.n:
curr_ngram = word[pos:pos+self.n]
# if special marker for capital: only use lower case n-grams
if 'capital' in self.config:
curr_ngram = curr_ngram.lower()
# increase count at index of character ngram
if curr_ngram in self.ngram_to_id:
ngram_repr[self.ngram_to_id[curr_ngram]] += 1
else:
ngram_repr[self.ngram_to_id['<UNKngram>']] += 1
# append '<eow>' to end of word
last_ngram = word[-1-self.n+2:]+'<eow>'
if 'capital' in self.config:
last_ngram = last_ngram.lower()
# increase count at index of character ngram
if last_ngram in self.ngram_to_id:
ngram_repr[self.ngram_to_id[last_ngram]] += 1
else:
ngram_repr[self.ngram_to_id['<UNKngram>']] += 1
return ngram_repr
def map_skipgrams_to_ids(self, ngram_repr, word, skip):
'''
Maps all skipgrams in the word to a count on the input vector.
Arguments:
ngram_repr: input vector
word: word that should be mapped to skipgrams
skip: number of characters that should be skipped
Returns:
ngram_repr: input vector, with counts for all skipgrams in 'word' added
'''
# first skipgram
first_skipgram = '<bow>'+word[skip]
if 'capital' in self.config:
first_skipgram = first_skipgram.lower()
# increase count at index of character skipgram
if first_skipgram in self.ngram_to_id:
ngram_repr[self.ngram_to_id[first_skipgram]] += 1
else:
ngram_repr[self.ngram_to_id['<UNKngram>']] += 1
for pos in xrange(len(word)):
# not yet at the end of the word (otherwise the subword might be shorter than n)
if len(word[pos:]) >= skip+2:
curr_skipgram = word[pos] + word[pos+1+skip]
# if special marker for capital: only use lower case n-grams
if 'capital' in self.config:
curr_skipgram = curr_skipgram.lower()
# increase count at index of character skipgram
if curr_skipgram in self.ngram_to_id:
ngram_repr[self.ngram_to_id[curr_skipgram]] += 1
else:
ngram_repr[self.ngram_to_id['<UNKngram>']] += 1
# append '<eow>' to end of word
last_skipgram = word[-1-skip]+'<eow>'
if 'capital' in self.config:
last_skipgram = last_skipgram.lower()
# increase count at index of character skipgram
if last_skipgram in self.ngram_to_id:
ngram_repr[self.ngram_to_id[last_skipgram]] += 1
else:
ngram_repr[self.ngram_to_id['<UNKngram>']] += 1
return ngram_repr
def file_to_ngram_ids(self, filename):
'''
Generates occurrence vectors for all words in the file.
Arguments:
filename: name of data file
Returns:
ngrams: a list of ngram_repr, which are numpy arrays containing the counts of each n-gram
'''
data = self.read_items(filename)
ngrams = []
for word in data:
# initialize zero vector of size of the ngram vocabulary
ngram_repr = np.zeros(len(self.ngram_to_id), dtype=np.float32)
# do not cut word in n-grams if it contains only 1 character or is a special symbol
if len(word) == 1 or word in self.special_symbols:
if word in self.ngram_to_id:
ngram_repr[self.ngram_to_id[word]] += 1
else:
ngram_repr[self.ngram_to_id['<UNKngram>']] += 1
else:
# if special marker for capital, check how many capitals the word has
if 'capital' in self.config:
num_capitals = sum(1 for char in word if char.isupper())
if num_capitals > 0:
# increase count at index of special capital marker
ngram_repr[self.ngram_to_id['<cap>']] += num_capitals
if not 'skipgram' in self.config:
ngram_repr = self.map_ngrams_to_ids(ngram_repr, word)
else:
ngram_repr = self.map_skipgrams_to_ids(ngram_repr, word, self.config['skipgram'])
ngrams.append(ngram_repr)
return ngrams
def read_data(self):
        # n-gram input: use data with full vocabulary, where words are not converted to <UNK>
train_path_full_vocab = os.path.join(self.config['data_path'], "train.txt")
valid_path_full_vocab = os.path.join(self.config['data_path'], "valid.txt")
test_path_full_vocab = os.path.join(self.config['data_path'], "test.txt")
if 'skipgram' in self.config:
if self.config['char_ngram'] != 2 or self.config['skipgram'] != 1:
raise NotImplementedError("Skipgrams have only been implemented for char_ngram = 2 and skipgram = 1.")
self.build_ngram_vocab(train_path_full_vocab, self.config['skipgram'])
else:
self.build_ngram_vocab(train_path_full_vocab)
# output vocabulary: use reduced vocabulary
self.item_to_id, self.id_to_item = self.build_vocab(self.train_path)
# combine character n-grams with word input
if 'add_word' in self.config:
# if input vocabulary is different from output vocabulary
if 'input_vocab' in self.config:
train_file = "train_" + str(self.config['input_vocab']) + "k-unk.txt"
valid_file = "valid_" + str(self.config['input_vocab']) + "k-unk.txt"
test_file = "test_" + str(self.config['input_vocab']) + "k-unk.txt"
else:
train_file = "train.txt"
valid_file = "valid.txt"
test_file = "test.txt"
input_train_path = os.path.join(self.config['data_path'], train_file)
input_valid_path = os.path.join(self.config['data_path'], valid_file)
input_test_path = os.path.join(self.config['data_path'], test_file)
# build vocab for input word representation
self.input_item_to_id, self.input_id_to_item = self.build_vocab(input_train_path)
# make a label file to visualize the embeddings
#with the correct labels (= words instead of ids) in tensorboard
self.label_file = os.path.join(self.config['save_path'], "labels.tsv")
# Write label file
with open(self.label_file,"w") as f:
for i in range(len(self.input_id_to_item)):
f.write('{0}\n'.format(self.input_id_to_item[i]))
# lists of all ngrams/words in training data converted to their ids
if self.TRAIN:
#if 'skipgram' in self.config:
# train_ngrams = self.file_to_skipgram_ids(train_path_full_vocab, self.config['skipgram'])
#else:
train_ngrams = self.file_to_ngram_ids(train_path_full_vocab)
if 'add_word' in self.config and 'input_vocab' in self.config:
train_input_words = self.file_to_item_ids(input_train_path, item_to_id=self.input_item_to_id)
train_words = self.file_to_item_ids(self.train_path)
else:
train_ngrams = []
train_words = []
train_input_words = []
# lists of all ngrams/words in validation data converted to their ids
if self.VALID:
#if 'skipgram' in self.config:
# valid_ngrams = self.file_to_skipgram_ids(valid_path_full_vocab, self.config['skipgram'])
#else:
valid_ngrams = self.file_to_ngram_ids(valid_path_full_vocab)
if 'add_word' in self.config and 'input_vocab' in self.config:
valid_input_words = self.file_to_item_ids(input_valid_path, item_to_id=self.input_item_to_id)
valid_words = self.file_to_item_ids(self.valid_path)
else:
valid_ngrams = []
valid_words = []
valid_input_words = []
# lists of all ngrams/words in test data converted to their ids
if self.TEST:
#if 'skipgram' in self.config:
# test_ngrams = self.file_to_skipgram_ids(test_path_full_vocab, self.config['skipgram'])
#else:
test_ngrams = self.file_to_ngram_ids(test_path_full_vocab)
if 'add_word' in self.config and 'input_vocab' in self.config:
test_input_words = self.file_to_item_ids(input_test_path, item_to_id=self.input_item_to_id)
test_words = self.file_to_item_ids(self.test_path)
else:
test_ngrams = []
test_words = []
test_input_words = []
if 'add_word' in self.config and 'input_vocab' in self.config:
train_words = (train_words, train_input_words)
valid_words = (valid_words, valid_input_words)
test_words = (test_words, test_input_words)
all_data = ((train_ngrams,train_words), (valid_ngrams,valid_words),(test_ngrams,test_words))
return all_data
def get_data(self):
all_data = self.read_data()
lengths = (len(self.id_to_ngram), len(self.id_to_item))
return all_data, lengths, 0
def init_batching(self, data, test=False):
if test:
batch_size = self.eval_batch_size
self.num_steps = self.eval_num_steps
else:
batch_size = self.batch_size
#self.num_steps = self.num_steps
ngram_data, word_data = data
if 'add_word' in self.config and 'input_vocab' in self.config:
word_data, input_word_data = word_data
input_size = self.config['input_size']
if self.iterator == 0:
self.end_reached = False
data_len = len(word_data)
# to divide data in batch_size batches, each of length batch_len
batch_len = data_len // batch_size
# number of samples that can be taken from the batch_len slices
if self.num_steps != 1:
self.num_samples = batch_len // self.num_steps
else:
self.num_samples = (batch_len // self.num_steps) - 1
# remove last part of the data that doesn't fit in the batch_size x num_steps samples
ngram_data = ngram_data[:batch_size * batch_len]
word_data = word_data[:batch_size * batch_len]
# for n-gram inputs: convert to numpy array: batch_size x batch_len x input_size
self.data_array_ngrams = np.array(ngram_data).reshape(batch_size, batch_len, input_size)
# for word outputs: convert to numpy array: batch_size x batch_len
self.data_array_words = np.array(word_data).reshape(batch_size, batch_len)
# if word representation is added to the input and input and output vocabulary are not the same
if 'add_word' in self.config and 'input_vocab' in self.config:
input_word_data = input_word_data[:batch_size * batch_len]
self.data_array_input_words = np.array(input_word_data).reshape(batch_size, batch_len)
self.batching_initialized = True
def get_batch(self):
if not self.batching_initialized:
raise ValueError("Batching is not yet initialized.")
# inputs = ngrams (take slice of batch_size x num_steps)
x = self.data_array_ngrams[:, self.iterator * self.num_steps : (self.iterator * self.num_steps) + self.num_steps]
if 'add_word' in self.config:
# different size for input and output vocabulary
if 'input_vocab' in self.config:
x_words = self.data_array_input_words[:, self.iterator * self.num_steps :
(self.iterator * self.num_steps) + self.num_steps]
# same size for input and output vocabulary
else:
x_words = self.data_array_words[:, self.iterator * self.num_steps :
(self.iterator * self.num_steps) + self.num_steps]
x = (x, x_words)
# targets = words (same slice but shifted one step to the right)
y = self.data_array_words[:, (self.iterator * self.num_steps) +1 :
(self.iterator * self.num_steps) + self.num_steps + 1]
self.iterator += 1
# if iterated over the whole dataset, set iterator to 0 to start again
if self.iterator >= self.num_samples:
self.iterator = 0
self.end_reached = True
return x, y, self.end_reached
|
python
|
# -*- coding: utf-8 -*-
""" RandOm Convolutional KErnel Transform (ROCKET)
"""
__author__ = ["Matthew Middlehurst", "Oleksii Kachaiev"]
__all__ = ["ROCKETClassifier"]
import numpy as np
from joblib import delayed, Parallel
from sklearn.base import clone
from sklearn.ensemble._base import _set_random_states
from sklearn.linear_model import RidgeClassifierCV
from sklearn.pipeline import make_pipeline
from sklearn.utils import check_random_state
from sklearn.utils.multiclass import class_distribution
from sktime.classification.base import BaseClassifier
from sktime.transformations.panel.rocket import Rocket
from sktime.utils.validation import check_n_jobs
from sktime.utils.validation.panel import check_X
from sktime.utils.validation.panel import check_X_y
import warnings
class ROCKETClassifier(BaseClassifier):
"""
    Classifier wrapper for the ROCKET transformer, using RidgeClassifierCV as
    the base classifier.
    Allows the creation of an ensemble of ROCKET classifiers to enable the
    generation of probabilities, at the expense of scalability.
Parameters
----------
num_kernels : int, number of kernels for ROCKET transform
(default=10,000)
    n_estimators : int, ensemble size, optional (default=None). When set
        to None (default) or 1, the classifier uses a single estimator rather
        than an ensemble.
random_state : int or None, seed for random, integer,
optional (default to no seed)
n_jobs : int, the number of jobs to run in parallel for `fit`,
optional (default=1)
Attributes
----------
estimators_ : array of individual classifiers
weights : weight of each classifier in the ensemble
weight_sum : sum of all weights
n_classes : extracted from the data
Notes
-----
@article{dempster_etal_2019,
author = {Dempster, Angus and Petitjean, Francois and Webb,
Geoffrey I},
title = {ROCKET: Exceptionally fast and accurate time series
classification using random convolutional kernels},
year = {2019},
journal = {arXiv:1910.13051}
}
Java version
https://github.com/uea-machine-learning/tsml/blob/master/src/main/java/
tsml/classifiers/hybrids/ROCKETClassifier.java
"""
# Capability tags
capabilities = {
"multivariate": True,
"unequal_length": False,
"missing_values": False,
}
def __init__(
self,
num_kernels=10000,
ensemble=None,
ensemble_size=25,
random_state=None,
n_estimators=None,
n_jobs=1,
):
self.num_kernels = num_kernels
self.random_state = random_state
self.n_jobs = n_jobs
self.n_estimators = n_estimators
# for compatibility only
self.ensemble = ensemble
self.ensemble_size = ensemble_size
# for compatibility only
if ensemble is not None and n_estimators is None:
self.n_estimators = ensemble_size
warnings.warn(
"ensemble and ensemble_size params are deprecated and will be "
"removed in future releases, use n_estimators instead",
PendingDeprecationWarning,
)
self.estimators_ = []
self.weights = []
self.weight_sum = 0
self.n_classes = 0
self.classes_ = []
self.class_dictionary = {}
super(ROCKETClassifier, self).__init__()
def fit(self, X, y):
"""
Build a single or ensemble of pipelines containing the ROCKET transformer and
RidgeClassifierCV classifier.
Parameters
----------
X : nested pandas DataFrame of shape [n_instances, 1]
Nested dataframe with univariate time-series in cells.
y : array-like, shape = [n_instances] The class labels.
Returns
-------
self : object
"""
X, y = check_X_y(X, y)
n_jobs = check_n_jobs(self.n_jobs)
self.n_classes = np.unique(y).shape[0]
self.classes_ = class_distribution(np.asarray(y).reshape(-1, 1))[0][0]
for index, class_val in enumerate(self.classes_):
self.class_dictionary[class_val] = index
if self.n_estimators is not None and self.n_estimators > 1:
base_estimator = _make_estimator(self.num_kernels, self.random_state)
self.estimators_ = Parallel(n_jobs=n_jobs)(
delayed(_fit_estimator)(
_clone_estimator(base_estimator, self.random_state), X, y
)
for _ in range(self.n_estimators)
)
for rocket_pipeline in self.estimators_:
weight = rocket_pipeline.steps[1][1].best_score_
self.weights.append(weight)
self.weight_sum += weight
else:
base_estimator = _make_estimator(self.num_kernels, self.random_state)
self.estimators_ = [_fit_estimator(base_estimator, X, y)]
self._is_fitted = True
return self
def predict(self, X):
if self.n_estimators is not None:
rng = check_random_state(self.random_state)
return np.array(
[
self.classes_[int(rng.choice(np.flatnonzero(prob == prob.max())))]
for prob in self.predict_proba(X)
]
)
else:
self.check_is_fitted()
return self.estimators_[0].predict(X)
def predict_proba(self, X):
self.check_is_fitted()
X = check_X(X)
if self.n_estimators is not None:
sums = np.zeros((X.shape[0], self.n_classes))
for n, clf in enumerate(self.estimators_):
preds = clf.predict(X)
for i in range(0, X.shape[0]):
sums[i, self.class_dictionary[preds[i]]] += self.weights[n]
dists = sums / (np.ones(self.n_classes) * self.weight_sum)
else:
dists = np.zeros((X.shape[0], self.n_classes))
preds = self.estimators_[0].predict(X)
for i in range(0, X.shape[0]):
dists[i, np.where(self.classes_ == preds[i])] = 1
return dists
# for compatibility
@property
def classifiers(self):
warnings.warn(
"classifiers attribute is deprecated and will be removed "
"in future releases, use estimators_ instead",
PendingDeprecationWarning,
)
return self.estimators_
def _fit_estimator(estimator, X, y):
return estimator.fit(X, y)
def _make_estimator(num_kernels, random_state):
return make_pipeline(
Rocket(num_kernels=num_kernels, random_state=random_state),
RidgeClassifierCV(alphas=np.logspace(-3, 3, 10), normalize=True),
)
def _clone_estimator(base_estimator, random_state=None):
estimator = clone(base_estimator)
if random_state is not None:
_set_random_states(estimator, random_state)
return estimator
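# --- Hedged usage sketch (added; not part of the original module) ------------
# A minimal, illustrative run on a tiny synthetic dataset in sktime's nested
# pandas format (one pd.Series per cell). The toy data and parameter values
# below are assumptions chosen only to keep the example small.
if __name__ == "__main__":
    import pandas as pd

    rng = np.random.RandomState(0)
    # 20 univariate series of length 50; class-1 series are shifted upwards
    series = [pd.Series(rng.randn(50) + label) for label in (0, 1) for _ in range(10)]
    X_toy = pd.DataFrame({"dim_0": series})
    y_toy = np.array([0] * 10 + [1] * 10)

    clf = ROCKETClassifier(num_kernels=100, random_state=0)
    clf.fit(X_toy, y_toy)
    print(clf.predict(X_toy)[:5])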
|
python
|
# Generated by Django 2.2.1 on 2019-05-29 20:37
import json
from django.db import migrations
def normalize_webhook_values(apps, schema_editor):
Channel = apps.get_model("api", "Channel")
for ch in Channel.objects.filter(kind="webhook").only("value"):
# The old format of url_down, url_up, post_data separated by newlines:
if not ch.value.startswith("{"):
parts = ch.value.split("\n")
url_down = parts[0]
url_up = parts[1] if len(parts) > 1 else ""
post_data = parts[2] if len(parts) > 2 else ""
ch.value = json.dumps(
{
"method_down": "POST" if post_data else "GET",
"url_down": url_down,
"body_down": post_data,
"headers_down": {},
"method_up": "POST" if post_data else "GET",
"url_up": url_up,
"body_up": post_data,
"headers_up": {},
}
)
ch.save()
continue
doc = json.loads(ch.value)
# Legacy "post_data" in doc -- use the legacy fields
if "post_data" in doc:
ch.value = json.dumps(
{
"method_down": "POST" if doc["post_data"] else "GET",
"url_down": doc["url_down"],
"body_down": doc["post_data"],
"headers_down": doc["headers"],
"method_up": "POST" if doc["post_data"] else "GET",
"url_up": doc["url_up"],
"body_up": doc["post_data"],
"headers_up": doc["headers"],
}
)
ch.save()
continue
class Migration(migrations.Migration):
dependencies = [("api", "0060_tokenbucket")]
operations = [
migrations.RunPython(normalize_webhook_values, migrations.RunPython.noop)
]
|
python
|
from easyidp.io.tests import test
import easyidp.io.metashape
import easyidp.io.pix4d
import easyidp.io.pcd
|
python
|
#!/usr/bin/python
# (C) 2005 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: [email protected]
# to discuss alternative licensing.
# -------------------------------------------------------------------------
#
# Following script is tested on python-2.4.3 and pycrypto-2.0.1
# It adds a new class securedComponent which is subclass of component
# All other classes are the same as explained in MiniAxon tutorial
from Crypto.Cipher import AES
class microprocess(object):
def __init__(self):
super(microprocess, self).__init__()
def main(self):
yield 1
class scheduler(microprocess):
def __init__(self):
super(scheduler, self).__init__()
self.active = []
#self.queue = []
self.newqueue = []
def main(self):
for i in range(100):
for current in self.active:
yield 1 #something ?
try:
ret = current.next()
if ret != -1:
self.newqueue.append(current)
except StopIteration:
pass
self.active = self.newqueue
self.newqueue = []
def activateMicroprocess(self, someprocess):
ret = someprocess.main()
self.newqueue.append(ret)
class component(microprocess):
def __init__(self):
super(component, self).__init__()
self.boxes = {"inbox":[] , "outbox":[]}
def send(self, value, boxname):
self.boxes[boxname].append(value)
def recv(self, boxname):
return self.boxes[boxname].pop()
def dataReady(self, boxname):
return len(self.boxes[boxname])
class secureComponent(component): # New class
def __init__(self):
super(secureComponent, self).__init__()
self.key = 'A simple testkey'
self.crypt_obj = AES.new(self.key, AES.MODE_ECB) # Simplest mode for testing
def send(self, value, boxname):
diff = len(value) % 16 # Data required in blocks of 16 bytes
        if diff != 0:
value = value + ( '~' * (16 - diff)) # For testing
encrypted_value = self.crypt_obj.encrypt(value)
super(secureComponent, self).send(encrypted_value, boxname)
def recv(self, boxname):
encrypted_value = super(secureComponent, self).recv(boxname)
value = self.crypt_obj.decrypt(encrypted_value)
        # strip the '~' padding added in send(); if no padding was found,
        # find() returns -1 and we keep the full value
        pad_start = value.find('~', len(value) - 16)
        if pad_start != -1:
            value = value[0:pad_start]
return value
class postman(microprocess):
def __init__(self, source, sourcebox, sink, sinkbox):
super(postman, self).__init__()
self.source = source
self.sourcebox = sourcebox
self.sink = sink
self.sinkbox = sinkbox
def main(self):
while 1:
yield 1
if self.source.dataReady(self.sourcebox):
data = self.source.recv(self.sourcebox)
self.sink.send(data, self.sinkbox)
#-------------------------------------------------------
# Testing
class Producer(secureComponent):
def __init__(self, message):
super(Producer, self).__init__()
self.message = message
def main(self):
count = 0
while 1:
yield 1
count += 1
msg = self.message + str(count)
self.send(msg, "outbox")
class Consumer(secureComponent):
def main(self):
while 1:
yield 1
if self.dataReady("inbox"):
data = self.recv("inbox")
print data
p = Producer("Hello World - test ")
c = Consumer()
delivery_girl = postman(p, "outbox", c, "inbox")
myscheduler = scheduler()
myscheduler.activateMicroprocess(p)
myscheduler.activateMicroprocess(c)
myscheduler.activateMicroprocess(delivery_girl)
for _ in myscheduler.main():
pass
## class printer(microprocess):
## def __init__(self, string):
## super(printer, self).__init__()
## self.string = string #String to be printed
## def main(self):
## while 1:
## yield 1
## print self.string
## X = printer("Hello World")
## Y = printer("Game Over")
## myscheduler = scheduler()
## myscheduler.activateMicroprocess(X)
## myscheduler.activateMicroprocess(Y)
## for _ in myscheduler.main():
## pass
|
python
|
import pytest
from channels.generic.websocket import (
AsyncJsonWebsocketConsumer, AsyncWebsocketConsumer, JsonWebsocketConsumer, WebsocketConsumer,
)
from channels.testing import WebsocketCommunicator
# @pytest.mark.asyncio
# async def test_websocket_consumer():
# """
# Tests that WebsocketConsumer is implemented correctly.
# """
# results = {}
#
# class TestConsumer(WebsocketConsumer):
# def connect(self):
# results["connected"] = True
# self.accept()
#
# def receive(self, text_data=None, bytes_data=None):
# results["received"] = (text_data, bytes_data)
# self.send(text_data=text_data, bytes_data=bytes_data)
#
# def disconnect(self, code):
# results["disconnected"] = code
#
# # Test a normal connection
# communicator = WebsocketCommunicator(TestConsumer, "/testws/")
# connected, _ = await communicator.connect()
# assert connected
# assert "connected" in results
# # Test sending text
# await communicator.send_to(text_data="hello")
# response = await communicator.receive_from()
# assert response == "hello"
# assert results["received"] == ("hello", None)
# # Test sending bytes
# await communicator.send_to(bytes_data=b"w\0\0\0")
# response = await communicator.receive_from()
# assert response == b"w\0\0\0"
# assert results["received"] == (None, b"w\0\0\0")
# # Close out
# await communicator.disconnect()
# assert "disconnected" in results
@pytest.mark.asyncio
async def test_async_websocket_consumer():
"""
Tests that AsyncWebsocketConsumer is implemented correctly.
"""
results = {}
class TestConsumer(AsyncWebsocketConsumer):
async def connect(self):
results["connected"] = True
await self.accept()
async def receive(self, text_data=None, bytes_data=None):
results["received"] = (text_data, bytes_data)
await self.send(text_data=text_data, bytes_data=bytes_data)
async def disconnect(self, code):
results["disconnected"] = code
# Test a normal connection
communicator = WebsocketCommunicator(TestConsumer, "/testws/")
connected, _ = await communicator.connect()
assert connected
assert "connected" in results
# Test sending text
await communicator.send_to(text_data="hello")
response = await communicator.receive_from()
assert response == "hello"
assert results["received"] == ("hello", None)
# Test sending bytes
await communicator.send_to(bytes_data=b"w\0\0\0")
response = await communicator.receive_from()
assert response == b"w\0\0\0"
assert results["received"] == (None, b"w\0\0\0")
# Close out
await communicator.disconnect()
assert "disconnected" in results
# @pytest.mark.asyncio
# async def test_json_websocket_consumer():
# """
# Tests that JsonWebsocketConsumer is implemented correctly.
# """
# results = {}
#
# class TestConsumer(JsonWebsocketConsumer):
# def connect(self):
# self.accept()
#
# def receive_json(self, data=None):
# results["received"] = data
# self.send_json(data)
#
# # Open a connection
# communicator = WebsocketCommunicator(TestConsumer, "/testws/")
# connected, _ = await communicator.connect()
# assert connected
# # Test sending
# await communicator.send_json_to({"hello": "world"})
# response = await communicator.receive_json_from()
# assert response == {"hello": "world"}
# assert results["received"] == {"hello": "world"}
# # Test sending bytes breaks it
# await communicator.send_to(bytes_data=b"w\0\0\0")
# with pytest.raises(ValueError):
# await communicator.wait()
#
#
@pytest.mark.asyncio
async def test_async_json_websocket_consumer():
"""
Tests that AsyncJsonWebsocketConsumer is implemented correctly.
"""
results = {}
class TestConsumer(AsyncJsonWebsocketConsumer):
async def connect(self):
await self.accept()
async def receive_json(self, data=None):
results["received"] = data
await self.send_json(data)
# Open a connection
communicator = WebsocketCommunicator(TestConsumer, "/testws/")
connected, _ = await communicator.connect()
assert connected
# Test sending
await communicator.send_json_to({"hello": "world"})
response = await communicator.receive_json_from()
assert response == {"hello": "world"}
assert results["received"] == {"hello": "world"}
# Test sending bytes breaks it
await communicator.send_to(bytes_data=b"w\0\0\0")
with pytest.raises(ValueError):
await communicator.wait()
|
python
|
from PySide6.QtCore import QAbstractTableModel, Qt
class PandasModel(QAbstractTableModel):
def __init__(self, data):
super().__init__()
self._data = data
def rowCount(self, index):
return self._data.shape[0]
    def columnCount(self, parent=None):
return self._data.shape[1]
def data(self, index, role=Qt.DisplayRole):
if index.isValid():
if role == Qt.DisplayRole or role == Qt.EditRole:
value = self._data.iloc[index.row(), index.column()]
return str(value)
def setData(self, index, value, role):
if role == Qt.EditRole:
self._data.iloc[index.row(), index.column()] = value
return True
return False
def headerData(self, col, orientation, role):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return self._data.columns[col]
def flags(self, index):
return Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
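# --- Hedged usage sketch (added; not part of the original class) -------------
# Minimal illustration of attaching the model to a QTableView; the sample
# DataFrame is made up, and a pandas import is assumed to be available.
if __name__ == "__main__":
    import sys
    import pandas as pd
    from PySide6.QtWidgets import QApplication, QTableView

    app = QApplication(sys.argv)
    df = pd.DataFrame({"name": ["alpha", "beta"], "value": [1, 2]})
    view = QTableView()
    view.setModel(PandasModel(df))
    view.show()
    sys.exit(app.exec())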
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
__author__ = "Gabriel Zapodeanu TME, ENB"
__email__ = "[email protected]"
__version__ = "0.1.0"
__copyright__ = "Copyright (c) 2019 Cisco and/or its affiliates."
__license__ = "Cisco Sample Code License, Version 1.1"
import difflib
import urllib3
from urllib3.exceptions import InsecureRequestWarning # for insecure https warnings
urllib3.disable_warnings(InsecureRequestWarning) # disable insecure https warnings
def compare_configs(cfg1, cfg2):
"""
This function, using the unified diff function, will compare two config files and identify the changes.
'+' or '-' will be prepended in front of the lines with changes
:param cfg1: old configuration file path and filename
:param cfg2: new configuration file path and filename
:return: text with the configuration lines that changed. The return will include the configuration for the sections
that include the changes
"""
# open the old and new configuration files
f1 = open(cfg1, 'r')
old_cfg = f1.readlines()
f1.close()
f2 = open(cfg2, 'r')
new_cfg = f2.readlines()
f2.close()
# compare the two specified config files {cfg1} and {cfg2}
d = difflib.unified_diff(old_cfg, new_cfg, n=9)
# create a diff_list that will include all the lines that changed
# create a diff_output string that will collect the generator output from the unified_diff function
diff_list = []
diff_output = ''
for line in d:
diff_output += line
if line.find('Current configuration') == -1:
if line.find('Last configuration change') == -1:
if (line.find('+++') == -1) and (line.find('---') == -1):
if (line.find('-!') == -1) and (line.find('+!') == -1):
if line.startswith('+'):
diff_list.append('\n' + line)
elif line.startswith('-'):
diff_list.append('\n' + line)
# process the diff_output to select only the sections between '!' characters for the sections that changed,
# replace the empty '+' or '-' lines with space
diff_output = diff_output.replace('+!', '!')
diff_output = diff_output.replace('-!', '!')
diff_output_list = diff_output.split('!')
all_changes = []
for changes in diff_list:
for config_changes in diff_output_list:
if changes in config_changes:
if config_changes not in all_changes:
all_changes.append(config_changes)
# create a config_text string with all the sections that include changes
config_text = ''
for items in all_changes:
config_text += items
return config_text
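# --- Hedged usage example (added; not part of the original script) -----------
# The two file names below are placeholders for an old and a new saved
# device configuration snapshot.
if __name__ == '__main__':
    changed_sections = compare_configs('config_old.txt', 'config_new.txt')
    print(changed_sections)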
|
python
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import re
import logging
from rapidsms.apps.base import AppBase
from .models import Location
logger = logging.getLogger(__name__)
class App(AppBase):
PATTERN = re.compile(r"^(.+)\b(?:at)\b(.+?)$")
def __find_location(self, text):
try:
# check for a location code first
return Location.objects.get(slug__iexact=text)
# nothing else is supported, for now!
except Location.DoesNotExist:
return None
def parse(self, msg):
# if this message ends in "at SOMEWHERE",
# we have work to do. otherwise, ignore it
m = self.PATTERN.match(msg.text)
if m is not None:
# resolve the string into a Location object
# (or None), and attach it to msg for other
# apps to deal with
text = m.group(2).strip()
# split the text by space to find if it has a village
# locCode,village = text.split()
# location = self.__find_location(locCode)
# location.village = village
# msg.location = location
msg.location = self.__find_location(text)
# strip the location tag from the message,
# so other apps don't have to deal with it
msg.text = m.group(1)
# we should probably log this crazy behavior...
logger.info("Stripped Location code: %s" % text)
logger.info("Message is now: %s" % msg.text)
|
python
|
class Weapon:
def __init__(self, name, damage, range):
self.name = name
self.damage = damage
self.range = range
    def hit(self, actor, target):
        if target.is_alive():
            # use absolute offsets so the Manhattan-distance check also works
            # when the target is to the left of / below the actor
            if (self.range >= abs(target.pos_x - actor.pos_x) +
                    abs(target.pos_y - actor.pos_y)):
                print(f'The enemy took {self.damage} damage from {self.name}')
                target.hp -= self.damage
            else:
                print(f'The enemy is too far away for {self.name}')
        else:
            print('The enemy is already defeated')
def __str__(self):
return self.name
class BaseCharacter:
def __init__(self, x, y, hp):
self.pos_x = x
self.pos_y = y
self.hp = hp
def move(self, delta_x, delta_y):
self.pos_x += delta_x
self.pos_y += delta_y
def is_alive(self):
return self.hp > 0
def get_damage(self, amount):
if self.is_alive():
self.hp -= amount
def get_coords(self):
return self.pos_x, self.pos_y
class BaseEnemy(BaseCharacter):
def __init__(self, pos_x, pos_y, weapon, hp):
super().__init__(pos_x, pos_y, hp)
self.weapon = weapon
def hit(self, target):
if target.__class__.__name__ == 'MainHero':
self.weapon.hit(self, target)
else:
            print('I can only hit the Main Hero')
def __str__(self):
        return f'Enemy at position ({self.pos_x}, {self.pos_y}) with weapon {self.weapon.name}'
class MainHero(BaseCharacter):
def __init__(self, pos_x, pos_y, name, hp):
super().__init__(pos_x, pos_y, hp)
self.name = name
self.weapons = []
self.current_weapon = 0
def hit(self, target):
if self.weapons:
if target.__class__.__name__ == 'BaseEnemy':
self.weapons[self.current_weapon].hit(self, target)
else:
                print('I can only hit an Enemy')
else:
            print('I am unarmed')
def add_weapon(self, weapon):
if weapon.__class__.__name__ == 'Weapon':
self.weapons.append(weapon)
            print(f'Picked up {weapon}')
else:
            print('That is not a weapon')
def next_weapon(self):
if len(self.weapons) == 1:
            print('I only have one weapon')
elif len(self.weapons) > 1:
self.current_weapon += 1
if self.current_weapon == len(self.weapons):
self.current_weapon = 0
            print(f'Switched weapon to {self.weapons[self.current_weapon]}')
else:
            print('I am unarmed')
def heal(self, amount):
self.hp += amount
if self.hp > 200:
self.hp = 200
        print(f'Healed up, HP is now {self.hp}')
weapon1 = Weapon("Короткий меч", 5, 1)
weapon2 = Weapon("Длинный меч", 7, 2)
weapon3 = Weapon("Лук", 3, 10)
weapon4 = Weapon("Лазерная орбитальная пушка", 1000, 1000)
princess = BaseCharacter(100, 100, 100)
archer = BaseEnemy(50, 50, weapon3, 100)
armored_swordsman = BaseEnemy(10, 10, weapon2, 500)
archer.hit(armored_swordsman)
armored_swordsman.move(10, 10)
print(armored_swordsman.get_coords())
main_hero = MainHero(0, 0, "Король Артур", 200)
main_hero.hit(armored_swordsman)
main_hero.next_weapon()
main_hero.add_weapon(weapon1)
main_hero.hit(armored_swordsman)
main_hero.add_weapon(weapon4)
main_hero.hit(armored_swordsman)
main_hero.next_weapon()
main_hero.hit(princess)
main_hero.hit(armored_swordsman)
main_hero.hit(armored_swordsman)
|
python
|
# flake8: noqa
#
# Root of the SAM package where we expose public classes & methods for other consumers of this SAM Translator to use.
# This is essentially our Public API
#
|
python
|
from typing import List
class Solution:
    def trap(self, height: List[int]) -> int:
        n = len(height)
        if n <= 2:
            return 0
        stack = []
        ans = 0
        for i, num in enumerate(height):
            # pop bars lower than the current one; each popped bar is the floor
            # of a water pocket bounded by the new stack top and the current bar
            while stack and height[stack[-1]] < num:
                cur = stack.pop()
                if stack:
                    ans += (min(height[stack[-1]], num) - height[cur]) * (i - stack[-1] - 1)
            stack.append(i)
        return ans
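# --- Hedged usage example (added; not part of the original solution) ---------
# Quick sanity check with the classic LeetCode 42 sample input, which traps
# 6 units of water.
if __name__ == "__main__":
    print(Solution().trap([0, 1, 0, 2, 1, 0, 1, 3, 2, 1, 2, 1]))  # expected: 6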
|
python
|
import random
def int_to_list(n):
n=str(n)
l=list(n)
return l
class CowsAndBulls:
def __init__(self):
self.number = ""
self.digits = 0
self.active = False
def makeRandom(self, digit):
digits = set(range(10))
first = random.randint(1, 9)
        # sample from a sorted list so this also works on Python 3.11+, where random.sample() rejects sets
        second_to_last = random.sample(sorted(digits - {first}), digit - 1)
botNumber = str(first) + ''.join(map(str, second_to_last))
return botNumber
    @staticmethod
    def compareNumbers(numA, numB):
if(len(numA)!=len(numB)):
return -1, -1
else:
bulls=0
cows=0
n1 = int(numA)
n2 = int(numB)
l1 = int_to_list(n1)
l2 = int_to_list(n2)
i = 0
for digit in l1:
if digit == l2[i]:
bulls+=1
l2[i]='a'
i+=1
for digit in l1:
for dig in l2:
if dig==digit:
cows+=1
return bulls, cows
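# --- Hedged usage sketch (added; not part of the original class) -------------
# One round of the comparison these helpers appear intended for; the fixed
# guess "1234" is made up for illustration.
if __name__ == "__main__":
    game = CowsAndBulls()
    secret = game.makeRandom(4)
    bulls, cows = CowsAndBulls.compareNumbers(secret, "1234")
    print(f"Secret: {secret} -> bulls: {bulls}, cows: {cows}")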
|
python
|
"""
Build a parse tree to evaluate a fully parenthesised mathematical expression, ((7+3)∗(5−2)) = ?
*
/ \
+ -
/ \ / \
7 3 5 2
"""
from datastruct.collections import HashTable
from datastruct.abstract import Stack
from datastruct.tree import BinaryTree
import operator
def build_parse_tree(fpexp):
"""
:param fpexp: fully parenthesised expression
:return: BinaryTree
"""
fpexp = fpexp.split()
tree = BinaryTree()
pStack = Stack() # with the help of stack, we can get the parent node back with respect to current node
pStack.push(tree)
currTree = tree
for c in fpexp:
if c == '(': # insert a new subtree
currTree.insertLeft(None)
pStack.push(currTree)
currTree = currTree.left
elif c in '+-*/': # c is an operator, change root value and descent to right subtree
currTree.key = c
currTree.insertRight(None)
pStack.push(currTree)
currTree = currTree.right
elif c == ')': # ths subtree has been filled up with operand and operators
currTree = pStack.pop()
else: # an operand, change root value and back to parent node
currTree.key = int(c)
currTree = pStack.pop()
return tree
def evaluate(parseTree: BinaryTree):
op = HashTable(11)
op['+'] = operator.add
op['-'] = operator.sub
op['*'] = operator.mul
op['/'] = operator.truediv
if parseTree.left and parseTree.right:
a = evaluate(parseTree.left)
b = evaluate(parseTree.right)
fn = op[parseTree.key]
return fn(a, b)
else:
return parseTree.key
if __name__ == '__main__':
pt = build_parse_tree("( ( 10 + 5 ) * 3 )")
print(pt)
print(evaluate(pt))
|
python
|
#! /usr/bin/env python
import re
import csv
import click
import numpy as np
from scipy.stats import spearmanr
from hivdbql import app
from hivdbql.utils import dbutils
from hivdbql.models.isolate import CRITERIA_SHORTCUTS
np.seterr(divide='raise', invalid='raise')
db = app.db
models = app.models
GENE2DRUGCLASS = {
'PR': 'PI',
'RT': 'RTI',
'IN': 'INSTI'
}
MUTATION_PATTERN = re.compile(r'^[A-Z]?(\d+)([A-Z*_-]+)$')
def read_mutations(fp):
mutations = set()
for line in fp:
line = line.strip()
match = MUTATION_PATTERN.match(line)
if line and match:
pos, aa = match.groups()
mutations.add((int(pos), aa))
orderedmuts = sorted(mutations)
return {m: orderedmuts.index(m) for m in orderedmuts}
def calc_spearman(both, m0only, m1only, none):
dataset = (
[(1, 1)] * both + [(1, 0)] * m0only +
[(0, 1)] * m1only + [(0, 0)] * none
)
return spearmanr(dataset)
@click.command()
@click.argument('input_mutations_file', type=click.File('r'))
@click.argument('output_file', type=click.File('w'))
@click.option('--include-mixture', is_flag=True,
help='Include specified mutations from mixtures')
@click.option('--include-zeros', is_flag=True,
help='Include sequence without any of the specified mutations')
@click.option('--species', type=click.Choice(['HIV1', 'HIV2']),
default='HIV1', help='specify an HIV species')
@click.option('--gene', type=click.Choice(['PR', 'RT', 'IN']),
help='specify an HIV gene')
@click.option('--filter', type=click.Choice(CRITERIA_SHORTCUTS.keys()),
multiple=True, default=('NO_CLONES', 'NO_QA_ISSUES',
'SANGER_ONLY'),
show_default=True, help='specify filter criteria')
def mutation_corellation(input_mutations_file, output_file,
include_mixture, include_zeros,
species, gene, filter):
mutations = read_mutations(input_mutations_file)
mutationitems = sorted(mutations.items(), key=lambda i: i[1])
nummuts = len(mutations)
writer = csv.writer(output_file)
matrix = np.zeros([nummuts, nummuts, 0b100], dtype=np.int64)
writer.writerow(['MutX', 'MutY', '#XY', '#X',
'#Y', '#Null', 'Rho', 'P'])
drugclass = GENE2DRUGCLASS[gene]
# query = models.Isolate.make_query(
# 'HIV1', 'INSTI', 'all', ['NO_CLONES',
# 'NO_QA_ISSUES',
# 'PUBLISHED_ONLY'])
query = (
models.Patient.query
.filter(models.Patient.isolates.any(db.and_(
*models.Isolate.make_criteria(species, drugclass, 'art', filter)
)))
.options(db.selectinload(models.Patient.isolates)
.selectinload(models.Isolate.sequences)
.selectinload(models.Sequence.insertions))
.options(db.selectinload(models.Patient.isolates)
.selectinload(models.Isolate.sequences)
.selectinload(models.Sequence.mixtures))
)
patients = dbutils.chunk_query(
query, models.Patient.id, chunksize=500,
on_progress=(lambda o, t:
print('{0}/{1} patients...'.format(o, t), end='\r')),
on_finish=(lambda t:
print('{0} patients. '.format(t)))
)
patcount = 0
seqcount = 0
for patient in patients:
patmatrix = np.zeros_like(matrix)
patflag = False
for isolate in patient.isolates:
if isolate.gene != gene:
continue
seq = isolate.get_or_create_consensus()
first_aa = seq.first_aa
last_aa = seq.last_aa
            # mixtures are only expanded when --include-mixture is given; otherwise they are ignored
if include_mixture:
seqmuts = {(pos, aa)
for pos, aas in seq.aas
for aa in aas if (pos, aa) in mutations}
else:
seqmuts = {m for m in seq.aas if m in mutations}
if not include_zeros and not seqmuts:
continue
seqcount += 1
patflag = True
for m0, m0idx in mutationitems:
if m0[0] < first_aa or m0[0] > last_aa:
# disqualified because of out of range
continue
for m1, m1idx in mutationitems[m0idx + 1:]:
if m1[0] < first_aa or m1[0] > last_aa:
# disqualified because of out of range
continue
hasm0 = m0 in seqmuts
hasm1 = m1 in seqmuts
if hasm0 and hasm1:
# contains both
patmatrix[m0idx, m1idx, 0b11] = 1
elif hasm0 and not hasm1:
# contains m0
patmatrix[m0idx, m1idx, 0b10] = 1
elif not hasm0 and hasm1:
# contains m1
patmatrix[m0idx, m1idx, 0b01] = 1
else: # elif not hasm0 and not hasm1:
# contains none
patmatrix[m0idx, m1idx, 0b00] = 1
matrix += patmatrix
patcount += patflag
print('{} patients ({} sequences) have at least one given mutation.'
.format(patcount, seqcount))
for m0, m0idx in mutationitems:
for m1, m1idx in mutationitems[m0idx + 1:]:
both = matrix[m0idx, m1idx, 0b11]
m0only = matrix[m0idx, m1idx, 0b10]
m1only = matrix[m0idx, m1idx, 0b01]
none = matrix[m0idx, m1idx, 0b00]
if both != 0 or m0only * m1only != 0:
rho, p = calc_spearman(both, m0only, m1only, none)
else:
rho = p = ''
writer.writerow([
'{}{}'.format(*m0),
'{}{}'.format(*m1),
both, m0only, m1only, none, rho, p
])
if __name__ == '__main__':
with app.app_context():
        mutation_correlation()
|
python
|
""" Manages drawing of the game """
from typing import List, Tuple, Any
import colorsys
import random
import pygame
import settings
from game_state import GameState, Snake, Pizza
Color = Tuple[int, int, int]
class Colors:
""" Basic colors """
CLEAR_COLOR = (240, 240, 240)
BLACK = (0, 0, 0)
DARK_YELLOW = (200, 200, 0)
HOT_PINK = (220, 0, 127)
PINK = (255, 192, 203)
FUCHSIA = (255, 130, 255)
LIME = (0, 255, 0)
P1_GREEN = (100, 255, 10)
P1_YELLOW = (255, 255, 10)
P2_RED = (255, 10, 10)
P2_ORANGE = (255, 200, 10)
P3_BLUE = (10, 10, 255)
P3_CYAN = (10, 200, 200)
P4_VIOLET = (150, 50, 255)
P4_BLUE = (50, 50, 100)
MINT = (170, 255, 195)
GOLD = (249, 166, 2)
ROYAL = (250, 218, 94)
PLAYER_COLORS = [(Colors.P1_GREEN, Colors.P1_YELLOW),
(Colors.P2_RED, Colors.P2_ORANGE),
(Colors.P3_BLUE, Colors.P3_CYAN),
(Colors.P4_VIOLET, Colors.P4_BLUE),
(Colors.HOT_PINK, Colors.PINK),
(Colors.BLACK, Colors.DARK_YELLOW),
(Colors.ROYAL, Colors.GOLD), (Colors.FUCHSIA, Colors.MINT)]
def generate_gradient(colors: Tuple[Color, Color], steps: int) -> List[Color]:
""" Generate a color gradient with 2*steps for two input colors """
def lerp(val1: int, val2: int, scale: float) -> int:
""" interpolate between values val1 and val2 with scale [0, 1] """
return int(val1 + (val2 - val1) * scale)
palette = []
c1_red, c1_green, c1_blue = colors[0]
c2_red, c2_green, c2_blue = colors[1]
for i in range(steps):
scale = i / steps
red = lerp(c1_red, c2_red, scale)
green = lerp(c1_green, c2_green, scale)
blue = lerp(c1_blue, c2_blue, scale)
palette.append((red, green, blue))
for i in range(steps):
scale = i / steps
red = lerp(c2_red, c1_red, scale)
green = lerp(c2_green, c1_green, scale)
blue = lerp(c2_blue, c1_blue, scale)
palette.append((red, green, blue))
return palette
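# Usage sketch: with steps=2 the palette ping-pongs between the two colors,
# e.g. generate_gradient(((0, 0, 0), (255, 255, 255)), 2)
# == [(0, 0, 0), (127, 127, 127), (255, 255, 255), (127, 127, 127)]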
class SnakeGraphics:
""" Implements Snake drawing with 8-bit texture
and palette color rotations """
def __init__(self) -> None:
def hsl_color_pair(seed: float,
player_index: int) -> Tuple[Color, Color]:
""" Generate a hsl color with unique hue for each player """
def hsl_color(hue: float, saturation: float,
lightness: float) -> Color:
""" Convert hsl to rgb """
hue = hue - 1 if hue > 1 else hue
red, green, blue = (
int(256 * i)
for i in colorsys.hls_to_rgb(hue, lightness, saturation))
return (red, green, blue)
pidx = player_index / settings.MAX_PLAYERS
return (hsl_color(seed + pidx, 0.99,
0.5), hsl_color(seed + pidx, 0.7, 0.3))
self.image = pygame.Surface(settings.PLAY_AREA, 0, 8)
self.image.fill((0, 0, 0))
self.image.set_colorkey((0, 0, 0))
self.gradients = [
generate_gradient(
PLAYER_COLORS[index] if index < len(PLAYER_COLORS) else
hsl_color_pair(random.random(), index),
settings.PLAYER_COLOR_GRADIENT_SIZE // 2)
for index in range(settings.MAX_PLAYERS)
]
assert len(self.gradients) == settings.MAX_PLAYERS
self.palette = [(0, 0, 0)] * 256
self.rotate: float = 0.0
self.update_palette()
def rotate_palette(self) -> None:
""" Rotate the color gradients for each player to create animation """
self.rotate += settings.SNAKE_COLOR_ROT
rot = int(self.rotate)
size = settings.PLAYER_COLOR_GRADIENT_SIZE
for pidx in range(settings.MAX_PLAYERS):
base = 1 + pidx * size
for i in range(size):
self.palette[base + i] = self.gradients[pidx][(i + rot) % size]
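    # Note on the shared palette layout: index 0 is the colorkey (transparent);
    # each player owns a contiguous block of PLAYER_COLOR_GRADIENT_SIZE entries
    # starting at 1 + player_index * size, which is what rotate_palette above
    # and player_color_index in draw_snake index into.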
def update_palette(self) -> None:
""" Animate color palette and apply it to the snake texture """
self.rotate_palette()
self.image.set_palette(self.palette)
def draw_snake(self, player_idx: int, snake: Snake) -> None:
""" Apply updates to the snake texture """
def player_color_index(pidx: int, value: int) -> int:
""" return player color index in the shared palette """
size = settings.PLAYER_COLOR_GRADIENT_SIZE
return 1 + pidx * size + value % size
for part in snake.new_parts:
index = player_color_index(player_idx, part[2])
pygame.draw.circle(self.image, index, [part[0], part[1]],
settings.SNAKE_RADIUS)
snake.new_parts.clear()
for part in snake.removed_parts:
pygame.draw.circle(self.image, 0, [part[0], part[1]],
settings.SNAKE_RADIUS)
snake.removed_parts.clear()
# Replace last part as it was partially removed,
# clearing could be implemented better with masking
if len(snake.parts) > 0:
part = snake.parts[0]
corr_col_index = player_color_index(player_idx, part[2])
pygame.draw.circle(self.image, corr_col_index, [part[0], part[1]],
settings.SNAKE_RADIUS)
def draw_snakes(self, screen: Any, snakes: List[Snake]) -> None:
""" Draw all provided snake objects and rotate palette """
for snake_id, snake in enumerate(snakes):
self.draw_snake(snake_id, snake)
self.update_palette()
screen.blit(self.image, (0, 0))
class GameRenderer:
""" Handles game state rendering """
def __init__(self) -> None:
self.snake_graphics = SnakeGraphics()
self.screen = pygame.display.set_mode(settings.PLAY_AREA)
def draw_pizza(self, pizza: Pizza) -> None:
""" Draw a pizza object to the screen """
pygame.draw.circle(self.screen, (180, 160, 10), [pizza.x, pizza.y],
pizza.radius)
pygame.draw.circle(self.screen, (255, 210, 10), [pizza.x, pizza.y],
pizza.radius - 3)
pygame.draw.circle(self.screen, (255, 100, 10), [pizza.x, pizza.y],
pizza.radius - 6)
def draw_pizzas(self, pizzas: List[Pizza]) -> None:
""" Draw all pizzas in a list """
for pizza in pizzas:
self.draw_pizza(pizza)
def draw_game(self, game_state: GameState) -> None:
""" Draw game """
self.screen.fill(Colors.CLEAR_COLOR)
self.draw_pizzas(game_state.pizzas)
self.snake_graphics.draw_snakes(self.screen, game_state.snakes)
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from re import findall
from string import printable
from struct import unpack
from src.capturePkt.networkProtocol import NetworkProtocol
class Telnet(NetworkProtocol):
IAC = 0xff
codeDict = {236: 'EOF',
237: 'SUSP',
238: 'ABORT',
239: 'EOR',
240: 'SE',
241: 'NOP',
242: 'DM',
243: 'BRK',
244: 'IP',
245: 'AO',
246: 'AYT',
247: 'EC',
248: 'EL',
249: 'GA',
250: 'SB',
251: 'WILL',
252: 'WONT',
253: 'DO',
254: 'DONT',
255: 'IAC',
}
# https://www.iana.org/assignments/telnet-options/telnet-options.xhtml
optionDict = {0: 'Binary Transmission',
1: 'Echo',
2: 'Reconnection',
3: 'Suppress Go Ahead',
4: 'Approx Message Size Negotiation',
5: 'Status',
6: 'Timing Mark',
7: 'Remote Controlled Trans and Echo',
8: 'Output Line Width',
9: 'Output Page Size',
10: 'Output Carriage-Return Disposition',
11: 'Output Horizontal Tab Stops',
12: 'Output Horizontal Tab Disposition',
13: 'Output Formfeed Disposition',
14: 'Output Vertical Tabstops',
15: 'Output Vertical Tab Disposition',
16: 'Output Linefeed Disposition',
17: 'Extended ASCII',
18: 'Logout',
19: 'Byte Macro',
20: 'Data Entry Terminal',
21: 'SUPDUP',
22: 'SUPDUP Output',
23: 'Send Location',
24: 'Terminal Type',
25: 'End of Record',
26: 'TACACS User Identification',
27: 'Output Marking',
28: 'Terminal Location Number',
29: 'Telnet 3270 Regime',
30: 'X.3 PAD',
31: 'Negotiate About Window Size',
32: 'Terminal Speed',
33: 'Remote Flow Control',
34: 'Linemode',
35: 'X Display Location',
36: 'Environment Option',
37: 'Authentication Option',
38: 'Encryption Option',
39: 'New Environment Option',
40: 'TN3270E',
41: 'XAUTH',
42: 'CHARSET',
43: 'Telnet Remote Serial Port (RSP)',
44: 'Com Port Control Option',
45: 'Telnet Suppress Local Echo',
46: 'Telnet Start TLS',
47: 'KERMIT',
48: 'SEND-URL',
49: 'FORWARD_X',
138: 'TELOPT PRAGMA LOGON',
139: 'TELOPT SSPI LOGON',
140: 'TELOPT PRAGMA HEARTBEAT',
255: 'Extended-Options-List',
}
def __init__(self, packet):
self.extendField = tuple()
self.extendParse = tuple()
while packet:
if packet[0] == self.IAC:
telnet = unpack('!B B', packet[1:3])
self.code = Telnet.codeDict.get(telnet[0], 'Unknown')
self.option = Telnet.optionDict.get(telnet[1], 'Unknown')
commandStr = '{} {}'.format(self.code, self.option)
self.extendField = self.extendField + ('Telnet Command',)
self.extendParse = self.extendParse + (commandStr,)
packet = packet[3:]
continue
else:
data = packet.decode('utf-8', 'ignore')
data = list(filter(lambda x: x in printable, data))
data = ''.join(data)
                if len(data) > 80:
                    # wrap long payloads at 80 characters, keeping the trailing remainder
                    data = '\n'.join(findall(r'.{1,80}', data))
else:
data = data.replace('\r\n', '')
self.extendField = self.extendField + ('Data',)
self.extendParse = self.extendParse + (data,)
break
def getFields(self):
return self.extendField
def getParses(self):
return self.extendParse
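# Usage sketch (assumptions: `packet` is the raw Telnet payload bytes and
# NetworkProtocol needs no extra constructor arguments):
#   Telnet(b'\xff\xfd\x01hello').getParses() -> ('DO Echo', 'hello')
# 0xff is the IAC escape, 0xfd maps to 'DO' in codeDict, 0x01 to 'Echo' in
# optionDict, and the remaining printable bytes are reported as 'Data'.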
|
python
|
#-*- coding: utf-8 -*-
'''
Created on 2017. 11. 06
Updated on 2017. 11. 06
'''
from __future__ import print_function
import os
import cgi
import re
import time
import codecs
import sys
import subprocess
import math
import dateutil.parser
from datetime import datetime
from commons import Subjects
from xml.etree import ElementTree
from features.Corpus import Corpus
from bs4 import BeautifulSoup
from unidiff import PatchSet
from utils import Progress
from repository.GitLog import GitLog
from repository.BugFilter import BugFilter
from repository.GitVersion import GitVersion
###############################################################
# make bug information
###############################################################
def load_file_corpus(_filepath):
data = {}
f = open(_filepath, 'r')
while True:
line = f.readline()
if line is None or line == "":break
identifier, words = line.split('\t')
identifier = identifier.replace('/','.')
idx = identifier.find('org.')
if idx >= 0:
identifier = identifier[idx:]
data[identifier] = words.split(' ')
return data
def load_bug_corpus(_filename):
'''
return words for each items(ex. file, bug report...)
:param _filename:
:return: {'itemID1':['word1', 'word2',....], 'itemID2':[]....}
'''
f = open(_filename, 'r')
lines = f.readlines()
f.close()
corpus = {}
for line in lines:
idx = line.find('\t')
key = int(line[:idx])
words = line[idx + 1:-1]
words = words.strip().split(' ') if len(words) > 0 else []
# remove blank items
idx = 0
while idx < len(words):
if words[idx] == '':
del words[idx]
else:
idx += 1
corpus[key] = words
return corpus
###############################################################
# make comment information
###############################################################
def load_bug_xml(_filepath):
'''
get bugs information according to _selector from bug repository XML file
:param _repo:
:param _selector:
:return:
'''
bug = {}
try:
root = ElementTree.parse(_filepath).getroot()
itemtag = root[0].find('item') #channel > item
bug['title'] = itemtag.find('title').text
bug['desc'] = itemtag.find('description').text
bug['comments'] = []
comments = itemtag.find('comments')
if comments is not None:
for comment in comments:
cID = int(comment.attrib['id'])
cTime = dateutil.parser.parse(comment.attrib['created'])
cTime = time.mktime(cTime.timetuple())
# cTime = datetime.strptime(comment.attrib['created'], "%a, %d %b %Y %H:%M:%S")
cAuthor = comment.attrib['author']
cText = comment.text
bug['comments'].append({'id':cID, 'timestamp':cTime, 'author':cAuthor, 'text':cText})
except Exception as e:
print(e)
return bug
def make_comment_corpus(_project, _bugIDs, _bugPath, _featurePath):
corpusPath = os.path.join(_featurePath, 'bugs', '_corpus')
if os.path.exists(corpusPath) is False:
os.makedirs(corpusPath)
result_file = os.path.join(corpusPath, 'comments.corpus')
if os.path.exists(result_file) is True:
return True
corpus = Corpus(_camelSplit=False)
f = codecs.open(result_file, 'w', 'UTF-8')
count = 0
progress = Progress(u'[%s] making comment corpus' % _project, 2, 10, True)
progress.set_upperbound(len(_bugIDs))
progress.start()
for bugID in _bugIDs:
# print(u'[%s] Working %d ...' % (_project, bugID), end=u'')
# load XML
filepath = os.path.join(_bugPath, 'bugs', '%s-%d.xml'% (_project, bugID))
bug = load_bug_xml(filepath)
if len(bug['comments']) == 0:
            print(u'!', end=u'')  # no comments for this bug report
count +=1
# make Corpus
for comment in bug['comments']:
# Convert some formats (date and text...)
            # re.sub: strip non-ASCII characters, keeping English letters, digits and basic punctuation
text = BeautifulSoup(comment['text'], "html.parser").get_text()
text = cgi.escape(re.sub(r'[^\x00-\x80]+', '', text))
text = cgi.escape(re.sub(chr(27), '', text))
comment_corpus = corpus.make_text_corpus(text)
corpus_text = ' '.join(comment_corpus)
f.write('%d\t%d\t%d\t%s\t%s\n' % (bugID, comment['id'], comment['timestamp'], comment['author'], corpus_text))
progress.check()
f.close()
progress.done()
print(u'missed bugs : %d' % count)
pass
def load_comment_corpus(_project, _bugIDs, _bugPath, _featurePath, _force=False):
corpusPath = os.path.join(_featurePath, 'bugs', '_corpus', 'comments.corpus')
if _force is True or os.path.exists(corpusPath) is False:
make_comment_corpus(_project, _bugIDs, _bugPath, _featurePath)
data = {}
f = codecs.open(corpusPath, 'r', 'UTF-8')
while True:
line = f.readline()
if line is None or line=="": break
line = line[:-1]
items = line.split('\t')
bugID = int(items[0])
if bugID not in data: data[bugID] = []
corpus = items[4].split(' ') if len(items[4]) > 0 else[]
data[bugID].append({'id':items[1], 'timestamp':items[2], 'author':items[3], 'corpus':corpus})
f.close()
return data
###############################################################
# make hunk information
###############################################################
def get_patches(_hash, _gitPath):
'''
    Load the patch information (diff) for the commit identified by _hash.
    The commit log message itself is excluded.
:return:
'''
# check this branch
command = [u'git', u'log', u'-1', u'-U', _hash]
result = subprocess.check_output(command, stderr=sys.stderr, cwd=_gitPath)
if result is None:
print(u'Failed')
return False
# if the head is not up-to-date, checkout up-to-date
    # common_log_msg = result[:result.find('diff --git ')]  # the output starts with the commit log message; parse this part if the log message is needed
result = result[result.find('diff --git '):].strip()
result = re.sub(r'[^\x00-\x80]+', '', result)
result = result.decode('UTF-8', 'ignore')
patch = PatchSet(result.split('\n'))
return patch
def make_hunk(_bug, _gitPath):
'''
:param _bug: {'commits':[list of commit hash], 'files':[list of related files]}
:param _gitPath:
:return: {'classpath':[corpus], .... }
'''
changes = {}
fixed_files = [item['name'] for item in _bug['files']]
for commit in _bug['commits']:
patches = get_patches(commit, _gitPath)
for patch in patches:
classpath = patch.path.replace('/', '.')
classpath = classpath[classpath.find('org.'):]
if classpath not in fixed_files:continue
hunk_text = u''
for hunk in patch:
# related method name + codes with linesep
hunk_text += hunk.section_header + reduce(lambda x, y: str(x) + os.linesep + str(y), hunk) + os.linesep
changes[classpath] = hunk_text
return changes
def make_hunk_corpus(_project, _bugs, _gitPath, _featurePath):
    # source versions are not considered (we compare against hunks from the commit log, so versioning is irrelevant)
    # fetch the fixed files and the commit list for each bugID (commit hashes must be included)
# create hunk corpus path
corpusPath = os.path.join(_featurePath, 'bugs', '_corpus')
if os.path.exists(corpusPath) is False:
os.makedirs(corpusPath)
f = codecs.open(os.path.join(corpusPath, 'hunk.corpus'), 'w', 'UTF-8')
# create hunk and save
progress = Progress(u'[%s] making hunk corpus' % _project, 2, 10, True)
progress.set_upperbound(len(_bugs))
progress.start()
for bugID, info in _bugs.iteritems():
        # _bugs : {bugID: {'commits': [...], 'files': [{'type': 'M', 'name': 'fileclass'}, ...]}, ...}
#print('[Hunk] working %d' % bugID)
# make hunk
hunks = make_hunk(info, _gitPath)
# make hunk corpus
corpus = Corpus(_camelSplit=False)
for classpath, hunk_text in hunks.iteritems():
terms = corpus.make_text_corpus(hunk_text)
terms_text = ' '.join(terms)
f.write('%d\t%s\t%s\n' % (bugID, classpath, terms_text))
progress.check()
f.close()
progress.done()
pass
def load_hunk_corpus(_project, _bugs, _gitPath, _featurePath, _force=False):
'''
    Build a corpus from the changed hunks of the ground-truth (fixed) files for each bug report.
:param _bugID:
:param _gitPath:
:param _featurePath:
:return:
'''
#check the path
corpusPath = os.path.join(_featurePath, 'bugs', '_corpus', 'hunk.corpus')
if _force is True or os.path.exists(corpusPath) is False:
make_hunk_corpus(_project, _bugs, _gitPath, _featurePath)
# load hunk corpus
data = {}
f = codecs.open(corpusPath, 'r', 'UTF-8')
while True:
line = f.readline()
if line is None or line == "": break
line = line[:-1]
items = line.split('\t')
bugID = int(items[0])
classpath = items[1]
corpus = items[2].split(' ') if len(items[2]) > 0 else []
# Add to data
if bugID not in data: data[bugID] = {}
data[bugID][classpath] = corpus
f.close()
return data
def make_bug_hash(_project, _bugIDs, _bugPath, _gitPath, _featurePath):
'''
:param _project:
:param _bugIDs:
:param _bugPath:
:param _gitPath:
:param _featurePath:
:return: {bugID:{'commits':[commit hash list], 'files':[{'type':'M', 'name':'fileclass'}, ...]
'''
gitlogPath = os.path.join(_featurePath, 'bugs', '_corpus', u'.git.log')
gitversionPath = os.path.join(_featurePath, 'bugs', '_corpus', u'.git_version.txt')
gitLog = GitLog(_project, _gitPath, gitlogPath)
gitVersion = GitVersion(_project, _gitPath, gitversionPath)
bugFilter = BugFilter(_project, os.path.join(_bugPath, u'bugs'))
    print(u'[%s] start making bug information *************' % (_project))
logs = gitLog.load()
tagmaps = gitVersion.load()
items, dupgroups = bugFilter.run(logs, tagmaps)
    print(u'[%s] start making bug information ************* Done' % (_project))
# making bugs
bugs = {}
for item in items:
bugID = int(item['id'][item['id'].find('-')+1:])
if bugID not in _bugIDs: continue
if item['id'] not in logs:
            print('********** item ID %s does not exist in logs! That\'s weird' % item['id'])
bugs[bugID] = {'commits':[] , 'files':[]}
continue
commits = logs[item['id']]
hash_list = [commit['hash'] for commit in commits]
files = []
for fileitem in item['fixedFiles']:
if fileitem['name'].find('test') >= 0: continue
if fileitem['name'].find('Test') >= 0: continue
files.append(fileitem)
if len(hash_list)==0 or len(files) ==0: continue
bugs[bugID] = {'commits':hash_list , 'files':files}
os.remove(gitlogPath)
os.remove(gitversionPath)
print(u'[%s] making hash info for bugs ************* Done' % (_project))
from utils.PrettyStringBuilder import PrettyStringBuilder
builder = PrettyStringBuilder()
text = builder.get_dicttext(bugs, _indent=1)
f = codecs.open(os.path.join(_featurePath, 'bugs', '.bug.hash'), 'w', 'UTF-8')
f.write(text)
f.close()
return bugs
def load_bug_hash(_project, _bugIDs, _bugPath, _gitPath, _featurePath, _force=False):
bugHashPath = os.path.join(_featurePath, 'bugs', '.bug.hash')
if _force is True or os.path.exists(bugHashPath) is False:
make_bug_hash(_project, _bugIDs, _bugPath, _gitPath, _featurePath)
f = codecs.open(bugHashPath, 'r', 'UTF-8')
text = f.read()
f.close()
return eval(text)
###############################################################
# TF-IDF and similarity computation
###############################################################
def make_IDF(descriptions, comments, hunks):
document_count = 0
# description IDF
IDF = {}
for bugID in descriptions:
document_count += 1
unique_terms = set(descriptions[bugID])
for term in unique_terms:
IDF[term] = (IDF[term] + 1) if term in IDF else 1
# comments IDF
for bugID in comments:
for comment in comments[bugID]:
document_count += 1
unique_terms = set(comment['corpus'])
for term in unique_terms:
IDF[term] = (IDF[term] + 1) if term in IDF else 1
# hunk IDF
for bugID in hunks:
for filepath, corpus in hunks[bugID].iteritems():
document_count += 1
unique_terms = set(corpus)
for term in unique_terms:
IDF[term] = (IDF[term] + 1) if term in IDF else 1
print('len of all tokens is : %d' % len(IDF))
return document_count, IDF
def get_TF(_corpus):
TF = {}
# hunk IDF
for term in _corpus:
        TF[term] = (TF[term] + 1) if term in TF else 1
return TF
def get_TFIDF(_TF, _IDF, _nD):
'''
basic TF-IDF
:param _TF:
:param _IDF:
:param _nD:
:return:
'''
TFIDF = {}
for term, count in _TF.iteritems():
TFIDF[term] = float(count) * (1.0 + math.log(float(_nD) / _IDF[term])) #basic TF-IDF
return TFIDF
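# Worked example (sketch): with _nD = 4 documents and _IDF = {'leak': 2},
# a corpus containing 'leak' three times gets
#   TFIDF['leak'] = 3 * (1 + ln(4 / 2)) ~= 5.08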
def get_similarity(_vectorA, _vectorB):
'''
return sparse vector's similarity between vA and vB
:param _vectorA:
:param _vectorB:
:return:
'''
common_set = set(_vectorA.keys()) & set(_vectorB.keys())
# A dot B
product = sum(_vectorA[term] * _vectorB[term] for term in common_set)
# |A|, |B|
valueA = sum(_vectorA[term] * _vectorA[term] for term in _vectorA)
valueB = sum(_vectorB[term] * _vectorB[term] for term in _vectorB)
    if valueA == 0 or valueB == 0:
        return 0
    # cosine similarity: normalise by the vector norms (sqrt of the squared sums)
    return product / (math.sqrt(valueA) * math.sqrt(valueB))
def calculate_similarity(_group, _project, _descs, _comments, _hunks, _featurePath, _outputPath):
# calculated Number of Documents, Number of IDF
nD, nIDF = make_IDF(_descs, _comments, _hunks) # number of Documents, number of IDF
# for term in nIDF:
# print(u'%s\t%d' % (term, nIDF[term]))
def get_average_similarity(_bugID, _vecA, _hunks):
        countFiles = len(_hunks[_bugID])  # use the parameter, not the loop variable from the enclosing scope
similarity = 0.0
for classpath, corpus in _hunks[_bugID].iteritems():
tfH = get_TF(corpus)
vecH = get_TFIDF(tfH, nIDF, nD)
similarity += get_similarity(_vecA, vecH)
        similarity = similarity / countFiles  # average similarity over all related (fixed) files
return similarity
progress = Progress("[%s] calculating similarity for reports and file hunks" % _project, 2, 10, True)
progress.set_upperbound(len(_descs))
progress.start()
translationPath = os.path.join(_outputPath, '_translation')
if os.path.exists(translationPath) is False: os.makedirs(translationPath)
output = codecs.open(os.path.join(translationPath, '%s-translation-relation.csv'%_project), 'w', 'UTF-8')
#output.write(u'Group\tProject\tBugID\tcommentID\tTime\tAuthor\tSimilarity\t#comments\tMemo\n')
# for each bug report,
for bugID in _descs:
# except for reports which have no hunks
if bugID not in _hunks: continue
tfR = get_TF(_descs[bugID])
vectorR = get_TFIDF(tfR, nIDF, nD)
        # average similarity between the report description and the fixed files
simR = get_average_similarity(bugID, vectorR, _hunks)
simMax = {'id':bugID, 'time':'', 'author':'description', 'similarity':simR}
if bugID not in _comments:
            # if this bug has no comments, just fall back to simR (the description similarity)
output.write(u'%s,%s,%d,%d,%.8f\n' % (_project, bugID, 0, 0, simMax['similarity']))
output.flush()
continue
        # compare each comment against the files and keep the one with the highest similarity
for comment in _comments[bugID]:
tfC = get_TF(comment['corpus'])
vectorC = get_TFIDF(tfC, nIDF, nD)
simC = get_average_similarity(bugID, vectorC, _hunks)
if simMax['similarity'] < simC:
simMax = {'id': comment['id'],
'time': datetime.fromtimestamp(int(comment['timestamp'])).strftime("%Y-%m-%d %H:%M:%S"),
'author': comment['author'], 'similarity': simC, 'corpus':comment['corpus']}
        # also collect the corpora of the fixed files for dumping below
fcorpus = []
for item in _hunks[bugID].values():
fcorpus += item
        # decide whether the description or a comment produced the maximum similarity
if simMax['author'] == 'description' and simMax['id'] == bugID:
simpath = os.path.join(translationPath, _project + 'desc')
if os.path.exists(simpath) is False: os.makedirs(simpath)
print_tf(_descs[bugID], os.path.join(simpath,'%s-%d$desc.csv' % (_project, bugID)))
print_tf(fcorpus, os.path.join(simpath, '%s-%d$files.csv' % (_project, bugID)))
output.write(u'%s,%s,%d,%d,%.8f\n' % (_project, bugID, 0, 0, simMax['similarity']))
else:
simpath = os.path.join(translationPath, _project)
if os.path.exists(simpath) is False: os.makedirs(simpath)
print_tf(_descs[bugID], os.path.join(simpath, '%s-%d$desc.csv' % (_project, bugID)))
print_tf(simMax['corpus'], os.path.join(simpath, '%s-%d$comment-%s.csv' % (_project, bugID, simMax['id'])))
print_tf(fcorpus, os.path.join(simpath, '%s-%d$files.csv' % (_project, bugID)))
output.write(u'%s,%s,%d,%s,%.8f\n' % (_project, bugID, 1, simMax['id'], simMax['similarity']))
# output.write(u'%s\t%s\t%d\t%s\t%s\t%s\t%.8f\t%d\t%s\n' %
# (_group, _project, bugID, simMax['id'], simMax['time'], simMax['author'], simMax['similarity'], len(_comments[bugID]), ev))
output.flush()
progress.check()
output.close()
progress.done()
pass
def print_tf(_corpus, _filename):
# make term frequency
TF = {}
for item in _corpus:
if item not in TF:
TF[item] = 1
else:
TF[item] += 1
import operator
sorted_value = sorted(TF.items(), key=operator.itemgetter(1), reverse=True)
f = codecs.open(_filename, 'w', 'UTF-8')
f.write(u'\n'.join( u'%s,%d'%(key,value) for key, value in sorted_value))
f.close()
###############################################################
# main routine
###############################################################
def make(_group, _project, _bugIDs, _bugPath, _gitPath, _featurePath, _outputPath, _isDesc, _isCamel, _force=False):
    # TODO: shouldn't bug reports without comments or without mapped files be filtered out beforehand?
# descriptions = {bugID:[corpus], bugID:[corpus], ...}
descriptions = load_bug_corpus(os.path.join(_featurePath, 'bugs', '_corpus', 'desc.corpus'))
    # bug reports without comments (151 of them) are excluded
# comments = {bugID:{'id':CommentID, 'timestamp':timestamp, 'author':Author, 'corpus':[corpus]}, ...}
comments = load_comment_corpus(_project, _bugIDs, _bugPath, _featurePath, _force=_force)
# bugs = {bugID:{'commits':[], 'files':[{'type':'M', 'name':'org....java'}]}, ...}
bugs = load_bug_hash(_project, _bugIDs, _bugPath, _gitPath, _featurePath, _force=_force)
# hunks = {bugID:{'classpath':[corpus], 'classpath':[corpus], ...}, ...}
hunks = load_hunk_corpus(_project, bugs, _gitPath, _featurePath, _force = _force)
# calculated Number of Documents, Number of IDF
calculate_similarity(_group, _project, descriptions, comments, hunks, _featurePath, _outputPath)
pass
#####################################
# command
#####################################
def work():
import json
S = Subjects()
desc = True
camel = False
for group in ['Apache']:#S.groups:#S.groups: #['Commons']:#
for project in ['CAMEL'] : #S.projects[group]:#S.projects[group]: ['CODEC']
make(group, project, S.bugs[project]['all'],
S.getPath_bugrepo(group,project),
S.getPath_gitrepo(group, project),
S.getPath_featurebase(group, project),
S.root_feature,
_isDesc=desc,
_isCamel=camel,
_force = False)
###############################################################################################################
###############################################################################################################
if __name__ == "__main__":
'''
'''
work()
pass
|
python
|
#!/usr/bin/env python3
#
# Copyright (c) 2018 Sébastien RAMAGE
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
#
import logging
import argparse
import time
from zigate import connect
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--debug', help='Debug',
default=False, action='store_true')
parser.add_argument('--port', help='ZiGate usb port',
default=None)
parser.add_argument('--host', help='Wifi ZiGate host:port',
default=None)
parser.add_argument('--path', help='ZiGate state file path',
default='~/.zigate.json')
parser.add_argument('--gpio', help='Enable PiZigate', default=False, action='store_true')
parser.add_argument('--channel', help='Zigbee channel', default=None)
parser.add_argument('--admin_panel', help='Enable Admin panel', default=True, action='store_true')
parser.add_argument('--admin_panel_port', help='Admin panel port', default=9998)
parser.add_argument('--admin_panel_mount', help='Admin panel url mount point', default=None)
parser.add_argument('--admin_panel_prefix', help='Admin panel url prefix', default=None)
args = parser.parse_args()
if args.debug:
logging.root.setLevel(logging.DEBUG)
z = connect(args.port, args.host, args.path, True, True, args.channel, args.gpio)
if args.admin_panel:
logging.root.info('Starting Admin Panel on port %s', args.admin_panel_port)
if args.admin_panel_mount:
logging.root.info('Mount point is %s', args.admin_panel_mount)
if args.admin_panel_prefix:
logging.root.info('URL prefix is %s', args.admin_panel_prefix)
z.start_adminpanel(port=int(args.admin_panel_port), mount=args.admin_panel_mount, prefix=args.admin_panel_prefix,
debug=args.debug)
print('Press Ctrl+C to quit')
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
print('Interrupted by user')
z.save_state()
z.close()
|
python
|
import os
import sys
import argparse
from cuttsum.event import read_events_xml
from cuttsum.nuggets import read_nuggets_tsv
from cuttsum.util import gen_dates
import streamcorpus as sc
import numpy as np
from sklearn.feature_extraction import DictVectorizer
import codecs
def main():
event_file, rc_dir, event_title, ofile = parse_args()
event = load_event(event_title, event_file)
hours = [dth for dth in gen_dates(event.start, event.end)]
num_hours = len(hours)
meta_data = []
bow_dicts = []
for h, hour in enumerate(hours, 1):
path = os.path.join(rc_dir, '{}.sc.gz'.format(hour))
for si in sc.Chunk(path=path):
uni2id = {}
for sid, sentence in enumerate(si.body.sentences[u'serif'], 0):
uni2id[sentence_uni(sentence)] = sid
for sent in si.body.sentences[u'article-clf']:
bow_dict = {}
for token in sent.tokens:
t = token.token.decode(u'utf-8').lower()
bow_dict[t] = 1
bow_dicts.append(bow_dict)
uni = sentence_uni(sent)
sent_id = uni2id[uni]
meta_data.append((hour, si.stream_id, sent_id, uni))
vctr = DictVectorizer()
X = vctr.fit_transform(bow_dicts)
with codecs.open(ofile, 'w', 'utf-8') as f:
for i, (hour, stream_id, sent_id, uni) in enumerate(meta_data):
uni = uni.replace(u'\n', u' ').replace(u'\t', u' ')
f.write(u'{}\t{}\t{}\t{}\t'.format(hour, stream_id, sent_id, uni))
x = u' '.join([unicode(col) for col in X[i,:].indices])
f.write(x)
f.write(u'\n')
f.flush()
def sentence_uni(sent):
return u' '.join(token.token.decode(u'utf-8') for token in sent.tokens)
def load_event(event_title, event_xml):
events = read_events_xml(event_xml)
for event in events:
if event_title == event.title:
return event
raise ValueError(("No event title matches \"{}\" " \
+ "in file: {}").format(event_title, event_xml))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--event-file',
help=u'Event xml file.',
type=unicode, required=True)
parser.add_argument('-r', '--rel-chunks-dir',
help=u'Relevance Chunks dir',
type=str, required=True)
parser.add_argument('-t', '--event-title',
help=u'Event title',
type=unicode, required=True)
parser.add_argument('-o', '--output-file',
help=u'Location to write sims',
type=unicode, required=True)
args = parser.parse_args()
event_file = args.event_file
rc_dir = args.rel_chunks_dir
event_title = args.event_title
ofile = args.output_file
odir = os.path.dirname(ofile)
if odir != u'' and not os.path.exists(odir):
os.makedirs(odir)
if not os.path.exists(event_file) or os.path.isdir(event_file):
sys.stderr.write((u'--event-file argument {} either does not exist' \
+ u' or is a directory!\n').format(event_file))
sys.stderr.flush()
sys.exit()
if not os.path.exists(rc_dir) or not os.path.isdir(rc_dir):
sys.stderr.write((u'--rel-chunks-dir argument {} either does not' \
+ u' exist or is not a directory!\n').format(rc_dir))
sys.stderr.flush()
sys.exit()
return (event_file, rc_dir, event_title, ofile)
if __name__ == '__main__':
main()
|
python
|
# coding: utf-8
# 2020/1/3 @ tongshiwei
__all__ = ["get_net", "get_bp_loss"]
from mxnet import gluon
from .WCLSTM import WCLSTM
from .WCRLSTM import WCRLSTM
from .WRCLSTM import WRCLSTM
def get_net(model_type, class_num, embedding_dim, net_type="lstm", **kwargs):
if model_type == "wclstm":
return WCLSTM(net_type=net_type, class_num=class_num, embedding_dim=embedding_dim, **kwargs)
elif model_type == "wcrlstm":
return WCRLSTM(net_type=net_type, class_num=class_num, embedding_dim=embedding_dim, **kwargs)
elif model_type == "wrclstm":
return WRCLSTM(net_type=net_type, class_num=class_num, embedding_dim=embedding_dim, **kwargs)
else:
raise TypeError("unknown model_type: %s" % model_type)
def get_bp_loss(**kwargs):
return {"cross-entropy": gluon.loss.SoftmaxCrossEntropyLoss()}
|
python
|
"""LiteDRAM BankMachine (Rows/Columns management)."""
import math
from migen import *
from litex.soc.interconnect import stream
from litedram.common import *
from litedram.core.multiplexer import *
class _AddressSlicer:
def __init__(self, colbits, address_align):
self.colbits = colbits
self.address_align = address_align
def row(self, address):
split = self.colbits - self.address_align
return address[split:]
def col(self, address):
split = self.colbits - self.address_align
return Cat(Replicate(0, self.address_align), address[:split])
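# Example: with colbits=10 and address_align=3, split = 7, so row(address)
# returns address[7:] while col(address) returns the 7 column bits shifted
# left by 3 zero bits to restore the aligned column address.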
class BankMachine(Module):
def __init__(self, n, aw, address_align, nranks, settings):
self.req = req = Record(cmd_layout(aw))
self.refresh_req = refresh_req = Signal()
self.refresh_gnt = refresh_gnt = Signal()
a = settings.geom.addressbits
ba = settings.geom.bankbits + log2_int(nranks)
self.cmd = cmd = stream.Endpoint(cmd_request_rw_layout(a, ba))
# # #
auto_precharge = Signal()
# Command buffer
cmd_buffer_layout = [("we", 1), ("addr", len(req.addr))]
cmd_buffer_lookahead = stream.SyncFIFO(
cmd_buffer_layout, settings.cmd_buffer_depth,
buffered=settings.cmd_buffer_buffered)
cmd_buffer = stream.Buffer(cmd_buffer_layout) # 1 depth buffer to detect row change
self.submodules += cmd_buffer_lookahead, cmd_buffer
self.comb += [
req.connect(cmd_buffer_lookahead.sink, keep={"valid", "ready", "we", "addr"}),
cmd_buffer_lookahead.source.connect(cmd_buffer.sink),
cmd_buffer.source.ready.eq(req.wdata_ready | req.rdata_valid),
req.lock.eq(cmd_buffer_lookahead.source.valid | cmd_buffer.source.valid),
]
slicer = _AddressSlicer(settings.geom.colbits, address_align)
# Row tracking
row = Signal(settings.geom.rowbits)
row_opened = Signal()
row_hit = Signal()
row_open = Signal()
row_close = Signal()
self.comb += row_hit.eq(row == slicer.row(cmd_buffer.source.addr))
self.sync += \
If(row_close,
row_opened.eq(0)
).Elif(row_open,
row_opened.eq(1),
row.eq(slicer.row(cmd_buffer.source.addr))
)
# Address generation
row_col_n_addr_sel = Signal()
self.comb += [
cmd.ba.eq(n),
If(row_col_n_addr_sel,
cmd.a.eq(slicer.row(cmd_buffer.source.addr))
).Else(
cmd.a.eq((auto_precharge << 10) | slicer.col(cmd_buffer.source.addr))
)
]
# tWTP (write-to-precharge) controller
write_latency = math.ceil(settings.phy.cwl / settings.phy.nphases)
precharge_time = write_latency + settings.timing.tWR + settings.timing.tCCD # AL=0
self.submodules.twtpcon = twtpcon = tXXDController(precharge_time)
self.comb += twtpcon.valid.eq(cmd.valid & cmd.ready & cmd.is_write)
# tRC (activate-activate) controller
self.submodules.trccon = trccon = tXXDController(settings.timing.tRC)
self.comb += trccon.valid.eq(cmd.valid & cmd.ready & row_open)
# tRAS (activate-precharge) controller
self.submodules.trascon = trascon = tXXDController(settings.timing.tRAS)
self.comb += trascon.valid.eq(cmd.valid & cmd.ready & row_open)
# Auto Precharge generation
if settings.with_auto_precharge:
self.comb += \
If(cmd_buffer_lookahead.source.valid & cmd_buffer.source.valid,
If(slicer.row(cmd_buffer_lookahead.source.addr) !=
slicer.row(cmd_buffer.source.addr),
auto_precharge.eq(row_close == 0)
)
)
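        # auto_precharge is asserted when the lookahead request targets a
        # different row than the currently buffered one; the read/write CAS
        # command then drives A10 high (see cmd.a above) so the row is closed
        # automatically after the access.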
# Control and command generation FSM
# Note: tRRD, tFAW, tCCD, tWTR timings are enforced by the multiplexer
self.submodules.fsm = fsm = FSM()
fsm.act("REGULAR",
If(refresh_req,
NextState("REFRESH")
).Elif(cmd_buffer.source.valid,
If(row_opened,
If(row_hit,
cmd.valid.eq(1),
If(cmd_buffer.source.we,
req.wdata_ready.eq(cmd.ready),
cmd.is_write.eq(1),
cmd.we.eq(1),
).Else(
req.rdata_valid.eq(cmd.ready),
cmd.is_read.eq(1)
),
cmd.cas.eq(1),
If(cmd.ready & auto_precharge,
NextState("AUTOPRECHARGE")
)
).Else(
NextState("PRECHARGE")
)
).Else(
NextState("ACTIVATE")
)
)
)
fsm.act("PRECHARGE",
# Note: we are presenting the column address, A10 is always low
If(twtpcon.ready & trascon.ready,
cmd.valid.eq(1),
If(cmd.ready,
NextState("TRP")
),
cmd.ras.eq(1),
cmd.we.eq(1),
cmd.is_cmd.eq(1)
),
row_close.eq(1)
)
fsm.act("AUTOPRECHARGE",
If(twtpcon.ready & trascon.ready,
NextState("TRP")
),
row_close.eq(1)
)
fsm.act("ACTIVATE",
If(trccon.ready,
row_col_n_addr_sel.eq(1),
row_open.eq(1),
cmd.valid.eq(1),
cmd.is_cmd.eq(1),
If(cmd.ready,
NextState("TRCD")
),
cmd.ras.eq(1)
)
)
fsm.act("REFRESH",
If(twtpcon.ready,
refresh_gnt.eq(1),
),
row_close.eq(1),
cmd.is_cmd.eq(1),
If(~refresh_req,
NextState("REGULAR")
)
)
fsm.delayed_enter("TRP", "ACTIVATE", settings.timing.tRP - 1)
fsm.delayed_enter("TRCD", "REGULAR", settings.timing.tRCD - 1)
|
python
|
from django.contrib import admin
from mptt.admin import MPTTModelAdmin
from .models import Business, Category, Request
@admin.register(Business)
class BusinessAdmin(admin.ModelAdmin):
list_display = ('name', 'location', 'main_category')
ordering = ('name',)
filter_horizontal = ('other_categories', 'delivers_to')
@admin.register(Category)
class CategoryAdmin(MPTTModelAdmin):
list_display = ('name',)
mptt_level_indent = 20
@admin.register(Request)
class RequestAdmin(admin.ModelAdmin):
pass
|
python
|
import uuid
from cinderclient import exceptions as cinder_exceptions
from ddt import ddt, data
from django.conf import settings
from django.test import override_settings
from novaclient import exceptions as nova_exceptions
from rest_framework import status, test
import mock
from six.moves import urllib
from waldur_openstack.openstack.tests.unittests import test_backend
from waldur_core.structure.tests import factories as structure_factories
from . import factories, fixtures
from .. import models, views
@ddt
class InstanceCreateTest(test.APITransactionTestCase):
def setUp(self):
self.openstack_tenant_fixture = fixtures.OpenStackTenantFixture()
self.openstack_settings = self.openstack_tenant_fixture.openstack_tenant_service_settings
self.openstack_settings.options = {'external_network_id': uuid.uuid4().hex}
self.openstack_settings.save()
self.openstack_spl = self.openstack_tenant_fixture.spl
self.project = self.openstack_tenant_fixture.project
self.customer = self.openstack_tenant_fixture.customer
self.image = factories.ImageFactory(settings=self.openstack_settings, min_disk=10240, min_ram=1024)
self.flavor = factories.FlavorFactory(settings=self.openstack_settings)
self.subnet = self.openstack_tenant_fixture.subnet
self.client.force_authenticate(user=self.openstack_tenant_fixture.owner)
self.url = factories.InstanceFactory.get_list_url()
def get_valid_data(self, **extra):
subnet_url = factories.SubNetFactory.get_url(self.subnet)
default = {
'service_project_link': factories.OpenStackTenantServiceProjectLinkFactory.get_url(self.openstack_spl),
'flavor': factories.FlavorFactory.get_url(self.flavor),
'image': factories.ImageFactory.get_url(self.image),
'name': 'Valid name',
'system_volume_size': self.image.min_disk,
'internal_ips_set': [{'subnet': subnet_url}],
}
default.update(extra)
return default
def test_quotas_update(self):
response = self.client.post(self.url, self.get_valid_data())
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
instance = models.Instance.objects.get(uuid=response.data['uuid'])
Quotas = self.openstack_settings.Quotas
self.assertEqual(self.openstack_settings.quotas.get(name=Quotas.ram).usage, instance.ram)
self.assertEqual(self.openstack_settings.quotas.get(name=Quotas.storage).usage, instance.disk)
self.assertEqual(self.openstack_settings.quotas.get(name=Quotas.vcpu).usage, instance.cores)
self.assertEqual(self.openstack_settings.quotas.get(name=Quotas.instances).usage, 1)
self.assertEqual(self.openstack_spl.quotas.get(name=self.openstack_spl.Quotas.ram).usage, instance.ram)
self.assertEqual(self.openstack_spl.quotas.get(name=self.openstack_spl.Quotas.storage).usage, instance.disk)
self.assertEqual(self.openstack_spl.quotas.get(name=self.openstack_spl.Quotas.vcpu).usage, instance.cores)
def test_project_quotas_updated_when_instance_is_created(self):
response = self.client.post(self.url, self.get_valid_data())
instance = models.Instance.objects.get(uuid=response.data['uuid'])
self.assertEqual(self.project.quotas.get(name='os_cpu_count').usage, instance.cores)
self.assertEqual(self.project.quotas.get(name='os_ram_size').usage, instance.ram)
self.assertEqual(self.project.quotas.get(name='os_storage_size').usage, instance.disk)
def test_customer_quotas_updated_when_instance_is_created(self):
response = self.client.post(self.url, self.get_valid_data())
instance = models.Instance.objects.get(uuid=response.data['uuid'])
self.assertEqual(self.customer.quotas.get(name='os_cpu_count').usage, instance.cores)
self.assertEqual(self.customer.quotas.get(name='os_ram_size').usage, instance.ram)
self.assertEqual(self.customer.quotas.get(name='os_storage_size').usage, instance.disk)
def test_spl_quota_updated_by_signal_handler_when_instance_is_removed(self):
response = self.client.post(self.url, self.get_valid_data())
instance = models.Instance.objects.get(uuid=response.data['uuid'])
instance.delete()
self.assertEqual(self.openstack_spl.quotas.get(name=self.openstack_spl.Quotas.vcpu).usage, 0)
self.assertEqual(self.openstack_spl.quotas.get(name=self.openstack_spl.Quotas.ram).usage, 0)
self.assertEqual(self.openstack_spl.quotas.get(name=self.openstack_spl.Quotas.storage).usage, 0)
def test_project_quotas_updated_when_instance_is_deleted(self):
response = self.client.post(self.url, self.get_valid_data())
instance = models.Instance.objects.get(uuid=response.data['uuid'])
instance.delete()
self.assertEqual(self.project.quotas.get(name='os_cpu_count').usage, 0)
self.assertEqual(self.project.quotas.get(name='os_ram_size').usage, 0)
self.assertEqual(self.project.quotas.get(name='os_storage_size').usage, 0)
def test_customer_quotas_updated_when_instance_is_deleted(self):
response = self.client.post(self.url, self.get_valid_data())
instance = models.Instance.objects.get(uuid=response.data['uuid'])
instance.delete()
self.assertEqual(self.customer.quotas.get(name='os_cpu_count').usage, 0)
self.assertEqual(self.customer.quotas.get(name='os_ram_size').usage, 0)
self.assertEqual(self.customer.quotas.get(name='os_storage_size').usage, 0)
@data('storage', 'ram', 'vcpu')
def test_instance_cannot_be_created_if_service_project_link_quota_has_been_exceeded(self, quota):
payload = self.get_valid_data()
self.openstack_spl.set_quota_limit(quota, 0)
response = self.client.post(self.url, payload)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@data('instances')
def test_quota_validation(self, quota_name):
self.openstack_settings.quotas.filter(name=quota_name).update(limit=0)
response = self.client.post(self.url, self.get_valid_data())
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_user_can_provision_instance(self):
response = self.client.post(self.url, self.get_valid_data())
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
def test_user_can_define_instance_subnets(self):
subnet = self.openstack_tenant_fixture.subnet
data = self.get_valid_data(internal_ips_set=[{'subnet': factories.SubNetFactory.get_url(subnet)}])
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
instance = models.Instance.objects.get(uuid=response.data['uuid'])
self.assertTrue(models.InternalIP.objects.filter(subnet=subnet, instance=instance).exists())
def test_user_cannot_assign_subnet_from_other_settings_to_instance(self):
data = self.get_valid_data(internal_ips_set=[{'subnet': factories.SubNetFactory.get_url()}])
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_user_can_define_instance_floating_ips(self):
subnet_url = factories.SubNetFactory.get_url(self.subnet)
floating_ip = self.openstack_tenant_fixture.floating_ip
data = self.get_valid_data(
floating_ips=[{'subnet': subnet_url, 'url': factories.FloatingIPFactory.get_url(floating_ip)}],
)
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
instance = models.Instance.objects.get(uuid=response.data['uuid'])
self.assertIn(floating_ip, instance.floating_ips)
def test_user_cannot_assign_floating_ip_from_other_settings_to_instance(self):
subnet_url = factories.SubNetFactory.get_url(self.subnet)
floating_ip = factories.FloatingIPFactory()
data = self.get_valid_data(
floating_ips=[{'subnet': subnet_url, 'url': factories.FloatingIPFactory.get_url(floating_ip)}],
)
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_user_cannot_assign_floating_ip_to_disconnected_subnet(self):
disconnected_subnet = factories.SubNetFactory(
settings=self.openstack_tenant_fixture.openstack_tenant_service_settings)
disconnected_subnet_url = factories.SubNetFactory.get_url(disconnected_subnet)
floating_ip = self.openstack_tenant_fixture.floating_ip
data = self.get_valid_data(
floating_ips=[{'subnet': disconnected_subnet_url, 'url': factories.FloatingIPFactory.get_url(floating_ip)}],
)
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_user_cannot_use_floating_ip_assigned_to_other_instance(self):
subnet_url = factories.SubNetFactory.get_url(self.subnet)
internal_ip = factories.InternalIPFactory(subnet=self.subnet)
floating_ip = factories.FloatingIPFactory(
settings=self.openstack_settings,
runtime_state='ACTIVE',
internal_ip=internal_ip
)
data = self.get_valid_data(
floating_ips=[{'subnet': subnet_url, 'url': factories.FloatingIPFactory.get_url(floating_ip)}],
)
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('floating_ips', response.data)
def test_user_can_assign_active_floating_ip(self):
subnet_url = factories.SubNetFactory.get_url(self.subnet)
floating_ip = factories.FloatingIPFactory(settings=self.openstack_settings, runtime_state='ACTIVE')
data = self.get_valid_data(
floating_ips=[{'subnet': subnet_url, 'url': factories.FloatingIPFactory.get_url(floating_ip)}],
)
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_user_can_allocate_floating_ip(self):
subnet_url = factories.SubNetFactory.get_url(self.subnet)
self.openstack_tenant_fixture.floating_ip.status = 'ACTIVE'
self.openstack_tenant_fixture.floating_ip.save()
data = self.get_valid_data(
floating_ips=[{'subnet': subnet_url}],
)
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
instance = models.Instance.objects.get(uuid=response.data['uuid'])
self.assertEqual(instance.floating_ips.count(), 1)
def test_user_cannot_allocate_floating_ip_if_quota_limit_is_reached(self):
self.openstack_settings.quotas.filter(name=self.openstack_settings.Quotas.floating_ip_count).update(limit=0)
subnet_url = factories.SubNetFactory.get_url(self.subnet)
self.openstack_tenant_fixture.floating_ip.status = 'ACTIVE'
self.openstack_tenant_fixture.floating_ip.save()
data = self.get_valid_data(
floating_ips=[{'subnet': subnet_url}],
)
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_cannot_create_instance_without_internal_ips(self):
data = self.get_valid_data()
del data['internal_ips_set']
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('internal_ips_set', response.data)
class InstanceDeleteTest(test_backend.BaseBackendTestCase):
def setUp(self):
super(InstanceDeleteTest, self).setUp()
self.instance = factories.InstanceFactory(
state=models.Instance.States.OK,
runtime_state=models.Instance.RuntimeStates.SHUTOFF,
backend_id='VALID_ID'
)
self.instance.increase_backend_quotas_usage()
self.mocked_nova().servers.get.side_effect = nova_exceptions.NotFound(code=404)
views.InstanceViewSet.async_executor = False
def tearDown(self):
super(InstanceDeleteTest, self).tearDown()
views.InstanceViewSet.async_executor = True
def mock_volumes(self, delete_data_volume=True):
self.data_volume = self.instance.volumes.get(bootable=False)
self.data_volume.backend_id = 'DATA_VOLUME_ID'
self.data_volume.state = models.Volume.States.OK
self.data_volume.save()
self.data_volume.increase_backend_quotas_usage()
self.system_volume = self.instance.volumes.get(bootable=True)
self.system_volume.backend_id = 'SYSTEM_VOLUME_ID'
self.system_volume.state = models.Volume.States.OK
self.system_volume.save()
self.system_volume.increase_backend_quotas_usage()
def get_volume(backend_id):
if not delete_data_volume and backend_id == self.data_volume.backend_id:
mocked_volume = mock.Mock()
mocked_volume.status = 'available'
return mocked_volume
raise cinder_exceptions.NotFound(code=404)
self.mocked_cinder().volumes.get.side_effect = get_volume
def delete_instance(self, query_params=None):
staff = structure_factories.UserFactory(is_staff=True)
self.client.force_authenticate(user=staff)
url = factories.InstanceFactory.get_url(self.instance)
if query_params:
url += '?' + urllib.parse.urlencode(query_params)
with override_settings(CELERY_ALWAYS_EAGER=True, CELERY_EAGER_PROPAGATES_EXCEPTIONS=True):
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED, response.data)
def assert_quota_usage(self, quotas, name, value):
self.assertEqual(quotas.get(name=name).usage, value)
def test_nova_methods_are_called_if_instance_is_deleted_with_volumes(self):
self.mock_volumes(True)
self.delete_instance()
nova = self.mocked_nova()
nova.servers.delete.assert_called_once_with(self.instance.backend_id)
nova.servers.get.assert_called_once_with(self.instance.backend_id)
self.assertFalse(nova.volumes.delete_server_volume.called)
def test_database_models_deleted(self):
self.mock_volumes(True)
self.delete_instance()
self.assertFalse(models.Instance.objects.filter(id=self.instance.id).exists())
for volume in self.instance.volumes.all():
self.assertFalse(models.Volume.objects.filter(id=volume.id).exists())
def test_quotas_updated_if_instance_is_deleted_with_volumes(self):
self.mock_volumes(True)
self.delete_instance()
self.instance.service_project_link.service.settings.refresh_from_db()
quotas = self.instance.service_project_link.service.settings.quotas
self.assert_quota_usage(quotas, 'instances', 0)
self.assert_quota_usage(quotas, 'vcpu', 0)
self.assert_quota_usage(quotas, 'ram', 0)
self.assert_quota_usage(quotas, 'volumes', 0)
self.assert_quota_usage(quotas, 'storage', 0)
def test_backend_methods_are_called_if_instance_is_deleted_without_volumes(self):
self.mock_volumes(False)
self.delete_instance({
'delete_volumes': False
})
nova = self.mocked_nova()
nova.volumes.delete_server_volume.assert_called_once_with(
self.instance.backend_id, self.data_volume.backend_id)
nova.servers.delete.assert_called_once_with(self.instance.backend_id)
nova.servers.get.assert_called_once_with(self.instance.backend_id)
def test_system_volume_is_deleted_but_data_volume_exists(self):
self.mock_volumes(False)
self.delete_instance({
'delete_volumes': False
})
self.assertFalse(models.Instance.objects.filter(id=self.instance.id).exists())
self.assertTrue(models.Volume.objects.filter(id=self.data_volume.id).exists())
self.assertFalse(models.Volume.objects.filter(id=self.system_volume.id).exists())
def test_quotas_updated_if_instance_is_deleted_without_volumes(self):
self.mock_volumes(False)
self.delete_instance({
'delete_volumes': False
})
settings = self.instance.service_project_link.service.settings
settings.refresh_from_db()
self.assert_quota_usage(settings.quotas, 'instances', 0)
self.assert_quota_usage(settings.quotas, 'vcpu', 0)
self.assert_quota_usage(settings.quotas, 'ram', 0)
self.assert_quota_usage(settings.quotas, 'volumes', 1)
self.assert_quota_usage(settings.quotas, 'storage', self.data_volume.size)
def test_instance_cannot_be_deleted_if_it_has_backups(self):
self.instance = factories.InstanceFactory(
state=models.Instance.States.OK,
runtime_state=models.Instance.RuntimeStates.SHUTOFF,
backend_id='VALID_ID'
)
staff = structure_factories.UserFactory(is_staff=True)
self.client.force_authenticate(user=staff)
factories.BackupFactory(instance=self.instance, state=models.Backup.States.OK)
url = factories.InstanceFactory.get_url(self.instance)
response = self.client.delete(url)
self.assertEqual(response.status_code, status.HTTP_409_CONFLICT, response.data)
def test_neutron_methods_are_called_if_instance_is_deleted_with_floating_ips(self):
fixture = fixtures.OpenStackTenantFixture()
internal_ip = factories.InternalIPFactory.create(instance=self.instance, subnet=fixture.subnet)
settings = self.instance.service_project_link.service.settings
floating_ip = factories.FloatingIPFactory.create(internal_ip=internal_ip, settings=settings)
self.delete_instance({'release_floating_ips': True})
self.mocked_neutron().delete_floatingip.assert_called_once_with(floating_ip.backend_id)
def test_neutron_methods_are_not_called_if_instance_does_not_have_any_floating_ips_yet(self):
self.delete_instance({'release_floating_ips': True})
self.assertEqual(self.mocked_neutron().delete_floatingip.call_count, 0)
def test_neutron_methods_are_not_called_if_user_did_not_ask_for_floating_ip_removal_explicitly(self):
self.mocked_neutron().show_floatingip.return_value = {'floatingip': {'status': 'DOWN'}}
fixture = fixtures.OpenStackTenantFixture()
internal_ip = factories.InternalIPFactory.create(instance=self.instance, subnet=fixture.subnet)
settings = self.instance.service_project_link.service.settings
factories.FloatingIPFactory.create(internal_ip=internal_ip, settings=settings)
self.delete_instance({'release_floating_ips': False})
self.assertEqual(self.mocked_neutron().delete_floatingip.call_count, 0)
class InstanceCreateBackupSchedule(test.APITransactionTestCase):
action_name = 'create_backup_schedule'
def setUp(self):
self.user = structure_factories.UserFactory.create(is_staff=True)
self.client.force_authenticate(user=self.user)
backupable = factories.InstanceFactory(state=models.Instance.States.OK)
self.create_url = factories.InstanceFactory.get_url(backupable, action=self.action_name)
self.backup_schedule_data = {
'name': 'test schedule',
'retention_time': 3,
'schedule': '0 * * * *',
'maximal_number_of_resources': 3,
}
def test_staff_can_create_backup_schedule(self):
response = self.client.post(self.create_url, self.backup_schedule_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['retention_time'], self.backup_schedule_data['retention_time'])
self.assertEqual(
response.data['maximal_number_of_resources'], self.backup_schedule_data['maximal_number_of_resources'])
self.assertEqual(response.data['schedule'], self.backup_schedule_data['schedule'])
def test_backup_schedule_default_state_is_OK(self):
response = self.client.post(self.create_url, self.backup_schedule_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
backup_schedule = models.BackupSchedule.objects.first()
self.assertIsNotNone(backup_schedule)
self.assertEqual(backup_schedule.state, backup_schedule.States.OK)
def test_backup_schedule_can_not_be_created_with_wrong_schedule(self):
# wrong schedule:
self.backup_schedule_data['schedule'] = 'wrong schedule'
response = self.client.post(self.create_url, self.backup_schedule_data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('schedule', response.content)
def test_backup_schedule_creation_with_correct_timezone(self):
backupable = factories.InstanceFactory(state=models.Instance.States.OK)
create_url = factories.InstanceFactory.get_url(backupable, action=self.action_name)
backup_schedule_data = {
'name': 'test schedule',
'retention_time': 3,
'schedule': '0 * * * *',
'timezone': 'Europe/London',
'maximal_number_of_resources': 3,
}
response = self.client.post(create_url, backup_schedule_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['timezone'], 'Europe/London')
def test_backup_schedule_creation_with_incorrect_timezone(self):
backupable = factories.InstanceFactory(state=models.Instance.States.OK)
create_url = factories.InstanceFactory.get_url(backupable, action=self.action_name)
backup_schedule_data = {
'name': 'test schedule',
'retention_time': 3,
'schedule': '0 * * * *',
'timezone': 'incorrect',
'maximal_number_of_resources': 3,
}
response = self.client.post(create_url, backup_schedule_data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('timezone', response.data)
def test_backup_schedule_creation_with_default_timezone(self):
backupable = factories.InstanceFactory(state=models.Instance.States.OK)
create_url = factories.InstanceFactory.get_url(backupable, action=self.action_name)
backup_schedule_data = {
'name': 'test schedule',
'retention_time': 3,
'schedule': '0 * * * *',
'maximal_number_of_resources': 3,
}
response = self.client.post(create_url, backup_schedule_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(response.data['timezone'], settings.TIME_ZONE)
class InstanceUpdateInternalIPsSetTest(test.APITransactionTestCase):
action_name = 'update_internal_ips_set'
def setUp(self):
self.fixture = fixtures.OpenStackTenantFixture()
self.client.force_authenticate(user=self.fixture.admin)
self.instance = self.fixture.instance
self.url = factories.InstanceFactory.get_url(self.instance, action=self.action_name)
def test_user_can_update_instance_internal_ips_set(self):
# instance had 2 internal IPs
ip_to_keep = factories.InternalIPFactory(instance=self.instance, subnet=self.fixture.subnet)
ip_to_delete = factories.InternalIPFactory(instance=self.instance)
# instance should be connected to new subnet
subnet_to_connect = factories.SubNetFactory(settings=self.fixture.openstack_tenant_service_settings)
response = self.client.post(self.url, data={
'internal_ips_set': [
{'subnet': factories.SubNetFactory.get_url(self.fixture.subnet)},
{'subnet': factories.SubNetFactory.get_url(subnet_to_connect)},
]
})
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertTrue(self.instance.internal_ips_set.filter(pk=ip_to_keep.pk).exists())
self.assertFalse(self.instance.internal_ips_set.filter(pk=ip_to_delete.pk).exists())
self.assertTrue(self.instance.internal_ips_set.filter(subnet=subnet_to_connect).exists())
def test_user_cannot_add_intenal_ip_from_different_settings(self):
subnet = factories.SubNetFactory()
response = self.client.post(self.url, data={
'internal_ips_set': [
{'subnet': factories.SubNetFactory.get_url(subnet)},
]
})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse(self.instance.internal_ips_set.filter(subnet=subnet).exists())
def test_user_cannot_connect_instance_to_one_subnet_twice(self):
response = self.client.post(self.url, data={
'internal_ips_set': [
{'subnet': factories.SubNetFactory.get_url(self.fixture.subnet)},
{'subnet': factories.SubNetFactory.get_url(self.fixture.subnet)},
]
})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertFalse(self.instance.internal_ips_set.filter(subnet=self.fixture.subnet).exists())
class InstanceUpdateFloatingIPsTest(test.APITransactionTestCase):
action_name = 'update_floating_ips'
def setUp(self):
self.fixture = fixtures.OpenStackTenantFixture()
self.fixture.openstack_tenant_service_settings.options = {'external_network_id': uuid.uuid4().hex}
self.fixture.openstack_tenant_service_settings.save()
self.client.force_authenticate(user=self.fixture.admin)
self.instance = self.fixture.instance
factories.InternalIPFactory.create(instance=self.instance, subnet=self.fixture.subnet)
self.url = factories.InstanceFactory.get_url(self.instance, action=self.action_name)
self.subnet_url = factories.SubNetFactory.get_url(self.fixture.subnet)
def test_user_can_update_instance_floating_ips(self):
floating_ip_url = factories.FloatingIPFactory.get_url(self.fixture.floating_ip)
data = {
'floating_ips': [
{'subnet': self.subnet_url, 'url': floating_ip_url},
]
}
response = self.client.post(self.url, data=data)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEqual(self.instance.floating_ips.count(), 1)
self.assertIn(self.fixture.floating_ip, self.instance.floating_ips)
def test_when_floating_ip_is_attached_action_details_are_updated(self):
floating_ip_url = factories.FloatingIPFactory.get_url(self.fixture.floating_ip)
data = {
'floating_ips': [
{'subnet': self.subnet_url, 'url': floating_ip_url},
]
}
self.client.post(self.url, data=data)
self.instance.refresh_from_db()
self.assertEqual(self.instance.action_details, {
'message': 'Attached floating IPs: %s.' % self.fixture.floating_ip.address,
'attached': [self.fixture.floating_ip.address],
'detached': [],
})
def test_when_floating_ip_is_detached_action_details_are_updated(self):
self.fixture.floating_ip.internal_ip = self.instance.internal_ips_set.first()
self.fixture.floating_ip.save()
self.client.post(self.url, data={
'floating_ips': []
})
self.instance.refresh_from_db()
self.assertEqual(self.instance.action_details, {
'message': 'Detached floating IPs: %s.' % self.fixture.floating_ip.address,
'attached': [],
'detached': [self.fixture.floating_ip.address],
})
def test_user_can_not_assign_floating_ip_used_by_other_instance(self):
internal_ip = factories.InternalIPFactory(subnet=self.fixture.subnet)
floating_ip = factories.FloatingIPFactory(
settings=self.fixture.openstack_tenant_service_settings,
runtime_state='DOWN',
internal_ip=internal_ip,
)
floating_ip_url = factories.FloatingIPFactory.get_url(floating_ip)
data = {
'floating_ips': [
{'subnet': self.subnet_url, 'url': floating_ip_url},
]
}
response = self.client.post(self.url, data=data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('floating_ips', response.data)
def test_user_cannot_add_floating_ip_via_subnet_that_is_not_connected_to_instance(self):
subnet_url = factories.SubNetFactory.get_url()
data = {'floating_ips': [{'subnet': subnet_url}]}
response = self.client.post(self.url, data=data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_user_can_remove_floating_ip_from_instance(self):
self.fixture.floating_ip.internal_ip = self.instance.internal_ips_set.first()
self.fixture.floating_ip.save()
data = {'floating_ips': []}
response = self.client.post(self.url, data=data)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEqual(self.instance.floating_ips.count(), 0)
def test_free_floating_ip_is_used_for_allocation(self):
external_network_id = self.fixture.openstack_tenant_service_settings.options['external_network_id']
self.fixture.floating_ip.backend_network_id = external_network_id
self.fixture.floating_ip.save()
data = {'floating_ips': [{'subnet': self.subnet_url}]}
response = self.client.post(self.url, data=data)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertIn(self.fixture.floating_ip, self.instance.floating_ips)
def test_user_cannot_use_same_subnet_twice(self):
data = {'floating_ips': [{'subnet': self.subnet_url}, {'subnet': self.subnet_url}]}
response = self.client.post(self.url, data=data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class InstanceBackupTest(test.APITransactionTestCase):
action_name = 'backup'
def setUp(self):
self.fixture = fixtures.OpenStackTenantFixture()
self.client.force_authenticate(self.fixture.owner)
def test_backup_can_be_created_for_instance_with_2_volumes(self):
url = factories.InstanceFactory.get_url(self.fixture.instance, action='backup')
payload = self.get_payload()
response = self.client.post(url, payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(models.Backup.objects.get(name=payload['name']).snapshots.count(), 2)
def test_backup_can_be_created_for_instance_only_with_system_volume(self):
instance = self.fixture.instance
instance.volumes.filter(bootable=False).delete()
url = factories.InstanceFactory.get_url(instance, action='backup')
payload = self.get_payload()
response = self.client.post(url, payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
self.assertEqual(models.Backup.objects.get(name=payload['name']).snapshots.count(), 1)
def test_backup_can_be_created_for_instance_with_3_volumes(self):
instance = self.fixture.instance
instance.volumes.add(factories.VolumeFactory(service_project_link=instance.service_project_link))
url = factories.InstanceFactory.get_url(instance, action='backup')
payload = self.get_payload()
response = self.client.post(url, payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
self.assertEqual(models.Backup.objects.get(name=payload['name']).snapshots.count(), 3)
def test_user_cannot_backup_unstable_instance(self):
instance = self.fixture.instance
instance.state = models.Instance.States.UPDATING
instance.save()
url = factories.InstanceFactory.get_url(instance, action='backup')
response = self.client.post(url, data={'name': 'test backup'})
self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
def get_payload(self):
return {
'name': 'backup_name'
}
class BaseInstanceImportTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.OpenStackTenantFixture()
def _generate_backend_instances(self, count=1):
instances = []
for i in range(count):
instance = factories.InstanceFactory()
instance.delete()
instances.append(instance)
return instances
class InstanceImportableResourcesTest(BaseInstanceImportTest):
def setUp(self):
super(InstanceImportableResourcesTest, self).setUp()
self.url = factories.InstanceFactory.get_list_url('importable_resources')
self.client.force_authenticate(self.fixture.owner)
@mock.patch('waldur_openstack.openstack_tenant.backend.OpenStackTenantBackend.get_instances_for_import')
def test_importable_instances_are_returned(self, get_instances_for_import_mock):
backend_instances = self._generate_backend_instances()
get_instances_for_import_mock.return_value = backend_instances
data = {'service_project_link': factories.OpenStackTenantServiceProjectLinkFactory.get_url(self.fixture.spl)}
response = self.client.get(self.url, data=data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), len(backend_instances))
returned_backend_ids = [item['backend_id'] for item in response.data]
expected_backend_ids = [item.backend_id for item in backend_instances]
self.assertItemsEqual(returned_backend_ids, expected_backend_ids)
get_instances_for_import_mock.assert_called()
class InstanceImportTest(BaseInstanceImportTest):
def setUp(self):
super(InstanceImportTest, self).setUp()
self.url = factories.InstanceFactory.get_list_url('import_resource')
self.client.force_authenticate(self.fixture.owner)
def _get_payload(self, backend_id):
return {
'backend_id': backend_id,
'service_project_link': factories.OpenStackTenantServiceProjectLinkFactory.get_url(self.fixture.spl),
}
@mock.patch('waldur_openstack.openstack_tenant.executors.InstancePullExecutor.execute')
@mock.patch('waldur_openstack.openstack_tenant.backend.OpenStackTenantBackend.import_instance')
def test_instance_can_be_imported(self, import_instance_mock, resource_import_execute_mock):
backend_id = 'backend_id'
def import_instance(backend_id, save, service_project_link):
return self._generate_backend_instances()[0]
import_instance_mock.side_effect = import_instance
payload = self._get_payload(backend_id)
response = self.client.post(self.url, payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.data)
resource_import_execute_mock.assert_called()
def test_existing_instance_cannot_be_imported(self):
payload = self._get_payload(factories.InstanceFactory().backend_id)
response = self.client.post(self.url, payload)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.data)
|
python
|
##########################################################################
#
# Copyright (c) 2008-2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
class RandomTest( unittest.TestCase ) :
def testCosineHemisphere( self ) :
r = IECore.Rand32()
v = r.cosineHemispherefVector( 1000 )
for i in range( 0, v.size() ) :
self.assert_( v[i].z >= 0 )
self.assertAlmostEqual( v[i].length(), 1, 6 )
def testBarycentric( self ) :
r = IECore.Rand32()
f = r.barycentricf()
self.assert_( ( f[0] + f[1] + f[2] ) == 1.0 )
d = r.barycentricd()
self.assert_( ( d[0] + d[1] + d[2] ) == 1.0 )
fvs = r.barycentricfVector( IECore.IntVectorData( [ 1, 2, 3, 4, 5 ] ) )
for i in range( 0, fvs.size() ) :
self.assert_( ( fvs[i][0] + fvs[i][1] + fvs[i][2] ) == 1.0 )
fv = r.barycentricfVector( 5 )
for i in range( 0, fv.size() ) :
self.assert_( ( fv[i][0] + fv[i][1] + fv[i][2] ) == 1.0 )
dvs = r.barycentricdVector( IECore.IntVectorData( [ 1, 2, 3, 4, 5 ] ) )
for i in range( 0, dvs.size() ) :
self.assert_( ( dvs[i][0] + dvs[i][1] + dvs[i][2] ) == 1.0 )
dv = r.barycentricdVector( 5 )
for i in range( 0, dv.size() ) :
self.assert_( ( dv[i][0] + dv[i][1] + dv[i][2] ) == 1.0 )
if __name__ == "__main__":
unittest.main()
|
python
|
# -*- coding: utf-8 -*-
import codecs
import numpy as np
import tensorflow as tf
from Transformer.config.hyperparams import Hyperparams as pm
class Data_helper(object):
def __init__(self):
self.pointer = 0
def mini_batch(self):
X, Y = self.load_train_datasets()
num_batch = len(X) // pm.batch_size
X = tf.convert_to_tensor(X, tf.int32)
Y = tf.convert_to_tensor(Y, tf.int32)
# Input Queue by CPU
input_queues = tf.train.slice_input_producer([X, Y])
# Get mini batch from Queue
x, y = tf.train.shuffle_batch(input_queues,
num_threads=8,
batch_size=pm.batch_size,
capacity=pm.batch_size * 64, # Max_number of batches in queue
min_after_dequeue=pm.batch_size * 32, # Min_number of batches in queue after dequeue
allow_smaller_final_batch=False)
return x, y, num_batch
def load_train_datasets(self):
de_sents = [line for line in codecs.open(pm.source_train, 'r', 'utf-8').read().split("\n") if line]
en_sents = [line for line in codecs.open(pm.target_train, 'r', 'utf-8').read().split("\n") if line]
x, y, sources, targets = self.generate(de_sents, en_sents)
return x, y
def load_test_datasets(self):
de_sents = [line for line in codecs.open(pm.source_test, 'r', 'utf-8').read().split("\n") if line]
en_sents = [line for line in codecs.open(pm.target_test, 'r', 'utf-8').read().split("\n") if line]
x, y, sources, targets = self.generate(de_sents, en_sents)
return x, sources, targets
def generate(self, source_sents, target_sents):
de2idx, idx2de = self.load_vocab(pm.DECODER_VOCAB)
en2idx, idx2en = self.load_vocab(pm.ENCODER_VOCAB)
x_list, y_list, Sources, Targets = [], [], [], []
for source_sent, target_sent in zip(source_sents, target_sents):
x = [de2idx.get(word, 1) for word in (source_sent + " <EOS>").split()]
y = [en2idx.get(word, 1) for word in (target_sent + " <EOS>").split()]
if max(len(x), len(y)) <= pm.maxlen:
x_list.append(np.array(x))
y_list.append(np.array(y))
Sources.append(source_sent)
Targets.append(target_sent)
# Padding 0(<PAD>)
x_np = np.zeros([len(x_list), pm.maxlen], np.int32)
y_np = np.zeros([len(y_list), pm.maxlen], np.int32)
for i, (x, y) in enumerate(zip(x_list, y_list)):
x_np[i] = np.lib.pad(x, [0, pm.maxlen - len(x)], 'constant', constant_values=(0, 0))
y_np[i] = np.lib.pad(y, [0, pm.maxlen - len(y)], 'constant', constant_values=(0, 0))
return x_np, y_np, Sources, Targets
def load_vocab(self, file):
vocab = [line.split()[0] for line in codecs.open(file, 'r', encoding='utf-8').read().splitlines() if int(line.split()[1]) >= pm.min_cnt]
word2idx = {word: idx for idx, word in enumerate(vocab)}
idx2word = {word2idx[word]: word for word in word2idx}
return word2idx, idx2word
def next(self, X, Sources, Targets, num_batch):
x = X[self.pointer * pm.batch_size: (self.pointer + 1) * pm.batch_size]
sources = Sources[self.pointer * pm.batch_size: (self.pointer + 1) * pm.batch_size]
targets = Targets[self.pointer * pm.batch_size: (self.pointer + 1) * pm.batch_size]
self.pointer = (self.pointer + 1) % num_batch
return x, sources, targets
def reset_pointer(self):
self.pointer = 0
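# Illustrative usage sketch (not part of the original module): mini_batch()
# builds a TensorFlow 1.x queue-based input pipeline, so consuming it needs a
# session plus queue runners. This assumes the corpora and vocabulary files
# referenced by Hyperparams exist on disk.
if __name__ == "__main__":
    helper = Data_helper()
    x, y, num_batch = helper.mini_batch()
    with tf.Session() as sess:
        sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)  # start feeding the queues
        x_batch, y_batch = sess.run([x, y])  # one (batch_size, maxlen) pair of id matrices
        coord.request_stop()
        coord.join(threads)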
|
python
|
import os
import glob
def delete_given_file(image_name):
file_name = image_name.split(".")[0]
IMG_PATH = "./annotated_dataset/img"
TXT_PATH = "./annotated_dataset/txt_label"
XML_PATH = "./annotated_dataset/xml_label"
img_file = f"{IMG_PATH}/{file_name}.jpg"
txt_file = f"{TXT_PATH}/{file_name}.txt"
xml_file = f"{XML_PATH}/{file_name}.xml"
files_list = [img_file, txt_file, xml_file]
int_files = len(files_list)
try:
for item in files_list:
os.remove(item)
if int_files == 3:
print(f"removed {int_files} file of {file_name}")
except:
print(f"FAIL: removing {file_name} failed")
def remove_img_file(image_name):
file_name = image_name.split(".")[0]
IMG_PATH = "./unlabeled-imgs"
img_file = f"{IMG_PATH}/{file_name}.jpg"
try:
os.remove(img_file)
print(f"removed {img_file} file")
except:
print(f"FAIL: removing {img_file} failed")
# delete_given_file("naver_0074.jpg")
removable_file_list = [
"naver_0615.jpg",
"naver_0508.jpg",
"naver_0353.jpg",
"naver_0677.jpg",
"naver_0592.jpg",
"naver_0443.jpg",
"naver_0904.jpg",
"naver_0460.jpg",
"naver_0832.jpg",
"naver_0388.jpg",
"naver_0408.jpg",
"naver_0513.jpg",
"naver_0429.jpg",
]
for file_item in removable_file_list:
# delete_given_file(file_item)
remove_img_file(file_item)
|
python
|
#!/usr/bin/env python
# Copyright (c) 2020 - for information on the respective copyright owner
# see the NOTICE file and/or the repository
# <https://github.com/boschresearch/amira-blender-rendering>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bpy
from mathutils import Vector
from amira_blender_rendering.utils.blender import clear_orphaned_materials, remove_material_nodes, add_default_material
from amira_blender_rendering.utils import material as mutil
# from amira_blender_rendering.utils.logging import get_logger
# TODO: change into MaterialNodesMetalToolCap class
# TODO: it is really tedious and error-prone to set up materials this way. We
# should invest the time to write a blender plugin that generates
# python-code for us, or loads node setups from a configuration file, or
# something along the lines...
def setup_material(material: bpy.types.Material, empty: bpy.types.Object = None):
"""Setup material nodes for the metal tool cap"""
# TODO: refactor into smaller node-creation functions that can be re-used elsewhere
# logger = get_logger()
tree = material.node_tree
nodes = tree.nodes
# check if we have default nodes
n_output, n_bsdf = mutil.check_default_material(material)
# set BSDF default values
n_bsdf.inputs['Subsurface'].default_value = 0.6
n_bsdf.inputs['Subsurface Color'].default_value = (0.8, 0.444, 0.444, 1.0)
n_bsdf.inputs['Metallic'].default_value = 1.0
# thin metallic surface lines (used primarily for normal/bump map computation)
n_texcoord_bump = nodes.new('ShaderNodeTexCoord')
# setup empty (reference for distance computations)
if empty is None:
# get currently selected object
obj = bpy.context.object
# add empty
bpy.ops.object.empty_add(type='PLAIN_AXES')
empty = bpy.context.object
# locate at the top of the object
v0 = Vector(obj.bound_box[1])
v1 = Vector(obj.bound_box[2])
v2 = Vector(obj.bound_box[5])
v3 = Vector(obj.bound_box[6])
empty.location = (v0 + v1 + v2 + v3) / 4
# rotate into object space. afterwards we'll have linkage via parenting
empty.location = obj.matrix_world @ empty.location
# copy rotation
empty.rotation_euler = obj.rotation_euler
# deselect all
bpy.ops.object.select_all(action='DESELECT')
# take care to re-select everything
empty.select_set(state=True)
obj.select_set(state=True)
# make obj active again (will become parent of all selected objects)
bpy.context.view_layer.objects.active = obj
# make parent, keep transform
bpy.ops.object.parent_set(type='OBJECT', xmirror=False, keep_transform=True)
# set the empty as input for the texture
n_texcoord_bump.object = empty
# (dot)^2 (distance from empty)
n_dot = nodes.new('ShaderNodeVectorMath')
n_dot.operation = 'DOT_PRODUCT'
tree.links.new(n_texcoord_bump.outputs['Object'], n_dot.inputs[0])
tree.links.new(n_texcoord_bump.outputs['Object'], n_dot.inputs[1])
n_pow = nodes.new('ShaderNodeMath')
n_pow.operation = 'POWER'
tree.links.new(n_dot.outputs[1], n_pow.inputs[0])
# mapping input from empty to noise
n_mapping = nodes.new('ShaderNodeMapping')
tree.links.new(n_texcoord_bump.outputs['Object'], n_mapping.inputs[0])
# generate and link up required noise textures
n_noise0 = nodes.new('ShaderNodeTexNoise')
n_noise0.inputs['Scale'].default_value = 1.0
n_noise0.inputs['Detail'].default_value = 1.0
n_noise0.inputs['Distortion'].default_value = 2.0
tree.links.new(n_pow.outputs[0], n_noise0.inputs[0])
n_noise1 = nodes.new('ShaderNodeTexNoise')
n_noise1.inputs['Scale'].default_value = 300.0
n_noise1.inputs['Detail'].default_value = 0.0
n_noise1.inputs['Distortion'].default_value = 0.0
tree.links.new(n_pow.outputs[0], n_noise1.inputs[0])
# XXX: is this noise required?
n_noise2 = nodes.new('ShaderNodeTexNoise')
n_noise2.inputs['Scale'].default_value = 0.0
n_noise2.inputs['Detail'].default_value = 0.0
n_noise2.inputs['Distortion'].default_value = 0.1
tree.links.new(n_mapping.outputs['Vector'], n_noise2.inputs[0])
n_noise3 = nodes.new('ShaderNodeTexNoise')
n_noise3.inputs['Scale'].default_value = 5.0
n_noise3.inputs['Detail'].default_value = 2.0
n_noise3.inputs['Distortion'].default_value = 0.0
tree.links.new(n_mapping.outputs['Vector'], n_noise3.inputs[0])
# color output
n_colorramp_col = nodes.new('ShaderNodeValToRGB')
n_colorramp_col.color_ramp.color_mode = 'RGB'
n_colorramp_col.color_ramp.interpolation = 'LINEAR'
n_colorramp_col.color_ramp.elements[0].position = 0.118
n_colorramp_col.color_ramp.elements[1].position = 0.727
tree.links.new(n_noise0.outputs['Fac'], n_colorramp_col.inputs['Fac'])
n_output_color = nodes.new('ShaderNodeMixRGB')
n_output_color.inputs['Fac'].default_value = 0.400
n_output_color.inputs['Color1'].default_value = (0.485, 0.485, 0.485, 1.0)
tree.links.new(n_colorramp_col.outputs['Color'], n_output_color.inputs['Color2'])
# roughness finish
n_mul_r = nodes.new('ShaderNodeMath')
n_mul_r.operation = 'MULTIPLY'
n_mul_r.inputs[1].default_value = 0.100
tree.links.new(n_noise3.outputs['Fac'], n_mul_r.inputs[0])
n_output_roughness = nodes.new('ShaderNodeMath')
n_output_roughness.operation = 'ADD'
n_output_roughness.inputs[1].default_value = 0.050
tree.links.new(n_mul_r.outputs[0], n_output_roughness.inputs[0])
# math nodes to mix noise with distance and get ring-effect (modulo), leading to bump map
n_add0 = nodes.new('ShaderNodeMath')
n_add0.operation = 'ADD'
tree.links.new(n_pow.outputs[0], n_add0.inputs[0])
tree.links.new(n_noise2.outputs['Fac'], n_add0.inputs[1])
n_mul0 = nodes.new('ShaderNodeMath')
n_mul0.operation = 'MULTIPLY'
n_mul0.inputs[1].default_value = 300.000
tree.links.new(n_add0.outputs[0], n_mul0.inputs[0])
n_mod0 = nodes.new('ShaderNodeMath')
n_mod0.operation = 'MODULO'
n_mod0.inputs[1].default_value = 2.000
tree.links.new(n_mul0.outputs[0], n_mod0.inputs[0])
n_mul1 = nodes.new('ShaderNodeMath')
n_mul1.operation = 'MULTIPLY'
tree.links.new(n_noise1.outputs['Fac'], n_mul1.inputs[0])
tree.links.new(n_mod0.outputs[0], n_mul1.inputs[1])
n_min_n = nodes.new('ShaderNodeMath')
n_min_n.operation = 'MINIMUM'
tree.links.new(n_noise1.outputs['Fac'], n_min_n.inputs[0])
tree.links.new(n_mul1.outputs[0], n_min_n.inputs[1])
n_colorramp_rough = nodes.new('ShaderNodeValToRGB')
n_colorramp_rough.color_ramp.color_mode = 'RGB'
n_colorramp_rough.color_ramp.interpolation = 'LINEAR'
n_colorramp_rough.color_ramp.elements[0].position = 0.159
n_colorramp_rough.color_ramp.elements[1].position = 0.541
tree.links.new(n_min_n.outputs[0], n_colorramp_rough.inputs[0])
n_output_normal = nodes.new('ShaderNodeBump')
n_output_normal.inputs['Strength'].default_value = 0.075
n_output_normal.inputs['Distance'].default_value = 1.000
tree.links.new(n_colorramp_rough.outputs['Color'], n_output_normal.inputs['Height'])
# output nodes:
# n_output_color -> color / outputs['Color']
# n_output_roughness -> roughness / outputs['Value']
# n_output_normal -> normal / outputs['Normal']
# hook to bsdf shader node
tree.links.new(n_output_color.outputs['Color'], n_bsdf.inputs['Base Color'])
tree.links.new(n_output_roughness.outputs['Value'], n_bsdf.inputs['Roughness'])
tree.links.new(n_output_normal.outputs['Normal'], n_bsdf.inputs['Normal'])
# TODO: this should become a unit test
def main():
"""First tear down any material assigned with the object, then create everything from scratch"""
remove_material_nodes()
clear_orphaned_materials()
mat = add_default_material()
setup_material(mat)
if __name__ == "__main__":
main()
|
python
|
"""
Transitions (Perturbation Kernels)
==================================
Perturbation strategies. The classes defined here transition the current
population to the next one. pyABC implements global and local transitions.
Proposals for the subsequent generation are drawn from density estimates
of the current generation.
This is equivalent to perturbing randomly chosen particles.
These can be passed to :class:`pyabc.smc.ABCSMC` via the ``transitions``
keyword argument.
"""
from .base import Transition, DiscreteTransition
from .multivariatenormal import (MultivariateNormalTransition,
silverman_rule_of_thumb,
scott_rule_of_thumb)
from .exceptions import NotEnoughParticles
from .model_selection import GridSearchCV
from .local_transition import LocalTransition
from .randomwalk import DiscreteRandomWalkTransition
__all__ = [
"Transition",
"DiscreteTransition",
"MultivariateNormalTransition",
"GridSearchCV",
"NotEnoughParticles",
"LocalTransition",
"scott_rule_of_thumb",
"silverman_rule_of_thumb",
"DiscreteRandomWalkTransition",
]
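# Illustrative usage (a sketch, not part of this module): as described in the
# module docstring, a transition instance is passed to ABCSMC via the
# ``transitions`` keyword. ``model``, ``prior`` and ``distance`` below are
# placeholders for user-supplied objects, and the import path of ABCSMC is
# assumed to be the top-level ``pyabc`` package.
#
#     from pyabc import ABCSMC
#     abc = ABCSMC(model, prior, distance,
#                  transitions=MultivariateNormalTransition())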
|
python
|
from test.ga.ga import GaTestCase
from test.ga.population import PopulationTestCase
from test.ga.individual import IndividualTestCase
__all__ = (
"GaTestCase", "PopulationTestCase",
"IndividualTestCase"
)
|
python
|
def get_key(self):
    # 'object' is a placeholder target: the isinstance expression was left incomplete in the source fragment
    if isinstance(self.instance, object):
        pass
def clean():
    pass
|
python
|
from screen.drawing.color import *
from screen.drawing.color import __all__ as _color__all__
from screen.drawing.colorinterpolationmethod import *
from screen.drawing.colorinterpolationmethod import __all__ as _colorinterpolationmethod__all__
from screen.drawing.style import *
from screen.drawing.style import __all__ as _style__all__
__all__ = [
*_color__all__,
*_colorinterpolationmethod__all__,
*_style__all__,
]
|
python
|
from types import FunctionType
import backends
__storage = backends.default()
def set_storage(BackendInstance):
global __storage
__storage = BackendInstance
def make_cached(make_key, f):
def cached(*args, **kwargs):
cache_key = make_key(args=args, kwargs=kwargs)
if __storage.has(cache_key):
return __storage.get(cache_key)
value = f(*args, **kwargs)
__storage.set(cache_key, value)
return value
return cached
def cache_function(function_or_key):
key = 'function:'
if type(function_or_key) is FunctionType:
"""No args to decorator makes the first arg the
function to be decorated"""
f = function_or_key
key = key + f.__name__
def make_key(args=None, kwargs=None):
return key + f.__name__ + str(args) + str(kwargs)
return make_cached(make_key, f)
else:
"""Arguments have been passed to the decorator.
The user wants to override automatic key creation and always
use the same, so do that here"""
key += function_or_key
def make_decorator(f):
def make_key(args=None, kwargs=None):
return key + ':' + f.__name__
return make_cached(make_key, f)
return make_decorator
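# Usage sketch for cache_function (illustrative only; the decorated functions
# below are hypothetical). With no argument the cache key is built from the
# function name and the call arguments, so each distinct call is cached
# separately; with a string argument the fixed key
# 'function:<string>:<function name>' is used for every call.
#
#     @cache_function
#     def slow_add(a, b):
#         return a + b
#
#     @cache_function('answer')
#     def compute_answer():
#         return 42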
__register = []
__open_queue = False
__in_init = False
__cache = {}
__next_provider = None
__update = []
def __register_update(id_, values):
__update.append((id_, values))
def do_updates():
global __update
for id_, values in __update:
__storage.set(id_, values)
__update = []
def __do_queue():
global __register
global __cache
global __open_queue
__open_queue = False
for id_, self, provider in __register:
if not __storage.has(id_):
__storage.set(id_, provider(self))
self.__cached__ = __storage.get(id_)
__register = []
def __register_class(id_, self, provider):
global __open_queue
__register.append((id_, self, provider))
__open_queue = True
def __make_id(cls, self, id_attribute):
return 'class:' + cls.__name__ + str(self.__dict__[id_attribute])
def __should_do_queue(self):
if not __open_queue:
return False
if '__in_init' in self.__dict__:
if self.__dict__['__in_init']:
return False
else:
return False
return True
def cache_class(id_attribute):
"""Cachable attributes don't have to be specified since
self.__cached__.keys() will provide all attributes that were
retrieved from cache (and could subsequently be updated).
"""
def make_class(cls):
global __next_provider
if __next_provider is None:
raise LookupError("No provider function declared. Put"
+ " the 'cache_provider' decorator on the"
+ " function that returns data for the"
+ " instance")
provider_function = __next_provider
__next_provider = None
old_init = cls.__init__
def new_init(self, *args, **kwargs):
self.__in_init = True
old_init(self, *args, **kwargs)
self.__in_init = False
__register_class(__make_id(cls, self, id_attribute),
self, provider_function)
cls.__init__ = new_init
old_getattribute = cls.__getattribute__
def new_getattribute(self, key):
if key != '__dict__' and key != '__cached__':
if __should_do_queue(self):
__do_queue()
if hasattr(self, '__cached__') and key in self.__cached__:
return self.__cached__[key]
return old_getattribute(self, key)
cls.__getattribute__ = new_getattribute
old_setattr = cls.__setattr__
def new_setattr(self, key, value):
if key != '__cache__':
if __should_do_queue(self):
__do_queue()
if hasattr(self, '__cached__'):
"""Only check for updatable cache values
when a cache dict exists"""
if not hasattr(self, '__cachable_attrs'):
self.__dict__['__cachable_attrs'] = \
self.__dict__['__cached__'].keys()
if key in self.__dict__['__cachable_attrs']:
if key != self.__dict__['__cached__'][key]:
self.__dict__['__cached__'][key] = value
__register_update(
__make_id(cls, self, id_attribute),
self.__cached__)
return
old_setattr(self, key, value)
cls.__setattr__ = new_setattr
def hasattr(self, key):
if __should_do_queue(self):
__do_queue()
if '__cache__' in self.__dict__:
if key in self.__dict__['__cache__']:
return True
if key in self.__dict__:
return True
return False
cls.__hasattr__ = hasattr
return cls
return make_class
def cache_provider(f):
global __next_provider
__next_provider = f
return f
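# Usage sketch for cache_provider + cache_class (illustrative only; the class
# and lookup below are hypothetical). The provider returns the dict of cached
# values for an instance; attributes present in that dict are then read from
# the cache backend and written back through __setattr__ when they change.
# The provider decorator must run before the class definition so that
# cache_class can pick it up.
#
#     @cache_provider
#     def load_user_data(user):
#         return {'display_name': expensive_lookup(user.user_id)}
#
#     @cache_class('user_id')
#     class User(object):
#         def __init__(self, user_id):
#             self.user_id = user_id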
|
python
|
"""
Created on Wed Jan 15 11:17:10 2020
@author: mesch
"""
from colorama import init, Fore, Back
init(autoreset=True) # convert ANSI color codes to Windows console colors
import copy
from pyqum.instrument.benchtop import RSA5 as MXA
from pyqum.instrument.benchtop import PSGA
from pyqum.instrument.modular import AWG
from pyqum.instrument.logger import status_code
from pyqum.instrument.analyzer import curve
from numpy import sin, cos, pi, array, float64, sum, dot
# Initialize instruments:
# PSGA
saga = PSGA.Initiate()
PSGA.rfoutput(saga, action=['Set', 1])
PSGA.frequency(saga, action=['Set', "5.5" + "GHz"])
PSGA.power(saga, action=['Set', "13" + "dBm"])
# SA
mxa = MXA.Initiate()
MXA.frequency(mxa, action=['Set','5.525GHz'])
MXA.fspan(mxa, action=['Set','150MHz'])
MXA.rbw(mxa, action=['Set','1MHz'])
MXA.vbw(mxa, action=['Set','100kHz'])
MXA.trigger_source(mxa, action=['Set','EXTernal1'])
# AWG
awgsess = AWG.InitWithOptions()
AWG.Abort_Gen(awgsess)
AWG.ref_clock_source(awgsess, action=['Set',int(1)]) # External 10MHz clock-reference
AWG.predistortion_enabled(awgsess, action=['Set',True])
AWG.output_mode_adv(awgsess, action=['Set',int(2)]) # Sequence output mode
AWG.arb_sample_rate(awgsess, action=['Set',float(1250000000)]) # maximum sampling rate
AWG.active_marker(awgsess, action=['Set','3']) # master
AWG.marker_delay(awgsess, action=['Set',float(0)])
AWG.marker_pulse_width(awgsess, action=['Set',float(1e-7)])
AWG.marker_source(awgsess, action=['Set',int(7)])
samplingrate = AWG.arb_sample_rate(awgsess)[1]
dt = 1e9/samplingrate # in ns
# PRESET Output:
for ch in range(2):
channel = str(ch + 1)
AWG.output_config(awgsess, RepCap=channel, action=["Set", 0]) # Single-ended
AWG.output_filter_bandwidth(awgsess, RepCap=channel, action=["Set", 0])
AWG.arb_gain(awgsess, RepCap=channel, action=["Set", 0.5])
AWG.output_impedance(awgsess, RepCap=channel, action=["Set", 50])
# output settings:
for ch in range(2):
channel = str(ch + 1)
AWG.output_enabled(awgsess, RepCap=channel, action=["Set", int(1)]) # ON
AWG.output_filter_enabled(awgsess, RepCap=channel, action=["Set", True])
AWG.output_config(awgsess, RepCap=channel, action=["Set", int(2)]) # Amplified 1:2
AWG.output_filter_bandwidth(awgsess, RepCap=channel, action=["Set", 0])
AWG.arb_gain(awgsess, RepCap=channel, action=["Set", 0.5])
AWG.output_impedance(awgsess, RepCap=channel, action=["Set", 50])
def AWG_Sinewave(ifreq,IQparams):
'''
ifreq: IF frequency in MHz
'''
AWG.Clear_ArbMemory(awgsess)
WAVE = []
Ioffset, Qoffset, ampratio, Iphase, Qphase = IQparams
if (ampratio > -1.0) and (ampratio < 1.0):
Iamp = 1
Qamp = Iamp * ampratio
else:
Qamp = 1
Iamp = Qamp/ampratio
ifvoltag = [min(abs(Qamp),1), min(abs(Iamp),1)] # contain amplitude within 1V
iffunction = ['sin', 'cos']
iffreq = [ifreq, ifreq]
ifoffset = [Qoffset, Ioffset]
ifphase = [Qphase, Iphase]
# construct waveform:
for ch in range(2):
channel = str(ch + 1)
Nperiod = int(1000/iffreq[ch]/dt) # of points per period
Nperiod *= 8
wavefom = [ifvoltag[ch] * eval(iffunction[ch] + '(x*%s*%s/1000*2*pi + %s/180*pi)' %(dt,iffreq[ch],ifphase[ch])) + ifoffset[ch] for x in range(Nperiod)]
createdwave = AWG.CreateArbWaveform(awgsess, wavefom)
WAVE.append(createdwave[1])
# Building Sequences:
for ch in range(2):
channel = str(ch + 1)
        createdseqhandl = AWG.CreateArbSequence(awgsess, [WAVE[ch]], [1]) # loop count can be >1 if a longer sequence is needed in the future
# Channel Assignment:
AWG.arb_sequence_handle(awgsess, RepCap=channel, action=["Set", createdseqhandl[1]])
# Trigger Settings:
for ch in range(2):
channel = str(ch + 1)
AWG.operation_mode(awgsess, RepCap=channel, action=["Set", 0])
AWG.trigger_source_adv(awgsess, RepCap=channel, action=["Set", 0])
AWG.Init_Gen(awgsess)
AWG.Send_Pulse(awgsess, 1)
return
class IQ_Cal:
def __init__(self, suppression='LO', IQparams=array([0.,0.,1.,0.,0.]), STEP=array([-0.5,-0.5,0.5,12,12]), ratio=1):
self.IQparams = IQparams
self.STEP = STEP
self.suppression = suppression
if self.suppression == 'LO':
self.var = copy.copy(self.IQparams[:2])
self.step = self.STEP[:2]/(10**(ratio+1))
elif self.suppression == 'MR':
self.var = copy.copy(self.IQparams[2:])
self.step = self.STEP[2:]/(2**(ratio+1))
def nelder_mead(self, no_improve_thr=10e-6, no_improv_break=10, max_iter=0,
alpha=1., gamma=2., rho=-0.5, sigma=0.5, time=0):
'''
Pure Python/Numpy implementation of the Nelder-Mead algorithm.
Reference: https://en.wikipedia.org/wiki/Nelder%E2%80%93Mead_method
'''
'''
@param f (function): function to optimize, must return a scalar score
and operate over a numpy array of the same dimensions as x_start
@param x_start (numpy array): initial position
@param step (float): look-around radius in initial step
@no_improv_thr, no_improv_break (float, int): break after no_improv_break iterations with
an improvement lower than no_improv_thr
@max_iter (int): always break after this number of iterations.
Set it to 0 to loop indefinitely.
@alpha, gamma, rho, sigma (floats): parameters of the algorithm
(see Wikipedia page for reference)
return: tuple (best parameter array, best score)
'''
index = time%2
dim = len(self.var)
"tell AWG to apply DC offset(x) on I & Q"
AWG_Sinewave(25, self.IQparams)
"read signal amplitude at LO frequency in and assign it as score"
MXA.preamp(mxa, action=['Set','OFF'])
# MXA.preamp_band(mxa, action=['Set','FULL'])
# MXA.attenuation(mxa, action=['Set','14dB'])
MXA.attenuation_auto(mxa, action=['Set','ON'])
power = float((MXA.fpower(mxa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((MXA.fpower(mxa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
prev_best = power
no_improv = 0
res = [[self.var, prev_best]]
# while True:
# print("LOPower: %s" %power)
# if bool(input('hello')): break
for i in range(dim):
x = copy.copy(self.var)
x[i] = x[i] + self.step[i]
"tell AWG to apply DC offset(x) on I & Q"
if self.suppression == 'LO': self.IQparams[:2] = x
elif self.suppression == 'MR': self.IQparams[2:] = x
AWG_Sinewave(25, self.IQparams)
"read signal amplitude at LO frequency in and assign it as score"
power = float((MXA.fpower(mxa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((MXA.fpower(mxa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
score = power
res.append([x, score])
# simplex iter
iters = 0
while 1:
# order
res.sort(key=lambda x: x[1])
if self.suppression == 'LO': self.IQparams[:2] = res[0][0]
elif self.suppression == 'MR': self.IQparams[2:] = res[0][0]
# print(Fore.YELLOW + "\rProgress time#%s: %s" %(time, self.IQparams), end='\r', flush=True)
best = res[0][1]
# break after max_iter
if max_iter and iters >= max_iter:
return res[0]
iters += 1
# AWG_Sinewave(25, self.IQparams)
# if float((RSA5.fpower(rsa, str(5.5)+'GHz')).split('dBm')[0]) < -65. and float((RSA5.fpower(rsa, str(5.475)+'GHz')).split('dBm')[0]) < -65.:
# return array([self.IQparams, best, 0.])
if best < prev_best - no_improve_thr or best == prev_best:
no_improv = 0
prev_best = best
else:
no_improv += 1
if no_improv >= no_improv_break:
AWG_Sinewave(25, self.IQparams)
print("Rest at Optimized IQ Settings: %s" %self.IQparams)
return array([self.IQparams, best]) # Optimized parameters
# centroid
x0 = [0.] * dim
for tup in res[:-1]:
for i, c in enumerate(tup[0]):
x0[i] += c / (len(res)-1)
# reflection
xr = x0 + alpha*(x0 - res[-1][0])
if self.suppression == 'LO': self.IQparams[:2] = xr
elif self.suppression == 'MR': self.IQparams[2:] = xr
"tell AWG to apply DC offset(x) on I & Q"
AWG_Sinewave(25, self.IQparams)
"read signal amplitude at LO frequency in and assign it as score"
power = float((MXA.fpower(mxa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((MXA.fpower(mxa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
rscore = power
if res[0][1] <= rscore < res[-2][1]:
del res[-1]
res.append([xr, rscore])
continue
# expansion
if rscore < res[0][1]:
xe = x0 + gamma*(x0 - res[-1][0])
if self.suppression == 'LO': self.IQparams[:2] = xe
elif self.suppression == 'MR': self.IQparams[2:] = xe
"tell AWG to apply DC offset(x) on I & Q"
AWG_Sinewave(25, self.IQparams)
"read signal amplitude at LO frequency in and assign it as score"
power = float((MXA.fpower(mxa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((MXA.fpower(mxa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
escore = power
if escore < rscore:
del res[-1]
res.append([xe, escore])
continue
else:
del res[-1]
res.append([xr, rscore])
continue
# contraction
xc = x0 + rho*(x0 - res[-1][0])
if self.suppression == 'LO': self.IQparams[:2] = xc
elif self.suppression == 'MR': self.IQparams[2:] = xc
"tell AWG to apply DC offset(x) on I & Q"
AWG_Sinewave(25, self.IQparams)
"read signal amplitude at LO frequency in and assign it as score"
power = float((MXA.fpower(mxa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((MXA.fpower(mxa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
cscore = power
if cscore < res[-1][1]:
del res[-1]
res.append([xc, cscore])
continue
# reduction
x1 = res[0][0]
nres = []
for tup in res:
redx = x1 + sigma*(tup[0] - x1)
if self.suppression == 'LO': self.IQparams[:2] = redx
elif self.suppression == 'MR': self.IQparams[2:] = redx
"tell AWG to apply DC offset(x) on I & Q"
AWG_Sinewave(25, self.IQparams)
"read signal amplitude at LO frequency in and assign it as score"
power = float((MXA.fpower(mxa, str(5.5 - 0.025*index)+'GHz')).split('dBm')[0]) - index*float((MXA.fpower(mxa, str(5.5 + 0.025*index)+'GHz')).split('dBm')[0])
score = power
nres.append([redx, score])
res = nres
if __name__ == "__main__":
LO_0 = float((MXA.fpower(mxa, str(5.5)+'GHz')).split('dBm')[0])
Mirror_0 = float((MXA.fpower(mxa, str(5.475)+'GHz')).split('dBm')[0])
Initial = [0., 0., 1., 0., 0.]
time = 0
OPT = IQ_Cal()
OPT.IQparams = array(Initial,dtype=float64) #overwrite initial values
result = OPT.nelder_mead(time = time)
prev = result[0]
no_improv, no_improv_thr, no_improv_break = 0, 1e-5, 6
LO, Mirror, T = [], [], []
while True:
time += 1
if time%2: OPT = IQ_Cal('MR',result[0], ratio = time)
else: OPT = IQ_Cal('LO',result[0], ratio = time)
result = OPT.nelder_mead(time = time)
# if len(result) == 3:
# print("Optimized IQ parameters:\n %s" %result)
# break
LO.append(float((MXA.fpower(mxa, str(5.5)+'GHz')).split('dBm')[0]) - LO_0)
Mirror.append(float((MXA.fpower(mxa, str(5.475)+'GHz')).split('dBm')[0]) - Mirror_0)
print(Back.BLUE + Fore.WHITE + "Mirror has been suppressed for %s from %s" %(Mirror[-1],Mirror_0))
T.append(time)
ssq = sum((result[0] - prev)**2)
if ssq > no_improv_thr:
no_improv = 0
prev = result[0]
else:
no_improv += 1
if no_improv >= no_improv_break:
AWG_Sinewave(25, OPT.IQparams)
print(type(OPT.IQparams))
print("Optimized IQ parameters:\n %s" %result)
print("Amplitude Imbalance:\n %s" %OPT.IQparams[2])
if OPT.IQparams[3] > OPT.IQparams[4] and OPT.IQparams[3]-OPT.IQparams[4] < 180:
print("phase skew I-Q:\n %s" %(OPT.IQparams[3]-OPT.IQparams[4]))
if OPT.IQparams[3] > OPT.IQparams[4] and OPT.IQparams[3]-OPT.IQparams[4] > 180:
print("phase skew Q-I:\n %s" %(360-(OPT.IQparams[3]-OPT.IQparams[4])))
if (OPT.IQparams[4] > OPT.IQparams[3] and OPT.IQparams[4]-OPT.IQparams[3] < 180) or (OPT.IQparams[3] > OPT.IQparams[4] and OPT.IQparams[3]-OPT.IQparams[4] > 180):
print("phase skew Q-I:\n %s" %(OPT.IQparams[4]-OPT.IQparams[3]))
if (OPT.IQparams[2] > -1.0) and (OPT.IQparams[2] < 1.0):
Iamp = 1
Qamp = Iamp * OPT.IQparams[2]
else:
Qamp = 1
Iamp = Qamp/OPT.IQparams[2]
print("Ioffset:\n %s" %OPT.IQparams[0])
print("Qoffset:\n %s" %OPT.IQparams[1])
print("Iamp:\n %s" %Iamp)
print("Qamp:\n %s" %Qamp)
print("Iphase:\n %s" %OPT.IQparams[3])
print("Qphase:\n %s" %OPT.IQparams[4])
break
curve(T,LO,'LO Leakage vs time','T(#)','DLO(dB)')
curve(T,Mirror,'Mirror Image vs time','T(#)','DMirror(dB)')
# closing instruments:
ans = input("Press any keys to close AWG, PSGA and RSA-5 ")
AWG.Abort_Gen(awgsess)
AWG.close(awgsess)
PSGA.rfoutput(saga, action=['Set', 0])
PSGA.close(saga, False)
MXA.close(mxa,False)
|
python
|
from datetime import datetime
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from markdown import markdown
import bleach
from flask import current_app, request, url_for
from flask_login import UserMixin, AnonymousUserMixin
from app.exceptions import ValidationError
from . import db, login_manager
class Permission:
VIEW = 0x01
SEARCH = 0x02
EDIT = 0x04
#WRITE_ARTICLES = 0x04
# MODERATE_COMMENTS = 0x08
ADMINISTER = 0x80
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer)
users = db.relationship('User', backref='role', lazy='dynamic')
@staticmethod
def insert_roles():
roles = {
'Visitor':(Permission.VIEW,True),
'Inneruser': (Permission.SEARCH|Permission.VIEW , False),
'Manager': (Permission.SEARCH |
Permission.EDIT , False),
'Administrator': (0xff, False)
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.default = roles[r][1]
db.session.add(role)
db.session.commit()
def __repr__(self):
return '<Role %r>' % self.name
# Login module: user creation
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(64), unique=True, index=True)
username = db.Column(db.String(64), unique=True, index=True)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
password_hash = db.Column(db.String(128))
passwd=db.Column(db.String(32))
confirmed = db.Column(db.Boolean, default=False)
name = db.Column(db.String(64))
location = db.Column(db.String(64))
about_me = db.Column(db.Text())
    member_since = db.Column(db.DateTime(), default=datetime.utcnow) # creation time
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow) # last login time
    avatar_hash = db.Column(db.String(32))
    # associated bid data and strategy
    bids = db.relationship('Auction_data', backref='author', lazy='dynamic') # one-to-one; alternatively lazy='immediate', uselist=False
    actions = db.relationship('BID_action', backref='author', lazy='dynamic') # one-to-one; uselist=False
    @property # turns this method into a read-only class attribute
def password(self):
raise AttributeError('password is not a readable attribute')
    # hash the password
    @password.setter # @property with @x.setter makes the attribute readable and writable; adding @x.deleter also allows deletion
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def confirm(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
return True
    # check whether the user has the given permissions
def can(self, permissions):
return self.role is not None and \
(self.role.permissions & permissions) == permissions
def is_administrator(self):
return self.can(Permission.ADMINISTER)
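    # Example: with the roles defined above, a 'Manager' (SEARCH | EDIT) passes
    # can(Permission.SEARCH) and can(Permission.EDIT) but not
    # can(Permission.ADMINISTER); 'Administrator' (0xff) passes every check.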
    # refresh the user's last login time
    def ping(self):
        self.last_seen = datetime.utcnow() # UTC time
db.session.add(self)
    ### add a user avatar
def gravatar(self, size=100, default='identicon', rating='g'):
if request.is_secure:
url = 'https://secure.gravatar.com/avatar'
else:
url = 'http://www.gravatar.com/avatar'
hash = self.avatar_hash or hashlib.md5(
self.username.encode('utf-8')).hexdigest()
return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
url=url, hash=hash, size=size, default=default, rating=rating)
    # generate a signed token from the encoded user id field
def generate_auth_token(self, expiration):
s = Serializer(current_app.config['SECRET_KEY'],
expires_in=expiration)
return s.dumps({'id': self.id}).decode('ascii')
@staticmethod
def verify_auth_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except:
return None
return User.query.get(data['id'])
def __repr__(self):
return '<User %r>' % self.username
##### license-plate auction database
class Auction_data(db.Model):
__tablename__ = 'bids'
id = db.Column(db.Integer, primary_key=True)
IDnumber = db.Column(db.Integer)
BIDnumber = db.Column(db.Integer)
BIDpassword = db.Column(db.Integer)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id')) # matches the 'author' backref
# action_id =db.Column(db.Integer, db.ForeignKey('actions.id'))
def __repr__(self):
return '<Auction %r>' % self.IDnumber
def to_json(self):
json_post = {
'IDnumber': self.IDnumber,
'BIDnumber': self.BIDnumber,
'BIDpassword': self.BIDpassword,
'author': url_for('api.get_user', id=self.author_id,
_external=True),
}
return json_post
class BID_action(db.Model):
__tablename__ = 'actions'
id = db.Column(db.Integer, primary_key=True)
    diff = db.Column(db.Integer) # price difference at the reference time
    refer_time = db.Column(db.Integer) # reference time
    bid_time = db.Column(db.Integer) # bidding deadline
    delay_time = db.Column(db.Float) # bid delay time, 0.1~0.9
    ahead_price = db.Column(db.Integer) # amount to bid ahead of the current price
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
# auctions = db.relationship('Auction_data', backref='action', lazy='immediate') #一对一
def __repr__(self):
return '<BID %r>' % self.diff
# license-plate auction login information
class login_user(db.Model):
__tablename__='Account'
id=db.Column(db.Integer,primary_key=True)
    name=db.Column(db.String) # username, same as the User name
    password=db.Column(db.String) # same as Password, stored as a hash
    login=db.Column(db.Integer) # login status
    CODE=db.Column(db.String) # bid sheet number in use
    codepsd=db.Column(db.String) # bid sheet login password
    ID_number=db.Column(db.Integer)
    IP=db.Column(db.String) # login IP (column type assumed String; the original omitted the db.Column(...) call)
    MAC=db.Column(db.String) # login MAC address
    COUNT=db.Column(db.Integer) # login status
# Inherits from Flask-Login's AnonymousUserMixin and is set as the value of current_user when no user is logged in,
# so the app can call current_user.can() and current_user.is_administrator() without first checking whether a user is logged in.
class AnonymousUser(AnonymousUserMixin):
def can(self, permissions):
return False
def is_administrator(self):
return False
login_manager.anonymous_user = AnonymousUser
# callback that loads a user from the given identifier
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
|
python
|
from typing import Any, Dict, Iterable, List
import pandas as pd
from fugue.dataframe import ArrayDataFrame
from fugue.exceptions import FugueInterfacelessError
from fugue.extensions.transformer import (
Transformer,
_to_transformer,
transformer,
register_transformer,
)
from pytest import raises
from triad.collections.schema import Schema
from triad.utils.hash import to_uuid
def test_transformer():
assert isinstance(t1, Transformer)
df = ArrayDataFrame([[0]], "a:int")
t1._output_schema = t1.get_output_schema(df)
assert t1.output_schema == "a:int,b:int"
t2._output_schema = t2.get_output_schema(df)
assert t2.output_schema == "b:int,a:int"
t3._output_schema = t3.get_output_schema(df)
assert t3.output_schema == "a:int,b:int"
assert [[0, 1]] == list(t3(df.as_array_iterable()))
def test__to_transformer():
a = _to_transformer(MockTransformer)
assert isinstance(a, MockTransformer)
b = _to_transformer("MockTransformer")
assert isinstance(b, MockTransformer)
a = _to_transformer(t1, None)
assert isinstance(a, Transformer)
a._x = 1
# every parse should produce a different transformer even the input is
# a transformer instance
b = _to_transformer(t1, None)
assert isinstance(b, Transformer)
assert "_x" not in b.__dict__
c = _to_transformer("t1", None)
assert isinstance(c, Transformer)
assert "_x" not in c.__dict__
c._x = 1
d = _to_transformer("t1", None)
assert isinstance(d, Transformer)
assert "_x" not in d.__dict__
raises(FugueInterfacelessError, lambda: _to_transformer(t4, None))
raises(FugueInterfacelessError, lambda: _to_transformer("t4", None))
e = _to_transformer("t4", "*,b:int")
assert isinstance(e, Transformer)
f = _to_transformer("t5")
assert isinstance(f, Transformer)
g = _to_transformer("t6", "*,b:int")
assert isinstance(g, Transformer)
h = _to_transformer("t7")
assert isinstance(h, Transformer)
i = _to_transformer("t8")
assert isinstance(i, Transformer)
j = _to_transformer("t9")
assert isinstance(j, Transformer)
k = _to_transformer("t10")
assert isinstance(k, Transformer)
def test__register():
register_transformer("t_x", MockTransformer)
b = _to_transformer("t_x")
assert isinstance(b, MockTransformer)
register_transformer("t_t3", t3)
register_transformer("t_t4", t4)
register_transformer("t_t5", t5)
assert isinstance(_to_transformer("t_t3"), Transformer)
assert isinstance(_to_transformer("t_t4", "*,x:int"), Transformer)
assert isinstance(_to_transformer("t_t5"), Transformer)
# schema: *
def register_temp(df: pd.DataFrame) -> pd.DataFrame:
return df
t = _to_transformer("register_temp")
assert isinstance(t, Transformer)
assert not isinstance(t, MockTransformer)
# registered alias has the highest priority
register_transformer("register_temp", MockTransformer)
t = _to_transformer("register_temp")
assert isinstance(t, MockTransformer)
# can't overwrite
raises(
KeyError,
lambda: register_transformer("register_temp", MockTransformer, on_dup="raise"),
)
def test__to_transformer_determinism():
a = _to_transformer(t1, None)
b = _to_transformer(t1, None)
c = _to_transformer("t1", None)
assert a is not b
assert to_uuid(a) == to_uuid(b)
assert a is not c
assert to_uuid(a) == to_uuid(c)
a = _to_transformer(t4, "*,b:int")
b = _to_transformer("t4", "*,b:int")
assert a is not b
assert to_uuid(a) == to_uuid(b)
a = _to_transformer(t4, "a:int,b:int")
b = _to_transformer("t4", Schema("a:int,b:int"))
assert a is not b
assert to_uuid(a) == to_uuid(b)
a = _to_transformer(MockTransformer)
b = _to_transformer("MockTransformer")
assert a is not b
assert to_uuid(a) == to_uuid(b)
a = _to_transformer(t10)
b = _to_transformer("t10")
assert a is not b
assert to_uuid(a) == to_uuid(b)
def test_to_transformer_validation():
@transformer(["*", None, "b:int"], input_has=" a , b ")
def tv1(df: Iterable[Dict[str, Any]]) -> Iterable[Dict[str, Any]]:
for r in df:
r["b"] = 1
yield r
# input_has: a , b
# schema: *,b:int
def tv2(df: Iterable[Dict[str, Any]]) -> Iterable[Dict[str, Any]]:
for r in df:
r["b"] = 1
yield r
class MockTransformerV(Transformer):
@property
def validation_rules(self):
return {"input_is": "a:int,b:int"}
def get_output_schema(self, df):
pass
def transform(self, df):
pass
a = _to_transformer(tv1, None)
assert {"input_has": ["a", "b"]} == a.validation_rules
b = _to_transformer(tv2, None)
assert {"input_has": ["a", "b"]} == b.validation_rules
c = _to_transformer(MockTransformerV)
assert {"input_is": "a:int,b:int"} == c.validation_rules
def test_inside_class():
class Test(object):
# schema: *
# input_is: a:int , b :int
def t1(self, df: pd.DataFrame) -> pd.DataFrame:
return df
test = Test()
a = _to_transformer(test.t1)
assert isinstance(a, Transformer)
assert {"input_is": "a:int,b:int"} == a.validation_rules
@transformer(["*", None, "b:int"])
def t1(df: Iterable[Dict[str, Any]]) -> Iterable[Dict[str, Any]]:
for r in df:
r["b"] = 1
yield r
@transformer([Schema("b:int"), "*"])
def t2(df: Iterable[Dict[str, Any]]) -> Iterable[Dict[str, Any]]:
for r in df:
r["b"] = 1
yield r
@transformer(Schema("a:int, b:int"))
def t3(df: Iterable[List[Any]]) -> Iterable[List[Any]]:
for r in df:
r += [1]
yield r
def t4(df: Iterable[List[Any]]) -> Iterable[List[Any]]:
for r in df:
r += [1]
yield r
# schema: *,b:int
def t5(df: Iterable[Dict[str, Any]]) -> Iterable[Dict[str, Any]]:
for r in df:
r["b"] = 1
yield r
def t6(df: Iterable[Dict[str, Any]]) -> Iterable[Dict[str, Any]]:
for r in df:
r["b"] = 1
yield r
# schema: *
def t7(df: pd.DataFrame) -> Iterable[pd.DataFrame]:
yield df
# schema: *
def t8(df: Iterable[pd.DataFrame]) -> pd.DataFrame:
return pd.concat(list(df))
# schema: *
def t9(df: pd.DataFrame) -> Iterable[pd.DataFrame]:
yield df
# schema: *
def t10(df: pd.DataFrame, c: callable) -> pd.DataFrame:
yield df
class MockTransformer(Transformer):
def get_output_schema(self, df):
pass
def transform(self, df):
pass
|
python
|
from header_common import *
from header_dialogs import *
from header_operations import *
from module_constants import *
####################################################################################################################
# During a dialog, the dialog lines are scanned from top to bottom.
# If the dialog-line is spoken by the player, all the matching lines are displayed for the player to pick from.
# If the dialog-line is spoken by another, the first (top-most) matching line is selected.
#
# Each dialog line contains the following fields:
# 1) Dialogue partner: This should match the person player is talking to.
# Usually this is a troop-id.
# You can also use a party-template-id by appending '|party_tpl' to this field.
# Use the constant 'anyone' if you'd like the line to match anybody.
# Appending '|plyr' to this field means that the actual line is spoken by the player
# Appending '|other(troop_id)' means that this line is spoken by a third person on the scene.
# (You must make sure that this third person is present on the scene)
#
# 2) Starting dialog-state:
# During a dialog there's always an active Dialog-state.
# A dialog-line's starting dialog state must be the same as the active dialog state, for the line to be a possible candidate.
# If the dialog is started by meeting a party on the map, initially, the active dialog state is "start"
# If the dialog is started by speaking to an NPC in a town, initially, the active dialog state is "start"
# If the dialog is started by helping a party defeat another party, initially, the active dialog state is "party_relieved"
# If the dialog is started by liberating a prisoner, initially, the active dialog state is "prisoner_liberated"
# If the dialog is started by defeating a party led by a hero, initially, the active dialog state is "enemy_defeated"
# If the dialog is started by a trigger, initially, the active dialog state is "event_triggered"
# 3) Conditions block (list): This must be a valid operation block. See header_operations.py for reference.
# 4) Dialog Text (string):
# 5) Ending dialog-state:
# If a dialog line is picked, the active dialog-state will become the picked line's ending dialog-state.
# 6) Consequences block (list): This must be a valid operation block. See header_operations.py for reference.
# 7) Voice-over (string): sound filename for the voice over. Leave here empty for no voice over
####################################################################################################################
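# For orientation, here is an annotated reading of the first entry in the list below
# (an illustration of the field layout only, not an extra dialog line; the optional
# 7th voice-over field is omitted in these entries):
#   [anyone|plyr,                         # 1) partner: any troop, line spoken by the player
#    "member_talk",                       # 2) starting dialog-state
#    [(troop_get_slot, ...), (eq, ...)],  # 3) conditions block
#    "I'd like you to try to keep out of the fighting.",  # 4) dialog text
#    "member_keep_out_fighting",          # 5) ending dialog-state
#    []]                                  # 6) consequences block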
dialogs = [
[anyone|plyr,"member_talk", [
(troop_get_slot, ":is_skill_companion", "$g_talk_troop", slot_troop_skill_companion),
(eq, ":is_skill_companion", 0),
], "I'd like you to try to keep out of the fighting.", "member_keep_out_fighting",[]],
[anyone,"member_keep_out_fighting", [], "Oh? Are you sure?", "member_keep_out_fighting_confirm",[]],
[anyone|plyr,"member_keep_out_fighting_confirm", [], "Yes, you have other skills that are too valuable for me to risk losing you in battle.", "member_keep_out_fighting_yes",[
(troop_set_slot, "$g_talk_troop", slot_troop_skill_companion, 1),
]],
[anyone|plyr,"member_keep_out_fighting_confirm", [], "Actually, never mind.", "member_keep_out_fighting_no",[]],
[anyone,"member_keep_out_fighting_yes", [
(store_conversation_troop,"$g_talk_troop"),
(troop_is_hero,"$g_talk_troop"),
(troop_get_slot, ":honorific", "$g_talk_troop", slot_troop_honorific),
(str_store_string, s5, ":honorific"),
], "As you say {s5}. Unless you order me otherwise, I will try to be the last to enter the battle. Anything else?", "member_talk",[]],
[anyone,"member_keep_out_fighting_no", [], "Very well. Anything else?", "member_talk",[]],
[anyone|plyr,"member_talk", [
(troop_get_slot, ":is_skill_companion", "$g_talk_troop", slot_troop_skill_companion),
(eq, ":is_skill_companion", 1),
], "I'd like you to take an active role in battles from now on.", "member_join_in_fighting",[]],
[anyone,"member_join_in_fighting", [], "I see. Is this definitely what you want?", "member_join_in_fighting_confirm",[]],
[anyone|plyr,"member_join_in_fighting_confirm", [], "Yes, your skill on the battlefield is what we need now.", "member_join_in_fighting_yes",[
(troop_set_slot, "$g_talk_troop", slot_troop_skill_companion, 0),
]],
[anyone|plyr,"member_join_in_fighting_confirm", [], "Actually, never mind.", "member_join_in_fighting_no",[]],
[anyone,"member_join_in_fighting_yes", [
(store_conversation_troop,"$g_talk_troop"),
(troop_is_hero,"$g_talk_troop"),
(troop_get_slot, ":honorific", "$g_talk_troop", slot_troop_honorific),
(str_store_string, s5, ":honorific"),
], "As you command {s5}. I will take my position with the rest of the troops from now on. Anything else?", "member_talk",[]],
[anyone,"member_join_in_fighting_no", [], "Very well. Anything else?", "member_talk",[]],
]
def add_dialog(dialogs, new_dialog, bottom_offset):
if bottom_offset == 0:
dialogs.append(new_dialog)
else:
state = new_dialog[1]
indices = []
for i in xrange(0, len(dialogs)):
dialog = dialogs[i]
if dialog[1] == state:
indices.append(i)
if len(indices) == 0:
index = len(dialogs)
elif len(indices) < bottom_offset:
index = indices[0]
else:
index = indices[len(indices) - bottom_offset]
dialogs.insert(index, new_dialog)
def modmerge(var_set):
try:
var_name_1 = "dialogs"
orig_scripts = var_set[var_name_1]
# START do your own stuff to do merging
for dialog in dialogs:
state = dialog[1]
if state == "member_talk":
add_dialog(orig_scripts, dialog, 1)
else:
add_dialog(orig_scripts, dialog, 0)
# END do your own stuff
except KeyError:
errstring = "Variable set does not contain expected variable: \"%s\"." % var_name_1
raise ValueError(errstring)
|
python
|
#!/usr/bin/python
import sys
import h5py
if __name__ == "__main__":
files = sys.argv[1:]
files.extend(sys.stdin.readlines())
for file in files:
file = file.strip()
        with h5py.File(file, 'r+') as f:  # 'r+' (read/write) is required to modify the dataset in place
            f['/entry1/instrument/parameters/y_pixels_per_mm'][0] = 0.321
|
python
|
"""
Tests CoreML Scaler converter.
"""
import unittest
import numpy
import coremltools
from sklearn.preprocessing import StandardScaler
from onnxmltools.convert.coreml.convert import convert
from onnxmltools.utils import dump_data_and_model
class TestCoreMLScalerConverter(unittest.TestCase):
def test_scaler(self):
model = StandardScaler()
data = numpy.array([[0, 0, 3], [1, 1, 0], [0, 2, 1], [1, 0, 2]], dtype=numpy.float32)
model.fit(data)
model_coreml = coremltools.converters.sklearn.convert(model)
model_onnx = convert(model_coreml.get_spec())
self.assertTrue(model_onnx is not None)
dump_data_and_model(data, model, model_onnx, basename="CmlStandardScalerFloat32")
if __name__ == "__main__":
unittest.main()
|
python
|
total_bill = 124.56
procent_10 = 0.10
procent_12 = 0.12
procent_15 = 0.15
split_people = 7
tip = total_bill * procent_12 + total_bill  # total bill including a 12% tip
print(tip)
total = tip / float(split_people)
print(round(total, 2))
|
python
|
from guardian.core import ObjectPermissionChecker
class ObjectPermissionCheckerViewSetMixin:
"""add a ObjectPermissionChecker based on the accessing user to the serializer context."""
def get_serializer_context(self):
context = super().get_serializer_context()
if self.request:
perm_checker = ObjectPermissionChecker(
user_or_group=self.request.user)
perm_checker.prefetch_perms(self.get_queryset())
context.update({'perm_checker': perm_checker})
return context
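# A minimal sketch of how a serializer could consume the checker placed in the context
# above. The serializer, model and permission name below are illustrative assumptions,
# not part of this module; guardian's ObjectPermissionChecker exposes has_perm(perm, obj).
#
# from rest_framework import serializers
#
# class ArticleSerializer(serializers.ModelSerializer):
#     can_change = serializers.SerializerMethodField()
#
#     def get_can_change(self, obj):
#         checker = self.context.get('perm_checker')
#         # thanks to prefetch_perms() in the mixin, this lookup does not hit the database again
#         return checker.has_perm('change_article', obj) if checker else False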
|
python
|
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2016
# --------------------------------------------------------------------------
"""
This is a problem of building five houses in different locations.
The masonry, roofing, painting, etc. must be scheduled.
Some tasks must necessarily take place before others and these requirements are
expressed through precedence constraints.
There are three workers, and each task requires a worker.
There is also a cash budget which starts with a given balance.
Each task costs a given amount of cash per day which must be available at the start of the task.
A cash payment is received periodically.
The objective is to minimize the overall completion date.
Please refer to documentation for appropriate setup of solving configuration.
"""
from docplex.cp.model import CpoModel, CpoStepFunction, INTERVAL_MAX, INT_MAX
import docplex.cp.utils_visu as visu
#-----------------------------------------------------------------------------
# Initialize the problem data
#-----------------------------------------------------------------------------
# House building task descriptor
class BuildingTask(object):
def __init__(self, name, duration):
self.name = name
self.duration = duration
# List of tasks to be executed for each house
MASONRY = BuildingTask('masonry', 35)
CARPENTRY = BuildingTask('carpentry', 15)
PLUMBING = BuildingTask('plumbing', 40)
CEILING = BuildingTask('ceiling', 15)
ROOFING = BuildingTask('roofing', 5)
PAINTING = BuildingTask('painting', 10)
WINDOWS = BuildingTask('windows', 5)
FACADE = BuildingTask('facade', 10)
GARDEN = BuildingTask('garden', 5)
MOVING = BuildingTask('moving', 5)
# Tasks precedence constraints (each tuple (X, Y) means X ends before start of Y)
PRECEDENCES = ( (MASONRY, CARPENTRY),
(MASONRY, PLUMBING),
(MASONRY, CEILING),
(CARPENTRY, ROOFING),
(CEILING, PAINTING),
(ROOFING, WINDOWS),
(ROOFING, FACADE),
(PLUMBING, FACADE),
(ROOFING, GARDEN),
(PLUMBING, GARDEN),
(WINDOWS, MOVING),
(FACADE, MOVING),
(GARDEN, MOVING),
(PAINTING, MOVING),
)
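# As a small illustration, the first tuple above, (MASONRY, CARPENTRY), is turned into a
# constraint in make_house() below, equivalent to:
#   mdl.add(mdl.end_before_start(tasks[MASONRY.id], tasks[CARPENTRY.id]))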
# Number of workers
NB_WORKERS = 3
# List of houses to build. Value is the minimum start date
HOUSES = (31, 0, 90, 120, 90)
# Cash parameters
NB_PAYMENTS = 5
PAYMENT_AMOUNT = 30000
PAYMENT_INTERVAL = 60
#-----------------------------------------------------------------------------
# Prepare the data for modeling
#-----------------------------------------------------------------------------
# Assign an index to tasks
ALL_TASKS = (MASONRY, CARPENTRY, PLUMBING, CEILING, ROOFING, PAINTING, WINDOWS, FACADE, GARDEN, MOVING)
for i in range(len(ALL_TASKS)):
ALL_TASKS[i].id = i
#-----------------------------------------------------------------------------
# Build the model
#-----------------------------------------------------------------------------
# Create model
mdl = CpoModel()
# Initialize model variable sets
all_tasks = [] # Array of all tasks
desc = dict() # Dictionary task interval var -> task descriptor
house = dict() # Dictionary task interval var -> id of the corresponding house
workers_usage = mdl.step_at(0, 0) # Total worker usage
# Initialize cash function
cash = mdl.step_at(0, 0)
for p in range(NB_PAYMENTS):
cash += mdl.step_at(PAYMENT_INTERVAL * p, PAYMENT_AMOUNT)
# Utility function
def make_house(loc, rd):
''' Create model elements corresponding to the building of one house
loc: Identification (index) of the house to build
rd: Min start date
'''
# Create interval variable for each task for this house
tasks = [mdl.interval_var(size=t.duration,
start=(rd, INTERVAL_MAX),
name="H{}-{}".format(loc, t.name)) for t in ALL_TASKS]
all_tasks.extend(tasks)
# Add precedence constraints
for p, s in PRECEDENCES:
mdl.add(mdl.end_before_start(tasks[p.id], tasks[s.id]))
global workers_usage
global cash
# Allocate tasks to workers
for t in ALL_TASKS:
desc[tasks[t.id]] = t
house[tasks[t.id]] = loc
workers_usage += mdl.pulse(tasks[t.id], 1)
cash -= mdl.step_at_start(tasks[t.id], 200 * t.duration)
# Make houses
for i, sd in enumerate(HOUSES):
make_house(i, sd)
# Number of workers should not be greater than the limit
mdl.add(workers_usage <= NB_WORKERS)
# Cash should not be negative
mdl.add(cash >= 0)
# Minimize overall completion date
mdl.add(mdl.minimize(mdl.max([mdl.end_of(task) for task in all_tasks])))
#-----------------------------------------------------------------------------
# Solve the model and display the result
#-----------------------------------------------------------------------------
def compact(name):
    # Example: H3-garden -> G3 (task initial, upper-cased, followed by the house index)
loc, task = name[1:].split('-', 1)
return task[0].upper() + loc
# Solve model
print("Solving model....")
msol = mdl.solve(FailLimit=10000, TimeLimit=10)
print("Solution: ")
msol.print_solution()
# Display result
if msol and visu.is_visu_enabled():
workersF = CpoStepFunction()
cashF = CpoStepFunction()
    for p in range(NB_PAYMENTS):
        cashF.add_value(PAYMENT_INTERVAL * p, INT_MAX, PAYMENT_AMOUNT)
for task in all_tasks:
itv = msol.get_var_solution(task)
workersF.add_value(itv.get_start(), itv.get_end(), 1)
cashF.add_value(itv.start, INT_MAX, -200 * desc[task].duration)
visu.timeline('Solution SchedCumul')
visu.panel(name="Schedule")
for task in all_tasks:
visu.interval(msol.get_var_solution(task), house[task], compact(task.get_name()))
visu.panel(name="Workers")
visu.function(segments=workersF, style='area')
visu.panel(name="Cash")
visu.function(segments=cashF, style='area', color='gold')
visu.show()
|
python
|
import os
from minicps.devices import PLC
from temperature_simulator import TemperatureSimulator
from Logger import hlog
import time
class EnipPLC1(PLC): #builds upon the tags of the swat example
# These constants are used mostly during setting up of topology
NAME = 'plc1'
    IP = '10.0.2.110'  # no stray leading space, otherwise the ENIP server binds to an invalid address
MAC = '00:1D:9C:C7:B0:10'
# PLC1_PROTOCOL defines type of this PLC (see PLC class in minicps package)
PLC1_PROTOCOL = {
'name': 'enip',
'mode': 1,
'server': {
'address': IP,
'tags': (
('LIT101', 1, 'REAL'),
('LIT101', 2, 'REAL'),
('LIT101', 3, 'REAL'),
)
}
}
# This PLC doesn't use data yet
PLC1_DATA = {
'TODO': 'TODO',
}
# State of this PLC is stored in Sqlite database on this path
STATE = {
'name': 'swat_s1',
'path': 'swat_s1_db.sqlite'
}
def __init__(self):
self.temperature_simulator = TemperatureSimulator(0.0, 50.0, 5.0)
PLC.__init__(
self,
name='plc1',
state=EnipPLC1.STATE,
protocol=EnipPLC1.PLC1_PROTOCOL,
memory=EnipPLC1.PLC1_DATA,
disk=EnipPLC1.PLC1_DATA)
# Executed before main loop is started
def pre_loop(self, sleep=0.1):
hlog ('DEBUG:plc1 enters pre_loop')
print
time.sleep(sleep)
# Main loop keeps sending ENIP messages with one LIT101 tag and value
# obtained from temperature simulator.
def main_loop(self):
hlog ('DEBUG: plc1 enters main_loop.')
print
count = 0
while(count <= 1000000):
lit101 = float(self.temperature_simulator.get_next())
hlog ('DEBUG plc1 lit101: %.5f' % lit101)
self.send(('LIT101', 3), lit101, EnipPLC1.IP)
count += 1
hlog ('DEBUG plc1 shutdown')
if __name__ == "__main__":
hlog('DEBUG plc1 start')
plc1 = EnipPLC1()
|
python
|
from setuptools import setup, find_packages
setup(name='donkeypart_keras_behavior_cloning',
version='0.1.3',
description='Library to control steering and throttle actuators.',
long_description="no long description given",
long_description_content_type="text/markdown",
url='https://github.com/autorope/donkeypart_PCA9685_actuators',
author='Will Roscoe',
author_email='[email protected]',
license='MIT',
entry_points={
'console_scripts': [
'donkey=donkeycar.management.base:execute_from_command_line',
],
},
install_requires=['numpy',
'tensorflow==1.11',
],
extras_require={'dev': ['pytest-cov']},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='selfdriving cars donkeycar diyrobocars datastore',
packages=find_packages(exclude=(['tests', 'docs', 'site', 'env'])),
)
|
python
|
from trading_bot import app, create_app
def init_app():
# TODO add test config class
create_app()
def reset_managers():
create_app()
app.symbol_manager._symbols = []
app.exchange_manager._exchanges = []
app.exchange_manager._exchanges_by_name = {}
app.indicator_manager._indicators = []
app.indicator_manager._indicators_by_name = {}
app.indicator_value_manager._indicator_values = []
app.indicator_value_manager._indicator_values_by_key = {}
app.trading_system_manager._trading_systems = []
|
python
|
import cv2
import numpy as np
import math
import time
import testAAE
import testAAEWithClassifier
import region
def quantizeAngle(angle):
    # Map the gradient angle (degrees, in (-180, 180]) onto one of eight 45-degree bins,
    # each encoded as a single bit (1, 2, 4, ..., 128) so bins are easy to compare.
    if angle >= 0:
        if angle >= 135:
            quantized = 8
        elif angle >= 90:
            quantized = 4
        elif angle >= 45:
            quantized = 2
        else:
            quantized = 1
elif angle <= -90:
if angle <= -135:
quantized = 16
else:
quantized = 32
elif angle <= -45:
quantized = 64
else:
quantized = 128
return int(quantized)
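# Quick sanity check of the bin encoding above (illustrative values):
#   quantizeAngle(10)  -> 1     quantizeAngle(60)   -> 2
#   quantizeAngle(100) -> 4     quantizeAngle(170)  -> 8
#   quantizeAngle(-10) -> 128   quantizeAngle(-170) -> 16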
def angleFilter(mask, quantized, quantized_flag = False):
temp_angle = mask*quantized
kernal = 9
m,n = mask.shape
hist = {}
hist_sorted = []
strong_angle = np.zeros(mask.shape, np.uint8)
contour = np.zeros(mask.shape, np.uint8)
score_map = np.array([[5,3,1,0,0,0,1,3],
[3,5,3,1,0,0,0,1],
[1,3,5,3,1,0,0,0],
[0,1,3,5,3,1,0,0],
[0,0,1,3,5,3,1,0],
[0,0,0,1,3,5,3,1],
[1,0,0,0,1,3,5,3],
[3,1,0,0,0,1,3,5]])
bias = math.floor(kernal /2)
qt_angle = np.array([1,2,4,8,16,32,64,128])
for i in range(m):
for j in range(n):
if mask[i,j] > 0:
if i-bias < 0:
h_t=0
else:
h_t = i-bias
if i+bias > m-1:
h_b=m-1
else:
h_b = i+bias
if j-bias < 0:
w_l=0
else:
w_l=j-bias
                if j+bias > n-1:  # clamp to the image width (n columns), not the height
                    w_r = n-1
                else:
                    w_r = j+bias
temp = temp_angle[h_t:h_b+1,w_l:w_r+1]
a,b = temp.shape
temp = temp.flat[:]
if not quantized_flag:
for k in range(a*b):
if temp[k] > 0:
temp[k] = quantizeAngle(temp[k])
temp = temp.astype(np.uint8)
temp_ = temp[temp.nonzero()]
bcounts = np.bincount(temp_)
strong_temp = np.zeros(a*b)
score_temp = np.zeros(a*b)
                hist.clear()
                hist_sorted.clear()
hist = dict(zip(np.unique(temp_),bcounts[bcounts.nonzero()]))
hist_sorted = sorted(hist.items(), key=lambda x: x[1], reverse=True)
max_count = hist_sorted[0][1]
strong_angle[i,j] = hist_sorted[0][0]
count = 0
for c in range(a*b):
if temp[c] > 0:
score_temp[c] = score_map[int(math.log2(quantizeAngle(temp_angle[i,j]))),int(math.log2(temp[c]))]
strong_temp[c] = score_map[int(math.log2(strong_angle[i,j])),int(math.log2(temp[c]))]
count+=1
pix_score = np.sum(score_temp)/count
strong_score = np.sum(strong_temp)/count
if max_count > 5 and (pix_score > 2 or strong_score > 2):
contour[i,j] = 1
return contour
def preprocessing():
total_num = 28
sample_id = 0
threshold = 160
exposure = 6
write_flag = False
sobel_mask_vect = []
src_vect = []
sobel_x =np.array([[-1, 0, 1],[-1, 0, 1],[-1, 0, 1]], dtype=np.float32)
sobel_y =np.array([[1, 1, 1],[0, 0, 0],[-1, -1, -1]], dtype=np.float32)
new_img = np.zeros((256,256), np.uint8)
for pic_num in range(1, total_num):
if write_flag:
src_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '/' + str(pic_num) + '.jpg'
output_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '/' + str(pic_num) + '.png'
IN_src_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '_IN/' + 'SQI' + '/' + '{:02d}'.format(pic_num) + '.png'
# output_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '_IN/' + 'TT' + '/' + '{:02d}'.format(pic_num) + '.png'
# region_file = './roi/region_' + str(pic_num) + '.png'
print(src_file)
img = cv2.imread(src_file)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
m,n = img.shape
img = img[0:n]
new_img[3:253,3:253] = img
cv2.imwrite(output_file, new_img)
new_img_copy = new_img.copy()
IN_img = cv2.imread(IN_src_file)
IN_img = cv2.cvtColor(IN_img, cv2.COLOR_BGR2GRAY)
src_vect.append(IN_img)
else:
src_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '/' + str(pic_num) + '.png'
IN_src_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '_IN/' + 'SQI' + '/' + '{:02d}'.format(pic_num) + '.png'
new_img = cv2.imread(src_file)
new_img = cv2.cvtColor(new_img,cv2.COLOR_BGR2GRAY)
IN_img = cv2.imread(IN_src_file)
IN_img = cv2.cvtColor(IN_img, cv2.COLOR_BGR2GRAY)
src_vect.append(IN_img)
        sobel_mag = np.zeros(new_img.shape, np.float64)  # np.float was removed from recent NumPy releases
        sobel_angle = np.zeros(new_img.shape, np.float64)
quantized_angle = np.zeros(new_img.shape, np.uint8)
sobel_mask = np.zeros(new_img.shape, np.uint8)
# img_Guassian = cv2.GaussianBlur(new_img,(5,5),0)
# img_Guassian.astype(np.uint8)
# m,n = img_Guassian.shape
# m,n = new_img.shape
# for i in range(2,m-1):
# for j in range(2,n-1):
# Gx = np.sum(new_img[i-1:i+2, j-1:j+2] * sobel_x)
# Gy = np.sum(new_img[i-1:i+2, j-1:j+2] * sobel_y)
# sobel_mag[i,j] = math.sqrt(math.pow(Gx,2) + math.pow(Gy,2))
# sobel_angle[i,j] = math.atan2(Gy, Gx) * 180 / math.pi
# # quantized_angle[i,j] = quantizeAngle(sobel_angle[i,j])
# if sobel_mag[i,j] >= threshold:
# sobel_mask[i,j] = 1
# contour = angleFilter(sobel_mask, quantized_angle)
# contour = cv2.blur(contour, (3,3))
        # sobelx = cv2.Sobel(new_img,cv2.CV_32F,1,0)  # default ksize=3
# sobely = cv2.Sobel(new_img,cv2.CV_32F,0,1)
sobelx = cv2.filter2D(new_img, cv2.CV_32F, sobel_x)
sobely = cv2.filter2D(new_img, cv2.CV_32F, sobel_y)
sobel_mag = np.sqrt(pow(sobelx,2) + pow(sobely,2))
sobel_angle = np.arctan2(sobely,sobelx) * 180 /math.pi
sobel_mag = cv2.convertScaleAbs(sobel_mag)
_, sobel_mask = cv2.threshold(sobel_mag, threshold, 255, 0)
# contour = angleFilter(sobel_mask, sobel_angle)
# contour = cv2.blur(contour, (3,3))
# sobel_mask = cv2.blur(sobel_mask, (3,3))
# contour_vect.append(contour)
# cv2.imshow('sobel', sobel_mask)
# cv2.waitKey(0)
sobel_mask_vect.append(sobel_mask)
return sobel_mask_vect, src_vect
if __name__ == "__main__":
time_start = time.time()
sobel_mask_vect, src_vect = preprocessing()
time_end = time.time()
print('Proprecessing time cost:{:.3f}'.format(time_end - time_start))
# for sobel_mask in sobel_mask_vect:
# # cv2.imshow("sobel",255*sobel_mask.astype(np.uint8))
# cv2.imshow("sobel",sobel_mask)
# # cv2.imshow("extend", 255*contour.astype(np.uint8))
# # cv2.imshow("sub",255*(sobel_mask - contour).astype(np.uint8))
# cv2.waitKey(0)
output_img_vect = testAAE.AEprocessing(sobel_mask_vect)
# output_img_vect = testAAEWithClassifier.AEprocessing(sobel_mask_vect)
print('AAE time cost:{:.3f}'.format(time.time() - time_end))
for i, singleimg in enumerate(output_img_vect):
# singleimg = np.squeeze(singleimg, axis=(2,))
singleimg = singleimg.astype(np.uint8)
src = src_vect[i]
# cv2.imshow('src',src)
# cv2.waitKey(0)
region_file = '../roi/region_{:02d}'.format(i) + '.png'
mask_file = '../Template/bin_mask/region_{:02d}'.format(i) + '.png'
mask = region.regionGenerate(singleimg)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3, 3))
eroded = cv2.erode(mask,kernel)
eroded_2 = cv2.erode(eroded,kernel)
eroded_3 = cv2.erode(eroded_2,kernel)
roi = cv2.bitwise_and(src, src, mask=eroded)
sub = eroded - eroded_3
m,n = sub.shape
for row in range(m):
for col in range(n):
if sub[row, col] and roi[row, col] < 80:
roi[row,col] = 0
eroded[row, col] = 0
background = cv2.bitwise_not(eroded)
# cv2.imwrite(region_file, roi)
# cv2.imwrite(mask_file, eroded)
# cv2.imshow('region', roi+background)
# cv2.waitKey(0)
print('Totally time cost:{:.3f}'.format(time.time() - time_start))
|
python
|
def read_input():
# for puzzles where each input line is an object
with open('input.txt') as fh:
for line in fh.readlines():
if line.strip():
yield line.strip()
def read_input_objs():
# for puzzles with newline-separated objects as input
with open('input.txt') as fh:
obj = []
for line in fh.readlines():
if not line.strip():
yield ' '.join(obj).strip()
obj = []
else:
obj.append(line.strip())
        if obj:
            yield ' '.join(obj).strip()  # flush the last object when the file doesn't end with a blank line
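# Illustrative sketch of what read_input_objs() yields (assumed example input, not the
# real puzzle data): for a file containing 'abc', 'def', a blank line, then 'ghi',
# it yields 'abc def' followed by 'ghi'.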
fwd = 0
depth = 0
i = 0
for obj in read_input():
d, num = obj.split(' ')
num = int(num)
if d == 'forward':
fwd += num
if d == 'down':
depth += num
if d == 'up':
depth -= num
print(fwd * depth)
|
python
|
import PySimpleGUI as sg
use_custom_titlebar = False
def make_window(theme=None):
NAME_SIZE = 23
def name(name):
dots = NAME_SIZE - len(name) - 2
return sg.Text(
name + ' ' + '•' * dots,
size=(NAME_SIZE, 1),
justification='r',
pad=(0, 0),
font='Courier 10',
)
sg.theme(theme)
treedata = sg.TreeData()
treedata.Insert(
'',
'_A_',
'Tree Item 1',
[1234],
)
treedata.Insert('', '_B_', 'B', [])
treedata.Insert(
'_A_',
'_A1_',
'Sub Item 1',
['can', 'be', 'anything'],
)
layout_l = [
[name('Text'), sg.Text('Text')],
[name('Input'), sg.Input(s=15)],
[name('Multiline'), sg.Multiline(s=(15, 2))],
[name('Output'), sg.Output(s=(15, 2))],
[
name('Combo'),
sg.Combo(
sg.theme_list(),
default_value=sg.theme(),
s=(15, 22),
enable_events=True,
readonly=True,
k='-COMBO-',
),
],
[
name('OptionMenu'),
sg.OptionMenu(
[
'OptionMenu',
],
s=(15, 2),
),
],
[name('Checkbox'), sg.Checkbox('Checkbox')],
[name('Radio'), sg.Radio('Radio', 1)],
[
name('Spin'),
sg.Spin(
[
'Spin',
],
s=(15, 2),
),
],
[name('Button'), sg.Button('Button')],
[
name('ButtonMenu'),
sg.ButtonMenu('ButtonMenu', sg.MENU_RIGHT_CLICK_EDITME_EXIT),
],
[name('Slider'), sg.Slider((0, 10), orientation='h', s=(10, 15))],
[
name('Listbox'),
sg.Listbox(['Listbox', 'Listbox 2'], no_scrollbar=True, s=(15, 2)),
],
[name('Image'), sg.Image(sg.EMOJI_BASE64_HAPPY_THUMBS_UP)],
[name('Graph'), sg.Graph((125, 50), (0, 0), (125, 50), k='-GRAPH-')],
]
layout_r = [
[
name('Canvas'),
sg.Canvas(
background_color=sg.theme_button_color()[1], size=(125, 50)
),
],
[
name('ProgressBar'),
sg.ProgressBar(100, orientation='h', s=(10, 20), k='-PBAR-'),
],
[
name('Table'),
sg.Table(
[[1, 2, 3], [4, 5, 6]], ['Col 1', 'Col 2', 'Col 3'], num_rows=2
),
],
[
name('Tree'),
sg.Tree(
treedata,
[
'Heading',
],
num_rows=3,
),
],
[name('Horizontal Separator'), sg.HSep()],
[name('Vertical Separator'), sg.VSep()],
[name('Frame'), sg.Frame('Frame', [[sg.T(s=15)]])],
[name('Column'), sg.Column([[sg.T(s=15)]])],
[
name('Tab, TabGroup'),
sg.TabGroup(
[[sg.Tab('Tab1', [[sg.T(s=(15, 2))]]), sg.Tab('Tab2', [[]])]]
),
],
[
name('Pane'),
sg.Pane([sg.Col([[sg.T('Pane 1')]]), sg.Col([[sg.T('Pane 2')]])]),
],
[name('Push'), sg.Push(), sg.T('Pushed over')],
[name('VPush'), sg.VPush()],
[name('Sizer'), sg.Sizer(1, 1)],
[name('StatusBar'), sg.StatusBar('StatusBar')],
[name('Sizegrip'), sg.Sizegrip()],
]
layout = [
[
sg.MenubarCustom(
[
['File', ['Exit']],
[
'Edit',
[
'Edit Me',
],
],
],
k='-CUST MENUBAR-',
p=0,
)
]
if use_custom_titlebar
else [
sg.Menu(
[
['File', ['Exit']],
[
'Edit',
[
'Edit Me',
],
],
],
k='-CUST MENUBAR-',
p=0,
)
],
[
sg.Checkbox(
'Use Custom Titlebar & Menubar',
use_custom_titlebar,
enable_events=True,
k='-USE CUSTOM TITLEBAR-',
)
],
[
sg.T(
'PySimpleGUI Elements - Use Combo to Change Themes',
font='_ 18',
justification='c',
expand_x=True,
)
],
[sg.Col(layout_l), sg.Col(layout_r)],
]
window = sg.Window(
'The PySimpleGUI Element List',
layout,
finalize=True,
right_click_menu=sg.MENU_RIGHT_CLICK_EDITME_VER_EXIT,
keep_on_top=True,
use_custom_titlebar=use_custom_titlebar,
)
window['-PBAR-'].update(30) # Show 30% complete on ProgressBar
window['-GRAPH-'].draw_image(
data=sg.EMOJI_BASE64_HAPPY_JOY, location=(0, 50)
) # Draw something in the Graph Element
return window
# Start of the program...
window = make_window()
window.read()
window.close()
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for :mod:`orion.algo.tpe`."""
import numpy
import pytest
from scipy.stats import norm
from orion.algo.space import Categorical, Fidelity, Integer, Real, Space
from orion.algo.tpe import (
TPE,
CategoricalSampler,
GMMSampler,
adaptive_parzen_estimator,
compute_max_ei_point,
ramp_up_weights,
)
from orion.core.worker.transformer import build_required_space
@pytest.fixture()
def space():
"""Return an optimization space"""
space = Space()
dim1 = Real("yolo1", "uniform", -10, 20)
space.register(dim1)
dim2 = Integer("yolo2", "uniform", -5, 10)
space.register(dim2)
categories = ["a", 0.1, 2, "c"]
dim3 = Categorical("yolo3", categories)
space.register(dim3)
return space
@pytest.fixture
def tpe(space):
"""Return an instance of TPE."""
return TPE(space, seed=1)
def test_compute_max_ei_point():
"""Test that max ei point is computed correctly"""
points = numpy.linspace(-3, 3, num=10)
below_likelis = numpy.linspace(0.5, 0.9, num=10)
above_likes = numpy.linspace(0.2, 0.5, num=10)
numpy.random.shuffle(below_likelis)
numpy.random.shuffle(above_likes)
max_ei_index = (below_likelis - above_likes).argmax()
max_ei_point = compute_max_ei_point(points, below_likelis, above_likes)
assert max_ei_point == points[max_ei_index]
def test_ramp_up_weights():
"""Test TPE adjust observed points correctly"""
weights = ramp_up_weights(25, 15, True)
assert len(weights) == 25
assert numpy.all(weights == 1.0)
weights = ramp_up_weights(25, 15, False)
assert len(weights) == 25
assert numpy.all(weights[:10] == (numpy.linspace(1.0 / 25, 1.0, num=10)))
assert numpy.all(weights[10:] == 1.0)
weights = ramp_up_weights(10, 15, False)
assert len(weights) == 10
assert numpy.all(weights == 1.0)
weights = ramp_up_weights(25, 0, False)
assert len(weights) == 25
assert numpy.all(weights == (numpy.linspace(1.0 / 25, 1.0, num=25)))
def test_adaptive_parzen_normal_estimator():
"""Test adaptive parzen estimator"""
low = -1
high = 5
obs_mus = [1.2]
mus, sigmas, weights = adaptive_parzen_estimator(
obs_mus, low, high, prior_weight=1.0, equal_weight=False, flat_num=25
)
assert list(mus) == [1.2, 2]
assert list(sigmas) == [3, 6]
assert list(weights) == [1.0 / 2, 1.0 / 2]
obs_mus = [3.4]
mus, sigmas, weights = adaptive_parzen_estimator(
obs_mus, low, high, prior_weight=0.5, equal_weight=False, flat_num=25
)
assert list(mus) == [2, 3.4]
assert list(sigmas) == [6, 3]
assert list(weights) == [0.5 / 1.5, 1.0 / 1.5]
obs_mus = numpy.linspace(-1, 5, num=30, endpoint=False)
mus, sigmas, weights = adaptive_parzen_estimator(
obs_mus, low, high, prior_weight=1.0, equal_weight=False, flat_num=25
)
ramp = numpy.linspace(1.0 / 30, 1.0, num=30 - 25)
full = numpy.ones(25 + 1)
all_weights = numpy.concatenate([ramp, full])
assert len(mus) == len(sigmas) == len(weights) == 30 + 1
assert numpy.all(weights[: 30 - 25] == ramp / all_weights.sum())
assert numpy.all(weights[30 - 25 :] == 1 / all_weights.sum())
assert numpy.all(sigmas == 6 / 10)
def test_adaptive_parzen_normal_estimator_weight():
"""Test the weight for the normal components"""
obs_mus = numpy.linspace(-1, 5, num=30, endpoint=False)
low = -1
high = 5
# equal weight
mus, sigmas, weights = adaptive_parzen_estimator(
obs_mus, low, high, prior_weight=1.0, equal_weight=True, flat_num=25
)
assert numpy.all(weights == 1 / 31)
assert numpy.all(sigmas == 6 / 10)
# prior weight
mus, sigmas, weights = adaptive_parzen_estimator(
obs_mus, low, high, prior_weight=0.5, equal_weight=False, flat_num=25
)
ramp = numpy.linspace(1.0 / 30, 1.0, num=30 - 25)
full = numpy.ones(25 + 1)
all_weights = numpy.concatenate([ramp, full])
prior_pos = numpy.searchsorted(mus, 2)
all_weights[prior_pos] = 0.5
assert numpy.all(
weights[: 30 - 25]
== (numpy.linspace(1.0 / 30, 1.0, num=30 - 25) / all_weights.sum())
)
    assert numpy.all(weights[30 - 25 : prior_pos] == 1 / all_weights.sum())
assert weights[prior_pos] == 0.5 / all_weights.sum()
assert numpy.all(weights[prior_pos + 1 :] == 1 / all_weights.sum())
assert numpy.all(sigmas == 6 / 10)
# full weights number
mus, sigmas, weights = adaptive_parzen_estimator(
obs_mus, low, high, prior_weight=1.0, equal_weight=False, flat_num=15
)
ramp = numpy.linspace(1.0 / 30, 1.0, num=30 - 15)
full = numpy.ones(15 + 1)
all_weights = numpy.concatenate([ramp, full])
prior_pos = numpy.searchsorted(mus, 2)
all_weights[prior_pos] = 1.0
assert numpy.all(
weights[: 30 - 15]
== (numpy.linspace(1.0 / 30, 1.0, num=30 - 15) / all_weights.sum())
)
assert numpy.all(weights[30 - 15 :] == 1 / all_weights.sum())
assert numpy.all(sigmas == 6 / 10)
def test_adaptive_parzen_normal_estimator_sigma_clip():
"""Test that the magic clip of sigmas for parzen estimator"""
low = -1
high = 5
obs_mus = numpy.linspace(-1, 5, num=8, endpoint=False)
mus, sigmas, weights = adaptive_parzen_estimator(
obs_mus, low, high, prior_weight=1.0, equal_weight=False, flat_num=25
)
assert len(mus) == len(sigmas) == len(weights) == 8 + 1
assert numpy.all(weights == 1 / 9)
assert numpy.all(sigmas == 6 / 8)
obs_mus = numpy.random.uniform(-1, 5, 30)
mus, sigmas, weights = adaptive_parzen_estimator(
obs_mus, low, high, prior_weight=1.0, equal_weight=False, flat_num=25
)
assert len(mus) == len(sigmas) == len(weights) == 30 + 1
assert numpy.all(weights[-25:] == weights[-1])
assert numpy.all(sigmas <= 6) and numpy.all(sigmas >= 6 / 10)
obs_mus = numpy.random.uniform(-1, 5, 400)
mus, sigmas, weights = adaptive_parzen_estimator(
obs_mus, low, high, prior_weight=1.0, equal_weight=False, flat_num=25
)
assert len(mus) == len(sigmas) == len(weights) == 400 + 1
assert numpy.all(weights[-25:] == weights[-1])
assert numpy.all(sigmas <= 6) and numpy.all(sigmas >= 6 / 20)
obs_mus = numpy.random.uniform(-1, 5, 10000)
mus, sigmas, weights = adaptive_parzen_estimator(
obs_mus, low, high, prior_weight=1.0, equal_weight=False, flat_num=25
)
assert len(mus) == len(sigmas) == len(weights) == 10000 + 1
assert numpy.all(weights[-25:] == weights[-1])
assert numpy.all(sigmas <= 6) and numpy.all(sigmas >= 6 / 100)
class TestCategoricalSampler:
"""Tests for TPE Categorical Sampler"""
def test_cat_sampler_creation(self, tpe):
"""Test CategoricalSampler creation"""
obs = [0, 3, 9]
choices = list(range(-5, 5))
cat_sampler = CategoricalSampler(tpe, obs, choices)
assert len(cat_sampler.weights) == len(choices)
obs = [0, 3, 9]
choices = ["a", "b", 11, 15, 17, 18, 19, 20, 25, "c"]
cat_sampler = CategoricalSampler(tpe, obs, choices)
assert len(cat_sampler.weights) == len(choices)
tpe.equal_weight = True
tpe.prior_weight = 1.0
obs = numpy.random.randint(0, 10, 100)
cat_sampler = CategoricalSampler(tpe, obs, choices)
counts_obs = numpy.bincount(obs) + 1.0
weights = counts_obs / counts_obs.sum()
assert numpy.all(cat_sampler.weights == weights)
tpe.equal_weight = False
tpe.prior_weight = 0.5
tpe.full_weight_num = 30
obs = numpy.random.randint(0, 10, 100)
cat_sampler = CategoricalSampler(tpe, obs, choices)
ramp = numpy.linspace(1.0 / 100, 1.0, num=100 - 30)
full = numpy.ones(30)
ramp_weights = numpy.concatenate([ramp, full])
counts_obs = numpy.bincount(obs, weights=ramp_weights) + 0.5
weights = counts_obs / counts_obs.sum()
assert numpy.all(cat_sampler.weights == weights)
def test_sample(self, tpe):
"""Test CategoricalSampler sample function"""
obs = numpy.random.randint(0, 10, 100)
choices = ["a", "b", 11, 15, 17, 18, 19, 20, 25, "c"]
cat_sampler = CategoricalSampler(tpe, obs, choices)
points = cat_sampler.sample(25)
assert len(points) == 25
assert numpy.all(points >= 0)
assert numpy.all(points < 10)
weights = numpy.linspace(1, 10, num=10) ** 3
numpy.random.shuffle(weights)
weights = weights / weights.sum()
cat_sampler = CategoricalSampler(tpe, obs, choices)
cat_sampler.weights = weights
points = cat_sampler.sample(10000)
points = numpy.array(points)
hist = numpy.bincount(points)
assert numpy.all(hist.argsort() == weights.argsort())
assert len(points) == 10000
assert numpy.all(points >= 0)
assert numpy.all(points < 10)
def test_get_loglikelis(self, tpe):
"""Test to get log likelis of points"""
obs = numpy.random.randint(0, 10, 100)
choices = ["a", "b", 11, 15, 17, 18, 19, 20, 25, "c"]
cat_sampler = CategoricalSampler(tpe, obs, choices)
points = cat_sampler.sample(25)
likelis = cat_sampler.get_loglikelis(points)
assert numpy.all(
likelis == numpy.log(numpy.asarray(cat_sampler.weights)[points])
)
class TestGMMSampler:
"""Tests for TPE GMM Sampler"""
def test_gmm_sampler_creation(self, tpe):
"""Test GMMSampler creation"""
mus = numpy.linspace(-3, 3, num=12, endpoint=False)
sigmas = [0.5] * 12
gmm_sampler = GMMSampler(tpe, mus, sigmas, -3, 3)
assert len(gmm_sampler.weights) == 12
assert len(gmm_sampler.pdfs) == 12
def test_sample(self, tpe):
"""Test GMMSampler sample function"""
mus = numpy.linspace(-3, 3, num=12, endpoint=False)
sigmas = [0.5] * 12
gmm_sampler = GMMSampler(tpe, mus, sigmas, -3, 3)
points = gmm_sampler.sample(25)
points = numpy.array(points)
assert len(points) <= 25
assert numpy.all(points >= -3)
assert numpy.all(points < 3)
mus = numpy.linspace(-10, 10, num=10, endpoint=False)
sigmas = [0.00001] * 10
weights = numpy.linspace(1, 10, num=10) ** 3
numpy.random.shuffle(weights)
weights = weights / weights.sum()
gmm_sampler = GMMSampler(tpe, mus, sigmas, -11, 9, weights)
points = gmm_sampler.sample(10000)
points = numpy.array(points)
hist = numpy.histogram(points, bins=[-11, -9, -7, -5, -3, -1, 1, 3, 5, 7, 9])
assert numpy.all(hist[0].argsort() == numpy.array(weights).argsort())
assert numpy.all(points >= -11)
assert numpy.all(points < 9)
    def test_get_loglikelis(self, tpe):
"""Test to get log likelis of points"""
mus = numpy.linspace(-10, 10, num=10, endpoint=False)
weights = numpy.linspace(1, 10, num=10) ** 3
numpy.random.shuffle(weights)
weights = weights / weights.sum()
sigmas = [0.00001] * 10
gmm_sampler = GMMSampler(tpe, mus, sigmas, -11, 9, weights)
points = [mus[7]]
pdf = norm(mus[7], sigmas[7])
point_likeli = numpy.log(pdf.pdf(mus[7]) * weights[7])
likelis = gmm_sampler.get_loglikelis(points)
assert list(likelis) == point_likeli
assert likelis[0] == point_likeli
sigmas = [2] * 10
gmm_sampler = GMMSampler(tpe, mus, sigmas, -11, 9, weights)
log_pdf = []
pdfs = []
for i in range(10):
pdfs.append(norm(mus[i], sigmas[i]))
for pdf, weight in zip(pdfs, weights):
log_pdf.append(numpy.log(pdf.pdf(0) * weight))
point_likeli = numpy.log(numpy.sum(numpy.exp(log_pdf)))
points = numpy.random.uniform(-11, 9, 30)
points = numpy.insert(points, 10, 0)
likelis = gmm_sampler.get_loglikelis(points)
point_likeli = numpy.format_float_scientific(point_likeli, precision=10)
gmm_likeli = numpy.format_float_scientific(likelis[10], precision=10)
assert point_likeli == gmm_likeli
assert len(likelis) == len(points)
class TestTPE:
"""Tests for the algo TPE."""
def test_seed_rng(self, tpe):
"""Test that algo is seeded properly"""
tpe.seed_rng(1)
a = tpe.suggest(1)[0]
assert not numpy.allclose(a, tpe.suggest(1)[0])
tpe.seed_rng(1)
assert numpy.allclose(a, tpe.suggest(1)[0])
def test_set_state(self, tpe):
"""Test that state is reset properly"""
tpe.seed_rng(1)
state = tpe.state_dict
a = tpe.suggest(1)[0]
assert not numpy.allclose(a, tpe.suggest(1)[0])
tpe.set_state(state)
assert numpy.allclose(a, tpe.suggest(1)[0])
def test_unsupported_space(self):
"""Test tpe only work for supported search space"""
space = Space()
dim1 = Real("yolo1", "uniform", -10, 10)
space.register(dim1)
dim2 = Real("yolo2", "reciprocal", 10, 20)
space.register(dim2)
categories = ["a", 0.1, 2, "c"]
dim3 = Categorical("yolo3", categories)
space.register(dim3)
dim4 = Fidelity("epoch", 1, 9, 3)
space.register(dim4)
TPE(space)
space = Space()
dim = Real("yolo1", "norm", 0.9)
space.register(dim)
with pytest.raises(ValueError) as ex:
tpe = TPE(space)
tpe.space = build_required_space(
space, shape_requirement=TPE.requires_shape
)
assert (
"TPE now only supports uniform, loguniform, uniform discrete and choices"
in str(ex.value)
)
def test_split_trials(self, tpe):
"""Test observed trials can be split based on TPE gamma"""
space = Space()
dim1 = Real("yolo1", "uniform", -3, 6)
space.register(dim1)
tpe.space = space
points = numpy.linspace(-3, 3, num=10, endpoint=False)
results = numpy.linspace(0, 1, num=10, endpoint=False)
points_results = list(zip(points, results))
numpy.random.shuffle(points_results)
points, results = zip(*points_results)
for point, result in zip(points, results):
tpe.observe([[point]], [{"objective": result}])
tpe.gamma = 0.25
below_points, above_points = tpe.split_trials()
assert below_points == [[-3.0], [-2.4], [-1.8]]
assert len(above_points) == 7
tpe.gamma = 0.2
below_points, above_points = tpe.split_trials()
assert below_points == [[-3.0], [-2.4]]
assert len(above_points) == 8
def test_sample_int_dimension(self):
"""Test sample values for a integer dimension"""
space = Space()
dim1 = Integer("yolo1", "uniform", -10, 20)
space.register(dim1)
dim2 = Integer("yolo2", "uniform", -5, 10, shape=(2))
space.register(dim2)
tpe = TPE(space)
obs_points = numpy.random.randint(-10, 10, 100)
below_points = [obs_points[:25]]
above_points = [obs_points[25:]]
points = tpe.sample_one_dimension(
dim1, 1, below_points, above_points, tpe._sample_int_point
)
points = numpy.asarray(points)
assert len(points) == 1
assert all(points >= -10)
assert all(points < 10)
obs_points_below = numpy.random.randint(-10, 0, 25).reshape(1, 25)
obs_points_above = numpy.random.randint(0, 10, 75).reshape(1, 75)
points = tpe.sample_one_dimension(
dim1, 1, obs_points_below, obs_points_above, tpe._sample_int_point
)
points = numpy.asarray(points)
assert len(points) == 1
assert all(points >= -10)
assert all(points < 0)
obs_points = numpy.random.randint(-5, 5, 100)
below_points = [obs_points[:25], obs_points[25:50]]
above_points = [obs_points[50:75], obs_points[75:]]
points = tpe.sample_one_dimension(
dim2, 2, below_points, above_points, tpe._sample_int_point
)
points = numpy.asarray(points)
assert len(points) == 2
assert all(points >= -10)
assert all(points < 10)
tpe.n_ei_candidates = 0
points = tpe.sample_one_dimension(
dim2, 2, below_points, above_points, tpe._sample_int_point
)
assert len(points) == 0
def test_sample_categorical_dimension(self):
"""Test sample values for a categorical dimension"""
space = Space()
categories = ["a", "b", 11, 15, 17, 18, 19, 20, 25, "c"]
dim1 = Categorical("yolo1", categories)
space.register(dim1)
dim2 = Categorical("yolo2", categories, shape=(2))
space.register(dim2)
tpe = TPE(space)
obs_points = numpy.random.randint(0, 10, 100)
obs_points = [categories[point] for point in obs_points]
below_points = [obs_points[:25]]
above_points = [obs_points[25:]]
points = tpe.sample_one_dimension(
dim1, 1, below_points, above_points, tpe._sample_categorical_point
)
assert len(points) == 1
assert points[0] in categories
obs_points_below = numpy.random.randint(0, 3, 25)
obs_points_above = numpy.random.randint(3, 10, 75)
below_points = [[categories[point] for point in obs_points_below]]
above_points = [[categories[point] for point in obs_points_above]]
points = tpe.sample_one_dimension(
dim1, 1, below_points, above_points, tpe._sample_categorical_point
)
assert len(points) == 1
assert points[0] in categories[:3]
obs_points = numpy.random.randint(0, 10, 100)
obs_points = [categories[point] for point in obs_points]
below_points = [obs_points[:25], obs_points[25:50]]
above_points = [obs_points[50:75], obs_points[75:]]
points = tpe.sample_one_dimension(
dim2, 2, below_points, above_points, tpe._sample_categorical_point
)
assert len(points) == 2
assert points[0] in categories
assert points[1] in categories
tpe.n_ei_candidates = 0
points = tpe.sample_one_dimension(
dim2, 2, below_points, above_points, tpe._sample_categorical_point
)
assert len(points) == 0
def test_sample_real_dimension(self):
"""Test sample values for a real dimension"""
space = Space()
dim1 = Real("yolo1", "uniform", -10, 20)
space.register(dim1)
dim2 = Real("yolo2", "uniform", -5, 10, shape=(2))
space.register(dim2)
dim3 = Real("yolo3", "reciprocal", 1, 20)
space.register(dim3)
tpe = TPE(space)
points = numpy.random.uniform(-10, 10, 20)
below_points = [points[:8]]
above_points = [points[8:]]
points = tpe._sample_real_dimension(dim1, 1, below_points, above_points)
points = numpy.asarray(points)
assert len(points) == 1
assert all(points >= -10)
assert all(points < 10)
points = numpy.random.uniform(1, 20, 20)
below_points = [points[:8]]
above_points = [points[8:]]
points = tpe._sample_real_dimension(dim3, 1, below_points, above_points)
points = numpy.asarray(points)
assert len(points) == 1
assert all(points >= 1)
assert all(points < 20)
below_points = numpy.random.uniform(-10, 0, 25).reshape(1, 25)
above_points = numpy.random.uniform(0, 10, 75).reshape(1, 75)
points = tpe._sample_real_dimension(dim1, 1, below_points, above_points)
points = numpy.asarray(points)
assert len(points) == 1
assert all(points >= -10)
assert all(points < 0)
points = numpy.random.uniform(-5, 5, 32)
below_points = [points[:8], points[8:16]]
above_points = [points[16:24], points[24:]]
points = tpe._sample_real_dimension(dim2, 2, below_points, above_points)
points = numpy.asarray(points)
assert len(points) == 2
assert all(points >= -10)
assert all(points < 10)
tpe.n_ei_candidates = 0
points = tpe._sample_real_dimension(dim2, 2, below_points, above_points)
assert len(points) == 0
def test_suggest(self, tpe):
"""Test suggest with no shape dimensions"""
tpe.n_initial_points = 10
results = numpy.random.random(10)
for i in range(10):
point = tpe.suggest(1)
assert len(point) == 1
assert len(point[0]) == 3
assert not isinstance(point[0][0], tuple)
tpe.observe(point, [{"objective": results[i]}])
point = tpe.suggest(1)
assert len(point) == 1
assert len(point[0]) == 3
assert not isinstance(point[0][0], tuple)
def test_1d_shape(self, tpe):
"""Test suggest with 1D shape dimensions"""
space = Space()
dim1 = Real("yolo1", "uniform", -3, 6, shape=(2))
space.register(dim1)
dim2 = Real("yolo2", "uniform", -2, 4)
space.register(dim2)
tpe.space = space
tpe.n_initial_points = 10
results = numpy.random.random(10)
for i in range(10):
point = tpe.suggest(1)
assert len(point) == 1
assert len(point[0]) == 2
assert len(point[0][0]) == 2
tpe.observe(point, [{"objective": results[i]}])
point = tpe.suggest(1)
assert len(point) == 1
assert len(point[0]) == 2
assert len(point[0][0]) == 2
def test_suggest_initial_points(self, tpe, monkeypatch):
"""Test that initial points can be sampled correctly"""
points = [(i, i - 6, "c") for i in range(1, 12)]
global index
index = 0
def sample(num=1, seed=None):
global index
pts = points[index : index + num]
index += num
return pts
monkeypatch.setattr(tpe.space, "sample", sample)
tpe.n_initial_points = 10
results = numpy.random.random(10)
for i in range(1, 11):
point = tpe.suggest(1)[0]
assert point == (i, i - 6, "c")
tpe.observe([point], [{"objective": results[i - 1]}])
point = tpe.suggest(1)[0]
assert point != (11, 5, "c")
def test_suggest_ei_candidates(self, tpe):
"""Test suggest with no shape dimensions"""
tpe.n_initial_points = 2
tpe.n_ei_candidates = 0
results = numpy.random.random(2)
for i in range(2):
point = tpe.suggest(1)
assert len(point) == 1
assert len(point[0]) == 3
assert not isinstance(point[0][0], tuple)
tpe.observe(point, [{"objective": results[i]}])
point = tpe.suggest(1)
assert not point
tpe.n_ei_candidates = 24
point = tpe.suggest(1)
assert len(point) > 0
|
python
|
import pygame
import random
pygame.init()  # initialize all pygame modules before creating the display
screen_size = [360, 600]
screen = pygame.display.set_mode(screen_size)
pygame.font.init()
background = pygame.image.load('background.png')
user = pygame.image.load('user.png')
chicken = pygame.image.load('chicken.png')
def display_score(score):
font = pygame.font.SysFont('Comic Sans MS', 30)
score_text = 'Score: ' + str(score)
text_img = font.render(score_text, True, (0, 255, 0))
screen.blit(text_img, [20, 10])
def random_offset():
return -1*random.randint(100, 1500)
chicken_y = [random_offset(), random_offset(), random_offset()]
user_x = 150
score = 0
def crashed(idx):
global score
global keep_alive
score = score - 50
chicken_y[idx] = random_offset()
if score < -500:
keep_alive = False
def update_chicken_pos(idx):
global score
if chicken_y[idx] > 600:
chicken_y[idx] = random_offset()
score = score + 5
print('score', score)
else:
chicken_y[idx] = chicken_y[idx] + 5
keep_alive = True
clock = pygame.time.Clock()
while keep_alive:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:  # allow the window to be closed
            keep_alive = False
keys = pygame.key.get_pressed()
if keys[pygame.K_RIGHT] and user_x < 280:
user_x = user_x + 10
elif keys[pygame.K_LEFT] and user_x > 0:
user_x = user_x - 10
update_chicken_pos(0)
update_chicken_pos(1)
update_chicken_pos(2)
screen.blit(background, [0, 0])
screen.blit(user, [user_x, 520])
screen.blit(chicken, [0, chicken_y[0]])
screen.blit(chicken, [150, chicken_y[1]])
screen.blit(chicken, [280, chicken_y[2]])
if chicken_y[0] > 500 and user_x < 70:
crashed(0)
if chicken_y[1] > 500 and user_x > 80 and user_x < 200:
crashed(1)
if chicken_y[2] > 500 and user_x > 220:
crashed(2)
display_score(score)
pygame.display.update()
clock.tick(60)
|
python
|
from flask import request, jsonify, Blueprint
from flask_jwt_extended import (
create_access_token,
create_refresh_token,
jwt_refresh_token_required,
get_jwt_identity
)
from flasgger import swag_from
from myapi.models import User
from myapi.extensions import pwd_context, jwt
from myapi.api.doc.login_doc import login_post
blueprint = Blueprint('auth', __name__, url_prefix='/auth')
@blueprint.route('/login', methods=['POST'])
@swag_from(login_post)
def login():
"""Authenticate user and return token
---
tags:
- Login
"""
# if not request.is_json:
# return jsonify({"msg": "Missing JSON in request"}), 400
username = request.form.get('username', None)
password = request.form.get('password', None)
if not username or not password:
return jsonify({"msg": "Missing username or password"}), 400
user = User.query.filter_by(username=username).first()
if user is None or not pwd_context.verify(password, user.password):
return jsonify({"msg": "Bad credentials"}), 400
access_token = create_access_token(identity=user.id)
refresh_token = create_refresh_token(identity=user.id)
ret = {
'access_token': access_token,
'refresh_token': refresh_token
}
return jsonify(ret), 200
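# Illustrative usage (host, port and credentials are made-up assumptions):
#
#   curl -X POST -F 'username=alice' -F 'password=secret' http://localhost:5000/auth/login
#
# On success this returns HTTP 200 with {"access_token": "<jwt>", "refresh_token": "<jwt>"};
# missing fields give 400 {"msg": "Missing username or password"} and a failed password
# check gives 400 {"msg": "Bad credentials"}.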
@blueprint.route('/refresh', methods=['POST'])
@jwt_refresh_token_required
def refresh():
current_user = get_jwt_identity()
ret = {
'access_token': create_access_token(identity=current_user)
}
return jsonify(ret), 200
@jwt.user_loader_callback_loader
def user_loader_callback(identity):
return User.query.get(identity)
|
python
|
import psycopg2
import os
from dotenv import load_dotenv
import sqlite3
import pandas as pd
import datetime
from psycopg2.extras import execute_values
load_dotenv()
# connecting to our elephant sql rpg database
RPG_DB_NAME = os.getenv('RPG_DB_NAME', default='oops')
RPG_DB_USER = os.getenv('RPG_DB_USER', default='oops')
RPG_DB_PASSWORD = os.getenv('RPG_DB_PASSWORD', default='oops')
RPG_DB_HOST = os.getenv('RPG_DB_HOST', default='oops')
postgresql_connection = psycopg2.connect(dbname=RPG_DB_NAME, user=RPG_DB_USER, password=RPG_DB_PASSWORD, host=RPG_DB_HOST)
# connecting to local rpg database
DB_FILEPATH = os.path.join(os.path.dirname(__file__), '..', '..', 'module1-introduction-to-sql', 'rpg_db.sqlite3')
sqlite_connection = sqlite3.connect(DB_FILEPATH)
# creating cursors for both of the databases
sqlite_cursor = sqlite_connection.cursor()
postgresql_cursor = postgresql_connection.cursor()
# getting all of the tables names from the local rpg database
query = "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name"
table_names = sqlite_cursor.execute(query).fetchall()
query = 'SELECT * FROM armory_item'
result = sqlite_cursor.execute(query).fetchall()
total_query = ''
for table in table_names:
# getting all of the schema for the tables from the local db
query = f"SELECT sql from sqlite_master WHERE name = \'{table[0]}\'"
result = sqlite_cursor.execute(query).fetchall()
'''
    was going to implement this using Python code, but had to hard-code it with SQL queries (run in TablePlus) due to differences between SQLite and Postgres
# creating the tables in the elephant db
query = result[0][0].replace('integer NOT NULL PRIMARY KEY AUTOINCREMENT', 'SERIAL PRIMARY KEY')
query = query.replace('CREATE TABLE ', 'CREATE TABLE IF NOT EXISTS ')
query = query.replace('datetime', 'date')
total_query += query + ';'
#print(total_query)
postgresql_cursor.execute(total_query)
'''
# getting data to insert into table from local db
query = f'SELECT * from {table[0]}'
result = sqlite_cursor.execute(query).fetchall()
print(f'\n{table[0]}')
insertion_query = f'INSERT INTO {table[0]} VALUES %s'
if table[0] in ['charactercreator_cleric', 'charactercreator_fighter', 'charactercreator_mage',
'charactercreator_necromancer', 'charactercreator_thief']:
new_result = []
for each in result:
each = list(each)
each[1] = bool(each[1])
new_result.append(each)
execute_values(postgresql_cursor, insertion_query, new_result)
elif table[0] != 'sqlite_sequence': # not sure how to create this table with an unsigned data type
execute_values(postgresql_cursor, insertion_query, result)
postgresql_connection.commit()
postgresql_cursor.close()
postgresql_connection.close()
|
python
|
import time
def time_training(fitter):
"""Print the time taken for a machine learning algorithm to train.
Parameters:
fitter(function): function used to train the model
Returns: None
"""
start = time.time()
fitter()
end = time.time()
    diff = end - start
    print(f'Training time: {diff:.3f} seconds.')  # time.time() differences are in seconds, not milliseconds
return None
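# Illustrative usage (model, X and y are assumptions, not defined in this module):
#
#   time_training(lambda: model.fit(X, y))
#
# Any zero-argument callable works, since time_training simply calls fitter().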
|
python
|
################################################################################
# Module: decision.py
# Description: Agent decision function templates
# Rafal Kucharski @ TU Delft, The Netherlands
################################################################################
from math import exp
import random
import pandas as pd
from dotmap import DotMap
from numpy.random.mtrand import choice
from MaaSSim.driver import driverEvent
from MaaSSim.traveller import travellerEvent
#################
# DUMMIES #
#################
def dummy_False(*args, **kwargs):
# dummy function to always return False,
# used as default function inside of functionality
# (if the behaviour is not modelled)
return False
def dummy_True(*args, **kwargs):
# dummy function to always return True
return True
def f_dummy_repos(*args, **kwargs):
    # handles the vehicles when they become IDLE (after completing the request or entering the system)
repos = DotMap()
repos.flag = False
# repos.pos = None
# repos.time = 0
return repos
################
# DRIVER #
################
def f_driver_out(*args, **kwargs):
# returns boolean True if vehicle decides to opt out
leave_threshold = 0.25
back_threshold = 0.5
unserved_threshold = 0.005
anneal = 0.2
veh = kwargs.get('veh', None) # input
sim = veh.sim # input
flag = False # output
if len(sim.runs) == 0: # first day
msg = 'veh {} stays on'.format(veh.id)
else:
last_run = sim.run_ids[-1]
avg_yesterday = sim.res[last_run].veh_exp.nRIDES.quantile(
back_threshold) # how many rides was there on average
quant_yesterday = sim.res[last_run].veh_exp.nRIDES.quantile(
leave_threshold) # what was the lower quantile of rides
prev_rides = pd.Series([sim.res[_].veh_exp.loc[veh.id].nRIDES for _ in
sim.run_ids]).mean() # how many rides did I have on average before
rides_yesterday = sim.res[last_run].veh_exp.loc[veh.id].nRIDES # how many rides did I have yesterday
unserved_demand_yesterday = sim.res[last_run].pax_exp[sim.res[last_run].pax_exp.LOSES_PATIENCE > 0].shape[0] / \
sim.res[last_run].pax_exp.shape[0] # what is the share of unserved demand
did_i_work_yesterday = sim.res[last_run].veh_exp.loc[veh.id].ENDS_SHIFT > 0
if not did_i_work_yesterday:
if avg_yesterday < prev_rides:
msg = 'veh {} stays out'.format(veh.id)
flag = True
elif unserved_demand_yesterday > unserved_threshold:
if random.random() < anneal:
msg = 'veh {} comes to serve unserved'.format(veh.id)
flag = False
                else:
                    msg = 'veh {} lets someone else come to serve unserved'.format(veh.id)
                    flag = True  # stay out and let another driver pick up the unserved demand
else:
msg = 'veh {} comes back'.format(veh.id)
flag = False
pass
else:
if rides_yesterday > quant_yesterday:
msg = 'veh {} stays in'.format(veh.id)
flag = False
else:
msg = 'veh {} leaves'.format(veh.id)
flag = True
sim.logger.info('DRIVER OUT: ' + msg)
return flag
def f_repos(*args, **kwargs):
"""
    handles the vehicles when they become IDLE (after completing the request or entering the system)
:param args:
:param kwargs: vehicle and simulation object (veh.sim)
:return: structure with flag = bool, position to reposition to and time that it will take to reposition there.
"""
import random
repos = DotMap()
if random.random() > 0.8: # 20% of cases driver will repos
driver = kwargs.get('veh', None)
sim = driver.sim
neighbors = list(sim.inData.G.neighbors(driver.veh.pos))
if len(neighbors) == 0:
# escape from dead-end (teleport)
repos.pos = sim.inData.nodes.sample(1).squeeze().name
repos.time = 300
else:
repos.pos = random.choice(neighbors)
repos.time = driver.sim.skims.ride[repos.pos][driver.veh.pos]
repos.flag = True
else:
repos.flag = False
return repos
def f_decline(*args, **kwargs):
# determines whether driver will pick up the request or not
# now it accepts requests only in the first quartile of travel times
wait_limit = 200
fare_limit = 0.1
veh = kwargs.get('veh',None)
offers = veh.platform.offers
my_offer = None
for key, offer in offers.items():
if offer['status'] == 0 and offer['veh_id'] == veh.id:
my_offer = offer
break
if my_offer is None:
return False
wait_time = my_offer['wait_time']
fare = my_offer['fare']
flag = False # i do not decline
if wait_time >= wait_limit:
        flag = True  # unless I have to wait a lot
if fare < fare_limit:
flag = True # or fare is low
#if flag:
# veh.sim.logger.critical('Veh {} declined offer with {} wait time and fare {}'.format(veh.id, wait_time,fare))
return flag
# ######### #
# PLATFORM #
# ######### #
def f_match(**kwargs):
"""
    for each platform, whenever one of the queues changes (new idle vehicle or new unserved request)
this procedure handles the queue and prepares transactions between drivers and travellers
it operates based on nearest vehicle and prepares and offer to accept by traveller/vehicle
:param kwargs:
:return:
"""
platform = kwargs.get('platform') # platform for which we perform matching
vehQ = platform.vehQ # queue of idle vehicles
reqQ = platform.reqQ # queue of unserved requests
sim = platform.sim # reference to the simulation object
while min(len(reqQ), len(vehQ)) > 0: # loop until one of queues is empty (i.e. all requests handled)
requests = sim.inData.requests.loc[reqQ] # queued schedules of requests
vehicles = sim.vehicles.loc[vehQ] # vehicle agents
skimQ = sim.skims.ride[requests.origin].loc[vehicles.pos].copy().stack() # travel times between
# requests and vehicles in the column vector form
skimQ = skimQ.drop(platform.tabu, errors='ignore') # drop already rejected matches
if skimQ.shape[0] == 0:
sim.logger.warn("Nobody likes each other, "
"Qs {}veh; {}req; tabu {}".format(len(vehQ), len(reqQ), len(platform.tabu)))
break # nobody likes each other - wait until new request or new vehicle
vehPos, reqPos = skimQ.idxmin() # find the closest ones
mintime = skimQ.min() # and the travel time
vehicle = vehicles[vehicles.pos == vehPos].iloc[0]
veh_id = vehicle.name
veh = sim.vehs[veh_id] # vehicle agent
request = requests[requests.origin == reqPos].iloc[0]
req_id = request.name
simpaxes = request.sim_schedule.req_id.dropna().unique()
        simpax = sim.pax[simpaxes[0]]  # first traveller of a shared ride (the leader and decision maker)
veh.update(event=driverEvent.RECEIVES_REQUEST)
for i in simpaxes:
sim.pax[i].update(event=travellerEvent.RECEIVES_OFFER)
if simpax.veh is not None: # the traveller already assigned (to a different platform)
if req_id in platform.reqQ: # we were too late, forget about it
platform.reqQ.pop(platform.reqQ.index(req_id)) # pop this request (vehicle still in the queue)
else:
for i in simpaxes:
offer_id = i
pax_request = sim.pax[i].request
if isinstance(pax_request.ttrav, int):
ttrav = pax_request.ttrav
else:
ttrav = pax_request.ttrav.total_seconds()
offer = {'pax_id': i,
'req_id': pax_request.name,
'simpaxes': simpaxes,
'veh_id': veh_id,
'status': 0, # 0 - offer made, 1 - accepted, -1 rejected by traveller, -2 rejected by veh
'request': pax_request,
'wait_time': mintime,
'travel_time': ttrav,
'fare': platform.platform.fare * sim.pax[i].request.dist / 1000} # make an offer
platform.offers[offer_id] = offer # bookkeeping of offers made by platform
sim.pax[i].offers[platform.platform.name] = offer # offer transferred to
if veh.f_driver_decline(veh=veh): # allow driver reject the request
veh.update(event=driverEvent.REJECTS_REQUEST)
platform.offers[offer_id]['status'] = -2
for i in simpaxes:
sim.pax[i].update(event=travellerEvent.IS_REJECTED_BY_VEHICLE)
sim.pax[i].offers[platform.platform.name]['status'] = -2
sim.logger.warning("pax {:>4} {:40} {}".format(request.name,
'got rejected by vehicle ' + str(veh_id),
sim.print_now()))
platform.tabu.append((vehPos, reqPos)) # they are unmatchable
else:
for i in simpaxes:
if not sim.pax[i].got_offered.triggered:
sim.pax[i].got_offered.succeed()
vehQ.pop(vehQ.index(veh_id)) # pop offered ones
reqQ.pop(reqQ.index(req_id)) # from the queues
platform.updateQs()
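# Hedged toy example of the nearest-neighbour step in f_match: stack a small travel-time
# matrix (vehicle positions x request origins) and pick the minimal pair with idxmin().
# Node labels and times are made up; only pandas is assumed.
def _demo_nearest_match():
    import pandas as pd
    skim = pd.DataFrame({'reqA': [120, 300], 'reqB': [90, 240]},
                        index=['veh1_pos', 'veh2_pos'])  # travel times in seconds
    skimQ = skim.stack()             # column vector indexed by (veh_pos, req_origin)
    vehPos, reqPos = skimQ.idxmin()  # closest pair, here ('veh1_pos', 'reqB')
    mintime = skimQ.min()            # 90
    return vehPos, reqPos, mintime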
# ######### #
# TRAVELLER #
# ######### #
def f_platform_opt_out(*args, **kwargs):
pax = kwargs.get('pax', None)
return pax.request.platform == -1
def f_out(*args, **kwargs):
    # uses the pax_exp of a passenger populated in the previous run
    # prev_exp is drawn from this pd.DataFrame:
    # pd.DataFrame(columns=['wait_pickup', 'wait_match', 'tt'])
    # returns boolean True if the passenger decides to opt out
prev_exp = kwargs.get('prev_exp', None)
    if prev_exp is None:
        # no previous experience
        return False
else:
if prev_exp.iloc[0].outcome == 1:
return False
else:
return True
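# Minimal sketch of the previous-experience check in f_out on a toy DataFrame;
# the column values are hypothetical and the _demo_* helper is not part of the simulator.
def _demo_prev_exp_opt_out():
    import pandas as pd
    prev_exp = pd.DataFrame({'wait_pickup': [45], 'wait_match': [30], 'tt': [600], 'outcome': [1]})
    return prev_exp.iloc[0].outcome != 1  # opt out only if the previous day was unsuccessful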
def f_mode(*args, **kwargs):
# returns boolean True if passenger decides not to use MaaS (bad offer)
offer = kwargs.get('offer', None)
delta = 0.5
trip = kwargs.get('trip')
pass_walk_time = trip.pass_walk_time
veh_pickup_time = trip.sim.skims.ride.T[trip.veh.pos][trip.request.origin]
pass_matching_time = trip.sim.env.now - trip.t_matching
tt = trip.request.ttrav
return (max(pass_walk_time, veh_pickup_time) + pass_matching_time) / tt.seconds > delta
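# Worked toy numbers for the relative-delay rule in f_mode; all values are hypothetical.
def _demo_mode_rule():
    delta = 0.5
    pass_walk_time = 120      # seconds
    veh_pickup_time = 300     # seconds
    pass_matching_time = 60   # seconds
    tt = 600                  # in-vehicle travel time, seconds
    # (max(120, 300) + 60) / 600 = 0.6 > 0.5 -> the traveller rejects the offer
    return (max(pass_walk_time, veh_pickup_time) + pass_matching_time) / tt > delta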
def f_platform_choice(*args, **kwargs):
traveller = kwargs.get('traveller')
sim = traveller.sim
betas = sim.params.platform_choice
offers = traveller.offers
# calc utilities
exps = list()
add_opt_out = True
for platform, offer in offers.items():
        if add_opt_out:
            # the opt-out alternative is approximated from the first offer:
            # twice the wait and travel time, half the fare
            u = offer['wait_time'] * 2 * betas.Beta_wait + \
                offer['travel_time'] * 2 * betas.Beta_time + \
                offer['fare'] / 2 * betas.Beta_cost
            exps.append(exp(u))
            add_opt_out = False
u = offer['wait_time'] * betas.Beta_wait + \
offer['travel_time'] * betas.Beta_time + \
offer['fare'] * betas.Beta_cost
exps.append(exp(u))
p = [_ / sum(exps) for _ in exps]
platform_chosen = choice([-1] + list(offers.keys()), 1, p=p)[0] # random choice with p
if platform_chosen == -1:
sim.logger.info("pax {:>4} {:40} {}".format(traveller.id, 'chosen to opt out',
sim.print_now()))
else:
sim.logger.info("pax {:>4} {:40} {}".format(traveller.id, 'chosen platform ' + str(platform_chosen),
sim.print_now()))
sim.logger.info("pax {:>4} {:40} {}".format(traveller.id, 'platform probs: ' + str(p),
sim.print_now()))
# handle requests
for platform_id, offer in offers.items():
if int(platform_id) == platform_chosen:
sim.plats[platform_id].handle_accepted(offer['pax_id'])
else:
sim.plats[platform_id].handle_rejected(offer['pax_id'])
sim.logger.info("pax {:>4} {:40} {}".format(traveller.id,
"wait: {}, travel: {}, fare: {}".format(offer['wait_time'],
int(offer['travel_time']),
int(offer[
'fare'] * 100) / 100),
sim.print_now()))
return platform_chosen == -1
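# Hedged sketch of the multinomial-logit draw in f_platform_choice with toy utilities;
# the utility values are made up and -1 stands for the opt-out alternative.
# numpy is assumed only for the weighted random draw.
def _demo_platform_choice():
    from math import exp
    from numpy.random import choice
    utilities = {-1: -2.0, 0: -1.5, 1: -1.0}  # alternative -> systematic utility
    exps = {k: exp(u) for k, u in utilities.items()}
    p = [v / sum(exps.values()) for v in exps.values()]
    return choice(list(exps.keys()), 1, p=p)[0]  # random choice with logit probabilities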
#############
# SIMULATOR #
#############
def f_stop_crit(*args, **kwargs):
"""
    Decision whether to stop the experiment after the current iteration
:param args:
:param kwargs: sim object
:return: boolean flag
"""
sim = kwargs.get('sim', None)
convergence_threshold = 0.001
_ = sim.run_ids[-1]
    sim.logger.warning(sim.res[_].veh_exp[sim.res[_].veh_exp.ENDS_SHIFT > 0].shape[0])  # number of vehicles that ended a shift yesterday
if len(sim.runs) < 2:
sim.logger.warning('Early days')
return False
else:
# example of convergence on waiting times
convergence = abs((sim.res[sim.run_ids[-1]].pax_kpi['MEETS_DRIVER_AT_PICKUP']['mean'] -
sim.res[sim.run_ids[-2]].pax_kpi['MEETS_DRIVER_AT_PICKUP']['mean']) /
sim.res[sim.run_ids[-2]].pax_kpi['MEETS_DRIVER_AT_PICKUP']['mean'])
if convergence < convergence_threshold:
sim.logger.warn('CONVERGED to {} after {} days'.format(convergence, sim.run_ids[-1]))
return True
else:
sim.logger.warn('NOT CONVERGED to {} after {} days'.format(convergence, sim.run_ids[-1]))
return False
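# Toy illustration of the relative-change convergence test in f_stop_crit; the waiting
# times below are made up.
def _demo_convergence_check(prev_wait=121.0, last_wait=121.1, threshold=0.001):
    convergence = abs((last_wait - prev_wait) / prev_wait)  # ~0.00083
    return convergence < threshold  # True -> stop the experiment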