| content (stringlengths: 0–894k) | type (stringclasses: 2 values) |
| --- | --- |
# test.py
""" This file houses the suite of tests for main.py classes and functions
THIS FILE IS A MESS AND TOTALLY BROKEN AT THIS POINT
IT WILL NOT RUN
IT IS HERE THAT IT MAY BE CANABALISED FOR FUTURE ITERATIONS OF THE PROJECT
"""
def test_variables():
""" variables needed for various tests """
global game_1, game_2, game_3, game_4, game_5, game_6, game_7, game_8, game_9
global game_10, game_11, game_12, game_13, game_14, game_15, game_16, game_17, game_18
global game_19, game_20, game_21, game_22, game_23, game_24, game_25, game_26, game_27
global game_28, game_29, game_30, game_31, game_32, game_33, game_34, game_35, game_36
global game_37, game_38, game_39, game_40, game_41, game_42, game_43, game_44, game_45
global game_46, game_47, game_48, game_49, legal_checkmates
global fens, fen_1, fen_2, fen_3, fen_4
fen_1 = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1'
fen_2 = 'rnbqkbnr/pppppppp/8/8/4P3/8/PPPP1PPP/RNBQKBNR b KQkq e3 0 1'
fen_3 = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/8/PPPP1PPP/RNBQKBNR w KQkq c6 0 2'
fen_4 = 'rnbqkbnr/pp1ppppp/8/2p5/4P3/5N2/PPPP1PPP/RNBQKB1R b KQkq - 1 2'
fens = [fen_1, fen_2, fen_3, fen_4]
game_1 = '1. e4 d5 '\
'2. exd5 Qxd5 3. Nc3 Qd8 4. Bc4 Nf6 5. Nf3 Bg4 6. h3 Bxf3 '\
'7. Qxf3 e6 8. Qxb7 Nbd7 9. Nb5 Rc8 10. Nxa7 Nb6 11. Nxc8 Nxc8 '\
'12. d4 Nd6 13. Bb5+ Nxb5 14. Qxb5+ Nd7 15. d5 exd5 16. Be3 Bd6 '\
'17. Rd1 Qf6 18. Rxd5 Qg6 19. Bf4 Bxf4 20. Qxd7+ Kf8 21. Qd8#'
game_2 = '1.e4 b6 2.d4 Bb7 3.Bd3 f5 4.exf5 Bxg2 5.Qh5+ g6 6.fxg6 Nf6 ' \
'7.gxh7 Nxh5 8.Bg6#'
game_3 = '1.e4 e5 2.Nf3 Nc6 3.Bc4 Nf6 4.Ng5 d5 5.exd5 Nxd5 6.Nxf7 Kxf7 '\
'7.Qf3+ Ke6 8.Nc3 Nce7 9.O-O c6 10.Re1 Bd7 11.d4 Kd6 12.Rxe5 Ng6 '\
'13.Nxd5 Nxe5 14.dxe5+ Kc5 15.Qa3+ Kxc4 16.Qd3+ Kc5 17.b4#'
game_4 = '1. e4 e5 2. Nf3 d6 3. Bc4 Bg4 4. Nc3 g6 5. Nxe5 Bxd1 6. Bxf7+ '\
'Ke7 7. Nd5#'
game_5 = '1. e4 e5 2. Bc4 Bc5 3. d3 c6 4. Qe2 d6 5. f4 exf4 6. Bxf4 Qb6 '\
'7. Qf3 Qxb2 8. Bxf7+ Kd7 9. Ne2 Qxa1 10. Kd2 Bb4+ 11. Nbc3 '\
'Bxc3+ 12. Nxc3 Qxh1 13. Qg4+ Kc7 14. Qxg7 Nd7 15. Qg3 b6 '\
'16. Nb5+ cxb5 17. Bxd6+ Kb7 18. Bd5+ Ka6 19. d4 b4 20. Bxb4 '\
'Kb5 21. c4+ Kxb4 22. Qb3+ Ka5 23. Qb5#'
game_6 = '1.e4 e5 2.f4 exf4 3.Bc4 Qh4+ 4.Kf1 b5 5.Bxb5 Nf6 6.Nf3 Qh6 '\
'7.d3 Nh5 8.Nh4 Qg5 9.Nf5 c6 10.g4 Nf6 11.Rg1 cxb5 12.h4 Qg6 '\
'13.h5 Qg5 14.Qf3 Ng8 15.Bxf4 Qf6 16.Nc3 Bc5 17.Nd5 Qxb2 18.Bd6 '\
"Bxg1 {It is from this move that Black's defeat stems. Wilhelm "\
'Steinitz suggested in 1879 that a better move would be '\
'18... Qxa1+; likely moves to follow are 19. Ke2 Qb2 20. Kd2 '\
'Bxg1.} 19. e5 Qxa1+ 20. Ke2 Na6 21.Nxg7+ Kd8 22.Qf6+ Nxf6 '\
'23.Be7#'
game_7 = '1.e4 e5 2.Nf3 Nc6 3.Bc4 Bc5 4.c3 Qe7 5.O-O d6 6.d4 Bb6 7.Bg5 '\
'f6 8.Bh4 g5 9.Nxg5 fxg5 10.Qh5+ Kf8 11.Bxg5 Qe8 12.Qf3+ Kg7 '\
'13.Bxg8 Rxg8 14.Qf6#'
game_8 = '1.e4 e5 2.Nf3 Nc6 3.Bc4 Bc5 4.b4 Bxb4 5.c3 Ba5 6.d4 exd4 7.O-O '\
'd3 8.Qb3 Qf6 9.e5 Qg6 10.Re1 Nge7 11.Ba3 b5 12.Qxb5 Rb8 13.Qa4 '\
'Bb6 14.Nbd2 Bb7 15.Ne4 Qf5 16.Bxd3 Qh5 17.Nf6+ gxf6 18.exf6 '\
'Rg8 19.Rad1 Qxf3 20.Rxe7+ Nxe7 21.Qxd7+ Kxd7 22.Bf5+ Ke8 '\
'23.Bd7+ Kf8 24.Bxe7#'
game_9 = '1.e4 e5 2.Nf3 Nc6 3.Bc4 Nf6 4.d4 exd4 5.Ng5 d5 6.exd5 Nxd5 '\
'7.O-O Be7 8.Nxf7 Kxf7 9.Qf3+ Ke6 10.Nc3 dxc3 11.Re1+ Ne5 '\
'12.Bf4 Bf6 13.Bxe5 Bxe5 14.Rxe5+ Kxe5 15.Re1+ Kd4 16.Bxd5 Re8 '\
'17.Qd3+ Kc5 18.b4+ Kxb4 19.Qd4+ Ka5 20.Qxc3+ Ka4 21.Qb3+ Ka5 '\
'22.Qa3+ Kb6 23.Rb1#'
game_10 = '1. e4 e5 2. d4 exd4 3. Bc4 Nf6 4. e5 d5 5. Bb3 Ne4 6. Ne2 Bc5 '\
'7. f3 Qh4+ 8. g3 d3 9. gxh4 Bf2+ 10. Kf1 Bh3#'
game_11 = '1. e4 e5 2. Nf3 d6 3. Bc4 f5 4. d4 Nf6 5. Nc3 exd4 6. Qxd4 Bd7 '\
'7. Ng5 Nc6 8. Bf7+ Ke7 9. Qxf6+ Kxf6 10. Nd5+ Ke5 11. Nf3+ '\
'Kxe4 12. Nc3#'
game_12 = '1. e4 e5 2. d4 exd4 3. c3 dxc3 4. Bc4 d6 5. Nxc3 Nf6 6. Nf3 '\
'Bg4 7. O-O Nc6 8. Bg5 Ne5 9. Nxe5 Bxd1 10. Bxf7+ Ke7 11. Nd5#'
game_13 = '1.e4 e5 2.Nf3 Nc6 3.Bc4 Nh6 4.O-O Ng4 5.d4 exd4 6.Bxf7+ Kxf7 '\
'7.Ng5+ Kg6 8.Qxg4 d5 9.Ne6+ Kf6 10.Qf5+ Ke7 11.Bg5+ Kd6 '\
'12.Qxd5#'
game_14 = '1. e4 e6 2. d4 d5 3. Nc3 Bb4 4. Bd3 Bxc3+ 5. bxc3 h6 6. Ba3 '\
'Nd7 7. Qe2 dxe4 8. Bxe4 Ngf6 9. Bd3 b6 10. Qxe6+ fxe6 11. Bg6#'
game_15 = '1.e4 e5 2.d4 exd4 3.Nf3 Nc6 4.Bc4 Be7 5.c3 dxc3 6.Qd5 d6 '\
'7.Qxf7+ Kd7 8.Be6#'
game_16 = '1. Nf3 Nf6 2. c4 c5 3. d4 Nc6 4. d5 Nb8 5. Nc3 d6 6. g3 g6 '\
'7. Bg2 Bg7 8. O-O O-O 9. Bf4 h6 10. Qd2 Kh7 11. e4 Nh5 12. Be3 '\
'Nd7 13. Rae1 Rb8 14. Nh4 Ndf6 15. h3 Ng8 16. g4 Nhf6 17. f4 e6 '\
'18. Nf3 exd5 19. cxd5 b5 20. e5 b4 21. Nd1 Ne4 22. Qd3 f5 '\
'23. e6 Qa5 24. gxf5 gxf5 25. Nh4 Ba6 26. Qxe4 fxe4 27. Bxe4+ '\
'Kh8 28. Ng6+ Kh7 29. Nxf8+ Kh8 30. Ng6+ Kh7 31. Ne5+ Kh8 '\
'32. Nf7#'
game_17 = '1. e4 e5 2. f4 exf4 3. Nf3 Nf6 4. e5 Ng4 5. d4 g5 6. Nc3 Ne3 '\
'7. Qe2 Nxf1 8. Ne4 Ne3 9. Nf6+ Ke7 10. Bd2 Nxc2+ 11. Kf2 Nxa1 '\
'12. Nd5+ Ke6 13. Qc4 b5 14. Nxg5+ Qxg5 15. Nxc7+ Ke7 16. Nd5+ '\
'Ke6 17. Nxf4+ Ke7 18. Nd5+ Ke8 19. Qxc8+ Qd8 20. Nc7+ Ke7 '\
'21. Bb4+ d6 22. Bxd6+ Qxd6 23. Qe8#'
game_18 = '1. d4 { Notes by Raymond Keene. Here is a brilliant win by '\
'Tarrasch. } d5 2. Nf3 c5 3. c4 e6 4. e3 Nf6 5. Bd3 Nc6 6. O-O '\
'Bd6 7. b3 O-O 8. Bb2 b6 9. Nbd2 Bb7 10. Rc1 Qe7 11. cxd5 {11 '\
'Qe2!? } 11...exd5 12. Nh4 g6 13. Nhf3 Rad8 14. dxc5 bxc5 '\
'15. Bb5 Ne4 16. Bxc6 Bxc6 17. Qc2 Nxd2 18. Nxd2 {"The guardian '\
"of the king's field leaves his post for a moment, assuming "\
'wrongly that 19 Qc3 is a major threat" -- Tartakower. If 18 '\
'Qxd2 d4 19 exd4 Bxf3 20 gxf3 Qh4 } 18...d4 {!} 19. exd4 {19 '\
'Rfe1! } Bxh2+ 20. Kxh2 Qh4+ 21. Kg1 Bxg2 {!} 22. f3 {22 Kxg2 '\
'Qg4+ 23 Kh2 Rd5-+ } 22...Rfe8 23. Ne4 Qh1+ 24. Kf2 Bxf1 25. d5 '\
'{25 Rxf1 Qh2+ or 25 Nf6+ Kf8 26 Nxe8 Qg2+ } 25...f5 26. Qc3 '\
'Qg2+ 27. Ke3 Rxe4+ 28. fxe4 f4+ {28...Qg3+! } 29. Kxf4 Rf8+ '\
'30. Ke5 Qh2+ 31. Ke6 Re8+ 32. Kd7 Bb5#'
game_19 = '1. e4 e5 2. Nc3 Nc6 3. Nf3 d6 4. Bb5 Bg4 5. Nd5 Nge7 6. c3 a6 '\
'7. Ba4 b5 8. Bb3 Na5 9. Nxe5 Bxd1 10. Nf6+ gxf6 11. Bxf7#'
game_20 = '1.e4 {Notes by Karel Traxler} e5 2.Nf3 Nc6 3.Bc4 Nf6 4.Ng5 Bc5 '\
'{An original combination that is better than it looks. A small '\
'mistake by white can give black a decisive attack. It is not '\
'easy to find the best defense against it in a practical game '\
'and it is probably theoretically correct. ... It somewhat '\
'resembles the Blackmar-Jerome gambit: 1.e4 e5 2.Nf3 Nc6 3.Bc4 '\
'Bc5 4.Bxf7+?! Kxf7 5.Nxe5+?!} 5.Nxf7 Bxf2+ 6.Ke2 {The best '\
'defense is 6.Kf1! although after 6...Qe7 7.Nxh8 d5 8.exd5 Nd4 '\
'Black gets a strong attack.} Nd4+ 7.Kd3 b5 8.Bb3 Nxe4 9.Nxd8 '\
'{White has no defense; the mating finale is pretty.} Nc5+ '\
'10.Kc3 Ne2+ 11.Qxe2 Bd4+ 12.Kb4 a5+ 13.Kxb5 Ba6+ 14.Kxa5 Bd3+ '\
'15.Kb4 Na6+ 16.Ka4 Nb4+ 17.Kxb4 c5#'
game_21 = '1. e4 {Some sources indicate that this game is actually a '\
'post-mortem of a twenty-three move draw.} e5 2. f4 Bc5 3. Nf3 '\
'd6 4. Nc3 Nf6 5. Bc4 Nc6 6. d3 Bg4 7. Na4 exf4 8. Nxc5 dxc5 '\
'9. Bxf4 Nh5 10. Be3 Ne5 11. Nxe5 Bxd1 12. Bxf7+ Ke7 13. Bxc5+ '\
'Kf6 14. O-O+ Kxe5 15. Rf5#'
game_22 = '1. e4 e5 2. Nf3 Nc6 3. d4 exd4 4. Bc4 Nf6 5. e5 d5 6. Bb5 Ne4 '\
'7. Nxd4 Bd7 8. Bxc6 bxc6 9. O-O Be7 10. f3 Nc5 11. f4 f6 '\
'12. f5 fxe5 13. Qh5+ Kf8 14. Ne6+ Bxe6 15. fxe6+ Bf6 16. Qf7#'
    game_23 = '1.e4 d5 2.exd5 Qxd5 3.Ke2 {White intended to play 3.Nc3, but '\
'by accident moved the Bc1 to c3 instead. The rules at the time '\
'required that an illegal move be retracted and replaced with a '\
'legal king move, so 3.Ke2 was the penalty. What happened next '\
'is unclear. The usual account is that Black simply played '\
'3...Qe4#. (See, for example, Irving Chernev, "Wonders and '\
'Curiosities of Chess", New York, 1974, p. 119.) However, some '\
'contemporary accounts indicate that Black did not play the '\
'mate because he did not see it ("Deutsche Schachzeitung" of '\
'September 1893, p. 283). The tournament book is more '\
'ambiguous, claiming that Black let White wriggle for a while '\
'(Kiel, 1893 tournament book, p. 60 (in the original "[...] zog '\
'es aber vor, den Gegner erst noch zappeln zu lassen, ehe er '\
'ihn endlich erschlug.")), indicating either a pause before '\
'playing 3...Qe4# or preference for a slower win. If additional '\
'moves were played after 3.Ke2, they have not been '\
"recorded. Information was retrieved from Edward Winter's C.N "\
'5381.} Qe4#'
game_24 = '1. e4 d5 2. exd5 Qxd5 3. Nc3 Qd8 4. d4 Nc6 5. Nf3 Bg4 6. d5 '\
'Ne5 7. Nxe5 Bxd1 8. Bb5+ c6 9. dxc6 Qc7 10. cxb7+ Kd8 '\
'11. Nxf7#'
game_25 = '1.e4 d5 2.exd5 Qxd5 3.Nc3 Qa5 4.d4 c6 5.Nf3 Bg4 6.Bf4 e6 7.h3 '\
'Bxf3 8.Qxf3 Bb4 9.Be2 Nd7 10.a3 O-O-O 11.axb4 Qxa1+ 12.Kd2 '\
'Qxh1 13.Qxc6+ bxc6 14.Ba6#'
game_26 = '1. e4 e5 2. d4 exd4 3. Qxd4 Nc6 4. Qe3 Bb4+ 5. c3 Ba5 6. Bc4 '\
'Nge7 7. Qg3 O-O 8. h4 Ng6 9. h5 Nge5 10. Bg5 Qe8 11. Bf6 g6 '\
'12. hxg6 Nxg6 13. Qxg6+ hxg6 14. Rh8#'
game_27 = '1. e4 e5 2. Bc4 Bc5 3. Qh5 g6 4. Qxe5+ Ne7 5. Qxh8+ Ng8 '\
'6. Qxg8+ Bf8 7. Qxf7#'
game_28 = '1.e4 e5 2.Bc4 Bc5 3.Qe2 Qe7 4.f4 Bxg1 5.Rxg1 exf4 6.d4 Qh4+ '\
'7.g3 fxg3 8.Rxg3 Nf6 9.Nc3 Nh5 10.Bxf7+ Kxf7 11.Bg5 Nxg3 '\
'12.Qf3+ Kg6 13.Bxh4 Nh5 14.Qf5+ Kh6 15.Qg5#'
game_29 = '1.e4 e5 2.Nc3 Nc6 3.f4 exf4 4.d4 Qh4+ 5.Ke2 b6 6.Nb5 Nf6 7.Nf3 '\
'Qg4 8.Nxc7+ Kd8 9.Nxa8 Nxe4 10.c4 Bb4 11.Qa4 Nxd4+ 12.Kd1 Nf2#'
game_30 = '1. e4 e5 2. Nc3 Nc6 3. f4 d6 4. Nf3 a6 5. Bc4 Bg4 6. fxe5 Nxe5 '\
'7. Nxe5 Bxd1 8. Bxf7+ Ke7 9. Nd5#'
game_31 = '1.e4 e5 2.Nc3 Nc6 3.f4 exf4 4.Nf3 g5 5.h4 g4 6.Ng5 h6 7.Nxf7 '\
'Kxf7 8.Bc4+ Ke8 9.Qxg4 Ne5 10.Qh5+ Ke7 11.Qxe5#'
game_32 = '1. e4 e5 2. f4 exf4 3. Nf3 Nc6 4. Nc3 d6 5. Bc4 Bg4 6. Ne5 '\
'Bxd1 7. Bxf7+ Ke7 8. Nd5#'
game_33 = '1. e4 e5 2. Nf3 f5 3. Nxe5 Qf6 4. d4 d6 5. Nc4 fxe4 6. Be2 Nc6 '\
'7. d5 Ne5 8. O-O Nxc4 9. Bxc4 Qg6 10. Bb5+ Kd8 11. Bf4 h5 '\
'12. f3 Bf5 13. Nc3 exf3 14. Qxf3 Bxc2 15. Bg5+ Nf6 16. Rae1 c6 '\
'17. Bxf6+ Qxf6 18. Qe2 Qd4+ 19. Kh1 Bg6 20. Rxf8+ Kc7 21. Bxc6 '\
'bxc6 22. Nb5+ cxb5 23. Qxb5 Re8 24. Re7+ Rxe7 25. Qc6#'
game_34 = '1. e4 e5 2. f4 exf4 3. Bc4 d5 4. Bxd5 Nf6 5. Nc3 Bb4 6. Nf3 '\
'O-O 7. O-O Nxd5 8. Nxd5 Bd6 9. d4 g5 10. Nxg5 Qxg5 11. e5 Bh3 '\
'12. Rf2 Bxe5 13. dxe5 c6 14.Bxf4 Qg7 15. Nf6+ Kh8 16. Qh5 Rd8 '\
'17. Qxh3 Na6 18. Rf3 Qg6 19. Rc1 Kg7 20. Rg3 Rh8 21. Qh6#'
game_35 = '1.e4 e5 2.Bc4 Nf6 3.Nf3 Nc6 4.O-O Bc5 5.d3 d6 6.Bg5 Bg4 7.h3 '\
'h5 8.hxg4 hxg4 9.Nh2 g3 10.Nf3 Ng4 11.Bxd8 Bxf2+ 12.Rxf2 gxf2+ '\
'13.Kf1 Rh1+ 14.Ke2 Rxd1 15.Nfd2 Nd4+ 16.Kxd1 Ne3+ 17.Kc1 Ne2#'
game_36 = '1.e4 e5 2.f4 exf4 3.Nf3 g5 4.h4 g4 5.Ne5 Nf6 6.Bc4 d5 7.exd5 '\
'Bd6 8.d4 Nh5 9.Bb5+ c6 10.dxc6 bxc6 11.Nxc6 Nxc6 12.Bxc6+ Kf8 '\
'13.Bxa8 Ng3 14.Rh2 Bf5 15.Bd5 Kg7 16.Nc3 Re8+ 17.Kf2 Qb6 '\
'18.Na4 Qa6 19.Nc3 Be5 20.a4 Qf1+ 21.Qxf1 Bxd4+ 22.Be3 Rxe3 '\
'23.Kg1 Re1#'
game_37 = '1. e4 e5 2. Nf3 Nc6 3. Bc4 Nf6 4. d3 Bc5 5. O-O d6 6. Bg5 h6 '\
'7. Bh4 g5 8. Bg3 h5 9. Nxg5 h4 10. Nxf7 hxg3 11. Nxd8 Bg4 '\
'12. Qd2 Nd4 13. h3 Ne2+ 14. Kh1 Rxh3+ 15. gxh3 Bf3#'
game_38 = '1.d4 f5 2.c4 Nf6 3.Nc3 e6 4.Nf3 d5 5.e3 c6 6.Bd3 Bd6 7.O-O O-O '\
'8.Ne2 Nbd7 9.Ng5 Bxh2+ 10.Kh1 Ng4 11.f4 Qe8 12.g3 Qh5 13.Kg2 '\
'Bg1 14.Nxg1 Qh2+ 15.Kf3 e5 16.dxe5 Ndxe5+ 17.fxe5 Nxe5+ 18.Kf4 '\
'Ng6+ 19.Kf3 f4 20.exf4 Bg4+ 21.Kxg4 Ne5+ 22.fxe5 h5#'
    game_39 = '1.e4 {Notes by Frank Marshall} e5 2.Nf3 Nc6 3.Bc4 Bc5 4.b4 Bb6 '\
              '{Declining the gambit. This is supposed to be better for Black '\
              'than the acceptance. However, White gets dangerous attacks in '\
              'both branches} 5.c3 {To support d4, but too slow. 5 O-O is '\
              'stronger. Another good suggestion by Ulvestad is 5 a4 followed '\
              'by Ba3.} d6 6.O-O Bg4 7.d4 exd4 8.Bxf7+ {?} Kf8 {? Black '\
              'should have accepted. The sacrifice is unsound.} 9.Bd5 Nge7 '\
              '{9...Nf6 is better as after 10 h3 Black could play 10...Bxf3 '\
              'without the dangerous recapture by the White queen with a '\
              'check. The text move enables White to finish with a killing '\
              'attack.} 10.h3 Bh5 11.g4 Bg6 12.Ng5 {!} Qd7 13.Ne6+ Ke8 '\
              '14.Nxg7+ Kf8 15.Be6 Qd8 16.Bh6 Bxe4 17.Nh5+ Ke8 18.Nf6#'
game_40 = '1. e4 e5 2. f4 d5 3. exd5 Qxd5 4. Nc3 Qd8 5. fxe5 Bc5 6. Nf3 '\
'Bg4 7. Be2 Nc6 8. Ne4 Bb6 9. c3 Qd5 10. Qc2 Bf5 11. Nf6+ Nxf6 '\
'12. Qxf5 Ne7 13. Qg5 Ne4 14. Qxg7 Bf2+ 15. Kf1 Rg8 16. Qxh7 '\
'Bb6 17. d4 O-O-O 18. Qh6 Rh8 19. Qf4 Rdg8 20. Rg1 Ng6 21. Qe3 '\
'Re8 22. Qd3 Nh4 23. Nxh4 Rxh4 24. h3 c5 25. Bg4+ f5 26. exf6+ '\
'Rxg4 27. hxg4 Ng3+ 28. Qxg3 Qc4+ 29. Kf2 Qe2#'
game_41 = '1.d4 d5 2.c4 e6 3.Nc3 Nc6 4.Nf3 Nf6 5.Bf4 Bd6 6.Bg3 Ne4 7.e3 '\
'O-O 8.Bd3 f5 9.a3 b6 10.Rc1 Bb7 11.cxd5 exd5 12.Nxd5 Nxd4 '\
'13.Bc4 Nxf3+ 14.gxf3 Nxg3 15.Ne7+ Kh8 16.Ng6+ hxg6 17.hxg3+ '\
'Qh4 18.Rxh4#'
game_42 = '1. e4 e5 2. Nf3 Nc6 3. Bc4 Nf6 4. d4 exd4 5. O-O Nxe4 6. Re1 '\
'd5 7. Bxd5 Qxd5 8. Nc3 Qa5 9. Nxd4 Nxd4 10. Qxd4 f5 11. Bg5 '\
'Qc5 12. Qd8+ Kf7 13. Nxe4 fxe4 14. Rad1 Bd6 15. Qxh8 Qxg5 '\
'16. f4 Qh4 17. Rxe4 Bh3 18. Qxa8 Bc5+ 19. Kh1 Bxg2+ 20. Kxg2 '\
'Qg4+ 21. Kf1 Qf3+ 22. Ke1 Qf2#'
game_43 = '1. e4 c6 2. d4 d5 3. Nc3 dxe4 4. Nxe4 Nf6 5. Qd3 e5 6. dxe5 '\
'Qa5+ 7. Bd2 Qxe5 8. O-O-O Nxe4 9. Qd8+ Kxd8 10. Bg5+ Kc7 '\
'11. Bd8#'
game_44 = '1. f4 e5 2. fxe5 d6 3. exd6 Bxd6 4. Nf3 g5 5. h3 Bg3#'
game_45 = '1. f4 e5 2. fxe5 d6 3. exd6 Bxd6 4. Nf3 h5 5. g3 h4 6. Nxh4 '\
'Rxh4 7. gxh4 Qxh4#'
game_46 = '1.e4 e6 2.d4 d5 3.Nd2 h6 4.Bd3 c5 5.dxc5 Bxc5 6.Ngf3 Nc6 7.O-O '\
'Nge7 8.Qe2 O-O 9.Nb3 Bb6 10.c3 dxe4 11.Qxe4 Ng6 12.Bc4 Kh8 '\
'13.Qc2 Nce5 14.Nxe5 Nxe5 15.Be2 Qh4 16.g3 Qh3 17.Be3 Bxe3 '\
'18.fxe3 Ng4 19.Bxg4 Qxg4 20.Rad1 f6 21.Nd4 e5 22.Nf5 Be6 23.e4 '\
'Rfd8 24.Ne3 Qg6 25.Kg2 b5 26.b3 a5 27.c4 bxc4 28.bxc4 Qh5 '\
'29.h4 Bd7 30.Rf2 Bc6 31.Nd5 Rab8 32.Qe2 Qg6 33.Qf3 Rd7 34.Kh2 '\
'Rdb7 35.Rdd2 a4 36.Qe3 Bd7 37.Qf3 Bg4 38.Qe3 Be6 39.Qf3 Rb1 '\
'40.Ne3 Rc1 41.Rd6 Qf7 42.Rfd2 Rbb1 43.g4 Kh7 44.h5 Rc3 45.Kg2 '\
'Rxe3 46.Qxe3 Bxg4 47.Rb6 Ra1 48.Qc3 Re1 49.Rf2 Rxe4 50.c5 Bxh5 '\
'51.Rb4 Bg6 52.Kh2 Qe6 53.Rg2 Bf5 54.Rb7 Bg4 55.Rf2 f5 56.Rb4 '\
'Rxb4 57.Qxb4 e4 58.Qd4 e3 59.Rf1 Qxa2+ 60.Kg3 Qe2 61.Qf4 Qd2 '\
'62.Qe5 e2 63.Rg1 h5 64.c6 f4+ 65.Kh4 Qd8+ 66.Qg5 Qxg5+ 67.Kxg5 '\
'f3 68.c7 f2 69.Rxg4 f1Q 70.c8Q Qf6+ 71.Kxh5 Qh6#'
game_47 = '1.e4 e5 2.Nc3 Nf6 3.f4 d5 4.fxe5 Nxe4 5.Nf3 Bb4 6.Qe2 Bxc3 '\
'7.bxc3 Bg4 8.Qb5+ c6 9.Qxb7 Bxf3 10.Qxa8 Bxg2 11.Be2 Qh4+ '\
'12.Kd1 Nf2+ 13.Ke1 Nd3+ 14.Kd1 Qe1+ 15.Rxe1 Nf2#'
game_48 = '1. e4 e5 2. Nf3 Nc6 3. Bb5 Nf6 4. O-O Nxe4 5. Re1 Nd6 6. Nxe5 '\
'Be7 7. Bf1 O-O 8. d4 Nf5 9. c3 d5 10. Qd3 Re8 11. f4 Nd6 '\
'12. Re3 Na5 13. Nd2 Nf5 14. Rh3 Nh4 15. g4 Ng6 16. Rh5 Nc6 '\
'17. Ndc4 dxc4 18. Qxg6 hxg6 19. Nxg6 fxg6 20. Bxc4+ Kf8 '\
'21. Rh8#'
game_49 = '1. Nf3 Nc6 2. e4 e5 3. d4 exd4 4. c3 dxc3 5. Bc4 cxb2 6. Bxb2 '\
'Bb4+ 7. Nc3 Nf6 8. Qc2 O-O 9. O-O-O Re8 10. e5 Ng4 11. Nd5 a5 '\
'12. Nf6+ gxf6 13. Bxf7+ Kf8 14. Qxh7 Ngxe5 15. Nh4 Ne7 '\
'16. Bxe5 fxe5 17. Rd3 Ra6 18. Rg3 Ba3+ 19. Kd1 Ng6 20. Qg8+ '\
'Ke7 21. Nf5+ Kf6 22. Qxg6#'
legal_checkmates = [game_1, game_2, game_3, game_4, game_5, game_6, game_7, game_8, game_9,
game_10, game_11, game_12, game_13, game_14, game_15, game_16, game_17,
game_18, game_19, game_20, game_21, game_22, game_23, game_24, game_25,
game_26, game_27, game_28, game_29, game_30, game_31, game_32, game_33,
game_34, game_35, game_36, game_37, game_38, game_39, game_40, game_41,
game_42, game_43, game_44, game_45, game_46, game_47, game_48, game_49]
import re  # needed for the PGN-stripping regexes below


def strip_to_scripts(text):
    """ Strips a PGN-style game string into separate White and Black move
    lists ("scripts"): brace-delimited comments are removed first, then
    move numbers, and the remaining tokens are dealt out alternately. """
    new_text = text
    new_text = re.sub(r'\{.*?\}', '', new_text)          # drop {comments}
    new_text = re.sub(r'\d?\d\.\.?\.?', ' ', new_text)   # drop move numbers like '12.' or '12...'
    moves = new_text.split()
    white_script, black_script = [], []
    while moves:
        white_script.append(moves.pop(0))
        if moves:
            black_script.append(moves.pop(0))
    return white_script, black_script
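# Illustrative sketch of what strip_to_scripts produces:
# strip_to_scripts('1. e4 e5 2. Nf3 Nc6') would return
# (['e4', 'Nf3'], ['e5', 'Nc6']).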
def test():
""" Misc test function - contains arbitrary code for testing """
board = Board()
# print(board)
player1, player2 = Random('Player1', 'White'), Random('Player2', 'Black')
play_a_game(board, player1, player2)
def test_checkmates(legal_checkmates):
""" Loads 49 games ending in checkmate into the play_a_game function """
for index, game in enumerate(legal_checkmates):
board = Board()
white_script, black_script = strip_to_scripts(game)
player1, player2 = Scripted('Player1', 'White', white_script), Scripted('Player2', 'Black', black_script)
board, player_of_last_move = play_a_game(board, player1, player2)
if board.checkmate: # game ended in checkmate
print(f'game {index + 1} PASSED!')
else: # game did not end in checkmate
print(f'game {index + 1} FAILED!')
""" TEST FUNCTIONS -------------------------------------------------------------------------------------- TEST FUNCTIONS
"""
def board_initialisations():
print(Board(STANDARD_GAME))
print(Board(KING_AND_PAWN_GAME))
print(Board(MINOR_GAME))
print(Board(MAJOR_GAME))
def san_validation_decomposition(san_test_strings, string_decompositions):
for i, elem in enumerate(san_test_strings):
print('\n' + elem + ' :')
try:
assert validate_san(elem)
except AssertionError:
print('san FAILED to validate')
else:
print('validation: PASSED')
decomposition = decompose_san(elem)
print(decomposition)
try:
assert decomposition == string_decompositions[i]
except AssertionError:
print('san FAILED to decompose')
print('got instead: ' + str(decomposition))
else:
print('decomposition: PASSED >>')
print(decomposition)
def coord_conversions(coords):
for elem in coords:
print('\n' + elem[0] + ' :')
conversion = convert_san2board(elem[0])
try:
assert conversion == elem[1]
except AssertionError:
print('san FAILED to convert')
print('got instead: ' + str(conversion))
else:
print('conversion: PASSED >>')
print(conversion)
for elem in coords:
print('\n' + str(elem[1]) + ' :')
conversion = convert_board2san(elem[1])
try:
assert conversion == elem[0]
except AssertionError:
print('san FAILED to convert')
print('got instead: ' + str(conversion))
else:
print('conversion: PASSED >>')
print(conversion)
def test_legal(coords):
for coord in coords:
print(coord)
a, b = coord
if legal(a, b):
print('LEGAL')
else:
print('NOT LEGAL!')
def repr_pieces(board):
for rank in board.squares:
for piece in rank:
if isinstance(piece, Piece):
print(repr(piece))
def what_checks(board, pieces):
positions = (0, 3, 4, 7)
for index, piece in enumerate(pieces):
type_ = piece[1]
board.squares[0][positions[index]] = eval(type_)(piece[0], board, (0, positions[index]))
print(board)
board.update_pieces('White')
return board.in_check()
def test_all_checks(board):
black_in_check = (('Black', 'King'), ('White', 'Queen'), ('White', 'King'))
white_in_check = (('White', 'King'), ('Black', 'Queen'), ('Black', 'King'))
both_in_check = (('White', 'King'), ('Black', 'Queen'), ('White', 'Queen'), ('Black', 'King'))
no_checks = (('White', 'King'), ('Black', 'King'))
print('BLACK_IN_CHECK has checks: ' + str(what_checks(board, black_in_check)))
board.clear()
print('WHITE_IN_CHECK has checks: ' + str(what_checks(board, white_in_check)))
board.clear()
print('BOTH_IN_CHECK has checks: ' + str(what_checks(board, both_in_check)))
board.clear()
print('NO_CHECKS has checks: ' + str(what_checks(board, no_checks)))
def run_through_full_game(series_of_legal_moves):
board = Board()
print(board)
print(series_of_legal_moves)
count = 2
for candidate in series_of_legal_moves:
print(f"{board.player_turn}'s turn!")
print(board)
board.update_pieces(board.player_turn)
if board.is_checkmate(board.player_turn):
winner, = set(COLOURS) - {board.player_turn}
print(f' CHECKMATE! {winner} wins!')
print('\n GAME OVER!\n')
else:
print('\n')
print(f'move : {candidate}')
active_piece, active_piece_type, target_location, is_capture, promotion_type,\
castle_direction = decompose_and_assess(board, candidate)
board.move(active_piece, active_piece_type, target_location, is_capture, promotion_type, castle_direction)
active_piece.has_moved = True
board.player_turn, = set(COLOURS) - {board.player_turn}
board.turn_num += 0.5
print(f'Turn count: {board.turn_num}')
print('\n' * 2)
def run_through_all_games():
_zip = zip(HENCH_LIST_OF_GAMES, GAME_LENGTHS)
for game, length in _zip:
series_of_legal_moves = game.split()
for i in range(1, length + 1):
series_of_legal_moves.remove(str(i) + '.')
run_through_full_game(series_of_legal_moves)
""" MAIN ---------------------------------------------------------------------------------------------------------- MAIN
"""
if __name__ == '__main__':
""" Primary imports and initialisation -----------------------------------------------------------------------------
"""
from Exceptions import *
from main import *
import copy
MAIN_VARIABLES()
COLOURS = ('White', 'Black')
""" Local imports and initialisation -----------------------------------------------------------------------------
"""
SAN_TEST_STRINGS = (
'e4', 'Nxf5', 'exd4', 'Rdf8', 'R1a3', 'Qh4e1',
'Qh4xe1+', 'e8Q', 'fxe8Q#', 'f8N', '0-0', '0-0-0',
)
STRING_DECOMPOSITIONS = (
('Pawn', '', False, 'e4', '', '', ''),
('Knight', '', True, 'f5', '', '', ''),
('Pawn', 'e', True, 'd4', '', '', ''),
('Rook', 'd', False, 'f8', '', '', ''),
('Rook', '1', False, 'a3', '', '', ''),
('Queen', 'h4', False, 'e1', '', '', ''),
('Queen', 'h4', True, 'e1', '', '+', ''),
('Pawn', '', False, 'e8', 'Queen', '', ''),
('Pawn', 'f', True, 'e8', 'Queen', '#', ''),
('Pawn', '', False, 'f8', 'Knight', '', ''),
('King', '', False, '', '', '', 'King'),
('King', '', False, '', '', '', 'Queen'),
)
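    # Each decomposition tuple above reads as: (piece type, disambiguation,
    # is_capture, target square, promotion piece, check/mate suffix, castling side).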
COORDS = (
('e4', (4, 4)),
('a1', (7, 0)),
('a8', (0, 0)),
('h1', (7, 7)),
('h8', (0, 7)),
('d3', (5, 3))
)
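    # Convention implied by these pairs: board coordinates are (row, col) with
    # row = 8 - rank (so rank 8 is row 0) and col = file index a..h -> 0..7.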
LEGAL_TEST = [(x, 4) for x in range(-2, 10)] + [(4, x) for x in range(-2, 10)]
KASPAROV_TOPALOV_WIJKAANZEE_1999 = \
'1. e4 d6 2. d4 Nf6 3. Nc3 g6 4. Be3 Bg7 5. Qd2 c6 6. f3 b5 7. Nge2 Nbd7 ' \
'8. Bh6 Bxh6 9. Qxh6 Bb7 10. a3 e5 11. 0-0-0 Qe7 12. Kb1 a6 13. Nc1 0-0-0 14. Nb3 exd4 15. Rxd4 c5 ' \
'16. Rd1 Nb6 17. g3 Kb8 18. Na5 Ba8 19. Bh3 d5 20. Qf4+ Ka7 21. Rhe1 d4 22. Nd5 Nbxd5 23. exd5 Qd6 ' \
'24. Rxd4 cxd4 25. Re7+ Kb6 26. Qxd4+ Kxa5 27. b4+ Ka4 28. Qc3 Qxd5 29. Ra7 Bb7 30. Rxb7 Qc4 31. Qxf6 Kxa3 ' \
'32. Qxa6+ Kxb4 33. c3+ Kxc3 34. Qa1+ Kd2 35. Qb2+ Kd1 36. Bf1 Rd2 37. Rd7 Rxd7 38. Bxc4 bxc4 39. Qxh8 Rd3 ' \
'40. Qa8 c3 41. Qa4+ Ke1 42. f4 f5 43. Kc1 Rd2 44. Qa7'
MORPHY_DUKE_1858 = \
'1. e4 e5 2. Nf3 d6 3. d4 Bg4 4. dxe5 Bxf3 5. Qxf3 dxe5 6. Bc4 Nf6 7. Qb3 Qe7 8. ' \
'Nc3 c6 9. Bg5 b5 10. Nxb5 cxb5 11. Bxb5+ Nbd7 12. O-O-O Rd8 13. Rxd7 Rxd7 14. ' \
'Rd1 Qe6 15. Bxd7+ Nxd7 16. Qb8+ Nxb8 17. Rd8#'
ARONIAN_ANAND_WIJKAANZEE_2013 = \
'1. d4 d5 2. c4 c6 3. Nf3 Nf6 4. Nc3 e6 5. e3 Nbd7 6. Bd3 dxc4 7. Bxc4 b5 8. Bd3 ' \
'Bd6 9. O-O O-O 10. Qc2 Bb7 11. a3 Rc8 12. Ng5 c5 13. Nxh7 Ng4 14. f4 cxd4 15. ' \
'exd4 Bc5 16. Be2 Nde5 17. Bxg4 Bxd4+ 18. Kh1 Nxg4 19. Nxf8 f5 20. Ng6 Qf6 21. h3 ' \
'Qxg6 22. Qe2 Qh5 23. Qd3 Be3'
KARPOV_KASPAROV_WORLD_CH_1985 = \
'1. e4 c5 2. Nf3 e6 3. d4 cxd4 4. Nxd4 Nc6 5. Nb5 d6 6. c4 Nf6 7. N1c3 a6 8. Na3 ' \
'd5 9. cxd5 exd5 10. exd5 Nb4 11. Be2 Bc5 12. O-O O-O 13. Bf3 Bf5 14. Bg5 Re8 15. ' \
'Qd2 b5 16. Rad1 Nd3 17. Nab1 h6 18. Bh4 b4 19. Na4 Bd6 20. Bg3 Rc8 21. b3 g5 22. ' \
'Bxd6 Qxd6 23. g3 Nd7 24. Bg2 Qf6 25. a3 a5 26. axb4 axb4 27. Qa2 Bg6 28. d6 g4 ' \
'29. Qd2 Kg7 30. f3 Qxd6 31. fxg4 Qd4+ 32. Kh1 Nf6 33. Rf4 Ne4 34. Qxd3 Nf2+ 35. ' \
'Rxf2 Bxd3 36. Rfd2 Qe3 37. Rxd3 Rc1 38. Nb2 Qf2 39. Nd2 Rxd1+ 40. Nxd1 Re1+'
BYRNE_FISCHER_MEMORIAL_ROSENWALD_1956 = \
'1. Nf3 Nf6 2. c4 g6 3. Nc3 Bg7 4. d4 O-O 5. Bf4 d5 6. Qb3 dxc4 7. Qxc4 c6 8. e4 ' \
'Nbd7 9. Rd1 Nb6 10. Qc5 Bg4 11. Bg5 Na4 12. Qa3 Nxc3 13. bxc3 Nxe4 14. Bxe7 Qb6 ' \
'15. Bc4 Nxc3 16. Bc5 Rfe8+ 17. Kf1 Be6 18. Bxb6 Bxc4+ 19. Kg1 Ne2+ 20. Kf1 Nxd4+ ' \
'21. Kg1 Ne2+ 22. Kf1 Nc3+ 23. Kg1 axb6 24. Qb4 Ra4 25. Qxb6 Nxd1 26. h3 Rxa2 27. ' \
'Kh2 Nxf2 28. Re1 Rxe1 29. Qd8+ Bf8 30. Nxe1 Bd5 31. Nf3 Ne4 32. Qb8 b5 33. h4 h5 ' \
'34. Ne5 Kg7 35. Kg1 Bc5+ 36. Kf1 Ng3+ 37. Ke1 Bb4+ 38. Kd1 Bb3+ 39. Kc1 Ne2+ 40. ' \
'Kb1 Nc3+ 41. Kc1 Rc2#'
IVANCHUK_YUSUPOV_WORLD_CH_1991 = \
'1. c4 e5 2. g3 d6 3. Bg2 g6 4. d4 Nd7 5. Nc3 Bg7 6. Nf3 Ngf6 7. O-O O-O 8. Qc2 ' \
'Re8 9. Rd1 c6 10. b3 Qe7 11. Ba3 e4 12. Ng5 e3 13. f4 Nf8 14. b4 Bf5 15. Qb3 h6 ' \
'16. Nf3 Ng4 17. b5 g5 18. bxc6 bxc6 19. Ne5 gxf4 20. Nxc6 Qg5 21. Bxd6 Ng6 22. ' \
'Nd5 Qh5 23. h4 Nxh4 24. gxh4 Qxh4 25. Nde7+ Kh8 26. Nxf5 Qh2+ 27. Kf1 Re6 28. ' \
'Qb7 Rg6 29. Qxa8+ Kh7 30. Qg8+ Kxg8 31. Nce7+ Kh7 32. Nxg6 fxg6 33. Nxg7 Nf2 34. ' \
'Bxf4 Qxf4 35. Ne6 Qh2 36. Rdb1 Nh3 37. Rb7+ Kh8 38. Rb8+ Qxb8 39. Bxh3 Qg3'
SHORT_TIMMAN_TILBURG_1991 = \
'1. e4 Nf6 2. e5 Nd5 3. d4 d6 4. Nf3 g6 5. Bc4 Nb6 6. Bb3 Bg7 7. Qe2 Nc6 8. O-O ' \
'O-O 9. h3 a5 10. a4 dxe5 11. dxe5 Nd4 12. Nxd4 Qxd4 13. Re1 e6 14. Nd2 Nd5 15. ' \
'Nf3 Qc5 16. Qe4 Qb4 17. Bc4 Nb6 18. b3 Nxc4 19. bxc4 Re8 20. Rd1 Qc5 21. Qh4 b6 ' \
'22. Be3 Qc6 23. Bh6 Bh8 24. Rd8 Bb7 25. Rad1 Bg7 26. R8d7 Rf8 27. Bxg7 Kxg7 28. ' \
'R1d4 Rae8 29. Qf6+ Kg8 30. h4 h5 31. Kh2 Rc8 32. Kg3 Rce8 33. Kf4 Bc8 34. Kg5'
BAI_LIREN_CHINESE_CH_LEAGUE_2017 = \
'1. d4 Nf6 2. c4 e6 3. Nc3 Bb4 4. Nf3 O-O 5. Bg5 c5 6. e3 cxd4 7. Qxd4 Nc6 8. Qd3 ' \
'h6 9. Bh4 d5 10. Rd1 g5 11. Bg3 Ne4 12. Nd2 Nc5 13. Qc2 d4 14. Nf3 e5 15. Nxe5 ' \
'dxc3 16. Rxd8 cxb2+ 17. Ke2 Rxd8 18. Qxb2 Na4 19. Qc2 Nc3+ 20. Kf3 Rd4 21. h3 h5 ' \
'22. Bh2 g4+ 23. Kg3 Rd2 24. Qb3 Ne4+ 25. Kh4 Be7+ 26. Kxh5 Kg7 27. Bf4 Bf5 28. ' \
'Bh6+ Kh7 29. Qxb7 Rxf2 30. Bg5 Rh8 31. Nxf7 Bg6+ 32. Kxg4 Ne5+'
ROTLEVI_RUBINSTEIN_RUSSIAN_CH_1907 = \
'1. d4 d5 2. Nf3 e6 3. e3 c5 4. c4 Nc6 5. Nc3 Nf6 6. dxc5 Bxc5 7. a3 a6 8. b4 Bd6 ' \
'9. Bb2 O-O 10. Qd2 Qe7 11. Bd3 dxc4 12. Bxc4 b5 13. Bd3 Rd8 14. Qe2 Bb7 15. O-O ' \
'Ne5 16. Nxe5 Bxe5 17. f4 Bc7 18. e4 Rac8 19. e5 Bb6+ 20. Kh1 Ng4 21. Be4 Qh4 22. ' \
'g3 Rxc3 23. gxh4 Rd2 24. Qxd2 Bxe4+ 25. Qg2 Rh3'
GELLER_EUWE_CANDIDATES_ZURICH_1953 = \
'1. d4 Nf6 2. c4 e6 3. Nc3 Bb4 4. e3 c5 5. a3 Bxc3+ 6. bxc3 b6 7. Bd3 Bb7 8. f3 ' \
'Nc6 9. Ne2 O-O 10. O-O Na5 11. e4 Ne8 12. Ng3 cxd4 13. cxd4 Rc8 14. f4 Nxc4 15. ' \
'f5 f6 16. Rf4 b5 17. Rh4 Qb6 18. e5 Nxe5 19. fxe6 Nxd3 20. Qxd3 Qxe6 21. Qxh7+ ' \
'Kf7 22. Bh6 Rh8 23. Qxh8 Rc2 24. Rc1 Rxg2+ 25. Kf1 Qb3 26. Ke1 Qf3'
HENCH_LIST_OF_GAMES = [
KASPAROV_TOPALOV_WIJKAANZEE_1999,
MORPHY_DUKE_1858,
ARONIAN_ANAND_WIJKAANZEE_2013,
KARPOV_KASPAROV_WORLD_CH_1985,
BYRNE_FISCHER_MEMORIAL_ROSENWALD_1956,
IVANCHUK_YUSUPOV_WORLD_CH_1991,
SHORT_TIMMAN_TILBURG_1991,
BAI_LIREN_CHINESE_CH_LEAGUE_2017,
ROTLEVI_RUBINSTEIN_RUSSIAN_CH_1907,
GELLER_EUWE_CANDIDATES_ZURICH_1953,
]
GAME_LENGTHS = [44, 17, 23, 40, 41, 39, 34, 32, 25, 26]
""" Main -----------------------------------------------------------------------------------------------------------
"""
board = Board()
print(board)
# print(board.get_pieces('White', 'Pawn'))
# board_initialisations()
# san_validation_decomposition(SAN_TEST_STRINGS, STRING_DECOMPOSITIONS)
# coord_conversions(COORDS)
# repr_pieces(board)
# test_legal(LEGAL_TEST)
# print(board.get_pieces('White', 'King'))
# board.clear()
# test_all_checks(board)
run_through_all_games()
|
python
|
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
import hashlib
import base64
from functools import lru_cache
from sawtooth_sdk.processor.context import StateEntry
from sawtooth_sdk.messaging.future import FutureTimeoutError
from sawtooth_sdk.processor.exceptions import InvalidTransaction
from sawtooth_sdk.processor.exceptions import InternalError
from sawtooth_sdk.protobuf.transaction_pb2 import TransactionHeader
from sawtooth_settings.protobuf.settings_pb2 import SettingsPayload
from sawtooth_settings.protobuf.settings_pb2 import SettingProposal
from sawtooth_settings.protobuf.settings_pb2 import SettingVote
from sawtooth_settings.protobuf.settings_pb2 import SettingCandidate
from sawtooth_settings.protobuf.settings_pb2 import SettingCandidates
from sawtooth_settings.protobuf.setting_pb2 import Setting
LOGGER = logging.getLogger(__name__)
# The config namespace is special: it is not derived from a hash.
SETTINGS_NAMESPACE = '000000'
# Number of seconds to wait for state operations to succeed
STATE_TIMEOUT_SEC = 10
class SettingsTransactionHandler(object):
@property
def family_name(self):
return 'sawtooth_settings'
@property
def family_versions(self):
return ['1.0']
@property
def encodings(self):
return ['application/protobuf']
@property
def namespaces(self):
return [SETTINGS_NAMESPACE]
def apply(self, transaction, state):
txn_header = TransactionHeader()
txn_header.ParseFromString(transaction.header)
pubkey = txn_header.signer_pubkey
auth_keys = _get_auth_keys(state)
if auth_keys and pubkey not in auth_keys:
raise InvalidTransaction(
'{} is not authorized to change settings'.format(pubkey))
settings_payload = SettingsPayload()
settings_payload.ParseFromString(transaction.payload)
if settings_payload.action == SettingsPayload.PROPOSE:
return self._apply_proposal(
auth_keys, pubkey, settings_payload.data, state)
elif settings_payload.action == SettingsPayload.VOTE:
return self._apply_vote(pubkey, settings_payload.data,
auth_keys, state)
else:
raise InvalidTransaction(
"'action' must be one of {PROPOSE, VOTE} in 'Ballot' mode")
def _apply_proposal(self, auth_keys, pubkey, setting_proposal_data, state):
setting_proposal = SettingProposal()
setting_proposal.ParseFromString(setting_proposal_data)
proposal_id = hashlib.sha256(setting_proposal_data).hexdigest()
approval_threshold = _get_approval_threshold(state)
_validate_setting(auth_keys,
setting_proposal.setting,
setting_proposal.value)
if approval_threshold > 1:
setting_candidates = _get_setting_candidates(state)
existing_candidate = _first(
setting_candidates.candidates,
lambda candidate: candidate.proposal_id == proposal_id)
if existing_candidate is not None:
raise InvalidTransaction(
'Duplicate proposal for {}'.format(
setting_proposal.setting))
record = SettingCandidate.VoteRecord(
public_key=pubkey,
vote=SettingVote.ACCEPT)
setting_candidates.candidates.add(
proposal_id=proposal_id,
proposal=setting_proposal,
votes=[record]
)
LOGGER.debug('Proposal made to set %s to %s',
setting_proposal.setting,
setting_proposal.value)
_save_setting_candidates(state, setting_candidates)
else:
_set_setting_value(state,
setting_proposal.setting,
setting_proposal.value)
def _apply_vote(self, pubkey, settings_vote_data, authorized_keys, state):
settings_vote = SettingVote()
settings_vote.ParseFromString(settings_vote_data)
proposal_id = settings_vote.proposal_id
setting_candidates = _get_setting_candidates(state)
candidate = _first(
setting_candidates.candidates,
lambda candidate: candidate.proposal_id == proposal_id)
if candidate is None:
raise InvalidTransaction(
"Proposal {} does not exist.".format(proposal_id))
candidate_index = _index_of(setting_candidates.candidates, candidate)
approval_threshold = _get_approval_threshold(state)
vote_record = _first(candidate.votes,
lambda record: record.public_key == pubkey)
if vote_record is not None:
raise InvalidTransaction(
'{} has already voted'.format(pubkey))
candidate.votes.add(
public_key=pubkey,
vote=settings_vote.vote)
accepted_count = 0
rejected_count = 0
for vote_record in candidate.votes:
if vote_record.vote == SettingVote.ACCEPT:
accepted_count += 1
elif vote_record.vote == SettingVote.REJECT:
rejected_count += 1
if accepted_count >= approval_threshold:
_set_setting_value(state,
candidate.proposal.setting,
candidate.proposal.value)
del setting_candidates.candidates[candidate_index]
elif rejected_count >= approval_threshold or \
(rejected_count + accepted_count) == len(authorized_keys):
LOGGER.debug('Proposal for %s was rejected',
candidate.proposal.setting)
del setting_candidates.candidates[candidate_index]
else:
LOGGER.debug('Vote recorded for %s',
candidate.proposal.setting)
_save_setting_candidates(state, setting_candidates)
def _get_setting_candidates(state):
value = _get_setting_value(state, 'sawtooth.settings.vote.proposals')
if not value:
return SettingCandidates(candidates={})
setting_candidates = SettingCandidates()
setting_candidates.ParseFromString(base64.b64decode(value))
return setting_candidates
def _save_setting_candidates(state, setting_candidates):
_set_setting_value(state,
'sawtooth.settings.vote.proposals',
base64.b64encode(
setting_candidates.SerializeToString()))
def _get_approval_threshold(state):
return int(_get_setting_value(
state, 'sawtooth.settings.vote.approval_threshold', 1))
def _get_auth_keys(state):
value = _get_setting_value(
state, 'sawtooth.settings.vote.authorized_keys', '')
return _split_ignore_empties(value)
def _split_ignore_empties(value):
return [v.strip() for v in value.split(',') if v]
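# For example, _split_ignore_empties('key_a, key_b,') returns ['key_a', 'key_b'];
# the trailing empty piece is dropped.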
def _validate_setting(auth_keys, setting, value):
if not auth_keys and \
setting != 'sawtooth.settings.vote.authorized_keys':
raise InvalidTransaction(
'Cannot set {} until authorized_keys is set.'.format(setting))
if setting == 'sawtooth.settings.vote.authorized_keys':
if not _split_ignore_empties(value):
raise InvalidTransaction('authorized_keys must not be empty.')
if setting == 'sawtooth.settings.vote.approval_threshold':
threshold = None
try:
threshold = int(value)
except ValueError:
raise InvalidTransaction('approval_threshold must be an integer')
if threshold > len(auth_keys):
raise InvalidTransaction(
'approval_threshold must be less than or equal to number of '
'authorized_keys')
if setting == 'sawtooth.settings.vote.proposals':
raise InvalidTransaction(
'Setting sawtooth.settings.vote.proposals is read-only')
def _get_setting_value(state, key, default_value=None):
address = _make_settings_key(key)
setting = _get_setting_entry(state, address)
for entry in setting.entries:
if key == entry.key:
return entry.value
return default_value
def _set_setting_value(state, key, value):
address = _make_settings_key(key)
setting = _get_setting_entry(state, address)
old_value = None
old_entry_index = None
for i, entry in enumerate(setting.entries):
if key == entry.key:
old_value = entry.value
old_entry_index = i
if old_entry_index is not None:
setting.entries[old_entry_index].value = value
else:
setting.entries.add(key=key, value=value)
try:
addresses = list(state.set(
[StateEntry(address=address,
data=setting.SerializeToString())],
timeout=STATE_TIMEOUT_SEC))
except FutureTimeoutError:
        LOGGER.warning(
            'Timeout occurred on state.set([%s, <value>])', address)
raise InternalError('Unable to set {}'.format(key))
if len(addresses) != 1:
LOGGER.warning(
'Failed to save value on address %s', address)
raise InternalError(
'Unable to save config value {}'.format(key))
    if key != 'sawtooth.settings.vote.proposals':
        LOGGER.info('Setting setting %s changed from %s to %s',
                    key, old_value, value)
def _get_setting_entry(state, address):
setting = Setting()
try:
entries_list = state.get([address], timeout=STATE_TIMEOUT_SEC)
except FutureTimeoutError:
        LOGGER.warning('Timeout occurred on state.get([%s])', address)
raise InternalError('Unable to get {}'.format(address))
if entries_list:
setting.ParseFromString(entries_list[0].data)
return setting
def _to_hash(value):
return hashlib.sha256(value.encode()).hexdigest()
def _first(a_list, pred):
return next((x for x in a_list if pred(x)), None)
def _index_of(iterable, obj):
return next((i for i, x in enumerate(iterable) if x == obj), -1)
_MAX_KEY_PARTS = 4
_ADDRESS_PART_SIZE = 16
_EMPTY_PART = _to_hash('')[:_ADDRESS_PART_SIZE]
@lru_cache(maxsize=128)
def _make_settings_key(key):
# split the key into 4 parts, maximum
key_parts = key.split('.', maxsplit=_MAX_KEY_PARTS - 1)
# compute the short hash of each part
addr_parts = [_to_hash(x)[:_ADDRESS_PART_SIZE] for x in key_parts]
# pad the parts with the empty hash, if needed
addr_parts.extend([_EMPTY_PART] * (_MAX_KEY_PARTS - len(addr_parts)))
return SETTINGS_NAMESPACE + ''.join(addr_parts)
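# Illustrative sketch of the resulting address layout (hypothetical key):
# 'a.b.c.d' splits into ['a', 'b', 'c', 'd'], each part contributes the first
# 16 hex characters of its SHA-256 hash, and the address is the 6-character
# namespace '000000' followed by 4 * 16 = 64 hex characters (70 characters in
# total). Keys with fewer than four parts are padded with _EMPTY_PART.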
|
python
|
import os
from leapp.models import FirewalldGlobalConfig
def read_config():
default_conf = FirewalldGlobalConfig()
path = '/etc/firewalld/firewalld.conf'
if not os.path.exists(path):
return default_conf
conf_dict = {}
with open(path) as conf_file:
for line in conf_file:
(key, _unused, value) = line.partition('=')
if not value:
continue
value = value.lower().strip()
if value in ['yes', 'true']:
value = True
if value in ['no', 'false']:
value = False
# Only worry about config used by our Model
key = key.lower().strip()
if hasattr(default_conf, key):
conf_dict[key] = value
return FirewalldGlobalConfig(**conf_dict)
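# Illustrative sketch of the parsing above (hypothetical file contents): a line
# such as 'CleanupOnExit=yes' would yield conf_dict['cleanuponexit'] = True,
# assuming FirewalldGlobalConfig defines a 'cleanuponexit' field; keys the
# model does not define are silently dropped.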
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Ensure system commands and responses execute successfully."""
import pytest
from stepseries import commands, responses, step400
from tests.conftest import HardwareIncremental
@pytest.mark.skip_disconnected
class TestSpeedProfileSettings(HardwareIncremental):
def test_speed_profile(self, device: step400.STEP400) -> None:
device.set(commands.SetSpeedProfile(4, 5555, 4444, 3333))
resp = device.get(commands.GetSpeedProfile(4))
assert isinstance(resp, responses.SpeedProfile)
# TODO: Debug why the device returns values less than set here
# assert resp.acc == 5555
# assert resp.dec == 4444
# assert resp.maxSpeed == 3333
device.set(commands.SetSpeedProfile(4, 2000, 2000, 620))
def test_fullstep_speed(self, device: step400.STEP400) -> None:
device.set(commands.SetFullstepSpeed(3, 9206.46))
resp = device.get(commands.GetFullstepSpeed(3))
assert isinstance(resp, responses.FullstepSpeed)
# TODO: Debug why the device returns values less than set here
# assert resp.fullstepSpeed == 9206.46
device.set(commands.SetFullstepSpeed(3, 15625))
def test_set_max_speed(self, device: step400.STEP400) -> None:
device.set(commands.SetMaxSpeed(2, 1240))
def test_set_acc(self, device: step400.STEP400) -> None:
device.set(commands.SetAcc(1, 6002))
def test_set_dec(self, device: step400.STEP400) -> None:
device.set(commands.SetDec(4, 155))
def test_min_speed(self, device: step400.STEP400) -> None:
device.set(commands.SetMinSpeed(3, 9206.46))
resp = device.get(commands.GetMinSpeed(3))
assert isinstance(resp, responses.MinSpeed)
# TODO: Debug why the device returns values less than set here
# assert resp.minSpeed == 9206.46
device.set(commands.SetMinSpeed(3, 0))
def test_get_speed(self, device: step400.STEP400) -> None:
resp = device.get(commands.GetSpeed(2))
assert isinstance(resp, responses.Speed)
|
python
|
# -*- test-case-name: twisted.lore.test.test_slides -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Rudimentary slide support for Lore.
TODO:
- Complete mgp output target
- syntax highlighting
- saner font handling
- probably lots more
- Add HTML output targets
  - one slide per page (with navigation links)
- all in one page
Example input file::
<html>
<head><title>Title of talk</title></head>
<body>
<h1>Title of talk</h1>
<h2>First Slide</h2>
<ul>
<li>Bullet point</li>
<li>Look ma, I'm <strong>bold</strong>!</li>
<li>... etc ...</li>
</ul>
<h2>Second Slide</h2>
<pre class="python">
# Sample code sample.
print "Hello, World!"
</pre>
</body>
</html>
"""
from xml.dom import minidom as dom
import os.path, re
from cStringIO import StringIO
from twisted.lore import default
from twisted.web import domhelpers
# These should be factored out
from twisted.lore.latex import BaseLatexSpitter, LatexSpitter, processFile
from twisted.lore.latex import getLatexText, HeadingLatexSpitter
from twisted.lore.tree import getHeaders, _removeLeadingTrailingBlankLines
from twisted.lore.tree import removeH1, fixAPI, fontifyPython
from twisted.lore.tree import addPyListings, addHTMLListings, setTitle
hacked_entities = { 'amp': ' &', 'gt': ' >', 'lt': ' <', 'quot': ' "',
'copy': ' (c)'}
entities = { 'amp': '&', 'gt': '>', 'lt': '<', 'quot': '"',
'copy': '(c)'}
class MagicpointOutput(BaseLatexSpitter):
bulletDepth = 0
def writeNodeData(self, node):
buf = StringIO()
getLatexText(node, buf.write, entities=hacked_entities)
data = buf.getvalue().rstrip().replace('\n', ' ')
self.writer(re.sub(' +', ' ', data))
def visitNode_title(self, node):
self.title = domhelpers.getNodeText(node)
def visitNode_body(self, node):
# Adapted from tree.generateToC
self.fontStack = [('standard', None)]
# Title slide
self.writer(self.start_h2)
self.writer(self.title)
self.writer(self.end_h2)
self.writer('%center\n\n\n\n\n')
for authorNode in domhelpers.findElementsWithAttribute(node, 'class', 'author'):
getLatexText(authorNode, self.writer, entities=entities)
self.writer('\n')
# Table of contents
self.writer(self.start_h2)
self.writer(self.title)
self.writer(self.end_h2)
for element in getHeaders(node):
level = int(element.tagName[1])-1
self.writer(level * '\t')
self.writer(domhelpers.getNodeText(element))
self.writer('\n')
self.visitNodeDefault(node)
def visitNode_div_author(self, node):
# Skip this node; it's already been used by visitNode_body
pass
def visitNode_div_pause(self, node):
self.writer('%pause\n')
def visitNode_pre(self, node):
"""
Writes Latex block using the 'typewriter' font when it encounters a
I{pre} element.
@param node: The element to process.
@type node: L{xml.dom.minidom.Element}
"""
# TODO: Syntax highlighting
buf = StringIO()
getLatexText(node, buf.write, entities=entities)
data = buf.getvalue()
data = _removeLeadingTrailingBlankLines(data)
lines = data.split('\n')
self.fontStack.append(('typewriter', 4))
self.writer('%' + self.fontName() + '\n')
for line in lines:
self.writer(' ' + line + '\n')
del self.fontStack[-1]
self.writer('%' + self.fontName() + '\n')
def visitNode_ul(self, node):
if self.bulletDepth > 0:
self.writer(self._start_ul)
self.bulletDepth += 1
self.start_li = self._start_li * self.bulletDepth
self.visitNodeDefault(node)
self.bulletDepth -= 1
self.start_li = self._start_li * self.bulletDepth
def visitNode_strong(self, node):
self.doFont(node, 'bold')
def visitNode_em(self, node):
self.doFont(node, 'italic')
def visitNode_code(self, node):
self.doFont(node, 'typewriter')
def doFont(self, node, style):
self.fontStack.append((style, None))
self.writer(' \n%cont, ' + self.fontName() + '\n')
self.visitNodeDefault(node)
del self.fontStack[-1]
self.writer('\n%cont, ' + self.fontName() + '\n')
def fontName(self):
names = [x[0] for x in self.fontStack]
if 'typewriter' in names:
name = 'typewriter'
else:
name = ''
if 'bold' in names:
name += 'bold'
if 'italic' in names:
name += 'italic'
if name == '':
name = 'standard'
sizes = [x[1] for x in self.fontStack]
sizes.reverse()
for size in sizes:
if size:
return 'font "%s", size %d' % (name, size)
return 'font "%s"' % name
start_h2 = "%page\n\n"
end_h2 = '\n\n\n'
_start_ul = '\n'
_start_li = "\t"
end_li = "\n"
def convertFile(filename, outputter, template, ext=".mgp"):
fout = open(os.path.splitext(filename)[0]+ext, 'w')
fout.write(open(template).read())
spitter = outputter(fout.write, os.path.dirname(filename), filename)
fin = open(filename)
processFile(spitter, fin)
fin.close()
fout.close()
# HTML DOM tree stuff
def splitIntoSlides(document):
body = domhelpers.findNodesNamed(document, 'body')[0]
slides = []
slide = []
title = '(unset)'
for child in body.childNodes:
if isinstance(child, dom.Element) and child.tagName == 'h2':
if slide:
slides.append((title, slide))
slide = []
title = domhelpers.getNodeText(child)
else:
slide.append(child)
slides.append((title, slide))
return slides
def insertPrevNextLinks(slides, filename, ext):
for slide in slides:
for name, offset in (("previous", -1), ("next", +1)):
if (slide.pos > 0 and name == "previous") or \
(slide.pos < len(slides)-1 and name == "next"):
for node in domhelpers.findElementsWithAttribute(slide.dom, "class", name):
if node.tagName == 'a':
node.setAttribute('href', '%s-%d%s'
% (filename[0], slide.pos+offset, ext))
else:
text = dom.Text()
text.data = slides[slide.pos+offset].title
node.appendChild(text)
else:
for node in domhelpers.findElementsWithAttribute(slide.dom, "class", name):
pos = 0
for child in node.parentNode.childNodes:
if child is node:
del node.parentNode.childNodes[pos]
break
pos += 1
class HTMLSlide:
def __init__(self, dom, title, pos):
self.dom = dom
self.title = title
self.pos = pos
def munge(document, template, linkrel, d, fullpath, ext, url, config):
    # FIXME: This has *way* too much duplicated crap in common with tree.munge
#fixRelativeLinks(template, linkrel)
removeH1(document)
fixAPI(document, url)
fontifyPython(document)
addPyListings(document, d)
addHTMLListings(document, d)
#fixLinks(document, ext)
#putInToC(template, generateToC(document))
template = template.cloneNode(1)
# Insert the slides into the template
slides = []
pos = 0
for title, slide in splitIntoSlides(document):
t = template.cloneNode(1)
text = dom.Text()
text.data = title
setTitle(t, [text])
tmplbody = domhelpers.findElementsWithAttribute(t, "class", "body")[0]
tmplbody.childNodes = slide
tmplbody.setAttribute("class", "content")
# FIXME: Next/Prev links
# FIXME: Perhaps there should be a "Template" class? (setTitle/setBody
# could be methods...)
slides.append(HTMLSlide(t, title, pos))
pos += 1
insertPrevNextLinks(slides, os.path.splitext(os.path.basename(fullpath)), ext)
return slides
from tree import makeSureDirectoryExists
def getOutputFileName(originalFileName, outputExtension, index):
return os.path.splitext(originalFileName)[0]+'-'+str(index) + outputExtension
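# For example, getOutputFileName('slides/talk.html', '.html', 2) returns
# 'slides/talk-2.html'.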
def doFile(filename, linkrel, ext, url, templ, options={}, outfileGenerator=getOutputFileName):
from tree import parseFileAndReport
doc = parseFileAndReport(filename)
slides = munge(doc, templ, linkrel, os.path.dirname(filename), filename, ext, url, options)
for slide, index in zip(slides, range(len(slides))):
newFilename = outfileGenerator(filename, ext, index)
makeSureDirectoryExists(newFilename)
f = open(newFilename, 'wb')
slide.dom.writexml(f)
f.close()
# Prosper output
class ProsperSlides(LatexSpitter):
firstSlide = 1
start_html = '\\documentclass[ps]{prosper}\n'
start_body = '\\begin{document}\n'
start_div_author = '\\author{'
end_div_author = '}'
def visitNode_h2(self, node):
if self.firstSlide:
self.firstSlide = 0
self.end_body = '\\end{slide}\n\n' + self.end_body
else:
self.writer('\\end{slide}\n\n')
self.writer('\\begin{slide}{')
spitter = HeadingLatexSpitter(self.writer, self.currDir, self.filename)
spitter.visitNodeDefault(node)
self.writer('}')
def _write_img(self, target):
self.writer('\\begin{center}\\includegraphics[%%\nwidth=1.0\n\\textwidth,'
'height=1.0\\textheight,\nkeepaspectratio]{%s}\\end{center}\n' % target)
class PagebreakLatex(LatexSpitter):
everyN = 1
currentN = 0
seenH2 = 0
start_html = LatexSpitter.start_html+"\\date{}\n"
start_body = '\\begin{document}\n\n'
def visitNode_h2(self, node):
if not self.seenH2:
self.currentN = 0
self.seenH2 = 1
else:
self.currentN += 1
self.currentN %= self.everyN
if not self.currentN:
self.writer('\\clearpage\n')
level = (int(node.tagName[1])-2)+self.baseLevel
self.writer('\n\n\\'+level*'sub'+'section*{')
spitter = HeadingLatexSpitter(self.writer, self.currDir, self.filename)
spitter.visitNodeDefault(node)
self.writer('}\n')
class TwoPagebreakLatex(PagebreakLatex):
everyN = 2
class SlidesProcessingFunctionFactory(default.ProcessingFunctionFactory):
latexSpitters = default.ProcessingFunctionFactory.latexSpitters.copy()
latexSpitters['prosper'] = ProsperSlides
latexSpitters['page'] = PagebreakLatex
latexSpitters['twopage'] = TwoPagebreakLatex
def getDoFile(self):
return doFile
def generate_mgp(self, d, fileNameGenerator=None):
template = d.get('template', 'template.mgp')
df = lambda file, linkrel: convertFile(file, MagicpointOutput, template, ext=".mgp")
return df
factory=SlidesProcessingFunctionFactory()
|
python
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@File : __init__.py
Description :
@Author : pchaos
tradedate: 2018-4-7
-------------------------------------------------
Change Activity:
2018-4-7:
@Contact : p19992003#gmail.com
-------------------------------------------------
"""
__author__ = 'pchaos'
__version__ = "0.0.2"
|
python
|
# Create a list of strings: mutants
mutants = ['charles xavier',
'bobby drake',
'kurt wagner',
'max eisenhardt',
'kitty pryde']
# Create a list of tuples: mutant_list
mutant_list = list(enumerate(mutants))
# Print the list of tuples
print(mutant_list)
# Unpack and print the tuple pairs
for index1, value1 in list(enumerate(mutants)):
print(index1, value1)
# Change the start index
for index2, value2 in list(enumerate(mutants, start=1)):
print(index2, value2)
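# Expected output of the first print call:
# [(0, 'charles xavier'), (1, 'bobby drake'), (2, 'kurt wagner'),
#  (3, 'max eisenhardt'), (4, 'kitty pryde')]
# The second loop prints the same index/value pairs line by line; the third
# starts counting from 1 instead of 0.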
|
python
|
import abc
import dataclasses
import datetime
from typing import Callable, List, Optional
from xlab.base import time
from xlab.data.proto import data_entry_pb2, data_type_pb2
class DataStore(abc.ABC):
# Add a single data entry to the store. No exception is thrown if the
# operation is successful.
@abc.abstractmethod
def add(self, data_entry: data_entry_pb2.DataEntry):
pass
# Fields for retrieving data entries.
@dataclasses.dataclass(frozen=True)
class LookupKey:
        data_space: int = 0  # Proto Enum data_entry_pb2.DataEntry.DataSpace
symbol: Optional[str] = None
data_type: int = 0 # Proto Enum data_type_pb2.DataType.Enum
timestamp: Optional[time.Time] = None
# Read a single data entry by a key. If the data entry is not found, throw
# an exception (instead of returns None).
# All fields of the LookupKey must be specified.
@abc.abstractmethod
def read(self, lookup_key: LookupKey) -> data_entry_pb2.DataEntry:
pass
# Read zero or more data entries matching the lookup key.
@abc.abstractmethod
def lookup(self, lookup_key: LookupKey) -> data_entry_pb2.DataEntries:
pass
# Go through all data entries, and apply |fn| to each of them.
@abc.abstractmethod
def each(self, fn: Callable[[data_entry_pb2.DataEntry], None]):
pass
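# Illustrative usage sketch (hypothetical concrete store and placeholder enum
# values, since LookupKey stores the proto enums as plain ints):
#   key = DataStore.LookupKey(data_space=1, symbol='SPY', data_type=2)
#   entries = store.lookup(key)  # zero or more matching entries (DataEntries)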
|
python
|
import subprocess;
from Thesis.util.ycsbCommands.Commands import getLoadCommand;
from Thesis.util.ycsbCommands.Commands import getRunCommand;
from Thesis.util.util import checkExitCodeOfProcess;
class Cluster(object):
def __init__(self, normalBinding, consistencyBinding, nodesInCluster):
self.__normalbinding = normalBinding;
self.__consistencyBinding = consistencyBinding;
self.__nodesInCluster = nodesInCluster;
def getNormalBinding(self):
return self.__normalbinding;
def getConsistencyBinding(self):
return self.__consistencyBinding;
def getNodesInCluster(self):
return list(self.__nodesInCluster);
    # Should be overridden in subclasses
def deleteDataInCluster(self):
pass;
def writeNormalWorkloadFile(self, remoteYcsbNodes, pathForWorkloadFile):
dataToWrite = 'recordcount=10000\n' + \
'operationcount=10000000\n' + \
"""workload=com.yahoo.ycsb.workloads.CoreWorkload
readallfields=true
readproportion=0.4
updateproportion=0.25
scanproportion=0.1
insertproportion=0.25
scanlengthdistribution=uniform
maxscanlength=100
requestdistribution=zipfian
hosts=""" + ",".join(self.getNodesInCluster());
self.writeFileToYcsbNodes(dataToWrite, remoteYcsbNodes, pathForWorkloadFile, pathForWorkloadFile);
def writeConsistencyWorkloadFile(self, remoteYcsbNodes, pathForWorkloadFile):
dataToWrite = """recordcount=10000
operationcount=10000000
workload=com.yahoo.ycsb.workloads.CoreWorkload
readallfields=true
readproportion=0.4
updateproportion=0.25
scanproportion=0.1
insertproportion=0.25
scanlengthdistribution=uniform
maxscanlength=100
requestdistribution=zipfian
starttime=10000
consistencyTest=True
useFixedOperationDistributionSeed=True
operationDistributionSeed=46732463246
readProportionConsistencyCheck=0.5
updateProportionConsistencyCheck=0.5
hosts=""" + ",".join(self.getNodesInCluster());
self.writeFileToYcsbNodes(dataToWrite, remoteYcsbNodes, pathForWorkloadFile, pathForWorkloadFile);
def writeFileToYcsbNodes(self, dataToWrite, remoteYcsbNodes, localPath, remotePath):
f = open(localPath, "w");
f.write(dataToWrite);
f.close();
for ip in remoteYcsbNodes:
exitCode = subprocess.call(['scp', localPath, 'root@' + ip + ':' + remotePath]);
checkExitCodeOfProcess(exitCode, 'Writing workload file to remote YCSB nodes failed');
def getLoadCommand(self, pathToWorkloadFile, extraParameters = []):
return getLoadCommand(self.getNormalBinding(), pathToWorkloadFile, extraParameters);
def getRunCommand(self, pathToWorkloadFile, runtimeBenchmarkInMinutes, amountOfThreads, extraParameters = []):
return getRunCommand(self.getNormalBinding(), pathToWorkloadFile, runtimeBenchmarkInMinutes, amountOfThreads, extraParameters);
def getConsistencyRunCommand(self, pathToWorkloadFile, pathConsistencyResult, runtimeBenchmarkInSeconds, amountOfThreads, extraParameters = []):
extraParameters.extend(['-p', 'resultfile=' + pathConsistencyResult]);
return getRunCommand(self.getConsistencyBinding(), pathToWorkloadFile, runtimeBenchmarkInSeconds, amountOfThreads, extraParameters);
def removeNode(self, ipNodeToRemove):
result = self.doRemoveNode(ipNodeToRemove);
self.__nodesInCluster.remove(ipNodeToRemove);
return result;
def addNode(self, ipNodeToAdd):
self.doAddNode(ipNodeToAdd);
self.__nodesInCluster.append(ipNodeToAdd);
    # Should be overridden in subclasses
def stopNode(self, ipNodeToStop):
pass;
    # Should be overridden in subclasses
def startNode(self, ipNodeToStart):
pass;
def getOtherIpInCluster(self, ip):
for otherIp in self.__nodesInCluster:
if otherIp != ip:
return otherIp;
raise Exception('No other ip found in cluster');
|
python
|
from .KickerSystem import KickerSystem
|
python
|
import pygame, sys
from pygame.locals import *
from camera import *
# Fullscreen state must persist across calls (event_system runs every frame),
# otherwise F11 could never toggle back out of fullscreen.
_is_fullscreen = False

def event_system(width, height):
    global _is_fullscreen
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        if event.type == VIDEORESIZE:
            pygame.display.set_mode((event.w, event.h), pygame.RESIZABLE)
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                pygame.quit()
                sys.exit()
            if event.key == pygame.K_F11:
                # Toggle between fullscreen and a resizable window.
                _is_fullscreen = not _is_fullscreen
                if _is_fullscreen:
                    pygame.display.set_mode((width, height), pygame.FULLSCREEN)
                else:
                    pygame.display.set_mode((width, height), pygame.RESIZABLE)
class game_base:
def render_window(self, window, dt, layer_data_1, layer_data_2):
keys = pygame.key.get_pressed()
camera_differing = [0.05*dt,0.05*dt,0.1*dt,0.15*dt,0.2*dt,0.25*dt,0.3*dt]
for layer in layer_data_1:
render_layer = layer_data_1[layer]
render_layer.draw(window)
render_layer.update()
update_frame(layer_data_2[0],window)
update_frame(layer_data_2[1],window)
|
python
|
"""
This file handles their access credentials and tokens for various APIs required
to retrieve data for the website.
The file that contains the credentials is called "vault.zip", and is referenced
by a constant, `VAULT_FILE`. This file is accessed using a password stored in
the config called `VAULT_PASSWORD`.
Inside the "vault.zip" file, there is a file named "keys.yml." And this is the
file with all the credentials (plus a Flask secret key). It looks like this:
flask:
secret_key: "<key is here>"
hobolink:
password: "<password is here>"
user: "crwa"
token: "<token is here>"
"""
import os
import zipfile
import yaml
from distutils.util import strtobool
from flask import current_app
from flagging_site.config import VAULT_FILE
def get_keys() -> dict:
"""Retrieves the keys from the `current_app` if it exists. If not, then this
function tries to load directly from the vault. The reason this function
exists is so that you can use the API wrappers regardless of whether or not
the Flask app is running.
    Note that this function requires that you assign the vault password to the
    environment variable named `VAULT_PASSWORD`.
Returns:
The full keys dict.
"""
if current_app:
d = current_app.config['KEYS']
else:
vault_file = os.environ.get('VAULT_FILE') or VAULT_FILE
d = load_keys_from_vault(vault_password=os.environ['VAULT_PASSWORD'],
vault_file=vault_file)
return d.copy()
def load_keys_from_vault(
vault_password: str,
vault_file: str = VAULT_FILE
) -> dict:
"""This code loads the keys directly from the vault zip file. Users should
preference using the `get_keys()` function over this function.
Args:
vault_password: (str) Password for opening up the `vault_file`.
vault_file: (str) File path of the zip file containing `keys.yml`.
Returns:
Dict of credentials.
"""
pwd = bytes(vault_password, 'utf-8')
with zipfile.ZipFile(vault_file) as f:
with f.open('keys.yml', pwd=pwd, mode='r') as keys_file:
d = yaml.load(keys_file, Loader=yaml.BaseLoader)
return d
def offline_mode() -> bool:
if current_app:
return current_app.config['OFFLINE_MODE']
else:
return bool(strtobool(os.environ.get('OFFLINE_MODE', 'false')))
def get_data_store_file_path(file_name: str) -> str:
if current_app:
return os.path.join(current_app.config['DATA_STORE'], file_name)
else:
from ..config import DATA_STORE
return os.path.join(DATA_STORE, file_name)
|
python
|
import streamlit as st
container = st.container()
container.write("This is inside the container")
st.write("This is outside the container")
# Now insert some more in the container
container.write("This is inside too")
|
python
|
#!/usr/bin/env python
# Usage: ./twitget.py twitget.ini
import configparser
import time
import sys
from pymongo import MongoClient
import TwitterSearch
import requests
import logging
logging.basicConfig(level=logging.DEBUG)
DEFAULT_INTERVAL = 15 * 60
DEFAULT_LANGUAGE = 'en'
def get_tweets(tweets_col, config):
try:
newest_tweet = tweets_col.find_one({}, {'id': True}, sort=[('id', -1)])
if newest_tweet is None:
newest_id = int(config['query']['default_since_id'])
else:
newest_id = newest_tweet['id']
tso = TwitterSearch.TwitterSearchOrder()
        tso.set_keywords(config['query']['keywords'].split(','), config['query'].getboolean('or'))
tso.set_language(DEFAULT_LANGUAGE)
tso.set_include_entities(False)
tso.set_since_id(newest_id)
ts = TwitterSearch.TwitterSearch(consumer_key=config['auth']['consumer_key'],
consumer_secret=config['auth']['consumer_secret'],
access_token=config['auth']['access_token'],
access_token_secret=config['auth']['access_token_secret'],
verify=True)
tweets_col.insert_many(ts.search_tweets_iterable(tso))
except TwitterSearch.TwitterSearchException as e:
print(e)
def init_config(config_path):
config = configparser.ConfigParser()
config.read(config_path)
if not config.has_option('query', 'interval'):
        config.set('query', 'interval', str(DEFAULT_INTERVAL))
return config
if __name__ == '__main__':
config = init_config(sys.argv[1])
client = MongoClient(config['db']['host'], int(config['db']['port']))
db = client.twit
while True:
get_tweets(db.tweets, config)
time.sleep(int(config['query']['interval']))
|
python
|
from scrapers.memphis_council_calendar_scraper import MemphisCouncilCalScraper
from . import utils
def test_get_docs_from_page():
memphis_scraper = MemphisCouncilCalScraper()
    with open(utils.get_abs_filename(
            'memphis-city-council-calendar.html'), 'r') as page_file:
        page_str = page_file.read()
docs = memphis_scraper._get_docs_from_calendar(page_str)
assert len(docs) == 47
for doc in docs:
# All URLs should be absolute.
assert doc.url.startswith("http://")
|
python
|
from exquiro.parsers.package_diagram_parser import PackageDiagramParser
from exquiro.models.package_diagram.package_node import PackageNode
from exquiro.models.package_diagram.package_relation import PackageRelation
from lxml import etree
import uuid
class OpenPonkPackageDiagramParser(PackageDiagramParser):
def parse_nodes(self, model, namespaces):
try:
m_nodes = self.parse_packages(model, namespaces)
except AttributeError as exc:
raise exc
except Exception as exc:
raise Exception("Corrupted model in source file") from exc
return m_nodes
def parse_relations(self, model, namespaces):
try:
packages = self.get_packages(model, namespaces)
m_relations = self.parse_imports(packages, namespaces)
m_relations.update(self.parse_member_packages(packages, namespaces))
except AttributeError as exc:
raise exc
except Exception as exc:
raise Exception("Corrupted model in source file") from exc
return m_relations
def parse_id(self, model, namespaces):
return str(uuid.uuid4())
def parse_packages(self, model, namespaces):
m_packages = set()
packages = self.get_packages(model, namespaces)
for package in packages:
m_packages.add(self.parse_package(package, namespaces))
return m_packages
@staticmethod
def parse_package(package, namespaces):
try:
node_id = package.attrib["{" + namespaces['xmi'] + "}" + "id"]
node_name = package.attrib["name"]
node_class = "Package"
if 'visibility' in package.attrib:
node_visibility = package.attrib["visibility"]
else:
node_visibility = "public"
except Exception as exc:
raise AttributeError("Corrupted package node in source file") from exc
return PackageNode(node_name, node_id, node_class, node_visibility)
def parse_imports(self, packages, namespaces):
m_imports = set()
for package in packages:
children = list(set(package.findall('.//packageImport[@xmi:type="uml:PackageImport"]', namespaces)) &
set(package.getchildren()))
for child in children:
m_imports.add(self.parse_import(child, package, namespaces))
return m_imports
@staticmethod
def parse_import(package_import, package, namespaces):
try:
import_id = package_import.attrib["{" + namespaces['xmi'] + "}" + "id"]
import_target = package_import.find(".//importedPackage").attrib["{" + namespaces['xmi'] + "}" + "idref"]
import_source = package.attrib["{" + namespaces['xmi'] + "}" + "id"]
import_type = "PackageImport"
except Exception as exc:
raise AttributeError("Corrupted import relation in source file") from exc
return PackageRelation(import_id, import_source, import_target, import_type)
def parse_member_packages(self, packages, namespaces):
m_members_of = set()
for package in packages:
children = package.getchildren()
for child in children:
if self.is_package(child, namespaces):
m_members_of.add(self.parse_member_package(child, package, namespaces))
return m_members_of
@staticmethod
def parse_member_package(member_package, package, namespaces):
try:
# generate universal unique id
member_id = str(uuid.uuid4())
member_source = member_package.attrib["{" + namespaces['xmi'] + "}" + "id"]
member_target = package.attrib["{" + namespaces['xmi'] + "}" + "id"]
member_type = "MemberOf"
except Exception as exc:
raise AttributeError("Corrupted package node or one of his child package node in source file") from exc
return PackageRelation(member_id, member_source, member_target, member_type)
def get_model(self, file_name, namespaces):
return etree.parse(file_name).getroot().find('uml:Package', namespaces)
@staticmethod
def get_packages(model, namespaces):
packages = model.findall('.//packagedElement[@xmi:type="uml:Package"]', namespaces)
packages.extend(model.findall('.//packagedElement[@xmi:type="uml:Model"]', namespaces))
packages.append(model)
return packages
@staticmethod
def is_package(element, namespaces):
if "{" + namespaces['xmi'] + "}" + "type" not in element.attrib:
return False
el_type = element.attrib["{" + namespaces['xmi'] + "}" + "type"]
return el_type == "uml:Package" or el_type == "uml:Model"
|
python
|
import os
import numpy as np
import pandas as pd
import rampwf as rw
from rampwf.score_types.base import BaseScoreType
class Mechanics(object):
def __init__(self, workflow_element_names=[
'feature_extractor', 'classifier', 'regressor']):
self.element_names = workflow_element_names
self.feature_extractor_workflow = rw.workflows.FeatureExtractor(
[self.element_names[0]])
self.classifier_workflow = rw.workflows.Classifier(
[self.element_names[1]])
self.regressor_workflow = rw.workflows.Regressor(
[self.element_names[2]])
def train_submission(self, module_path, X_df, y_array, train_is=None):
if train_is is None:
train_is = slice(None, None, None)
# Avoid setting with copy warning
X_train_df = X_df.iloc[train_is].copy()
y_train_array = y_array[train_is]
fe = self.feature_extractor_workflow.train_submission(
module_path, X_train_df, y_train_array)
X_train_array = self.feature_extractor_workflow.test_submission(
fe, X_train_df)
y_train_clf_array = y_train_array[:, 0]
clf = self.classifier_workflow.train_submission(
module_path, X_train_array, y_train_clf_array)
y_train_reg_array = y_train_array[:, 1].astype(float)
# Concatenating ground truth y_proba (one-hot, derived from labels)
# to X_train_df.
# This makes it vulnerable to training sets that don't contain
# all the classes. So better to use it with stratified CV.
labels = np.unique(y_array[:, 0])
label_array = np.zeros((len(y_train_clf_array), len(labels)))
for i, label in enumerate(labels):
label_array[:, i] = (y_train_clf_array == label)
X_train_array = np.hstack([X_train_array, label_array])
reg = self.regressor_workflow.train_submission(
module_path, X_train_array, y_train_reg_array)
# It's a bit ugly that we return the labels here, but I don't see
# a better solution
return labels, fe, clf, reg
def test_submission(self, trained_model, X_df):
labels, fe, clf, reg = trained_model
X_test_array = self.feature_extractor_workflow.test_submission(
fe, X_df)
y_proba_clf = self.classifier_workflow.test_submission(
clf, X_test_array)
label_array = np.zeros((len(y_proba_clf), len(labels)))
for i, label in enumerate(labels):
label_array[:, i] = y_proba_clf[:, i]
X_test_array = np.hstack([X_test_array, label_array])
y_pred_reg = self.regressor_workflow.\
test_submission(reg, X_test_array)
return np.concatenate([y_proba_clf, y_pred_reg.reshape(-1, 1)], axis=1)
problem_title = \
'Mechanics classification'
_train = 'train.csv'
_test = 'test.csv'
quick_mode = os.getenv('RAMP_TEST_MODE', 0)
if quick_mode:
_train = 'train_small.csv'
_test = 'test_small.csv'
# Need better error messages for invalid input parameters
_debug_time_series = False
# label names for the classification target
_prediction_label_names = ['A', 'B', 'C', 'D', 'E']
# the classification target column
_target_column_name_clf = 'system'
# the regression target column
_target_column_name_reg = 'future'
# The first five columns of y_pred (one per class label) will be wrapped in multiclass Predictions.
Predictions_1 = rw.prediction_types.make_multiclass(
label_names=_prediction_label_names)
# The last column of y_pred will be wrapped in regression Predictions.
# We make a 2D but single-column y_pred (instead of a classical 1D y_pred)
# to make handling the combined 2D y_pred array easier.
Predictions_2 = rw.prediction_types.make_regression(
label_names=[_target_column_name_reg])
# The combined Predictions is initalized by the list of individual Predictions.
Predictions = rw.prediction_types.make_combined([Predictions_1, Predictions_2])
# The workflow object, named after the RAMP.
# workflow = rw.workflows.Mechanics()
workflow = Mechanics()
# The first score will be applied on the first Predictions
score_type_1 = rw.score_types.ClassificationError(name='err', precision=3)
# The second score will be applied on the second Predictions
class CyclicRMSE(BaseScoreType):
is_lower_the_better = True
minimum = 0.0
maximum = float('inf')
def __init__(self, name='rmse', precision=2, periodicity=-1):
self.name = name
self.precision = precision
self.periodicity = periodicity
def __call__(self, y_true, y_pred):
d = y_true - y_pred
        if self.periodicity > 0:
for i, er in enumerate(d):
d[i] = min(np.mod(er, self.periodicity),
np.mod(-er, self.periodicity))
return np.sqrt(np.mean(np.square(d)))
score_type_2 = CyclicRMSE(name='rmse', precision=3,
periodicity=2 * np.pi)
score_types = [
# The official score combines the two scores with weights 2/3 and 1/3.
# To let the score type know that it should be applied on the first
# Predictions of the combined Predictions' prediction_list, we wrap
# it into a special MakeCombined score with index 0
rw.score_types.Combined(
name='combined', score_types=[score_type_1, score_type_2],
weights=[0.1, 0.9], precision=3),
rw.score_types.MakeCombined(score_type=score_type_1, index=0),
rw.score_types.MakeCombined(score_type=score_type_2, index=1),
]
# CV implemented here:
def get_cv(X, y):
unique_replicates = np.unique(X['distribution'])
r = np.arange(len(X))
for replicate in unique_replicates:
train_is = r[(X['distribution'] != replicate).values]
test_is = r[(X['distribution'] == replicate).values]
yield train_is, test_is
# Both train and test data are read from csv files; the classification and
# regression targets are stacked into a single two-column y array.
def _read_data(path, filename):
input_df = pd.read_csv(os.path.join(path, 'data', filename)).loc[::1]
data_df = input_df.drop(['future', 'system'], axis=1)
y_reg_array = input_df[_target_column_name_reg].values.reshape(-1, 1)
y_clf_array = input_df[_target_column_name_clf].values.reshape(-1, 1)
y_array = np.concatenate((y_clf_array,
y_reg_array), axis=1)
return data_df, y_array
def get_train_data(path='.'):
return _read_data(path, _train)
def get_test_data(path='.'):
return _read_data(path, _test)
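# --- Illustrative sketch (not part of the original problem file) ---
# get_cv() yields one leave-one-group-out split per unique value of the
# 'distribution' column; a toy frame makes the behaviour concrete.
# Running this requires the rampwf dependencies imported above.
if __name__ == '__main__':
    _demo_X = pd.DataFrame({'distribution': [0, 0, 1, 1, 2]})
    for _train_is, _test_is in get_cv(_demo_X, y=None):
        print('train:', _train_is, 'test:', _test_is)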
|
python
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""AdamWeightDecay op"""
from mindspore.ops.op_info_register import op_info_register, CpuRegOp, DataType
adam_weight_decay_op_info = CpuRegOp("AdamWeightDecay") \
.input(0, "var", "required") \
.input(1, "m", "required") \
.input(2, "v", "required") \
.input(3, "lr", "required") \
.input(4, "beta1", "required") \
.input(5, "beta2", "required") \
.input(6, "epsilon", "required") \
.input(7, "decay", "required") \
.input(8, "gradient", "required") \
.output(0, "output0", "required") \
.output(1, "output1", "required") \
.output(2, "output2", "required") \
.dtype_format(DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
DataType.F32_Default, DataType.F32_Default, DataType.F32_Default) \
.dtype_format(DataType.F16_Default, DataType.F16_Default, DataType.F16_Default,
DataType.F32_Default, DataType.F32_Default, DataType.F32_Default,
DataType.F32_Default, DataType.F32_Default, DataType.F16_Default,
DataType.F16_Default, DataType.F16_Default, DataType.F16_Default) \
.get_op_info()
@op_info_register(adam_weight_decay_op_info)
def _adam_weight_decay_cpu():
"""AdamWeightDecay cpu register"""
return
|
python
|
"""
Visdom server
-------------
.. autosummary::
:toctree: toctree/monitor
VisdomMighty
"""
import os
import sys
import time
from collections import defaultdict
import numpy as np
import visdom
from mighty.monitor.batch_timer import timer
from mighty.utils.constants import VISDOM_LOGS_DIR
class VisdomMighty(visdom.Visdom):
"""
A Visdom server that updates measures in online fashion.
Parameters
----------
env : str, optional
Environment name.
Default: "main"
offline : bool, optional
Online (False) or offline (True) mode.
Default: False
"""
def __init__(self, env="main", offline=False):
port = int(os.environ.get('VISDOM_PORT', 8097))
server = os.environ.get('VISDOM_SERVER', 'http://localhost')
base_url = os.environ.get('VISDOM_BASE_URL', '/')
env = env.replace('_', '-') # visdom things
log_to_filename = None
if offline:
VISDOM_LOGS_DIR.mkdir(exist_ok=True)
log_to_filename = VISDOM_LOGS_DIR / f"{env}.log"
try:
super().__init__(env=env, server=server, port=port,
username=os.environ.get('VISDOM_USER', None),
password=os.environ.get('VISDOM_PASSWORD', None),
log_to_filename=log_to_filename,
offline=offline,
base_url=base_url,
raise_exceptions=True)
except ConnectionError as error:
tb = sys.exc_info()[2]
raise ConnectionError("Start Visdom server with "
"'python -m visdom.server' command."
).with_traceback(tb)
self.timer = timer
self.legends = defaultdict(list)
self.with_markers = False
if offline:
print(f"Visdom logs are saved in {log_to_filename}")
else:
url = f"{self.server}:{self.port}{self.base_url}"
print(f"Monitor is opened at {url}. "
f"Choose environment '{self.env}'.")
# self._register_comments_window()
def _register_comments_window(self):
txt_init = "Enter comments:"
win = 'comments'
def type_callback(event):
if event['event_type'] == 'KeyPress':
curr_txt = event['pane_data']['content']
if event['key'] == 'Enter':
curr_txt += '<br>'
elif event['key'] == 'Backspace':
curr_txt = curr_txt[:-1]
elif event['key'] == 'Delete':
curr_txt = txt_init
elif len(event['key']) == 1:
curr_txt += event['key']
self.text(curr_txt, win='comments')
self.text(txt_init, win=win)
self.register_event_handler(type_callback, win)
def line_update(self, y, opts, name=None):
"""
Appends `y` axis value to the plot. The `x` axis value will be
extracted from the global timer.
Parameters
----------
y : float or list of float or torch.Tensor
The Y axis value.
opts : dict
Visdom plot `opts`.
name : str or None, optional
The label name of this plot. Used when a plot has a legend.
Default: None
"""
y = np.array([y])
n_lines = y.shape[-1]
if n_lines == 0:
return
if y.ndim > 1 and n_lines == 1:
# visdom expects 1d array for a single line plot
y = y[0]
x = np.full_like(y, self.timer.epoch_progress(), dtype=np.float32)
# hack to make window names consistent if the user forgets to specify
# the title
win = opts.get('title', str(opts))
if self.with_markers:
opts['markers'] = True
opts['markersize'] = 7
self.line(Y=y, X=x, win=win, opts=opts, update='append', name=name)
if name is not None:
self.update_window_opts(win=win, opts=dict(legend=[], title=win))
def log(self, text, timestamp=True):
"""
Log the text.
Parameters
----------
text : str
Text
timestamp : bool, optional
Prepend date timestamp (True) or not.
Default: True
"""
if timestamp:
text = f"{time.strftime('%Y-%b-%d %H:%M')} {text}"
self.text(text, win='log', opts=dict(title='log'),
append=self.win_exists(win='log'))
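# --- Hypothetical usage sketch (not part of the original module) ---
# Assumes either a reachable Visdom server (VISDOM_SERVER/VISDOM_PORT,
# default http://localhost:8097) or offline=True, which only writes events
# to VISDOM_LOGS_DIR, and that the global `timer` works outside a training
# loop.
if __name__ == '__main__':
    viz = VisdomMighty(env="demo", offline=True)
    viz.log("monitor started")
    viz.line_update(y=0.5, opts=dict(title='loss'))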
|
python
|
"""
Gaze
@author Anthony Liu <[email protected]>
@version 1.1.0
"""
from context import Rasta
import numpy as np
class Scene(object):
x_range, y_range, z_range = 13.33333, 10., 10.
num_viewer_params = 3
num_box_params = 3
@classmethod
def sample(cls, num_boxes):
# generate the boxes
boxes = []
for i in range(num_boxes):
# generate random box
boxes += [
np.random.uniform(0, cls.x_range/2. - 1.),
np.random.uniform(0, cls.y_range - 1.),
np.random.uniform(0, cls.z_range - 1.)
]
# get the viewer
viewer_center = [0.75*cls.x_range, cls.z_range/2.]
target_box = np.random.choice(num_boxes)
viewer = [
np.random.normal(
viewer_center[0], cls.x_range/16.
),
np.random.normal(
viewer_center[1], cls.z_range/4.
),
target_box
]
viewer[0] = min(cls.x_range - 1., max(0, viewer[0]))
        viewer[1] = min(cls.z_range - 1., max(0, viewer[1]))
return viewer + boxes
@classmethod
def transition(cls, latent, k):
latent_ = latent[:]
        num_boxes = (len(latent) - cls.num_viewer_params) // cls.num_box_params
# first few latent variables are the viewer
if k == 0:
latent_[k] = np.random.normal(latent[k], 2.)
latent_[k] = min(
cls.x_range - 1., max(0, latent_[k])
)
elif k == 1:
latent_[k] = np.random.normal(latent[k], 2.)
latent_[k] = min(
cls.z_range - 1., max(0, latent_[k])
)
elif k == 2:
latent_[k] = np.random.choice(num_boxes)
# the rest are the boxes
elif (k - cls.num_viewer_params) % 3 == 0:
latent_[k] = np.random.normal(latent[k], 2.)
latent_[k] = min(
cls.x_range/2. - 1., max(0, latent_[k])
)
elif (k - cls.num_viewer_params) % 3 == 1:
latent_[k] = np.random.normal(latent[k], 2.)
latent_[k] = min(
cls.y_range - 1., max(0, latent_[k])
)
elif (k - cls.num_viewer_params) % 3 == 2:
latent_[k] = np.random.normal(latent[k], 2.)
latent_[k] = min(
cls.z_range - 1., max(0, latent_[k])
)
else:
return latent
return latent_
@classmethod
def get_model_from_latent(cls, latent):
viewer_offset = cls.num_viewer_params
        num_boxes = (len(latent) - cls.num_viewer_params) // cls.num_box_params
latent[2] = min(num_boxes - 1, max(0, int(latent[2])))
scene = {
'viewer': [
latent[0],
latent[
cls.num_viewer_params +
latent[2]*cls.num_box_params + 1
],
latent[1],
latent[2]
],
# go from a list to a list of lists
'boxes': [
latent[
viewer_offset +
cls.num_box_params * i:viewer_offset +
cls.num_box_params * i + cls.num_box_params
]
for i in range(num_boxes)
]
}
boxes = []
v1 = scene['boxes'][scene['viewer'][3]]
v2 = np.subtract(
scene['viewer'][0:3], v1
)
v3 = [1, 0, 0]
angle = np.arccos(np.dot(v2, v3) / (
np.linalg.norm(v2) * np.linalg.norm(v3)
))
viewer_model = Rasta.rotate_box(
Rasta.get_box([
scene['viewer'][0],
scene['viewer'][1],
scene['viewer'][2]
], 1, 1, 1, color=(255, 0, 0)),
angle
)
viewer_model['faces'][0][1] = (0, 1e6, 0)
# broken for some reason
viewer_model['faces'][1][0] = list(reversed(
viewer_model['faces'][1][0]
))
viewer_model['faces'][1][1] = (0, 1e6, 1e6)
boxes.append(viewer_model)
for box in scene['boxes']:
boxes.append(Rasta.get_box(
box,
1, 1, 1,
color=(0, 0, 255)
))
return boxes
@classmethod
def get_target_box(cls, latent):
        box_idx = int(latent[cls.num_viewer_params - 1])
box_offset = cls.num_box_params * box_idx
offset = cls.num_viewer_params + box_offset
return latent[offset:offset + cls.num_box_params]
@classmethod
def get_target_loss(cls, latent_a, latent_b):
box_a = cls.get_target_box(latent_a)
box_b = cls.get_target_box(latent_b)
diff = np.subtract(box_a, box_b)
sq = np.square(diff)
return np.sum(sq) ** 0.5
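# --- Illustrative sketch (not part of the original module) ---
# Assumes the sibling `context` module (providing Rasta) is importable.
# Samples a random latent scene and proposes a jittered viewer x-coordinate.
if __name__ == '__main__':
    latent = Scene.sample(num_boxes=3)
    proposal = Scene.transition(latent, k=0)
    print(len(latent), latent[0], '->', proposal[0])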
|
python
|
v = int(input('Enter a number: '))
print('the predecessor is {} and the successor is {}'.format((v-1), (v+1)))
print('the number plus 10 equals {}'.format(v+10))
|
python
|
from .__version__ import __version__
from .util import environment_info
from .wrapper import (
convert_into,
convert_into_by_batch,
read_pdf,
read_pdf_with_template,
)
|
python
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Mixly Wiki'
copyright = '2019, Mixly Team'
author = 'hznupeter'
version='0.999'
# The full version, including alpha/beta/rc tags
release = '0.999'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
gettext_auto_build=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
html_title="Mixly Wiki"
html_short_title="Milxy"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'zh_CN'
locale_dirs = ['locale/'] # path is example but recommended.
gettext_compact = False # optional.
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
html_static_path = ['static']
html_favicon = 'favicon.png'
html_logo = 'favicon.png'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
source_suffix = ['.rst', '.md']
html_sidebars = {
'**': ['globaltoc.html', 'sourcelink.html', 'searchbox.html'],
'using/windows': ['windowssidebar.html', 'searchbox.html'],
}
latex_engine = 'xelatex'
latex_elements = {
'papersize': 'a4paper',
'pointsize': '11pt',
'preamble': r'''
\usepackage{xeCJK}
\setCJKmainfont[BoldFont=STZhongsong, ItalicFont=STKaiti]{STSong}
\setCJKsansfont[BoldFont=STHeiti]{STXihei}
\setCJKmonofont{STFangsong}
\XeTeXlinebreaklocale "zh"
\XeTeXlinebreakskip = 0pt plus 1pt
\parindent 2em
\definecolor{VerbatimColor}{rgb}{0.95,0.95,0.95}
\setcounter{tocdepth}{3}
\renewcommand\familydefault{\ttdefault}
\renewcommand\CJKfamilydefault{\CJKrmdefault}
'''
}
|
python
|
from sqlalchemy import Column, Integer, String, DECIMAL, ForeignKey, Date, Time, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
Base = declarative_base()
class TimeEntry(Base):
__tablename__ = "time_entries"
time_entry_id = Column(Integer, primary_key=True)
duration = Column(DECIMAL(precision=18, scale=2))
comment = Column(String(length=255))
user_id = Column(Integer, ForeignKey('users.user_id'), index=True)
project_id = Column(Integer, ForeignKey('projects.project_id'), index=True)
user = relationship('User', back_populates='user_time_entries', lazy='joined')
project = relationship('Project', back_populates='project_time_entries')
report_date = Column(Date)
class Project(Base):
__tablename__ = "projects"
project_id = Column(Integer, primary_key=True)
name = Column(String(length=100))
project_users = relationship('UserProject', back_populates='project')
project_time_entries = relationship('TimeEntry', back_populates='project')
class User(Base):
__tablename__ = "users"
user_id = Column(Integer, primary_key=True)
username = Column(String(length=100), unique=True, index=True)
slack_user_id = Column(String(length=100), unique=True)
first_name = Column(String(length=100))
last_name = Column(String(length=100))
password = Column(String(length=80))
role_id = Column(Integer, ForeignKey('user_roles.user_role_id'), index=True)
role = relationship("UserRole", back_populates="users_in_role")
user_projects = relationship("UserProject", back_populates="user")
user_time_entries = relationship('TimeEntry', back_populates='user')
vacations = relationship('Vacation', back_populates='user')
remind_time_monday = Column(Time, nullable=True)
remind_time_tuesday = Column(Time, nullable=True)
remind_time_wednesday = Column(Time, nullable=True)
remind_time_thursday = Column(Time, nullable=True)
remind_time_friday = Column(Time, nullable=True)
remind_time_saturday = Column(Time, nullable=True)
remind_time_sunday = Column(Time, nullable=True)
phone = Column(String(length=15))
def __repr__(self):
return "User(id={},name={})".format(self.user_id, self.username)
class UserRole(Base):
__tablename__ = "user_roles"
user_role_id = Column(Integer, primary_key=True)
role = Column(String(length=100))
users_in_role = relationship(
"User", order_by=User.user_id, back_populates='role')
class UserProject(Base):
__tablename__ = "user_projects"
user_project_id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('users.user_id'))
project_id = Column(Integer, ForeignKey('projects.project_id'))
user = relationship('User', back_populates='user_projects')
project = relationship('Project', back_populates='project_users')
class Token(Base):
""" Access or refresh token
Because of our current grant flow, we are able to associate tokens
with the users who are requesting them. This can be used to track usage
and potential abuse. Only bearer tokens currently supported.
"""
__tablename__ = "tokens"
id = Column(Integer, primary_key=True)
token = Column(String(255), unique=True)
refresh_token = Column(String(255), unique=True, index=True)
token_uri = Column(String(255))
client_id = Column(String(255), nullable=False, unique=True, index=True)
client_secret = Column(String(255))
scopes = Column(String(4096))
class Vacation(Base):
__tablename__ = "vacations"
vacation_id = Column(Integer, primary_key=True)
start_date = Column(Date, nullable=False)
end_date = Column(Date, nullable=False)
event_id = Column(String(255), nullable=True)
user_id = Column(Integer, ForeignKey('users.user_id'), nullable=False)
user = relationship('User', back_populates='vacations')
class FoodOrder(Base):
__tablename__ = "food_order"
food_order_id = Column(Integer, primary_key=True)
order_date = Column(Date, nullable=False)
ordering_user_id = Column(Integer, ForeignKey('users.user_id'))
link = Column(String(length=512))
reminder = Column(String(length=32))
channel_name = Column(String(length=24), nullable=False)
class FoodOrderItem(Base):
__tablename__ = "food_order_item"
food_order_item_id = Column(Integer, primary_key=True)
food_order_id = Column(Integer, ForeignKey('food_order.food_order_id'))
eating_user_id = Column(Integer, ForeignKey('users.user_id'))
description = Column(String(length=255))
cost = Column(DECIMAL(precision=18, scale=2), nullable=False)
paid = Column(Boolean, nullable=False)
surrender = Column(Boolean, nullable=False)
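# --- Hypothetical usage sketch (not part of the original schema module) ---
# Exercises the declarative models against a throwaway in-memory SQLite
# database; the real application presumably configures its own engine.
if __name__ == '__main__':
    from datetime import date
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine('sqlite:///:memory:')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    user = User(username='jdoe', first_name='John', last_name='Doe')
    project = Project(name='internal')
    entry = TimeEntry(duration=7.5, comment='standup + coding',
                      user=user, project=project, report_date=date.today())
    session.add_all([user, project, entry])
    session.commit()
    print(session.query(TimeEntry).first().user)  # User(id=1,name=jdoe)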
|
python
|
import json
import logging
import re
import sys
import urllib.parse
import requests
logger = logging.getLogger(__name__)
URL = sys.argv[1]
NETLOC = urllib.parse.urlparse(URL)[1]
ALLOWED_MAP = {
# if any of these strings is in url, error is allowed
405: ["/xmlrpc.php", "wp-comments-post.php"],
400: ["?rest_route="],
}
def parse_urls(body: str):
# todo handle all urls, not only these starting with http
return [x[:-1] for x in re.findall(r"http[s]?://.*?[\'\"]", body)]
def is_parsable(url: str):
parsed = urllib.parse.urlparse(url)
if parsed[1] == NETLOC:
logging.debug(f"Parsable url {url}")
return True
logging.debug(f"Not parsable {url}")
return False
def get_body(url: str) -> (int, str):
resp = requests.get(url, allow_redirects=True)
try:
if any(x in url for x in ["js", ".css", ".png", ".jpg", ".jpeg", ".gif", ".ico"]):
return resp.status_code, ""
body = resp.content.decode()
return resp.status_code, body
except Exception as exc:
# parsing this body failed
logger.exception(exc)
return 0, exc.__repr__()
def main():
SUCCESS = {}
ERRORS = {}
ERRORS_ALLOWED = {}
ERRORS_BODY = {}
PARSED = {}
TODO = []
ALL = []
homepage_status, homepage = get_body(URL)
if homepage_status != 200: raise Exception(URL, homepage_status)
TODO.extend(parse_urls(homepage))
if not TODO:
raise Exception("TODO is empty! Fuck!")
while TODO:
for url in TODO.copy():
TODO.pop(0)
if is_parsable(url) and not PARSED.get(url):
status, body = get_body(url)
if status == 0:
ERRORS_BODY[url] = body
urls = parse_urls(body)
ALL.extend(urls)
TODO.extend(urls)
PARSED[url] = status
for url, status in PARSED.items():
if status == 200:
SUCCESS[url] = status
continue
elif rules := ALLOWED_MAP.get(status):
if any(r in url for r in rules):
ERRORS_ALLOWED[url] = status
continue
else:
ERRORS[url] = status
print("PARSED")
print(json.dumps(PARSED, indent=4))
print("ERRORS_ALLOWED")
print(json.dumps(ERRORS_ALLOWED, indent=4))
print("ERRORS_BODY")
print(json.dumps(ERRORS_BODY, indent=4))
print("ERRORS")
print(json.dumps(ERRORS, indent=4))
if ERRORS:
logging.error("Errors! Fuck!")
sys.exit(1)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()
|
python
|
import argparse
import time
from geopy.geocoders import Nominatim
app = Nominatim(user_agent="pycoordinates")
def get_location(latitude, longitude, verbose):
'''Takes in a latitude and longitude and returns the commensurate location.
:param latitude: the latitude
:param longitude: the longitude
:type latitude: float
:type longitude: float
:returns: the city that is found at the given coordinates
:rtype: str
    :raises ValueError: raises a ValueError if the latitude and/or longitude are not valid coordinates
'''
coordinates = f"{latitude}, {longitude}"
try:
location_info = app.reverse(coordinates, language='en').raw
except ValueError:
return "Invalid coordinates entered"
location = ''
if verbose:
location = location + f'City: {location_info["address"]["city"]}\n'
if location_info["address"]['country'] == 'United States':
location = location + f'State: {location_info["address"]["state"]}\n'
location = location + f'Country: {location_info["address"]["country"]}\n'
else:
location = location + f'{location_info["address"]["city"]}, '
if location_info["address"]['country'] == 'United States':
location = location + f'{location_info["address"]["state"]}, '
location = location + f'{location_info["address"]["country"]}'
# Nominatim only allows 1 request per second
time.sleep(1)
return location
def get_coordinates(location, verbose):
'''Takes in a location and returns its coordinates.
:param location: the location that you want to find the coordinates of
:type location: str
:returns: the coordinates of the given location
:rtype: str
:raises AttributeError: raises an AttributeError if the given location is not a valid location
'''
try:
location_info = app.geocode(location, language='en').raw
except AttributeError:
return "Invalid location entered"
latitude = float(location_info["lat"])
longitude = float(location_info["lon"])
coordinates = ''
if verbose:
coordinates = coordinates + f'Location: {location}\n'
coordinates = coordinates + f'Latitude: {latitude:.2f}\n'
coordinates = coordinates + f'Longitude: {longitude:.2f}'
else:
coordinates = f'{location}: {latitude:.2f}, {longitude:.2f}'
# Nominatim only allows 1 request per second
time.sleep(1)
return coordinates
def parse_commands():
'''Command-line interface driver.'''
parser = argparse.ArgumentParser(prog='pycoordinates', description="Provides information \
on given coordinates and returns coordinates of given location.")
parser.add_argument('--coordinates', action='store', nargs=2, help='Latitude and longitude')
parser.add_argument('--location', action='store', nargs=1, help='Name of the location')
parser.add_argument('--verbose', action='store_true', help="Produces a more verbose output")
arguments = parser.parse_args()
if arguments.coordinates:
latitude = arguments.coordinates[0]
longitude = arguments.coordinates[1]
print(get_location(latitude, longitude, arguments.verbose))
if arguments.location:
print(get_coordinates(arguments.location[0], arguments.verbose))
if __name__ == '__main__':
parse_commands()
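# Example invocations (hypothetical script name):
#   python pycoordinates.py --coordinates 48.8566 2.3522
#   python pycoordinates.py --location "Paris" --verbose
# Each call performs one Nominatim request and sleeps for a second to respect
# the rate limit mentioned above.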
|
python
|
"""
This module consists of functions to calculate the [equivalent latitude](https://journals.ametsoc.org/doi/citedby/10.1175/1520-0469%282003%29060%3C0287%3ATELADT%3E2.0.CO%3B2) and edge of a polar vortex using [Nash criteria](https://agupubs.onlinelibrary.wiley.com/doi/10.1029/96JD00066).
### Installation
```
pip install -U pyvortex
```
Or install the latest version directly from GitHub using
```
pip install git+https://github.com/pankajkarman/pyvortex.git
```
### Usage
`pyvortex` is easy to use. Just import:
```python
import pyvortex as vr
```
#### Northern Hemisphere
Instantiate the `PolarVortex` class using:
```python
pol = PolarVortex(pv, uwind)
```
Get the equivalent latitude for the provided vorticity data as:
```python
eql = pol.get_eql()
```
If you want to get both equivalent latitude and Vortex edge, just use:
```python
eql = pol.get_edge(min_eql=30)
```
#### Southern Hemisphere
Flip pv and uwind along latitude dimension and multiply pv by -1. All other things will be the same.
"""
from .pyvortex import PolarVortex
|
python
|
'''
Problem 9
25 January 2002
A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
a^2 + b^2 = c^2
For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
There exists exactly one Pythagorean triplet for which a + b + c = 1000.
Find the product abc.
----------------------------------------------------------
Created on 26.01.2012
@author: ahallmann
'''
import unittest
import timeit
import math
def is_pythagorean_triplet(a,b,c):
return int(a*a) + int(b*b) == int(c*c)
def solve(tripletSum):
for a in range(1, 1000):
for b in range(1, 1000):
c = math.sqrt(a*a + b*b)
#print "a = " + str(a) + " b = " + str(b) + " c = " + str(c)
if a+b+c == tripletSum:
return int(a*b*c)
return None
class Test(unittest.TestCase):
def testSimple0(self):
self.assertTrue(is_pythagorean_triplet(1,1,math.sqrt(2)))
def testSimple1(self):
self.assertTrue(is_pythagorean_triplet(3,4,5))
def testSimple2(self):
self.assertFalse(is_pythagorean_triplet(3,4,6))
def test_answer(self):
self.assertEqual(31875000, solve(1000))
# -----------------------------------------
def run():
return solve(1000)
if __name__ == '__main__':
    unittest.main(exit=False)
    t = timeit.Timer("run()", "from __main__ import run")
    count = 1
    print(str(t.timeit(count)) + " seconds for " + str(count) + " runs")
|
python
|
# from django_filters import Filter
# Add your filters here
|
python
|
import shap
import numpy as np
import cv2
class BetterImageMasker(shap.maskers.Image):
def __call__(self, mask, x):
if np.prod(x.shape) != np.prod(self.input_shape):
raise Exception("The length of the image to be masked must match the shape given in the " + \
"ImageMasker contructor: "+" * ".join([str(i) for i in x.shape])+ \
" != "+" * ".join([str(i) for i in self.input_shape]))
# unwrap single element lists (which are how single input models look in multi-input format)
if isinstance(x, list) and len(x) == 1:
x = x[0]
        # we preserve flattened inputs as flattened and full-shaped inputs in their original shape
in_shape = x.shape
if len(x.shape) > 1:
x = x.ravel()
# if mask is not given then we mask the whole image
if mask is None:
            mask = np.zeros(np.prod(x.shape), dtype=bool)
if isinstance(self.mask_value, str):
if self.blur_kernel is not None:
if self.last_xid != id(x):
self._blur_value_cache = cv2.blur(x.reshape(self.input_shape), self.blur_kernel).ravel()
self.last_xid = id(x)
out = x.copy()
out[~mask] = self._blur_value_cache[~mask]
elif self.mask_value == "inpaint_telea":
out = self.inpaint(x, ~mask, "INPAINT_TELEA")
elif self.mask_value == "inpaint_ns":
out = self.inpaint(x, ~mask, "INPAINT_NS")
else:
out = x.copy()
out[~mask.flatten()] = self.mask_value[~mask.flatten()]
return (out.reshape(1, *in_shape),)
|
python
|
# Generated by Django 2.1.5 on 2019-01-05 14:44
import django.contrib.postgres.fields
import django.core.validators
from django.db import migrations, models
import reader.validators
class Migration(migrations.Migration):
dependencies = [
('reader', '0011_auto_20181001_1853'),
]
operations = [
migrations.AlterField(
model_name='article',
name='uri',
field=models.URLField(blank=True, max_length=2048),
),
migrations.AlterField(
model_name='board',
name='name',
field=models.CharField(max_length=100, verbose_name='name'),
),
migrations.AlterField(
model_name='board',
name='tags',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=40), blank=True, default=list, size=100, verbose_name='tags'),
),
migrations.AlterField(
model_name='feed',
name='name',
field=models.CharField(max_length=100, verbose_name='name'),
),
migrations.AlterField(
model_name='feed',
name='uri',
field=models.URLField(max_length=2048, unique=True, validators=[django.core.validators.URLValidator(schemes=['http', 'https']), reader.validators.http_port_validator], verbose_name='Feed address'),
),
]
|
python
|
# -*- coding: utf-8 -*-
import numpy as np
from ipywidgets import interact
from PIL import Image
# [Preparation]
# - check installation of jupyter and ipywidgets with
# `pip list | grep ipywidgets`
# - make the following jupyter extension enable
# jupyter nbextension enable --py widgetsnbextension --sys-prefix
def display_image_batch(batch,order_bgr=False, order_nchw=False, global_norm=False):
# batch.shape = (N,C,H,W)
N = len(batch)
min_values = np.zeros(N, dtype=np.float32)
max_values = np.ones(N, dtype=np.float32) * 255
normalize = False
    if isinstance(batch, np.ndarray) and np.issubdtype(batch.dtype, np.floating):
if global_norm:
min_values[:] = batch.min()
max_values[:] = batch.max()
else:
min_values[:] = np.min(batch.reshape(N,-1), axis=1)
max_values[:] = np.max(batch.reshape(N,-1), axis=1)
normalize = True
def display_image(idx):
img = batch[idx].copy()
if normalize:
min_value = min_values[idx]
max_value = max_values[idx]
if max_value > min_value:
img = np.clip(255.0/(max_value-min_value) * (img-min_value),0,255).astype(np.uint8)
else:
img = np.clip(255.0*(img-min_value),0,255).astype(np.uint8)
if img.ndim == 3:
if order_nchw:
# img.shape = [C,H,W]
img = img.transpose(1,2,0)
if img.shape[2] == 3 and order_bgr:
img[...,[0,1,2]] = img[...,[2,1,0]]
if img.shape[2] == 1:
img = img[...,0] # convert [H,W,1] to [H,W]
return Image.fromarray(img)
    interact(display_image, idx=(0, N - 1, 1))
#def switch_pylab_notebook():
# %pylab notebook
# %pylab notebook # I don't know why but execution twice is fine for system
#def switch_pylab_inline():
# %pylab inline
|
python
|
i = 0
classe = []
continuar = ''
while True:
    nome = str(input('Name: ').strip())
    nota1 = float(input('Grade 1: ').strip())
    nota2 = float(input('Grade 2: ').strip())
    classe.append([i, nome, nota1, nota2, (nota1 + nota2) / 2])
    while True:
        continuar = str(input('Do you want to continue? [Y/N] ').strip()[0])
        if continuar not in 'YyNn':
            print('\033[31mInvalid value entered!\033[m')
        else:
            break
    if continuar in 'Nn':
        break
    i += 1
print('-=' * 30)
print(f'{"No.":<4}{"NAME":<10}{"AVERAGE":>8}')
print('-' * 26)
for c in classe:
    print(f'{c[0]:<4}{c[1]:<10}{c[4]:>8.1f}')
print('-' * 40)
while True:
    continuar = str(input('Show the grades of which student? (999 stops): '))
    if '999' in continuar:
        print('FINISHING...')
        break
    for c in classe:
        if int(continuar) == c[0]:
            print(f'The grades of {c[1]} are [{c[2]}, {c[3]}]')
print(' <<< COME BACK ANYTIME >>> ')
|
python
|
# Individual image generator
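# Usage sketch (inferred from the sys.argv parsing below; the actual script
# name is not given here):
#   python generate_single_question.py <hw_number> <question_number> <hw_dir>
# where <hw_dir> must contain a body.tex file, which is rewritten in place so
# that it renders only the requested question.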
import sys
HW_NUMBER = int(sys.argv[1])
Q_NUMBER = int(sys.argv[2])
HW_PATH = sys.argv[3] + "/body.tex"
f = open(HW_PATH, 'r')
content = [n for n in f.readlines() if (not n.startswith('%') and not n.startswith(r'\sol') and n.strip())]
def index_containing_substring(the_list, substring):
for i, s in enumerate(the_list):
if substring in s:
return i
return -1
index_of_beginqunlist = index_containing_substring(content, 'begin{qunlist}')
index_of_endqunlist = index_containing_substring(content, 'end{qunlist}')
index_of_maketitle = index_containing_substring(content, 'maketitle')
index_of_enddoc = index_containing_substring(content, 'end{document}')
new_content = []
for i in range(index_of_beginqunlist):
if i < index_of_maketitle:
new_content.append(content[i])
# There may be extra empty lines, comments, etc in the qunlist section.
# So we'll find the Q'th \input{q_ line.
index_of_q_number = -1 # In theory it is (index_of_beginqunlist + Q_NUMBER)
current_q = 0
for i in range(index_of_beginqunlist+1, index_of_endqunlist):
if "\input{q_" in content[i]:
current_q += 1
if current_q == Q_NUMBER:
index_of_q_number = i
break
assert index_of_q_number != -1, "Can't find the given question number"
new_content.append(r'\def\qcontributor#1{}')  # Hack to disable contributor list from generating a large footer at the bottom
new_content.append('\\pagestyle{empty}') # <-- doesn't seem to do anything/work?
new_content.append(content[index_of_beginqunlist])
new_content.append(r'\setcounter{sparectr}{' + str(Q_NUMBER - 1) + '}')
new_content.append(content[index_of_q_number])
new_content.append(content[index_of_endqunlist])
new_content.append(content[index_of_enddoc])
with open(HW_PATH, 'w+') as f2:
for line in new_content:
f2.write(line + "\n")
|
python
|
from typing import Sequence, Union, Optional
from typing_extensions import Literal
from .transforms import CriteriaFn
from .deform import deform_image_random
from ..transforms import transforms
class TransformRandomDeformation(transforms.TransformBatchWithCriteria):
"""
Transform an image using a random deformation field.
Only 2D or 3D supported transformation.
The gradient can be back-propagated through this transform.
"""
def __init__(
self,
control_points: Union[int, Sequence[int]] = 6,
max_displacement: Optional[Union[float, Sequence[float]]] = 0.5,
criteria_fn: Optional[CriteriaFn] = None,
interpolation: Literal['linear', 'nearest'] = 'linear',
padding_mode: Literal['zeros', 'border', 'reflection'] = 'zeros',
gaussian_filter_sigma: Optional[float] = 1.5,
align_corners: bool = False):
"""
Args:
control_points: the control points spread on the image at regularly
spaced intervals with random `max_displacement` magnitude
max_displacement: specify the maximum displacement of a control point. Range [-1..1]. If None, use
the moving volume shape and number of control points to calculate appropriate small deformation
field
interpolation: the interpolation of the image with displacement field
padding_mode: how to handle data outside the volume geometry
align_corners: should be False. The (0, 0) is the center of a voxel
gaussian_filter_sigma: if not None, smooth the deformation field using a gaussian filter.
The smoothing is done in the control point space
criteria_fn: a function to select applicable features in a batch
"""
self.interpolation = interpolation
self.align_corners = align_corners
self.max_displacement = max_displacement
self.control_points = control_points
self.padding_mode = padding_mode
self.gaussian_filter_sigma = gaussian_filter_sigma
if criteria_fn is None:
criteria_fn = transforms.criteria_is_array_4_or_above
self.criteria_fn = criteria_fn
super().__init__(
criteria_fn=criteria_fn,
transform_fn=self._transform
)
def _transform(self, features_names, batch):
data_shape = batch[features_names[0]].shape
data_dim = len(data_shape) - 2 # remove `N` and `C` components
assert data_dim == 2 or data_dim == 3, f'only 2D or 3D data handled. Got={data_dim}'
for name in features_names[1:]:
# make sure the data is correct: we must have the same dimensions (except `C`)
# for all the images
feature = batch[name]
feature_shape = feature.shape[2:]
assert feature_shape == data_shape[2:], f'joint features transformed must have the same dimension. ' \
f'Got={feature_shape}, expected={data_shape[2:]}'
assert feature.shape[0] == data_shape[0]
images = [batch[name] for name in features_names]
deformed_images = deform_image_random(
images,
control_points=self.control_points,
max_displacement=self.max_displacement,
interpolation=self.interpolation,
padding_mode=self.padding_mode,
align_corners=self.align_corners,
gaussian_filter_sigma=self.gaussian_filter_sigma
)
# copy features that are not images
new_batch = {name: value for name, value in zip(features_names, deformed_images)}
for name, value in batch.items():
if name not in new_batch:
new_batch[name] = value
return new_batch
|
python
|
import stk
from .utilities import is_equivalent_atom
def test_repr(atom):
"""
Test :meth:`.Atom.__repr__`.
Parameters
----------
atom : :class:`.Atom`
The atom, whose representation should be tested.
Returns
-------
None : :class:`NoneType`
"""
other = eval(repr(atom), dict(stk.__dict__))
is_equivalent_atom(other, atom)
|
python
|
#####################################################################################
# Manager class of Meal which deals with Meal saving / loading / setting / deleting #
#####################################################################################
from Manager import Manager
from Models.Meal import Meal
import mysql.connector as mariadb
import pymysql
import sys
class MealManager(Manager):
def __init__(self, usr="toor", psswd="toor"):
self.table = "Meal"
Manager.__init__(self, self.table, usr, psswd)
def db_create(self, name):
"""
Create a meal in the database from a name
:param name : the name of the meal
:return: the Meal object if successfully created else, False
"""
connect = self.get_connector()
cursor = connect.cursor(prepared=True)
try:
cursor.execute("INSERT INTO `{}` (name_meal) VALUES (?)".format(self.table), (name,))
connect.commit()
except mariadb.errors.IntegrityError:
sys.stderr.write("The meal name {} may already exist.".format(name))
return False
except mariadb.Error:
sys.stderr.write("An error occurred with the meal creating.")
return False
id = self.get_current_id() - 1
connect.close()
return Meal(id, name)
def db_create_from_obj(self, meal):
"""
        Create a meal in the database from a Meal object
        :param meal : the Meal object to create in the database
:return: True if success else False
"""
self.check_managed(meal)
connect = self.get_connector()
cursor = connect.cursor(prepared=True)
try:
cursor.execute("INSERT INTO `{}` (id_meal, name_meal) VALUES (?, ?)".format(self.table),
(meal.get_id_meal(), meal.get_name_meal()))
connect.commit()
connect.close()
except mariadb.errors.IntegrityError:
sys.stderr.write("The meal name {} or the meal id {} may already exist.".format(meal.get_name(), str(meal.get_id())))
return False
except mariadb.Error:
sys.stderr.write("An error occurred with the meal creating.")
return False
return True
def db_delete(self, id=None, name=None):
"""
Delete a meal by its name or its id from the database (soft delete)
:param ? id : the id of the meal to delete
:param ? name : the name of the meal to delete
:return: False if no parameters given or if an error occurs else True
"""
if name is None and id is None:
sys.stderr.write("No name or id mentioned.")
return False
else:
try:
connect = self.get_connector()
cursor = connect.cursor(prepared=True)
if id is not None:
cursor.execute("UPDATE `{}` SET deleted = 1 WHERE id_meal = %s".format(self.table), (id,))
else:
cursor.execute("UPDATE `{}` SET deleted = 1 WHERE name_meal = %s".format(self.table), (name,))
connect.commit()
connect.close()
except mariadb.Error:
sys.stderr.write("An error occurred with the meal deleting.")
return False
return True
def db_save(self, meal):
"""
Save a Meal object into database
:param meal : the object to save
:return: False an error occurred else True
"""
self.check_managed(meal)
try:
connect = self.get_connector()
cursor = connect.cursor()
cursor.execute('UPDATE `{}` SET `name_meal` = "{}" WHERE `id_meal` = "{}"'.format(self.table, meal.get_name_meal(), str(meal.get_id_meal())))
connect.commit()
connect.close()
except mariadb.Error:
sys.stderr.write("An error occured with the meal saving.")
return False
return True
def db_load(self, id=None, name=None):
"""
From an id or a name, load a Meal object from the database
:param id : the id of the meal to load
:param name : the name of the meal to load
:return: the Meal object loaded, None if not in database
"""
if name is None and id is None:
sys.stderr.write("No name or id mentioned.")
return False
else:
connect = self.get_connector()
cursor = connect.cursor(dictionary=True)
if id is not None:
cursor.execute("SELECT Meal.id_meal, Meal.name_meal, Ingredient.id_ingredient, Ingredient.name_ingredient, "
"Recipe.quantity, Meal.deleted FROM `{}` INNER JOIN Recipe ON Meal.id_meal = Recipe.id_meal INNER JOIN "
"Ingredient ON Recipe.id_ingredient = Ingredient.id_ingredient WHERE Meal.id_meal = {} "
"AND Meal.deleted = 0".format(self.table, pymysql.escape_string(str(id))))
else:
cursor.execute("SELECT Meal.id_meal, Meal.name_meal, Recipe.id_ingredient, Ingredient.name_ingredient, "
"Recipe.quantity, Meal.deleted FROM `{}` INNER JOIN Recipe ON Meal.id_meal = Recipe.id_meal INNER JOIN "
"Ingredient ON Recipe.id_ingredient = Ingredient.id_ingredient WHERE Meal.name_meal = {} "
"AND Meal.deleted = 0".format(self.table, pymysql.escape_string(name)))
answ = cursor.fetchall()
connect.close()
return Meal().init(answ) if answ else None
def get_listview_info(self):
"""
Returns all the information from Meal database (deleted = 0) formatted to display on ListView widget (id, name)
:return: answ : The result of the query
"""
connect = self.get_connector()
cursor = connect.cursor()
cursor.execute('SELECT id_meal, name_meal FROM {} WHERE Meal.deleted = 0'.format(self.table))
answ = cursor.fetchall()
connect.close()
return answ
def get_current_id(self):
"""
        Returns the current id, useful to create associated objects in conformity with the database values
and constraints
:return: the current assignable id
"""
connect = self.get_connector()
cursor = connect.cursor()
cursor.execute('SELECT MAX(id_meal) FROM {}'.format(self.table))
        answ = cursor.fetchall()
        connect.close()
        return int(answ[0][0]) + 1
@staticmethod
def check_managed(item):
"""
Check if the parameter is from the type of the managed item, if not raise ValueError
:param item : the item to verify
"""
if not isinstance(item, Meal):
raise ValueError('The parameter must be a Meal instance.')
|
python
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
def gpa_cal(scores: str, base: int = 10) -> float:
    """Return the Grade Point Average as a float for any base, defaulting to 10.
    A- and B+ are treated as equivalent, although this behaviour can be modified."""
gpa_final = 0.0
scores = scores.upper() # To allow for mistypes in scores entered
gpa_final += scores.count('+') # increments score by 1 for each plus
gpa_final -= scores.count('-') # decrements score by 1 for each minus
# Remove + and minus from string
scores = "".join(scores.split('+')) # Splits across + and joins scores string again
scores = "".join(scores.split('-')) # Splits across - and joins scores string again
if len(scores) == 0:
raise ValueError("Invalid Grades entered as input")
grading = {} # dict stores value alloted to each grade
for value in range(0,base):
grading[chr(65+value)] = base-value-1
for grade in scores:
try:
gpa_final += grading[grade]
except KeyError as e:
raise KeyError("Incorrect Symbol entered {!r}".format(e))
gpa_final = gpa_final/len(scores)
print("if you want to convert GPA from 10 scale to scale of 3 or 4 or 5 - Press 1 else Press 0")
inp = int(input())
if inp==1:
print("To convert base convert GPA from 10 scale to scale of 3 - Press 3")
print("To convert base convert GPA from 10 scale to scale of 4 - Press 4")
print("To convert base convert GPA from 10 scale to scale of 5 - Press 5")
scaling=int(input())
if scaling==3:
gpa_final = (gpa_final/10)*3
elif scaling==4:
gpa_final = (gpa_final/10)*4
else:
gpa_final = (gpa_final/10)*5
return gpa_final
if __name__ == "__main__":
print(gpa_cal('AAA+BCADE',10)) # Test case in issue
# In[ ]:
|
python
|
from docker import DockerClient
from aavm.utils.progress_bar import ProgressBar
from cpk.types import Machine, DockerImageName
ALL_STATUSES = [
"created", "restarting", "running", "removing", "paused", "exited", "dead"
]
STOPPED_STATUSES = [
"created", "exited", "dead"
]
UNSTABLE_STATUSES = [
"restarting", "removing"
]
RUNNING_STATUSES = [
"running", "paused"
]
# noinspection DuplicatedCode
def pull_image(machine: Machine, image: str, progress: bool = True):
client: DockerClient = machine.get_client()
layers = set()
pulled = set()
pbar = ProgressBar() if progress else None
for line in client.api.pull(image, stream=True, decode=True):
if "id" not in line or "status" not in line:
continue
layer_id = line["id"]
layers.add(layer_id)
if line["status"] in ["Already exists", "Pull complete"]:
pulled.add(layer_id)
# update progress bar
if progress:
percentage = max(0.0, min(1.0, len(pulled) / max(1.0, len(layers)))) * 100.0
pbar.update(percentage)
if progress:
pbar.done()
def remove_image(machine: Machine, image: str):
client: DockerClient = machine.get_client()
client.images.remove(image)
def merge_container_configs(*args) -> dict:
out = {}
for arg in args:
assert isinstance(arg, dict)
for k, v in arg.items():
if k not in out:
out[k] = v
else:
if not isinstance(arg[k], type(out[k])):
raise ValueError(f"Type clash '{type(out[k])}' !== '{type(arg[k])}' "
f"for key '{k}'.")
if isinstance(out[k], list):
out[k].extend(arg[k])
elif isinstance(out[k], dict):
out[k].update(arg[k])
else:
out[k] = arg[k]
return out
def sanitize_image_name(image: str) -> str:
return DockerImageName.from_image_name(image).compile(allow_defaults=True)
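# --- Illustrative sketch (not part of the original utilities) ---
# merge_container_configs() extends list values and updates dict values key
# by key, while later arguments win for scalar keys. Running this standalone
# assumes the aavm/cpk dependencies imported above are installed.
if __name__ == '__main__':
    merged = merge_container_configs(
        {"environment": ["A=1"], "labels": {"a": "1"}, "detach": False},
        {"environment": ["B=2"], "labels": {"b": "2"}, "detach": True},
    )
    print(merged)
    # {'environment': ['A=1', 'B=2'], 'labels': {'a': '1', 'b': '2'}, 'detach': True}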
|
python
|
"""Hyperopt templates for different models"""
# forked from hyperopt/hyperopt-sklearn
from functools import partial
import numpy as np
from hyperopt import hp
from hyperopt.pyll import scope
import sklearn.discriminant_analysis
import sklearn.ensemble
import sklearn.feature_extraction.text
import sklearn.preprocessing
import sklearn.svm
import sklearn.tree
# Optional dependencies
try:
import xgboost
except ImportError:
xgboost = None
def default_name_func(name):
return name
##############################
##==== Global variables ====##
##############################
_svm_default_cache_size = 512
###############################################
##==== Various hyperparameter generators ====##
###############################################
def hp_bool(name):
return hp.choice(name, [False, True])
def _svm_gamma(name, n_features=1):
'''Generator of default gamma values for SVMs.
This setting is based on the following rationales:
1. The gamma hyperparameter is basically an amplifier for the
original dot product or l2 norm.
2. The original dot product or l2 norm shall be normalized by
the number of features first.
'''
# -- making these non-conditional variables
# probably helps the GP algorithm generalize
# assert n_features >= 1
return hp.loguniform(name,
np.log(1. / n_features * 1e-3),
np.log(1. / n_features * 1e3))
def _svm_degree(name):
return hp.quniform(name, 1.5, 6.5, 1)
def _svm_max_iter(name):
return hp.qloguniform(name, np.log(1e7), np.log(1e9), 1)
def _svm_C(name):
return hp.loguniform(name, np.log(1e-5), np.log(1e5))
def _svm_tol(name):
return hp.loguniform(name, np.log(1e-5), np.log(1e-2))
def _svm_int_scaling(name):
return hp.loguniform(name, np.log(1e-1), np.log(1e1))
def _svm_epsilon(name):
return hp.loguniform(name, np.log(1e-3), np.log(1e3))
def _svm_loss_penalty_dual(name):
"""
    The combination of penalty='l1' and loss='hinge' is not supported.
    penalty='l2' with loss='hinge' is only supported when dual=True;
    penalty='l1' is only supported when dual=False.
"""
return hp.choice(
name, [('hinge', 'l2', True), ('squared_hinge', 'l2', True),
('squared_hinge', 'l1', False), ('squared_hinge', 'l2', False)])
def _knn_metric_p(name, sparse_data=False, metric=None, p=None):
if sparse_data:
return ('euclidean', 2)
elif metric == 'euclidean':
return (metric, 2)
elif metric == 'manhattan':
return (metric, 1)
elif metric == 'chebyshev':
return (metric, 0)
elif metric == 'minkowski':
assert p is not None
return (metric, p)
elif metric is None:
return hp.pchoice(name, [
(0.55, ('euclidean', 2)),
(0.15, ('manhattan', 1)),
(0.15, ('chebyshev', 0)),
(0.15, ('minkowski', _knn_p(name + '.p'))),
])
else:
return (metric, p) # undefined, simply return user input.
def _knn_p(name):
return hp.quniform(name, 2.5, 5.5, 1)
def _knn_neighbors(name):
return scope.int(hp.qloguniform(name, np.log(0.5), np.log(50.5), 1))
def _knn_weights(name):
return hp.choice(name, ['uniform', 'distance'])
def _trees_n_estimators(name):
return scope.int(hp.qloguniform(name, np.log(9.5), np.log(3000.5), 1))
def _trees_criterion(name):
return hp.choice(name, ['gini', 'entropy'])
def _trees_max_features(name):
return hp.pchoice(
name,
[
(0.2, 'sqrt'), # most common choice.
(0.1, 'log2'), # less common choice.
(0.1, None), # all features, less common choice.
(0.6, hp.uniform(name + '.frac', 0., 1.))
])
def _trees_max_depth(name):
return hp.pchoice(
name,
[
(0.7, None), # most common choice.
# Try some shallow trees.
(0.1, 2),
(0.1, 3),
(0.1, 4),
])
def _trees_min_samples_split(name):
return 2
def _trees_min_samples_leaf(name):
return hp.choice(
name,
[
1, # most common choice.
scope.int(
hp.qloguniform(name + '.gt1', np.log(1.5), np.log(50.5), 1))
])
def _trees_bootstrap(name):
return hp.choice(name, [True, False])
def _boosting_n_estimators(name):
return scope.int(hp.qloguniform(name, np.log(10.5), np.log(1000.5), 1))
def _ada_boost_learning_rate(name):
return hp.lognormal(name, np.log(0.01), np.log(10.0))
def _ada_boost_loss(name):
return hp.choice(name, ['linear', 'square', 'exponential'])
def _ada_boost_algo(name):
return hp.choice(name, ['SAMME', 'SAMME.R'])
def _grad_boosting_reg_loss_alpha(name):
return hp.choice(name, [('ls', 0.9), ('lad', 0.9),
('huber', hp.uniform(name + '.alpha', 0.85, 0.95)),
('quantile', 0.5)])
def _grad_boosting_clf_loss(name):
return hp.choice(name, ['deviance', 'exponential'])
def _grad_boosting_learning_rate(name):
return hp.lognormal(name, np.log(0.01), np.log(10.0))
def _grad_boosting_subsample(name):
return hp.pchoice(
name,
[
(0.2, 1.0), # default choice.
(0.8, hp.uniform(name + '.sgb', 0.5, 1.0)
) # stochastic grad boosting.
])
def _sgd_penalty(name):
return hp.pchoice(name, [(0.40, 'l2'), (0.35, 'l1'), (0.25, 'elasticnet')])
def _sgd_alpha(name):
return hp.loguniform(name, np.log(1e-6), np.log(1e-1))
def _sgd_l1_ratio(name):
return hp.uniform(name, 0, 1)
def _sgd_epsilon(name):
return hp.loguniform(name, np.log(1e-7), np.log(1))
def _sgdc_learning_rate(name):
return hp.pchoice(name, [(0.50, 'optimal'), (0.25, 'invscaling'),
(0.25, 'constant')])
def _sgdr_learning_rate(name):
return hp.pchoice(name, [(0.50, 'invscaling'), (0.25, 'optimal'),
(0.25, 'constant')])
def _sgd_eta0(name):
return hp.loguniform(name, np.log(1e-5), np.log(1e-1))
def _sgd_power_t(name):
return hp.uniform(name, 0, 1)
def _random_state(name, random_state):
if random_state is None:
return hp.randint(name, 5)
else:
return random_state
def _class_weight(name):
return hp.choice(name, [None, 'balanced'])
##############################################
##==== SVM hyperparameters search space ====##
##############################################
def _svm_hp_space(kernel,
n_features=1,
C=None,
gamma=None,
coef0=None,
degree=None,
shrinking=None,
tol=None,
max_iter=None,
verbose=False,
cache_size=_svm_default_cache_size):
    '''Generate SVM hyperparameters search space
'''
if kernel in ['linear', 'rbf', 'sigmoid']:
degree_ = 1
else:
degree_ = (_svm_degree('degree')
if degree is None else degree)
if kernel in ['linear']:
gamma_ = 'auto'
else:
gamma_ = (_svm_gamma('gamma', n_features=1)
if gamma is None else gamma)
gamma_ /= n_features # make gamma independent of n_features.
if kernel in ['linear', 'rbf']:
coef0_ = 0.0
elif coef0 is None:
if kernel == 'poly':
coef0_ = hp.pchoice(
'coef0',
[(0.3, 0),
(0.7, gamma_ * hp.uniform('coef0val', 0., 10.))])
elif kernel == 'sigmoid':
coef0_ = hp.pchoice(
'coef0',
[(0.3, 0),
(0.7, gamma_ * hp.uniform('coef0val', -10., 10.))])
        else:
            # Unsupported kernels fall back to sklearn's default so that
            # coef0_ is always defined below.
            coef0_ = 0.0
else:
coef0_ = coef0
hp_space = dict(
kernel=kernel,
C=_svm_C('C') if C is None else C,
gamma=gamma_,
coef0=coef0_,
degree=degree_,
shrinking=(hp_bool('shrinking')
if shrinking is None else shrinking),
tol=_svm_tol('tol') if tol is None else tol,
max_iter=(_svm_max_iter('maxiter')
if max_iter is None else max_iter),
verbose=verbose,
cache_size=cache_size)
return hp_space
def _svc_hp_space(random_state=None, probability=False):
    '''Generate SVC specific hyperparameters
'''
hp_space = dict(
random_state=_random_state('rstate', random_state),
probability=probability)
return hp_space
def _svr_hp_space(epsilon=None):
    '''Generate SVR specific hyperparameters
'''
hp_space = {}
hp_space['epsilon'] = (_svm_epsilon('epsilon')
if epsilon is None else epsilon)
return hp_space
#########################################
##==== SVM classifier constructors ====##
#########################################
def svc_kernel_hp_space(kernel,
random_state=None,
probability=False,
**kwargs):
"""
    Return a hyperparameter template that will construct
    a sklearn.svm.SVC model with a user-specified kernel.
    Supported kernels: linear, rbf, poly and sigmoid.
"""
hp_space = _svm_hp_space(kernel=kernel, **kwargs)
hp_space.update(_svc_hp_space(random_state, probability))
return hp_space
########################################
##==== SVM regressor constructors ====##
########################################
def svr_kernel_hp_space(kernel, epsilon=None, **kwargs):
"""
    Return a hyperparameter template that will construct
    a sklearn.svm.SVR model with a user-specified kernel.
    Supported kernels: linear, rbf, poly and sigmoid.
"""
hp_space = _svm_hp_space(kernel=kernel, **kwargs)
hp_space.update(_svr_hp_space(epsilon))
return hp_space
##############################################
##==== KNN hyperparameters search space ====##
##############################################
def knn_hp_space(sparse_data=False,
n_neighbors=None,
weights=None,
algorithm='auto',
leaf_size=30,
metric=None,
p=None,
metric_params=None,
n_jobs=1):
'''Generate KNN hyperparameters search space
'''
metric_p = _knn_metric_p('metric_p', sparse_data, metric, p)
hp_space = dict(
n_neighbors=(_knn_neighbors('neighbors')
if n_neighbors is None else n_neighbors),
weights=(_knn_weights('weights')
if weights is None else weights),
algorithm=algorithm,
leaf_size=leaf_size,
metric=metric_p[0] if metric is None else metric,
p=metric_p[1] if p is None else p,
metric_params=metric_params,
n_jobs=n_jobs)
return hp_space
####################################################################
##==== Random forest/extra trees hyperparameters search space ====##
####################################################################
def trees_hp_space(n_estimators=None,
max_features=None,
max_depth=None,
min_samples_split=None,
min_samples_leaf=None,
bootstrap=None,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=False):
'''Generate trees ensemble hyperparameters search space
'''
hp_space = dict(
n_estimators=(_trees_n_estimators('n_estimators')
if n_estimators is None else n_estimators),
max_features=(_trees_max_features('max_features')
if max_features is None else max_features),
max_depth=(_trees_max_depth('max_depth')
if max_depth is None else max_depth),
min_samples_split=(_trees_min_samples_split(
'min_samples_split') if min_samples_split is None else
min_samples_split),
min_samples_leaf=(_trees_min_samples_leaf(
'min_samples_leaf')
if min_samples_leaf is None else min_samples_leaf),
bootstrap=(_trees_bootstrap('bootstrap')
if bootstrap is None else bootstrap),
oob_score=oob_score,
n_jobs=n_jobs,
random_state=_random_state('rstate', random_state),
verbose=verbose,
)
return hp_space
#############################################################
##==== Random forest classifier/regressor constructors ====##
#############################################################
def random_forest_hp_space(criterion='gini', **kwargs):
""""Return a hyperparameter template for RandomForest model.
Parameters
----------
criterion: str
'gini' or 'entropy' and 'mse' for classification
"""
hp_space = trees_hp_space(**kwargs)
hp_space['criterion'] = criterion
return hp_space
###################################################
##==== AdaBoost hyperparameters search space ====##
###################################################
def ada_boost_hp_space(base_estimator=None,
n_estimators=None,
learning_rate=None,
random_state=None):
'''Generate AdaBoost hyperparameters search space
'''
hp_space = dict(
base_estimator=base_estimator,
n_estimators=(_boosting_n_estimators('n_estimators')
if n_estimators is None else n_estimators),
learning_rate=(_ada_boost_learning_rate('learning_rate')
if learning_rate is None else learning_rate),
random_state=_random_state('rstate', random_state))
return hp_space
###########################################################
##==== GradientBoosting hyperparameters search space ====##
###########################################################
def grad_boosting_hp_space(learning_rate=None,
n_estimators=None,
subsample=None,
min_samples_split=None,
min_samples_leaf=None,
max_depth=None,
init=None,
random_state=None,
max_features=None,
verbose=0,
max_leaf_nodes=None,
warm_start=False,
presort='auto'):
'''Generate GradientBoosting hyperparameters search space
'''
hp_space = dict(
learning_rate=(_grad_boosting_learning_rate('learning_rate')
if learning_rate is None else learning_rate),
n_estimators=(_boosting_n_estimators('n_estimators')
if n_estimators is None else n_estimators),
subsample=(_grad_boosting_subsample('subsample')
if subsample is None else subsample),
min_samples_split=(_trees_min_samples_split('min_samples_split')
if min_samples_split is None else min_samples_split),
min_samples_leaf=(_trees_min_samples_leaf('min_samples_leaf')
if min_samples_leaf is None else min_samples_leaf),
max_depth=(_trees_max_depth('max_depth')
if max_depth is None else max_depth),
init=init,
random_state=_random_state('rstate', random_state),
max_features=(_trees_max_features('max_features')
if max_features is None else max_features),
warm_start=warm_start,
presort=presort)
return hp_space
###################################################
##==== XGBoost hyperparameters search space ====##
###################################################
def _xgboost_max_depth(name):
return scope.int(hp.uniform(name, 1, 11))
def _xgboost_learning_rate(name):
return hp.loguniform(name, np.log(0.0001), np.log(0.5)) - 0.0001
def _xgboost_n_estimators(name):
return scope.int(hp.quniform(name, 100, 6000, 200))
def _xgboost_gamma(name):
return hp.loguniform(name, np.log(0.0001), np.log(5)) - 0.0001
def _xgboost_min_child_weight(name):
return scope.int(hp.loguniform(name, np.log(1), np.log(100)))
def _xgboost_subsample(name):
return hp.uniform(name, 0.5, 1)
def _xgboost_colsample_bytree(name):
return hp.uniform(name, 0.5, 1)
def _xgboost_colsample_bylevel(name):
return hp.uniform(name, 0.5, 1)
def _xgboost_reg_alpha(name):
return hp.loguniform(name, np.log(0.0001), np.log(1)) - 0.0001
def _xgboost_reg_lambda(name):
return hp.loguniform(name, np.log(1), np.log(4))
def xgboost_hp_space(max_depth=None,
learning_rate=None,
n_estimators=None,
gamma=None,
min_child_weight=None,
max_delta_step=0,
subsample=None,
colsample_bytree=None,
colsample_bylevel=None,
reg_alpha=None,
reg_lambda=None,
scale_pos_weight=1,
base_score=0.5,
random_state=None):
'''Generate XGBoost hyperparameters search space
'''
hp_space = dict(
max_depth=(_xgboost_max_depth('max_depth')
if max_depth is None else max_depth),
learning_rate=(_xgboost_learning_rate('learning_rate')
if learning_rate is None else learning_rate),
n_estimators=(_xgboost_n_estimators('n_estimators')
if n_estimators is None else n_estimators),
gamma=(_xgboost_gamma('gamma') if gamma is None else gamma),
min_child_weight=(_xgboost_min_child_weight(
'min_child_weight')
if min_child_weight is None else min_child_weight),
max_delta_step=max_delta_step,
subsample=(_xgboost_subsample('subsample')
if subsample is None else subsample),
colsample_bytree=(_xgboost_colsample_bytree(
'colsample_bytree')
if colsample_bytree is None else colsample_bytree),
colsample_bylevel=(_xgboost_colsample_bylevel(
'colsample_bylevel') if colsample_bylevel is None else
colsample_bylevel),
reg_alpha=(_xgboost_reg_alpha('reg_alpha')
if reg_alpha is None else reg_alpha),
reg_lambda=(_xgboost_reg_lambda('reg_lambda')
if reg_lambda is None else reg_lambda),
scale_pos_weight=scale_pos_weight,
base_score=base_score,
seed=_random_state('rstate', random_state))
return hp_space
#################################################
##==== Naive Bayes classifiers constructor ====##
#################################################
def multinomial_nb_hp_space(class_prior=None):
hp_space = dict(
alpha=hp.quniform('alpha', 0, 1, 0.001),
fit_prior=hp_bool('fit_prior'),
class_prior=class_prior)
return hp_space
###########################################
##==== Passive-aggressive classifier ====##
###########################################
def passive_aggressive_hp_space(loss=None,
C=None,
fit_intercept=False,
n_iter=None,
n_jobs=1,
random_state=None,
verbose=False):
hp_space = dict(
loss=hp.choice('loss', ['hinge', 'squared_hinge'])
if loss is None else loss,
C=hp.lognormal('learning_rate', np.log(0.01), np.log(10))
if C is None else C,
fit_intercept=fit_intercept,
n_iter=scope.int(
hp.qloguniform('n_iter', np.log(1), np.log(1000), q=1))
if n_iter is None else n_iter,
n_jobs=n_jobs,
random_state=_random_state('rstate', random_state),
verbose=verbose)
return hp_space
###############################################
##==== Discriminant analysis classifiers ====##
###############################################
def linear_discriminant_analysis_hp_space(solver=None,
shrinkage=None,
priors=None,
n_components=None,
store_covariance=False,
tol=0.00001):
solver_shrinkage = hp.choice('solver_shrinkage_dual',
[('svd', None), ('lsqr', None),
('lsqr', 'auto'), ('eigen', None),
('eigen', 'auto')])
rval = dict(
solver=solver_shrinkage[0] if solver is None else solver,
shrinkage=solver_shrinkage[1] if shrinkage is None else shrinkage,
priors=priors,
n_components=4 * scope.int(
hp.qloguniform(
'n_components', low=np.log(0.51), high=np.log(30.5), q=1.0))
if n_components is None else n_components,
store_covariance=store_covariance,
tol=tol)
return rval
def quadratic_discriminant_analysis_hp_space(reg_param=None, priors=None):
rval = dict(
reg_param=hp.uniform('reg_param', 0.0, 1.0)
        if reg_param is None else reg_param,
priors=priors)
return rval
###############################################
##==== Various preprocessor constructors ====##
###############################################
def pca_hp_space(n_components=None, whiten=None, copy=True):
rval = dict(
# -- qloguniform is missing a "scale" parameter so we
# lower the "high" parameter and multiply by 4 out front
n_components=4 * scope.int(
hp.qloguniform(
'n_components', low=np.log(0.51), high=np.log(30.5), q=1.0))
if n_components is None else n_components,
# n_components=(hp.uniform(name + '.n_components', 0, 1)
# if n_components is None else n_components),
whiten=hp_bool('whiten') if whiten is None else whiten,
copy=copy,
)
return rval
def standard_scaler(with_mean=None, with_std=None):
rval = dict(
with_mean=hp_bool('with_mean') if with_mean is None else with_mean,
with_std=hp_bool('with_std') if with_std is None else with_std,
)
return rval
def ts_lagselector_hp_space(lower_lags=1, upper_lags=1):
rval = dict(lag_size=scope.int(
hp.quniform('lags', lower_lags - .5, upper_lags + .5, 1)))
return rval
def bernoulli_rbm_hp_space(n_components=None,
learning_rate=None,
batch_size=None,
n_iter=None,
verbose=False,
random_state=None):
rval = dict(
n_components=scope.int(
hp.qloguniform(
'n_components', low=np.log(0.51), high=np.log(999.5), q=1.0))
if n_components is None else n_components,
learning_rate=hp.lognormal(
'learning_rate',
np.log(0.01),
np.log(10),
) if learning_rate is None else learning_rate,
batch_size=scope.int(
hp.qloguniform(
'.batch_size',
np.log(1),
np.log(100),
q=1,
)) if batch_size is None else batch_size,
n_iter=scope.int(
hp.qloguniform(
'n_iter',
np.log(1),
np.log(1000), # -- max sweeps over the *whole* train set
q=1,
)) if n_iter is None else n_iter,
verbose=verbose,
random_state=_random_state('rstate', random_state),
)
return rval
def colkmeans_hp_space(n_clusters=None,
init=None,
n_init=None,
max_iter=None,
tol=None,
precompute_distances=True,
verbose=0,
random_state=None,
copy_x=True,
n_jobs=1):
rval = dict(
n_clusters=scope.int(
hp.qloguniform(
'n_clusters', low=np.log(1.51), high=np.log(19.5), q=1.0))
if n_clusters is None else n_clusters,
init=hp.choice(
'init',
['k-means++', 'random'],
) if init is None else init,
n_init=hp.choice(
'n_init',
[1, 2, 10, 20],
) if n_init is None else n_init,
max_iter=scope.int(
hp.qlognormal(
'max_iter',
np.log(300),
np.log(10),
q=1,
)) if max_iter is None else max_iter,
tol=hp.lognormal(
'tol',
np.log(0.0001),
np.log(10),
) if tol is None else tol,
precompute_distances=precompute_distances,
verbose=verbose,
random_state=random_state,
copy_x=copy_x,
n_jobs=n_jobs,
)
return rval
def lgbm_hp_space(**kwargs):
space = {
'n_estimators': scope.int(hp.quniform('n_estimators', 10, 700, 1)),
        'num_leaves': scope.int(hp.quniform('num_leaves', 10, 200, 1)),
'feature_fraction': hp.uniform('feature_fraction', 0.75, 1.0),
'bagging_fraction': hp.uniform('bagging_fraction', 0.75, 1.0),
'learning_rate': hp.loguniform('learning_rate', -5.0, -2.3),
'max_bin': scope.int(hp.quniform('max_bin', 64, 512, 1)),
'bagging_freq': scope.int(hp.quniform('bagging_freq', 1, 5, 1)),
'lambda_l1': hp.uniform('lambda_l1', 0, 10),
'lambda_l2': hp.uniform('lambda_l2', 0, 10),
**kwargs
}
return space
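# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original template module):
# wires the SVC search space above into hyperopt's fmin with the TPE
# optimizer. It assumes scikit-learn's iris dataset is available and only
# shows how the returned dict plugs into an objective function.
def _example_svc_search(max_evals=10):
    from hyperopt import fmin, tpe, Trials
    from sklearn.datasets import load_iris
    from sklearn.model_selection import cross_val_score

    X, y = load_iris(return_X_y=True)
    # Cap max_iter so the sketch finishes quickly.
    space = svc_kernel_hp_space('rbf', n_features=X.shape[1], max_iter=1000)

    def objective(params):
        model = sklearn.svm.SVC(**params)
        # hyperopt minimizes, so return the negated cross-validated accuracy.
        return -cross_val_score(model, X, y, cv=3).mean()

    return fmin(objective, space, algo=tpe.suggest,
                max_evals=max_evals, trials=Trials())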
# -- flake8 eofk
|
python
|
from unittest import TestCase
from unittest.mock import patch
from app.ingest.infrastructure.mq.publishers.process_ready_queue_publisher import ProcessReadyQueuePublisher
from test.resources.ingest.ingest_factory import create_ingest
class TestProcessReadyQueuePublisher(TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.TEST_INGEST = create_ingest()
cls.TEST_DESTINATION_PATH = "test_path"
@patch("app.common.infrastructure.mq.publishers.stomp_publisher_base.StompPublisherBase._publish_message")
@patch('app.ingest.infrastructure.mq.publishers.process_ready_queue_publisher.os.getenv')
def test_publish_message_happy_path(self, os_getenv_stub, inner_publish_message_mock) -> None:
os_getenv_stub.return_value = self.TEST_DESTINATION_PATH
self.sut = ProcessReadyQueuePublisher()
self.sut.publish_message(self.TEST_INGEST)
inner_publish_message_mock.assert_called_once_with(
{
'package_id': self.TEST_INGEST.package_id,
'destination_path': self.TEST_DESTINATION_PATH,
'application_name': self.TEST_INGEST.depositing_application.value
}
)
|
python
|
from aioredis import Redis, ConnectionPool
from six.moves import xrange
from ._util import to_string
from .auto_complete import SuggestionParser
class AioAutoCompleter(object):
"""
An asyncio client to RediSearch's AutoCompleter API
It provides prefix searches with optionally fuzzy matching of prefixes
"""
SUGADD_COMMAND = "FT.SUGADD"
SUGDEL_COMMAND = "FT.SUGDEL"
SUGLEN_COMMAND = "FT.SUGLEN"
SUGGET_COMMAND = "FT.SUGGET"
INCR = 'INCR'
WITHSCORES = 'WITHSCORES'
FUZZY = 'FUZZY'
WITHPAYLOADS = 'WITHPAYLOADS'
def __init__(self, key, host='localhost', port=6379, conn = None, password=None):
"""
Create a new AioAutoCompleter client for the given key, and optional host and port
If conn is not None, we employ an already existing redis connection
"""
self.key = key
self.redis = conn if conn is not None else Redis(
connection_pool = ConnectionPool(host=host, port=port, password=password))
def __await__(self):
"""
Automatically initialize the AioAutoCompleter by using the await magic word when creating
"""
return self.initialize().__await__()
async def initialize(self):
"""
Initialize the asynchronous attributes of the AioAutoCompleter
"""
if self.redis:
# Redis will initialize its own ConnectionPool instance
await self.redis.initialize()
return self
async def add_suggestions(self, *suggestions, **kwargs):
"""
Add suggestion terms to the AutoCompleter engine. Each suggestion has a score and string.
If kwargs['increment'] is true and the terms are already in the server's dictionary, we increment their scores
"""
# If Transaction is not set to false it will attempt a MULTI/EXEC which will error
pipe = await self.redis.pipeline(transaction=False)
for sug in suggestions:
            args = [AioAutoCompleter.SUGADD_COMMAND, self.key, sug.string, sug.score]
            if kwargs.get('increment'):
                args.append(AioAutoCompleter.INCR)
            if sug.payload:
                args.append('PAYLOAD')
                args.append(sug.payload)
            await pipe.execute_command(*args)
        # Await the pipelined replies before indexing into them
        return (await pipe.execute())[-1]
async def len(self):
"""
Return the number of entries in the AutoCompleter index
"""
        return await self.redis.execute_command(AioAutoCompleter.SUGLEN_COMMAND, self.key)
async def delete(self, string):
"""
Delete a string from the AutoCompleter index.
Returns 1 if the string was found and deleted, 0 otherwise
"""
        return await self.redis.execute_command(AioAutoCompleter.SUGDEL_COMMAND, self.key, string)
async def get_suggestions(self, prefix, fuzzy = False, num = 10, with_scores = False, with_payloads=False):
"""
Get a list of suggestions from the AutoCompleter, for a given prefix
### Parameters:
- **prefix**: the prefix we are searching. **Must be valid ascii or utf-8**
- **fuzzy**: If set to true, the prefix search is done in fuzzy mode.
**NOTE**: Running fuzzy searches on short (<3 letters) prefixes can be very slow, and even scan the entire index.
- **with_scores**: if set to true, we also return the (refactored) score of each suggestion.
This is normally not needed, and is NOT the original score inserted into the index
- **with_payloads**: Return suggestion payloads
- **num**: The maximum number of results we return. Note that we might return less. The algorithm trims irrelevant suggestions.
Returns a list of Suggestion objects. If with_scores was False, the score of all suggestions is 1.
"""
        args = [AioAutoCompleter.SUGGET_COMMAND, self.key, prefix, 'MAX', num]
        if fuzzy:
            args.append(AioAutoCompleter.FUZZY)
        if with_scores:
            args.append(AioAutoCompleter.WITHSCORES)
        if with_payloads:
            args.append(AioAutoCompleter.WITHPAYLOADS)
ret = await self.redis.execute_command(*args)
results = []
if not ret:
return results
parser = SuggestionParser(with_scores, with_payloads, ret)
return [s for s in parser]
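# Illustrative usage sketch (not part of the original module); it assumes a
# RediSearch-enabled Redis server on localhost:6379 and an existing
# suggestion dictionary stored under the key 'autocomplete'.
if __name__ == '__main__':
    import asyncio

    async def _demo():
        ac = await AioAutoCompleter('autocomplete')
        print(await ac.len())
        for suggestion in await ac.get_suggestions('he', fuzzy=True, with_scores=True):
            print(suggestion)

    asyncio.run(_demo())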
|
python
|
from .files import sppasFilesPanel
__all__ = (
"sppasFilesPanel",
)
|
python
|
from telebot import types
from typing import List
from datetime import datetime
import logging
from tengi.telegram.telegram_bot import TelegramBot
logger = logging.getLogger(__file__)
class TelegramCursor:
def __init__(self, bot: TelegramBot, look_back_days: float, long_polling_timeout: float = 20):
self.bot = bot
self.look_back_days = look_back_days
self.long_polling_timeout = long_polling_timeout
self.last_bot_update_id = None
def look_back(self, allowed_updates):
updates = self.bot.get_updates(long_polling_timeout=0,
limit=100,
allowed_updates=allowed_updates)
# Sort updates from newest to oldest to use latest message in the chat only
updates = sorted(updates,
key=lambda upd: upd.update_id,
reverse=True)
now = datetime.utcnow()
look_back_seconds = self.look_back_days * 24 * 60 * 60
look_back_updates = []
cached_chat_ids = set()
for u in updates:
if u.message is not None: # Ignore messages that are outside the look back window
elapsed_seconds = (now - datetime.utcfromtimestamp(u.message.date)).total_seconds()
if elapsed_seconds > look_back_seconds:
continue
# Cache only the last message from the chat
chat_id = u.message.chat.id
if chat_id in cached_chat_ids:
continue
cached_chat_ids.add(chat_id)
look_back_updates.append(u)
# Sort updates from oldest to newest to handle in natural order
look_back_updates = sorted(look_back_updates,
key=lambda upd: upd.update_id)
return look_back_updates
def get_new_updates(self, allowed_updates) -> List[types.Update]:
look_back_updates = []
if self.last_bot_update_id is None:
look_back_updates = self.look_back(allowed_updates=allowed_updates)
if look_back_updates:
last_update = max(look_back_updates, key=lambda upd: upd.update_id)
self.last_bot_update_id = last_update.update_id
else:
self.last_bot_update_id = -1
long_polling_timeout = self.long_polling_timeout if (not look_back_updates) else 0
updates: List[types.Update] = self.bot.get_updates(offset=self.last_bot_update_id + 1,
long_polling_timeout=long_polling_timeout,
allowed_updates=allowed_updates)
if look_back_updates:
updates = look_back_updates + updates
if updates:
last_update = max(updates, key=lambda upd: upd.update_id)
self.last_bot_update_id = last_update.update_id
return updates
|
python
|
from willump.graph.willump_graph_node import WillumpGraphNode
from willump.graph.willump_python_node import WillumpPythonNode
from weld.types import *
import ast
from typing import List
from willump.willump_utilities import strip_linenos_from_var
class CascadeStackDenseNode(WillumpPythonNode):
"""
Willump Stack Dense node. Horizontally stacks multiple dense matrices.
"""
def __init__(self, more_important_nodes: List[WillumpGraphNode], more_important_names: List[str],
less_important_nodes: List[WillumpGraphNode], less_important_names: List[str],
output_name: str, output_type: WeldType, small_model_output_node: WillumpGraphNode,
small_model_output_name: str) -> None:
"""
Initialize the node.
"""
assert (isinstance(output_type, WeldVec))
assert (isinstance(output_type.elemType, WeldVec))
self._output_name = output_name
self._output_type = output_type
input_nodes = more_important_nodes + less_important_nodes + [small_model_output_node]
input_names = more_important_names + less_important_names + [small_model_output_name]
python_ast = self.get_python_ast(more_important_names, less_important_names, small_model_output_name,
output_name)
super(CascadeStackDenseNode, self).__init__(in_nodes=input_nodes, input_names=input_names,
output_names=[output_name], output_types=[output_type],
python_ast=python_ast)
def get_python_ast(self, more_important_names, less_important_names, small_model_output_name,
output_name) -> ast.AST:
more_important_vecs = [strip_linenos_from_var(name) for name in more_important_names]
less_important_vecs = [strip_linenos_from_var(name) for name in less_important_names]
more_important_str, less_important_str = "", ""
for vec in more_important_vecs:
more_important_str += "%s," % vec
for vec in less_important_vecs:
less_important_str += "%s," % vec
python_string = "%s = cascade_dense_stacker([%s], [%s], %s)" % (
strip_linenos_from_var(output_name), more_important_str,
less_important_str, strip_linenos_from_var(small_model_output_name))
python_ast = ast.parse(python_string, "exec")
return python_ast.body[0]
def get_output_name(self) -> str:
return self._output_name
def get_output_type(self) -> WeldType:
return self._output_type
def __repr__(self):
return "Stack dense node for input {0} output {1}\n" \
.format(self._input_names, self._output_name)
|
python
|
import os
import sys
import argparse
from datetime import date
from collections import defaultdict
from egcg_core.app_logging import logging_default
from egcg_core.config import cfg
from egcg_core.util import query_dict
from egcg_core import rest_communication
from egcg_core.notifications.email import send_html_email
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from config import load_config
cache = {
'run_elements_data': {},
'run_data': {},
'lanes_data': {},
'sample_data': {},
'run_status_data': {}
}
email_template_report = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'etc', 'run_report.html'
)
email_template_repeats = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'etc', 'list_repeats.html'
)
logging_default.add_stdout_handler()
logger = logging_default.get_logger(os.path.basename(__file__))
def today():
return date.today().isoformat()
def run_status_data(run_id):
if not cache['run_status_data']:
data = rest_communication.get_documents('lims/status/run_status')
for d in data:
cache['run_status_data'][d['run_id']] = d
return cache['run_status_data'][run_id]
def run_data(run_id):
if run_id not in cache['run_data']:
cache['run_data'][run_id] = rest_communication.get_document('runs', where={'run_id': run_id})
return cache['run_data'][run_id]
def run_elements_data(run_id):
if run_id not in cache['run_elements_data']:
cache['run_elements_data'][run_id] = rest_communication.get_documents('run_elements', where={'run_id': run_id})
return cache['run_elements_data'][run_id]
def sample_data(sample_id):
if sample_id not in cache['sample_data']:
cache['sample_data'][sample_id] = rest_communication.get_document('samples', where={'sample_id': sample_id})
return cache['sample_data'][sample_id]
def samples_from_run(run_id):
return run_status_data(run_id).get('sample_ids')
def get_run_success(run_id):
run_info = {'name': run_id}
re_data = run_elements_data(run_id)
lane_review = defaultdict(set)
lane_review_comment = defaultdict(set)
for re in re_data:
lane_review[re.get('lane')].add(re.get('reviewed'))
lane_review_comment[re.get('lane')].add(re.get('review_comments'))
failed_lanes = 0
reasons = []
for lane in sorted(lane_review):
if len(lane_review.get(lane)) != 1:
raise ValueError('More than one review status for lane %s in run %s' % (lane, run_id))
if lane_review.get(lane).pop() == 'fail':
failed_lanes += 1
reasons.append(
'lane %s: %s' % (lane, lane_review_comment.get(lane).pop()[len('failed due to '):])
)
reasons = sorted(reasons)
message = '%s: %s lanes failed' % (run_id, failed_lanes)
run_info['failed_lanes'] = failed_lanes
if failed_lanes > 0:
message += ':\n%s' % '\n'.join(reasons)
run_info['details'] = reasons
for l in message.split('\n'):
logger.info(l)
return run_info
def check_pending_run_element(sample_id, sdata):
# Checking for other run elements which are still pending
for sample_run_element in query_dict(sdata, 'run_elements') or []:
# Splitting the run element, and generating the run_id by concatenating the first four components
# with an underscore
sample_run_id = '_'.join(sample_run_element.split('_')[:4])
if query_dict(run_data(sample_run_id), 'aggregated.most_recent_proc.status') == 'processing':
logger.info('Another pending run element already exists for sample ' + sample_id)
return True
return False
def remove_duplicate_base_on_flowcell_id(list_runs):
"""
    Take a list of run ids and remove duplicates that share a flowcell id,
    keeping only the most recent run based on the run date in the run id.
"""
flowcell_to_run = {}
for run_id in list_runs:
        # 'run_date' avoids shadowing the datetime.date import above
        run_date, machine, run_number, stage_flowcell = run_id.split('_')
flowcell = stage_flowcell[1:]
# If the run id has not been seen or if the date is newer than the previous one then keep it
if flowcell not in flowcell_to_run or run_id > flowcell_to_run[flowcell]:
flowcell_to_run[flowcell] = run_id
return sorted(flowcell_to_run.values())
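# Illustrative example (hypothetical run ids): two runs sharing flowcell
# 'HXYZ' collapse to the newer one:
#     remove_duplicate_base_on_flowcell_id(
#         ['190101_M1_0001_AHXYZ', '190301_M1_0002_BHXYZ'])
#     -> ['190301_M1_0002_BHXYZ']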
def report_runs(run_ids, noemail=False):
run_ids.sort()
runs_info = []
for run_id in run_ids:
run_status = run_status_data(run_id).get('run_status')
if run_status == 'RunCompleted':
run_info = get_run_success(run_id)
else:
logger.info('%s: 8 lanes failed due to %s' % (run_id, run_status))
run_info = {'name': run_id, 'failed_lanes': 8, 'details': [str(run_status)]}
runs_info.append(run_info)
logger.info('')
logger.info('_____________________________________')
logger.info('')
run_repeats = []
# Remove the duplicated run from repeated flowcell
run_ids = remove_duplicate_base_on_flowcell_id(run_ids)
for run_id in run_ids:
sample_repeats = []
for sample_id in sorted(samples_from_run(run_id)):
sdata = sample_data(sample_id) or {}
clean_pc_q30 = query_dict(sdata, 'aggregated.clean_pc_q30') or 0
clean_yield_in_gb = query_dict(sdata, 'aggregated.clean_yield_in_gb') or 0
clean_yield = clean_yield_in_gb * 1000000000
mean_cov = query_dict(sdata, 'aggregated.from_run_elements.mean_coverage') or 0
if clean_pc_q30 >= 75 and (clean_yield >= sdata['required_yield'] or mean_cov >= sdata['required_coverage']):
pass
else:
reason = 'unknown'
if not clean_pc_q30:
reason = 'No data'
elif clean_yield < sdata['required_yield'] and mean_cov < sdata['required_coverage']:
reason = 'Not enough data: yield (%s < %s) and coverage (%s < %s)' % (
round(clean_yield/1000000000, 1), int(sdata['required_yield']/1000000000),
round(mean_cov, 1), sdata['required_coverage']
)
# if a pending run element exists, continue to the next sample without logging current one
if check_pending_run_element(sample_id, sdata):
continue
sample_repeats.append({'id': sample_id, 'reason': reason})
sample_repeats.sort(key=lambda s: s['id'])
if sample_repeats:
logger.info('%s: Repeat samples' % run_id)
for s in sample_repeats:
logger.info('%s: %s' % (s['id'], s['reason']))
else:
logger.info('%s: No repeat samples' % run_id)
run_repeats.append({'name': run_id, 'repeat_count': len(sample_repeats), 'repeats': sample_repeats})
if noemail:
return
_today = today()
params = {}
params.update(cfg['run_report']['email_notification'])
params['runs'] = runs_info
send_html_email(
subject='Run report %s' % _today,
email_template=email_template_report,
**params
)
params = {}
params.update(cfg['run_report']['email_notification'])
params['runs'] = run_repeats
send_html_email(
subject='Sequencing repeats %s' % _today,
email_template=email_template_repeats,
**params
)
def main():
p = argparse.ArgumentParser()
p.add_argument('-r', '--run_ids', dest='run_ids', type=str, nargs='+')
p.add_argument('--debug', action='store_true', help='override pipeline log level to debug')
p.add_argument('--noemail', action='store_true')
args = p.parse_args()
load_config()
report_runs(args.run_ids, args.noemail)
if __name__ == '__main__':
sys.exit(main())
|
python
|
# -*- coding: utf-8 -*-
import sys
import gettext
import Adventurer3.Controller
gettext.install(__name__)
class App:
"""UIアプリケーションクラス"""
def __init__(self, ipaddress):
self.adv3 = Adventurer3.Controller.Controller(ipaddress)
def user_interface(self):
while True:
cmd = input("> ").strip()
if cmd.startswith("q") or cmd.startswith("Q"):
break
if cmd.startswith("p") or cmd.startswith("P"):
if self.adv3.start():
self.adv3.update_status()
self.adv3.end()
print(self.adv3.get_status())
if cmd.startswith("s") or cmd.startswith("s"):
if self.adv3.start():
self.adv3.stop()
self.adv3.end()
if cmd.startswith("jobstop"):
if self.adv3.start():
self.adv3.stop_job()
self.adv3.end()
if __name__ == "__main__":
"""引数はホスト名かIPアドレスと仮定して処理をする"""
if len(sys.argv) > 1:
app = App(sys.argv[1])
app.user_interface()
|
python
|
# coding: utf-8
import torch
from torch.nn import functional as F
import torch.utils.data
import torch.utils.data.distributed
from torch import autograd
import numpy as np
from gan_training.random_queue import Random_queue
class Trainer(object):
def __init__(self,
generator,
discriminator,
g_optimizer,
d_optimizer,
gan_type,
reg_type,
reg_param,
pv=1,
iv=0,
dv=0,
time_step=1.,
batch_size=64,
config=None):
print("Using PID Trainer")
self.generator = generator
self.discriminator = discriminator
self.g_optimizer = g_optimizer
self.d_optimizer = d_optimizer
self.gan_type = gan_type
self.reg_type = reg_type
self.reg_param = reg_param
self.d_xfake = None
self.d_previous_z = None
self.d_previous_y = None
self.pv = pv
self.iv = iv
self.dv = dv
self.time_step = time_step
self.batch_size = batch_size
self.config = config
self.i_real_queue = Random_queue(
config['training']['batch_size'] *
config['training']['i_buffer_factor'],
config['training']['batch_size'])
self.i_fake_queue = Random_queue(
config['training']['batch_size'] *
config['training']['i_buffer_factor'],
config['training']['batch_size'])
self.max0 = torch.nn.ReLU()
def generator_trainstep(self, y, z):
assert (y.size(0) == z.size(0))
toggle_grad(self.generator, True)
toggle_grad(self.discriminator, False)
self.generator.train()
self.discriminator.train()
self.g_optimizer.zero_grad()
x_fake = self.generator(z, y)
d_fake = self.discriminator(x_fake, y)
gloss = self.compute_loss(d_fake, 1, is_generator=True)
gloss.backward()
self.g_optimizer.step()
return gloss.item()
def discriminator_trainstep(self, x_real, y, z, it=0):
# print(it)
toggle_grad(self.generator, False)
toggle_grad(self.discriminator, True)
self.generator.train()
self.discriminator.train()
self.d_optimizer.zero_grad()
reg_d = self.config['training']['regularize_output_d']
d_real = self.discriminator(x_real, y)
dloss_real = self.compute_loss(d_real, 1) * self.pv
if reg_d > 0.:
dloss_real += (d_real**2).mean() * reg_d
dloss_real.backward()
# On fake data
with torch.no_grad():
x_fake = self.generator(z, y)
d_fake = self.discriminator(x_fake, y)
dloss_fake = self.compute_loss(d_fake, 0) * self.pv
if reg_d > 0.:
dloss_fake += (d_fake**2).mean() * reg_d
dloss_fake.backward()
i_loss = torch.from_numpy(np.array([0.]))
if self.iv > 0:
# i_factor = self.config['training']['i_buffer_factor']
# i_store = self.config['training']['i_buffer_onestep']
xtmp = x_real.detach().cpu().numpy()
ytmp = y.detach().cpu().numpy()
self.i_real_queue.set_data(xtmp, ytmp)
xtmp = x_fake.detach().cpu().numpy()
ytmp = y.detach().cpu().numpy()
self.i_fake_queue.set_data(xtmp, ytmp)
i_xreal, i_yreal = self.i_real_queue.get_data()
i_xfake, i_yfake = self.i_fake_queue.get_data()
i_xreal = torch.as_tensor(i_xreal, dtype=torch.float32).cuda()
i_xfake = torch.as_tensor(i_xfake, dtype=torch.float32).cuda()
i_yreal = torch.as_tensor(i_yreal, dtype=torch.long).cuda()
i_yfake = torch.as_tensor(i_yfake, dtype=torch.long).cuda()
i_real_doutput = self.discriminator(i_xreal, i_yreal)
i_loss_real = self.compute_loss(i_real_doutput, 1)
i_fake_doutput = self.discriminator(i_xfake, i_yfake)
i_loss_fake = self.compute_loss(i_fake_doutput, 0)
if self.config['training']['pid_type'] == 'function':
i_loss = (i_loss_real + i_loss_fake) * self.iv
elif self.config['training']['pid_type'] == 'square':
i_loss = ((i_real_doutput**2).mean() +
(i_fake_doutput**2).mean()) * self.iv
elif self.config['training']['pid_type'] == 'abs':
i_loss = (torch.abs(i_real_doutput).mean() +
torch.abs(i_fake_doutput).mean()) * self.iv
elif self.config['training']['pid_type'] == 'accurate':
i_fake_doutput = self.max0(i_fake_doutput)
i_real_doutput = -1 * self.max0(-1 * i_real_doutput)
i_loss = (i_fake_doutput - i_real_doutput).mean() * self.iv
i_loss.backward()
d_loss = torch.from_numpy(np.array([0.]))
# print(self.dv)
if self.dv > 0 and it > 0:
if self.d_xfake is None:
self.d_xreal = x_real
self.d_xfake = x_fake
self.d_previous_z = z
self.d_previous_y = y
else:
d_loss_previous_f = self.compute_loss(
self.discriminator(self.d_xfake, self.d_previous_y), 0)
d_loss_previous_r = self.compute_loss(
self.discriminator(self.d_xreal, self.d_previous_y), 1)
d_loss_previous = d_loss_previous_f + d_loss_previous_r
d_loss_current_f = self.compute_loss(
self.discriminator(x_fake, y), 0)
d_loss_current_r = self.compute_loss(
self.discriminator(x_real, y), 1)
d_loss_current = d_loss_current_f + d_loss_current_r
d_loss = (d_loss_current - d_loss_previous) * self.dv
d_loss.backward()
self.d_xreal = x_real
self.d_xfake = x_fake
self.d_previous_z = z
self.d_previous_y = y
self.d_optimizer.step()
toggle_grad(self.discriminator, False)
# Output
dloss = (dloss_real + dloss_fake)
return dloss.item(), d_loss.item(), i_loss.item()
def compute_loss(self, d_out, target, is_generator=False):
targets = d_out.new_full(size=d_out.size(), fill_value=target)
if self.gan_type == 'standard':
loss = F.binary_cross_entropy_with_logits(d_out, targets)
elif self.gan_type == 'wgan':
loss = (2 * target - 1) * d_out.mean()
elif self.gan_type == 'hinge':
if is_generator is False:
loss = (F.relu(1 + (2 * target - 1) * d_out)).mean()
else:
loss = ((2 * target - 1) * d_out).mean()
elif self.gan_type == 'sigmoid':
d_out = d_out * self.config['training']['sigmoid_coe']
loss = ((2 * target - 1) * F.sigmoid(d_out)
).mean() / self.config['training']['sigmoid_coe']
elif self.gan_type == 'lsgan1':
if is_generator is False:
target = target * 2 - 1
loss = ((d_out - target)**2).mean()
else:
loss = (d_out**2).mean()
elif self.gan_type == 'lsgan2':
target -= 0.5
loss = ((d_out - target)**2).mean()
else:
raise NotImplementedError
return loss
def wgan_gp_reg(self, x_real, x_fake, y, center=1.):
batch_size = y.size(0)
eps = torch.rand(batch_size, device=y.device).view(batch_size, 1, 1, 1)
x_interp = (1 - eps) * x_real + eps * x_fake
x_interp = x_interp.detach()
x_interp.requires_grad_()
d_out = self.discriminator(x_interp, y)
reg = (compute_grad2(d_out, x_interp).sqrt() - center).pow(2).mean()
return reg
# Utility functions
def toggle_grad(model, requires_grad):
for p in model.parameters():
p.requires_grad_(requires_grad)
def compute_grad2(d_out, x_in):
batch_size = x_in.size(0)
grad_dout = autograd.grad(outputs=d_out.sum(),
inputs=x_in,
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
grad_dout2 = grad_dout.pow(2)
assert (grad_dout2.size() == x_in.size())
reg = grad_dout2.view(batch_size, -1).sum(1)
return reg
def update_average(model_tgt, model_src, beta):
toggle_grad(model_src, False)
toggle_grad(model_tgt, False)
param_dict_src = dict(model_src.named_parameters())
for p_name, p_tgt in model_tgt.named_parameters():
p_src = param_dict_src[p_name]
assert (p_src is not p_tgt)
p_tgt.copy_(beta * p_tgt + (1. - beta) * p_src)
|
python
|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name, too-few-public-methods
"""wget template"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import hpccm.base_object
class wget(hpccm.base_object):
"""wget template"""
def __init__(self, **kwargs):
"""Initialize wget template"""
super(wget, self).__init__(**kwargs)
self.wget_opts = kwargs.get('opts', ['-q', '-nc',
'--no-check-certificate'])
def download_step(self, outfile=None, referer=None, url=None,
directory='/tmp'):
"""Generate wget command line string"""
if not url:
logging.error('url is not defined')
return ''
        # Copy so as not to modify the member variable
        opts = list(self.wget_opts)
        if outfile:
            opts.append('-O {}'.format(outfile))
        if referer:
            opts.append('--referer {}'.format(referer))
        opt_string = ' '.join(opts)
# Add annotation if the caller inherits from the annotate template
if callable(getattr(self, 'add_annotation', None)):
self.add_annotation('url', url)
# Ensure the directory exists
return 'mkdir -p {1} && wget {0} -P {1} {2}'.format(opt_string,
directory, url)
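# Illustrative usage (not part of the original template; the URL below is a
# placeholder, not a real endpoint): prints the generated shell command.
if __name__ == '__main__':
    w = wget()
    print(w.download_step(url='https://example.com/pkg.tar.gz',
                          directory='/var/tmp'))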
|
python
|
from apps import db
from datetime import datetime
# Base table with shared timestamp columns
class BaseModel(object):
create_time = db.Column(db.DateTime,default=datetime.now)
update_time = db.Column(db.DateTime,default=datetime.now)
# Category table
class Cate(BaseModel,db.Model):
id = db.Column(db.Integer,primary_key=True)
name = db.Column(db.String(30))
# Goods (product) table
class Goods(BaseModel,db.Model):
id = db.Column(db.Integer,primary_key=True)
name = db.Column(db.String(30))
price = db.Column(db.Integer)
    descrip = db.Column(db.String(255))  # short description
    content = db.Column(db.Text)  # detail content
    image_url = db.Column(db.String(100))
    number = db.Column(db.Integer)  # stock quantity
cid = db.Column(db.Integer,db.ForeignKey('cate.id'))
cate = db.relationship(Cate)
# User table
class User(BaseModel,db.Model):
id = db.Column(db.Integer,primary_key=True)
name = db.Column(db.String(30))
password = db.Column(db.String(255))
# Shopping cart table
class Cart(db.Model):
id = db.Column(db.Integer,primary_key=True)
good_id = db.Column(db.Integer)
user_id = db.Column(db.Integer)
name = db.Column(db.String(30),default='')
price = db.Column(db.Integer)
number = db.Column(db.Integer)
# Comment table
class Comment(BaseModel,db.Model):
id = db.Column(db.Integer,primary_key=True)
content = db.Column(db.String(30))
user_id = db.Column(db.Integer,db.ForeignKey("user.id"))
good_id = db.Column(db.Integer,db.ForeignKey("goods.id"))
like_count = db.Column(db.Integer,default=0)
user = db.relationship(User)
# 5. Create the migration repository
#    (this command creates the migrations folder where all migration files are stored)
# python manage.py db init
# 6. Create a migration script
# python manage.py db migrate -m 'initiation'
# 7. Upgrade the database
# python manage.py db upgrade
|
python
|
from decimal import Decimal
from simfile.ssc import SSCSimfile
import unittest
from .helpers import testing_timing_data
from .. import *
from simfile.sm import SMSimfile
class TestBeat(unittest.TestCase):
def test_from_str(self):
self.assertEqual(Beat(0, 1), Beat.from_str('0.000'))
self.assertEqual(Beat(12*3+1, 3), Beat.from_str('12.333'))
self.assertEqual(Beat(4, 192), Beat.from_str('0.021'))
self.assertEqual(Beat(4, 64), Beat.from_str('0.062'))
self.assertEqual(Beat(4, 48), Beat.from_str('0.083'))
self.assertEqual(Beat(4, 32), Beat.from_str('0.125'))
self.assertEqual(Beat(4, 24), Beat.from_str('0.167'))
self.assertEqual(Beat(4, 16), Beat.from_str('0.250'))
self.assertEqual(Beat(4, 12), Beat.from_str('0.333'))
self.assertEqual(Beat(4, 8), Beat.from_str('0.500'))
def test_str(self):
self.assertEqual('0.000', str(Beat(0, 1)))
self.assertEqual('12.333', str(Beat(37, 3)))
self.assertEqual('0.021', str(Beat(4, 192)))
self.assertEqual('0.062', str(Beat(4, 64)))
self.assertEqual('0.083', str(Beat(4, 48)))
self.assertEqual('0.125', str(Beat(4, 32)))
self.assertEqual('0.167', str(Beat(4, 24)))
self.assertEqual('0.250', str(Beat(4, 16)))
self.assertEqual('0.333', str(Beat(4, 12)))
self.assertEqual('0.500', str(Beat(4, 8)))
class TestBeatValues(unittest.TestCase):
def test_from_str(self):
events = BeatValues.from_str('0.000=128.000,\n132.000=64.000,\n147.500=128.000')
self.assertIsInstance(events[0].beat, Beat)
self.assertIsInstance(events[0].value, Decimal)
self.assertEqual(BeatValue(beat=Beat(0, 1), value=Decimal('128.000')), events[0])
self.assertEqual(BeatValue(beat=Beat(132, 1), value=Decimal('64.000')), events[1])
self.assertEqual(BeatValue(beat=Beat(147*2+1, 2), value=Decimal('128.000')), events[2])
def test_serialize(self):
events = BeatValues.from_str('0.000=128.000,\n132.000=64.000,\n147.500=128.000')
self.assertEqual('0.000=128.000,\n132.000=64.000,\n147.500=128.000', events.serialize())
class TestTimingData(unittest.TestCase):
def test_attributes(self):
timing_data = testing_timing_data()
self.assertEqual(BeatValues([
BeatValue(beat=Beat(0), value=Decimal('120.000')),
BeatValue(beat=Beat(1), value=Decimal('150.000')),
BeatValue(beat=Beat(2), value=Decimal('200.000')),
BeatValue(beat=Beat(3), value=Decimal('300.000')),
]), timing_data.bpms)
self.assertEqual(BeatValues([
BeatValue(beat=Beat(2.5), value=Decimal('0.500')),
BeatValue(beat=Beat(3), value=Decimal('0.100')),
]), timing_data.stops)
self.assertEqual(BeatValues(), timing_data.delays)
self.assertEqual(BeatValues(), timing_data.warps)
self.assertEqual(Decimal('-0.009'), timing_data.offset)
def test_from_simfile_with_ssc_chart_without_distinct_timing_data(self):
with open('testdata/Springtime.ssc', 'r', encoding='utf-8') as infile:
ssc = SSCSimfile(file=infile)
ssc_chart = next(filter(
lambda c: c.stepstype == 'pump-single' and c.difficulty == 'Hard',
ssc.charts
))
timing_data = TimingData.from_simfile(ssc, ssc_chart)
self.assertEqual(BeatValues.from_str(ssc.bpms), timing_data.bpms)
self.assertEqual(BeatValues.from_str(ssc.stops), timing_data.stops)
self.assertEqual(BeatValues(), timing_data.warps)
self.assertEqual(Decimal(ssc.offset), timing_data.offset)
def test_from_simfile_with_ssc_chart_with_distinct_timing_data(self):
with open('testdata/Springtime.ssc', 'r', encoding='utf-8') as infile:
ssc = SSCSimfile(file=infile)
ssc_chart = next(filter(
lambda c: c.stepstype == 'pump-single'
and c.difficulty == 'Challenge',
ssc.charts
))
timing_data = TimingData.from_simfile(ssc, ssc_chart)
self.assertEqual(BeatValues.from_str(ssc_chart['BPMS']), timing_data.bpms)
self.assertEqual(BeatValues.from_str(ssc_chart['STOPS']), timing_data.stops)
self.assertEqual(BeatValues(), timing_data.warps)
self.assertEqual(Decimal(ssc_chart['OFFSET']), timing_data.offset)
|
python
|
# For compatibility
from ignite.utils import convert_tensor, apply_to_tensor, apply_to_type, to_onehot
def _to_hours_mins_secs(time_taken):
"""Convert seconds to hours, mins, and seconds."""
mins, secs = divmod(time_taken, 60)
hours, mins = divmod(mins, 60)
return hours, mins, secs
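# Example (illustrative, not part of the original module):
# 3725 seconds -> 1 hour, 2 minutes and 5 seconds.
if __name__ == "__main__":
    print(_to_hours_mins_secs(3725))  # (1, 2, 5)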
|
python
|
class ScoreCard:
def __init__(self, score_text: str):
score_texts = score_text.split('|')
self.normal_turns = [score_texts[i] for i in range(10)]
        # The bonus frame (index 11) is only present when the game ends in a
        # strike or spare; default to an empty list so all_turns always exists.
        self.additional_turns = [score_texts[11]] if len(score_texts) == 12 else []
        self.all_turns = self.normal_turns + self.additional_turns
    def to_score(self):
        total = 0
        for i in range(len(self.normal_turns)):
            total = total + self.get_score_by_turn(i)
        return total
def get_score_by_turn(self, turn: int)->int:
score = self.text_to_score(self.normal_turns[turn])
if self.__is_strike(self.normal_turns[turn]) or self.__is_spare(self.normal_turns[turn]):
return score + self.__get_bonus_score(turn)
else:
return score
def __get_bonus_score(self, turn:int)->int:
if turn + 1 == len(self.normal_turns):
return self.text_to_score(self.additional_turns[0])
next_2_balls = str(self.all_turns[turn + 1] + self.all_turns[turn + 2])[0:2]
return self.text_to_score(next_2_balls)
def text_to_score(self, score_text:str)->int:
if score_text.find('/') == 1:
return 10
score = 0
for i in range(len(score_text)):
score = score + self.__char_to_score(score_text[i])
return score
def __char_to_score(self, score_text:str)->int:
if self.__is_strike(score_text):
return 10
elif score_text == '-':
return 0
else:
return int(score_text)
    def __is_strike(self, score_text: str) -> bool:
        return score_text.upper() == 'X'
    def __is_spare(self, score_text: str) -> bool:
        return score_text.find('/') == 1
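# Illustrative usage (not part of the original class): a perfect game scores
# 300, and a game of all nines with misses scores 90.
if __name__ == '__main__':
    print(ScoreCard('X|X|X|X|X|X|X|X|X|X||XX').to_score())  # 300
    print(ScoreCard('9-|9-|9-|9-|9-|9-|9-|9-|9-|9-||').to_score())  # 90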
|
python
|
import os
import cgi
import requests
import shutil
def download_file(url, path, file_name=None):
"""Download file from url to directory
URL is expected to have a Content-Disposition header telling us what
filename to use.
Returns filename of downloaded file.
"""
res = requests.get(url, stream=True)
if res.status_code != 200:
raise ValueError('Failed to download')
if file_name is None:
params = cgi.parse_header(
res.headers.get('Content-Disposition', ''))[-1]
if 'filename*' not in params:
raise ValueError('Could not find a filename')
file_name = params['filename*'].replace("UTF-8''", "")
abs_path = os.path.join(path, os.path.basename(file_name))
with open(abs_path, 'wb') as target:
res.raw.decode_content = True
shutil.copyfileobj(res.raw, target)
print(f"Download {file_name}")
|
python
|
from lib.getItensFromEntrance import getItens
from lib.controllerImage import controllerImg
import os
import sys
arguments = sys.argv
pathDirect = os.getcwd()
resizeImage = controllerImg(pathDirect, arguments)
resizeImage.resizeImage()
|
python
|
from django.test import TestCase
from .models import Location, Category, Photographer, Image
# Create your tests here.
class LocationTestClass(TestCase):
def setUp(self):
self.loc = Location(location_name = 'Mombasa, Kenya')
# Testing instance
def test_instance(self):
self.assertTrue(isinstance(self.loc, Location))
def test_save_location(self):
self.loc.save_location()
locations = Location.objects.all()
self.assertTrue(len(locations) > 0)
def test_delete_location(self):
self.loc.save_location()
Location.delete_location(self.loc.id)
locations = Location.objects.all()
self.assertEqual(len(locations), 0)
def test_update_location(self):
Location.update_location(self.loc.id, 'london')
self.assertEqual(self.loc.location_name, 'london')
class CategoryTestClass(TestCase):
def setUp(self):
self.cat = Category(category_name = 'official')
# Testing instance
def test_instance(self):
self.assertTrue(isinstance(self.cat, Category))
def test_save_category(self):
self.cat.save_category()
categories = Category.objects.all()
self.assertTrue(len(categories) > 0)
def test_delete_category(self):
self.cat.save_category()
Category.delete_category(self.cat.id)
categories = Category.objects.all()
self.assertEqual(len(categories), 0)
def test_update_category(self):
        Category.update_category(self.cat.id, 'joking')
self.assertEqual(self.cat.category_name, 'joking')
class PhotographerTestClass(TestCase):
def setUp(self):
self.pho = Photographer(names = 'Fatma Fuaad', email = '[email protected]', ig = 'fatmafuaad', phone_number = '0712345678')
# Testing instance
def test_instance(self):
self.assertTrue(isinstance(self.pho, Photographer))
def test_save_photographer(self):
self.pho.save_photographer()
photographers = Photographer.objects.all()
self.assertTrue(len(photographers) > 0)
def test_delete_photographer(self):
self.pho.save_photographer()
Photographer.delete_photographer(self.pho.id)
photographers = Photographer.objects.all()
self.assertEqual(len(photographers), 0)
class ImageTestClass(TestCase):
def setUp(self):
self.loc = Location(location_name = 'Mombasa, Kenya')
self.loc.save_location()
self.cat = Category(category_name = 'official')
self.cat.save_category()
self.pho = Photographer(names = 'Fatma Fuaad', email = '[email protected]', ig = 'fatmafuaad', phone_number = '0712345678')
self.pho.save_photographer()
self.img = Image(image_path = 'fuaad.png', name = 'passport photo', description = 'photo fo passports', location = self.loc, category = self.cat, photographer = self.pho)
def tearDown(self):
Location.objects.all().delete()
Category.objects.all().delete()
Photographer.objects.all().delete()
Image.objects.all().delete()
# Testing instance
def test_instance(self):
self.assertTrue(isinstance(self.img, Image))
def test_save_image(self):
self.img.save_image()
images = Image.objects.all()
self.assertTrue(len(images) > 0)
def test_delete_image(self):
self.img.save_image()
Image.delete_image(self.img.id)
images = Image.objects.all()
self.assertEqual(len(images), 0)
def test_get_image_by_id(self):
self.img.save_image()
image = Image.get_image_by_id(self.img.id)
self.assertEqual(self.img, image)
def test_search_image(self):
self.img.save_image()
        images = Image.search_image(self.img.category)
        self.assertTrue(len(images) > 0)
def test_filter_by_location(self):
self.img.save_image()
        images = Image.filter_by_location(self.img.location)
        self.assertTrue(len(images) > 0)
def test_update_image(self):
Image.update_image(self.img.id, 'fatma.png')
self.assertEqual(self.img.image_path, 'fatma.png')
|
python
|
#!/usr/bin/env python
import sys
field = int(sys.argv[1])
for i in sys.stdin:
try:
print(i.split()[field - 1], end=" ")
print()
except:
pass
|
python
|
"""maillinter uses setuptools based setup script.
For the easiest installation type the command:
python3 setup.py install
In case you do not have root privileges use the following command instead:
python3 setup.py install --user
This installs the library and automatically handles the dependencies.
"""
import setuptools
with open("README.md") as fp:
long_description = fp.read()
setuptools.setup(
name="maillinter",
use_scm_version={"write_to": "src/maillinter/_version.py"},
description="The e-mail content formatter.",
long_description=long_description,
keywords="automation mail linter formatting",
author="Velibor Zeli",
author_email="[email protected]",
url="https://github.com/vezeli/maillinter",
license="MIT",
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
setup_requires=["setuptools_scm"],
install_requires=["nltk", "pyperclip", "setuptools_scm"],
entry_points={"console_scripts": ["maillinter = maillinter.scripts.__main__:cli"]},
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3 :: Only",
"Operating System :: OS Independent",
"Topic :: Communications :: Email",
],
)
|
python
|
import logging
import re
import os
import sys
from flexget import plugin
from flexget import validator
log = logging.getLogger('rtorrent_magnet')
pat = re.compile('xt=urn:btih:([^&/]+)')
class PluginRtorrentMagnet(object):
"""
    Process Magnet URIs into rtorrent compatible torrent files
    Magnet URIs will look something like this:
magnet:?xt=urn:btih:190F1ABAED7AE7252735A811149753AA83E34309&dn=URL+Escaped+Torrent+Name
rTorrent would expect to see something like meta-URL_Escaped_Torrent_Name.torrent
The torrent file must also contain the text:
d10:magnet-uri88:xt=urn:btih:190F1ABAED7AE7252735A811149753AA83E34309&dn=URL+Escaped+Torrent+Namee
This plugin will check if a download URL is a magnet link, and then create the appropriate torrent file.
Example:
rtorrent_magnet: ~/torrents/
"""
def write_torrent_file(self, task, entry):
path = os.path.join(
entry['path'],
'meta-%s.torrent' % entry['title'].encode(sys.getfilesystemencoding(), 'replace')
)
path = os.path.expanduser(path)
log.info('Writing rTorrent Magnet File: %s', path)
        if task.manager.options.test:
            log.info('Would write: d10:magnet-uri%d:%se' % (len(entry['url']), entry['url']))
        else:
            # The with-statement closes the file automatically.
            with open(path, 'w') as f:
                f.write('d10:magnet-uri%d:%se' % (len(entry['url']), entry['url']))
entry['output'] = path
def validator(self):
root = validator.factory()
root.accept('path', allow_replacement=True)
return root
@plugin.priority(0)
def on_task_output(self, task, config):
for entry in task.accepted:
if 'output' in entry:
log.debug('Ignoring, %s already has an output file: %s' % (entry['title'], entry['output']))
continue
urls = entry.get('urls', [entry['url']])
for url in urls:
if url.startswith('magnet:'):
log.debug('Magnet URI detected for url %s (%s)' % (url, entry['title']))
m = pat.search(url)
if m:
entry['url'] = url
entry['path'] = entry.get('path', config)
entry['hash'] = m.groups()[0]
log.debug('Magnet Hash Detected: %s' % entry['hash'])
self.write_torrent_file(task, entry)
break
else:
log.warning('Unrecognized Magnet URI Format: %s', url)
plugin.register_plugin(PluginRtorrentMagnet, 'rtorrent_magnet', api_ver=2)
|
python
|
from collections import OrderedDict
from os.path import dirname, join
from library.commands.nvidia import NvidiaSmiCommand
from library.commands.sensors import SensorsCommand
ROOT_DIR = dirname(__file__)
DATA_FOLDER = join(ROOT_DIR, "data")
COMMANDS = [
SensorsCommand,
NvidiaSmiCommand
]
# How far back to look in the alert history, in minutes
ALERT_TIMEFRAME_MINUTES = 5
# Number of alerts required to trigger sending an email
ALERTS_NB_MAIL_TRIGGER = 5
# Thresholds, as multiples of the baseline level, above which a mail should be sent
WARNING_LEVELS = OrderedDict({
"CRIT": [1.30, "red"],
"HIGH": [1.15, "orange"],
"WARM": [1.10, "yellow"]
})
# Default alerting level names
DEFAULT_LEVEL = "NORM"
FAIL_LEVEL = "FAIL"
# Default alerting file
ALERTS = {
"recipient": "[email protected]",
"subject": join(ROOT_DIR, "mail/headers.txt"),
"content": join(ROOT_DIR, "mail/content.html"),
}
|
python
|
n1 = float(input("Enter a grade: "))
n2 = float(input("Enter the other grade: "))
media = (n1 + n2)/2
print("The average of grades {:.1f} and {:.1f} is {:.1f}".format(n1, n2, media))
|
python
|
'''def do_twice(f):
f()
f()
def print_spam():
print('spam')
do_twice(print_spam)
'''
s = input('input string: ')
def do_twice(function, a):
function(a)
function(a)
def print_twice(a):
print(a)
print(a)
def do_four(function, a):
do_twice(function, a)
do_twice(function, a)
do_twice(print_twice, s)  # first part, covering items 1-4
print('')
do_four(print, s)  # item 5
|
python
|
"""Module with implementation of utility classess and functions."""
from WeOptPy.util.utility import (
full_array,
objects2array,
limit_repair,
limit_invers_repair,
wang_repair,
rand_repair,
reflect_repair,
explore_package_for_classes
)
from WeOptPy.util.argparser import (
make_arg_parser,
get_args,
get_dict_args
)
from WeOptPy.util.exception import (
FesException,
GenException,
TimeException,
RefException
)
__all__ = [
'full_array',
'objects2array',
'limit_repair',
'limit_invers_repair',
'wang_repair',
'rand_repair',
'reflect_repair',
'make_arg_parser',
'get_args',
'get_dict_args',
'FesException',
'GenException',
'TimeException',
'RefException',
'explore_package_for_classes'
]
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
|
python
|
from typing import Optional
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.schedules.schedule import Schedule
from ray.rllib.utils.typing import TensorType
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
class PolynomialSchedule(Schedule):
"""Polynomial interpolation between `initial_p` and `final_p`.
Over `schedule_timesteps`. After this many time steps, always returns
`final_p`.
"""
def __init__(self,
schedule_timesteps: int,
final_p: float,
framework: Optional[str],
initial_p: float = 1.0,
power: float = 2.0):
"""Initializes a PolynomialSchedule instance.
Args:
            schedule_timesteps: Number of time steps over which to
                anneal `initial_p` to `final_p` (polynomially, according to `power`).
final_p: Final output value.
framework: The framework descriptor string, e.g. "tf",
"torch", or None.
initial_p: Initial output value.
power: The exponent to use (default: quadratic).
"""
super().__init__(framework=framework)
assert schedule_timesteps > 0
self.schedule_timesteps = schedule_timesteps
self.final_p = final_p
self.initial_p = initial_p
self.power = power
@override(Schedule)
def _value(self, t: TensorType) -> TensorType:
"""Returns the result of:
final_p + (initial_p - final_p) * (1 - `t`/t_max) ** power
"""
if self.framework == "torch" and torch and isinstance(t, torch.Tensor):
t = t.float()
t = min(t, self.schedule_timesteps)
return self.final_p + (self.initial_p - self.final_p) * (
1.0 - (t / self.schedule_timesteps))**self.power
@override(Schedule)
def _tf_value_op(self, t: TensorType) -> TensorType:
t = tf.math.minimum(t, self.schedule_timesteps)
return self.final_p + (self.initial_p - self.final_p) * (
1.0 - (t / self.schedule_timesteps))**self.power
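

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module). It assumes the base
    # Schedule class exposes a public value(t) method, as in recent RLlib versions;
    # otherwise the _value(t) defined above can be called directly.
    demo = PolynomialSchedule(
        schedule_timesteps=10000, final_p=0.05, framework=None, initial_p=1.0, power=2.0)
    for step in (0, 2500, 5000, 10000):
        print(step, demo.value(step))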
|
python
|
from carla_utils import carla
import os
from os.path import join
import random
import signal
import subprocess
import time
import psutil, pynvml
from carla_utils.basic import YamlConfig, Data
from carla_utils.system import is_used
class Core(object):
'''
Inspired by https://github.com/carla-simulator/rllib-integration/blob/main/rllib_integration/carla_core.py
'''
def __init__(self, config: YamlConfig, map_name=None, settings=None, use_tm=True):
self.host, self.port = config.host, config.port
self.timeout = config.get('timeout', 2.0)
self.seed = config.get('seed', 0)
self.mode = config.get('mode', None)
self.connect_to_server()
self.available_map_names = self.client.get_available_maps()
        if settings is not None:
self.settings = settings
self.load_map(map_name)
if use_tm:
self.add_trafficmanager()
config.set('core', self)
def connect_to_server(self):
"""Connect to the client"""
num_iter = 10
for i in range(num_iter):
try:
self.client = carla.Client(self.host, self.port)
self.client.set_timeout(self.timeout)
self.world = self.client.get_world()
self.town_map = self.world.get_map()
self.map_name = self.town_map.name
self.settings = self.world.get_settings()
print('[Core] connected to server {}:{}'.format(self.host, self.port))
return
except Exception as e:
print('Waiting for server to be ready: {}, attempt {} of {}'.format(e, i + 1, num_iter))
time.sleep(2)
raise Exception("Cannot connect to server. Try increasing 'timeout' or 'retries_on_error' at the carla configuration")
def load_map(self, map_name=None, weather=carla.WeatherParameters.ClearNoon):
### map
map_name = str(map_name)
flag1 = self.map_name not in map_name
flag2 = True in [map_name in available_map_name for available_map_name in self.available_map_names]
if flag1 and flag2:
self.client.load_world(map_name)
self.world = self.client.get_world()
self.town_map = self.world.get_map()
self.map_name = self.town_map.name
print('[Core] load map: ', self.map_name)
### weather
self.world.set_weather(weather) ## ! TODO
### settings
current_settings = self.world.get_settings()
if self.settings.synchronous_mode != current_settings.synchronous_mode \
or self.settings.no_rendering_mode != current_settings.no_rendering_mode \
or self.settings.fixed_delta_seconds != current_settings.fixed_delta_seconds:
self.world.apply_settings(self.settings)
print('[Core] set settings: ', self.settings)
return
def add_trafficmanager(self):
tm_port = self.port + 6000
while is_used(tm_port):
print("Traffic manager's port " + str(tm_port) + " is already being used. Checking the next one")
tm_port += 1000
traffic_manager = self.client.get_trafficmanager(tm_port)
if hasattr(traffic_manager, 'set_random_device_seed'):
traffic_manager.set_random_device_seed(self.seed)
traffic_manager.set_synchronous_mode(self.settings.synchronous_mode)
# traffic_manager.set_hybrid_physics_mode(True) ## do not use this
self.traffic_manager = traffic_manager
self.tm_port = tm_port
return
def tick(self):
if self.settings.synchronous_mode:
return self.world.tick()
def kill(self):
if hasattr(self, 'server'):
kill_server(self.server)
return
# =============================================================================
# -- server ------------------------------------------------------------------
# =============================================================================
def launch_server(env_index, host='127.0.0.1', sleep_time=5.0, low_quality=True, no_display=True):
    port = 2000 + env_index * 2
time.sleep(random.uniform(0, 1))
port = get_port(port)
cmd = generate_server_cmd(port, env_index, low_quality=low_quality, no_display=no_display)
print('running: ', cmd)
server_process = subprocess.Popen(cmd,
shell=True,
preexec_fn=os.setsid,
stdout=open(os.devnull, 'w'),
)
time.sleep(sleep_time)
server = Data(host=host, port=port, process=server_process)
return server
def launch_servers(env_indices, sleep_time=20.0):
host = '127.0.0.1'
servers = []
for index in env_indices:
server = launch_server(index, host, sleep_time=0.0)
servers.append(server)
time.sleep(sleep_time)
return servers
def kill_server(server):
server.process.send_signal(signal.SIGKILL)
os.killpg(os.getpgid(server.process.pid), signal.SIGKILL)
print('killed server {}:{}'.format(server.host, server.port))
return
def kill_servers(servers):
for server in servers:
kill_server(server)
return
def kill_all_servers():
'''Kill all PIDs that start with Carla'''
processes = [p for p in psutil.process_iter() if "carla" in p.name().lower()]
for process in processes:
os.kill(process.pid, signal.SIGKILL)
def generate_server_cmd(port, env_index=-1, low_quality=True, use_opengl=True, no_display=True):
assert port % 2 == 0
if env_index == -1:
env_index = 0
pynvml.nvmlInit()
gpu_index = env_index % pynvml.nvmlDeviceGetCount()
cmd = join(os.environ['CARLAPATH'], 'CarlaUE4.sh')
cmd += ' -carla-rpc-port=' + str(port)
if low_quality:
cmd += ' -quality-level=Low'
if use_opengl:
cmd += ' -opengl'
if no_display:
# cmd = 'DISPLAY= ' + cmd ### deprecated
cmd = 'SDL_VIDEODRIVER=offscreen SDL_HINT_CUDA_DEVICE={} '.format(str(gpu_index)) + cmd
return cmd
def connect_to_server(host, port, timeout=2.0, map_name=None, **kwargs):
client = carla.Client(host, port)
client.set_timeout(timeout)
available_map_names = client.get_available_maps()
world = client.get_world()
town_map = world.get_map()
### map
map_name = str(map_name)
flag1 = town_map.name not in map_name
flag2 = True in [map_name in available_map_name for available_map_name in available_map_names]
if flag1 and flag2:
client.load_world(map_name)
world = client.get_world()
town_map = world.get_map()
### weather
weather = kwargs.get('weather', carla.WeatherParameters.ClearNoon)
world.set_weather(weather)
### settings
current_settings = world.get_settings()
settings = kwargs.get('settings', current_settings)
if settings.synchronous_mode != current_settings.synchronous_mode \
or settings.no_rendering_mode != current_settings.no_rendering_mode \
or settings.fixed_delta_seconds != current_settings.fixed_delta_seconds:
world.apply_settings(settings)
settings = world.get_settings()
print('connected to server {}:{}'.format(host, port))
return client, world, town_map
def get_port(port):
while is_used(port) or is_used(port+1):
port += 1000
return port
# =============================================================================
# -- setting -----------------------------------------------------------------
# =============================================================================
def default_settings(sync=False, render=True, dt=0.0):
settings = carla.WorldSettings()
settings.synchronous_mode = sync
settings.no_rendering_mode = not render
settings.fixed_delta_seconds = dt
return settings
# =============================================================================
# -- tick --------------------------------------------------------------------
# =============================================================================
# def tick_world(core: Core):
# if core.settings.synchronous_mode:
# return core.world.tick()
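

# Usage sketch (illustrative only; requires a running CARLA simulator, and
# launch_server() additionally needs the CARLAPATH environment variable).
# The YamlConfig construction below is hypothetical -- adapt it to however the
# surrounding project builds its configs.
#     server = launch_server(env_index=0)
#     config = YamlConfig(host=server.host, port=server.port)
#     core = Core(config, map_name='Town01')
#     core.tick()
#     kill_server(server)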
|
python
|
"""
File Name : apiview.py
Description :
Author : mxm
Created on : 2020/8/16
"""
import tornado.web
import tornado.ioloop
import tornado.httpserver
import tornado.options
from abc import ABC
from tornado.options import options, define
from tornado.web import RequestHandler
class BaseHandler(RequestHandler, ABC):
pass
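

# Usage sketch (illustrative only): a concrete handler built on BaseHandler and a
# minimal application wiring using standard Tornado APIs.
#     class PingHandler(BaseHandler):
#         def get(self):
#             self.write({'status': 'ok'})
#
#     define('port', default=8000, type=int)
#     app = tornado.web.Application([(r'/ping', PingHandler)])
#     server = tornado.httpserver.HTTPServer(app)
#     server.listen(options.port)
#     tornado.ioloop.IOLoop.current().start()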
|
python
|
from rest_framework import serializers
from .models import UserInfo
class json(serializers.ModelSerializer):
class Meta:
model = UserInfo
fields = ('UserName', 'UserBookLogo')
|
python
|
from django.contrib.auth.models import AbstractUser
from django.core import mail
from django.db.models.signals import post_save
from django.dispatch import receiver
class User(AbstractUser):
pass
@receiver(post_save, sender=User)
def send_success_email(sender, **kwargs):
"""Sends a welcome email after user creation."""
if kwargs['created']:
user = kwargs['instance']
email = mail.EmailMessage('Welcome to Wall App!',
'%s, we\'re really happy you decided to join our website! Thanks!' % user.username,
'[email protected]',
[user.email])
email.send()
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import code_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.cloud.lifesciences.v2beta',
manifest={
'RunPipelineRequest',
'RunPipelineResponse',
'Pipeline',
'Action',
'Secret',
'Mount',
'Resources',
'VirtualMachine',
'ServiceAccount',
'Accelerator',
'Network',
'Disk',
'Volume',
'PersistentDisk',
'ExistingDisk',
'NFSMount',
'Metadata',
'Event',
'DelayedEvent',
'WorkerAssignedEvent',
'WorkerReleasedEvent',
'PullStartedEvent',
'PullStoppedEvent',
'ContainerStartedEvent',
'ContainerStoppedEvent',
'UnexpectedExitStatusEvent',
'ContainerKilledEvent',
'FailedEvent',
},
)
class RunPipelineRequest(proto.Message):
r"""The arguments to the ``RunPipeline`` method. The requesting user
must have the ``iam.serviceAccounts.actAs`` permission for the Cloud
Life Sciences service account or the request will fail.
Attributes:
parent (str):
The project and location that this request
should be executed against.
pipeline (google.cloud.lifesciences_v2beta.types.Pipeline):
Required. The description of the pipeline to
run.
labels (Sequence[google.cloud.lifesciences_v2beta.types.RunPipelineRequest.LabelsEntry]):
User-defined labels to associate with the returned
operation. These labels are not propagated to any Google
Cloud Platform resources used by the operation, and can be
modified at any time.
To associate labels with resources created while executing
the operation, see the appropriate resource message (for
example, ``VirtualMachine``).
pub_sub_topic (str):
The name of an existing Pub/Sub topic. The
server will publish messages to this topic
whenever the status of the operation changes.
The Life Sciences Service Agent account must
have publisher permissions to the specified
topic or notifications will not be sent.
"""
parent = proto.Field(
proto.STRING,
number=4,
)
pipeline = proto.Field(
proto.MESSAGE,
number=1,
message='Pipeline',
)
labels = proto.MapField(
proto.STRING,
proto.STRING,
number=2,
)
pub_sub_topic = proto.Field(
proto.STRING,
number=3,
)
class RunPipelineResponse(proto.Message):
r"""The response to the RunPipeline method, returned in the
operation's result field on success.
"""
class Pipeline(proto.Message):
r"""Specifies a series of actions to execute, expressed as Docker
containers.
Attributes:
actions (Sequence[google.cloud.lifesciences_v2beta.types.Action]):
The list of actions to execute, in the order
they are specified.
resources (google.cloud.lifesciences_v2beta.types.Resources):
The resources required for execution.
environment (Sequence[google.cloud.lifesciences_v2beta.types.Pipeline.EnvironmentEntry]):
The environment to pass into every action.
Each action can also specify additional
environment variables but cannot delete an entry
from this map (though they can overwrite it with
a different value).
timeout (google.protobuf.duration_pb2.Duration):
The maximum amount of time to give the pipeline to complete.
This includes the time spent waiting for a worker to be
allocated. If the pipeline fails to complete before the
timeout, it will be cancelled and the error code will be set
to DEADLINE_EXCEEDED.
If unspecified, it will default to 7 days.
"""
actions = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='Action',
)
resources = proto.Field(
proto.MESSAGE,
number=2,
message='Resources',
)
environment = proto.MapField(
proto.STRING,
proto.STRING,
number=3,
)
timeout = proto.Field(
proto.MESSAGE,
number=4,
message=duration_pb2.Duration,
)
class Action(proto.Message):
r"""Specifies a single action that runs a Docker container.
Attributes:
container_name (str):
An optional name for the container. The
container hostname will be set to this name,
making it useful for inter-container
communication. The name must contain only upper
and lowercase alphanumeric characters and
hyphens and cannot start with a hyphen.
image_uri (str):
Required. The URI to pull the container image from. Note
that all images referenced by actions in the pipeline are
pulled before the first action runs. If multiple actions
reference the same image, it is only pulled once, ensuring
that the same image is used for all actions in a single
pipeline.
The image URI can be either a complete host and image
specification (e.g., quay.io/biocontainers/samtools), a
library and image name (e.g., google/cloud-sdk) or a bare
image name ('bash') to pull from the default library. No
schema is required in any of these cases.
If the specified image is not public, the service account
specified for the Virtual Machine must have access to pull
the images from GCR, or appropriate credentials must be
specified in the
[google.cloud.lifesciences.v2beta.Action.credentials][google.cloud.lifesciences.v2beta.Action.credentials]
field.
commands (Sequence[str]):
If specified, overrides the ``CMD`` specified in the
container. If the container also has an ``ENTRYPOINT`` the
values are used as entrypoint arguments. Otherwise, they are
used as a command and arguments to run inside the container.
entrypoint (str):
If specified, overrides the ``ENTRYPOINT`` specified in the
container.
environment (Sequence[google.cloud.lifesciences_v2beta.types.Action.EnvironmentEntry]):
The environment to pass into the container. This environment
is merged with values specified in the
[google.cloud.lifesciences.v2beta.Pipeline][google.cloud.lifesciences.v2beta.Pipeline]
message, overwriting any duplicate values.
In addition to the values passed here, a few other values
are automatically injected into the environment. These
cannot be hidden or overwritten.
``GOOGLE_PIPELINE_FAILED`` will be set to "1" if the
pipeline failed because an action has exited with a non-zero
status (and did not have the ``IGNORE_EXIT_STATUS`` flag
set). This can be used to determine if additional debug or
logging actions should execute.
``GOOGLE_LAST_EXIT_STATUS`` will be set to the exit status
of the last non-background action that executed. This can be
used by workflow engine authors to determine whether an
individual action has succeeded or failed.
pid_namespace (str):
An optional identifier for a PID namespace to
run the action inside. Multiple actions should
use the same string to share a namespace. If
unspecified, a separate isolated namespace is
used.
port_mappings (Sequence[google.cloud.lifesciences_v2beta.types.Action.PortMappingsEntry]):
A map of containers to host port mappings for this
container. If the container already specifies exposed ports,
use the ``PUBLISH_EXPOSED_PORTS`` flag instead.
The host port number must be less than 65536. If it is zero,
an unused random port is assigned. To determine the
resulting port number, consult the ``ContainerStartedEvent``
in the operation metadata.
mounts (Sequence[google.cloud.lifesciences_v2beta.types.Mount]):
A list of mounts to make available to the action.
In addition to the values specified here, every action has a
special virtual disk mounted under ``/google`` that contains
log files and other operational components.
.. raw:: html
<ul>
<li><code>/google/logs</code> All logs written during the pipeline
execution.</li>
<li><code>/google/logs/output</code> The combined standard output and
standard error of all actions run as part of the pipeline
execution.</li>
<li><code>/google/logs/action/*/stdout</code> The complete contents of
each individual action's standard output.</li>
<li><code>/google/logs/action/*/stderr</code> The complete contents of
each individual action's standard error output.</li>
</ul>
labels (Sequence[google.cloud.lifesciences_v2beta.types.Action.LabelsEntry]):
Labels to associate with the action. This
field is provided to assist workflow engine
authors in identifying actions (for example, to
indicate what sort of action they perform, such
as localization or debugging). They are returned
in the operation metadata, but are otherwise
ignored.
credentials (google.cloud.lifesciences_v2beta.types.Secret):
If the specified image is hosted on a private registry other
than Google Container Registry, the credentials required to
pull the image must be specified here as an encrypted
secret.
The secret must decrypt to a JSON-encoded dictionary
containing both ``username`` and ``password`` keys.
timeout (google.protobuf.duration_pb2.Duration):
The maximum amount of time to give the action to complete.
If the action fails to complete before the timeout, it will
be terminated and the exit status will be non-zero. The
pipeline will continue or terminate based on the rules
defined by the ``ALWAYS_RUN`` and ``IGNORE_EXIT_STATUS``
flags.
ignore_exit_status (bool):
Normally, a non-zero exit status causes the
pipeline to fail. This flag allows execution of
other actions to continue instead.
run_in_background (bool):
This flag allows an action to continue
running in the background while executing
subsequent actions. This is useful to provide
services to other actions (or to provide
debugging support tools like SSH servers).
always_run (bool):
By default, after an action fails, no further
actions are run. This flag indicates that this
action must be run even if the pipeline has
already failed. This is useful for actions that
copy output files off of the VM or for
debugging. Note that no actions will be run if
image prefetching fails.
enable_fuse (bool):
Enable access to the FUSE device for this action.
Filesystems can then be mounted into disks shared with other
actions. The other actions do not need the ``enable_fuse``
flag to access the mounted filesystem.
This has the effect of causing the container to be executed
with ``CAP_SYS_ADMIN`` and exposes ``/dev/fuse`` to the
container, so use it only for containers you trust.
publish_exposed_ports (bool):
Exposes all ports specified by ``EXPOSE`` statements in the
container. To discover the host side port numbers, consult
the ``ACTION_STARTED`` event in the operation metadata.
disable_image_prefetch (bool):
All container images are typically downloaded
before any actions are executed. This helps
prevent typos in URIs or issues like lack of
disk space from wasting large amounts of compute
resources.
If set, this flag prevents the worker from
downloading the image until just before the
action is executed.
disable_standard_error_capture (bool):
A small portion of the container's standard error stream is
typically captured and returned inside the
``ContainerStoppedEvent``. Setting this flag disables this
functionality.
block_external_network (bool):
Prevents the container from accessing the
external network.
"""
container_name = proto.Field(
proto.STRING,
number=1,
)
image_uri = proto.Field(
proto.STRING,
number=2,
)
commands = proto.RepeatedField(
proto.STRING,
number=3,
)
entrypoint = proto.Field(
proto.STRING,
number=4,
)
environment = proto.MapField(
proto.STRING,
proto.STRING,
number=5,
)
pid_namespace = proto.Field(
proto.STRING,
number=6,
)
port_mappings = proto.MapField(
proto.INT32,
proto.INT32,
number=8,
)
mounts = proto.RepeatedField(
proto.MESSAGE,
number=9,
message='Mount',
)
labels = proto.MapField(
proto.STRING,
proto.STRING,
number=10,
)
credentials = proto.Field(
proto.MESSAGE,
number=11,
message='Secret',
)
timeout = proto.Field(
proto.MESSAGE,
number=12,
message=duration_pb2.Duration,
)
ignore_exit_status = proto.Field(
proto.BOOL,
number=13,
)
run_in_background = proto.Field(
proto.BOOL,
number=14,
)
always_run = proto.Field(
proto.BOOL,
number=15,
)
enable_fuse = proto.Field(
proto.BOOL,
number=16,
)
publish_exposed_ports = proto.Field(
proto.BOOL,
number=17,
)
disable_image_prefetch = proto.Field(
proto.BOOL,
number=18,
)
disable_standard_error_capture = proto.Field(
proto.BOOL,
number=19,
)
block_external_network = proto.Field(
proto.BOOL,
number=20,
)
class Secret(proto.Message):
r"""Holds encrypted information that is only decrypted and stored
in RAM by the worker VM when running the pipeline.
Attributes:
key_name (str):
The name of the Cloud KMS key that will be used to decrypt
the secret value. The VM service account must have the
required permissions and authentication scopes to invoke the
``decrypt`` method on the specified key.
cipher_text (str):
The value of the cipherText response from the ``encrypt``
method. This field is intentionally unaudited.
"""
key_name = proto.Field(
proto.STRING,
number=1,
)
cipher_text = proto.Field(
proto.STRING,
number=2,
)
class Mount(proto.Message):
r"""Carries information about a particular disk mount inside a
container.
Attributes:
disk (str):
The name of the disk to mount, as specified
in the resources section.
path (str):
The path to mount the disk inside the
container.
read_only (bool):
If true, the disk is mounted read-only inside
the container.
"""
disk = proto.Field(
proto.STRING,
number=1,
)
path = proto.Field(
proto.STRING,
number=2,
)
read_only = proto.Field(
proto.BOOL,
number=3,
)
class Resources(proto.Message):
r"""The system resources for the pipeline run.
At least one zone or region must be specified or the pipeline
run will fail.
Attributes:
regions (Sequence[str]):
The list of regions allowed for VM allocation. If set, the
``zones`` field must not be set.
zones (Sequence[str]):
The list of zones allowed for VM allocation. If set, the
``regions`` field must not be set.
virtual_machine (google.cloud.lifesciences_v2beta.types.VirtualMachine):
The virtual machine specification.
"""
regions = proto.RepeatedField(
proto.STRING,
number=2,
)
zones = proto.RepeatedField(
proto.STRING,
number=3,
)
virtual_machine = proto.Field(
proto.MESSAGE,
number=4,
message='VirtualMachine',
)
class VirtualMachine(proto.Message):
r"""Carries information about a Compute Engine VM resource.
Attributes:
machine_type (str):
Required. The machine type of the virtual machine to create.
Must be the short name of a standard machine type (such as
"n1-standard-1") or a custom machine type (such as
"custom-1-4096", where "1" indicates the number of vCPUs and
"4096" indicates the memory in MB). See `Creating an
instance with a custom machine
type <https://cloud.google.com/compute/docs/instances/creating-instance-with-custom-machine-type#create>`__
for more specifications on creating a custom machine type.
preemptible (bool):
If true, allocate a preemptible VM.
labels (Sequence[google.cloud.lifesciences_v2beta.types.VirtualMachine.LabelsEntry]):
Optional set of labels to apply to the VM and any attached
disk resources. These labels must adhere to the `name and
value
restrictions <https://cloud.google.com/compute/docs/labeling-resources>`__
on VM labels imposed by Compute Engine.
Labels keys with the prefix 'google-' are reserved for use
by Google.
Labels applied at creation time to the VM. Applied on a
best-effort basis to attached disk resources shortly after
VM creation.
disks (Sequence[google.cloud.lifesciences_v2beta.types.Disk]):
The list of disks to create and attach to the VM.
Specify either the ``volumes[]`` field or the ``disks[]``
field, but not both.
network (google.cloud.lifesciences_v2beta.types.Network):
The VM network configuration.
accelerators (Sequence[google.cloud.lifesciences_v2beta.types.Accelerator]):
The list of accelerators to attach to the VM.
service_account (google.cloud.lifesciences_v2beta.types.ServiceAccount):
The service account to install on the VM.
This account does not need any permissions other
than those required by the pipeline.
boot_disk_size_gb (int):
The size of the boot disk, in GB. The boot
disk must be large enough to accommodate all of
the Docker images from each action in the
pipeline at the same time. If not specified, a
small but reasonable default value is used.
cpu_platform (str):
The CPU platform to request. An instance
based on a newer platform can be allocated, but
never one with fewer capabilities. The value of
this parameter must be a valid Compute Engine
CPU platform name (such as "Intel Skylake").
This parameter is only useful for carefully
optimized work loads where the CPU platform has
a significant impact.
For more information about the effect of this
parameter, see
https://cloud.google.com/compute/docs/instances/specify-
min-cpu-platform.
boot_image (str):
The host operating system image to use.
Currently, only Container-Optimized OS images can be used.
The default value is
``projects/cos-cloud/global/images/family/cos-stable``,
which selects the latest stable release of
Container-Optimized OS.
This option is provided to allow testing against the beta
release of the operating system to ensure that the new
version does not interact negatively with production
pipelines.
To test a pipeline against the beta release of
Container-Optimized OS, use the value
``projects/cos-cloud/global/images/family/cos-beta``.
nvidia_driver_version (str):
The NVIDIA driver version to use when attaching an NVIDIA
GPU accelerator. The version specified here must be
compatible with the GPU libraries contained in the container
being executed, and must be one of the drivers hosted in the
``nvidia-drivers-us-public`` bucket on Google Cloud Storage.
enable_stackdriver_monitoring (bool):
Whether Stackdriver monitoring should be
enabled on the VM.
docker_cache_images (Sequence[str]):
The Compute Engine Disk Images to use as a Docker cache. The
disks will be mounted into the Docker folder in a way that
the images present in the cache will not need to be pulled.
The digests of the cached images must match those of the
tags used or the latest version will still be pulled. The
root directory of the ext4 image must contain ``image`` and
``overlay2`` directories copied from the Docker directory of
a VM where the desired Docker images have already been
pulled. Any images pulled that are not cached will be stored
on the first cache disk instead of the boot disk. Only a
single image is supported.
volumes (Sequence[google.cloud.lifesciences_v2beta.types.Volume]):
The list of disks and other storage to create or attach to
the VM.
Specify either the ``volumes[]`` field or the ``disks[]``
field, but not both.
"""
machine_type = proto.Field(
proto.STRING,
number=1,
)
preemptible = proto.Field(
proto.BOOL,
number=2,
)
labels = proto.MapField(
proto.STRING,
proto.STRING,
number=3,
)
disks = proto.RepeatedField(
proto.MESSAGE,
number=4,
message='Disk',
)
network = proto.Field(
proto.MESSAGE,
number=5,
message='Network',
)
accelerators = proto.RepeatedField(
proto.MESSAGE,
number=6,
message='Accelerator',
)
service_account = proto.Field(
proto.MESSAGE,
number=7,
message='ServiceAccount',
)
boot_disk_size_gb = proto.Field(
proto.INT32,
number=8,
)
cpu_platform = proto.Field(
proto.STRING,
number=9,
)
boot_image = proto.Field(
proto.STRING,
number=10,
)
nvidia_driver_version = proto.Field(
proto.STRING,
number=11,
)
enable_stackdriver_monitoring = proto.Field(
proto.BOOL,
number=12,
)
docker_cache_images = proto.RepeatedField(
proto.STRING,
number=13,
)
volumes = proto.RepeatedField(
proto.MESSAGE,
number=14,
message='Volume',
)
class ServiceAccount(proto.Message):
r"""Carries information about a Google Cloud service account.
Attributes:
email (str):
Email address of the service account. If not
specified, the default Compute Engine service
account for the project will be used.
scopes (Sequence[str]):
List of scopes to be enabled for this service
account on the VM, in addition to the cloud-
platform API scope that will be added by
default.
"""
email = proto.Field(
proto.STRING,
number=1,
)
scopes = proto.RepeatedField(
proto.STRING,
number=2,
)
class Accelerator(proto.Message):
r"""Carries information about an accelerator that can be attached
to a VM.
Attributes:
type_ (str):
The accelerator type string (for example,
"nvidia-tesla-k80").
Only NVIDIA GPU accelerators are currently supported. If an
NVIDIA GPU is attached, the required runtime libraries will
be made available to all containers under
``/usr/local/nvidia``. The driver version to install must be
specified using the NVIDIA driver version parameter on the
virtual machine specification. Note that attaching a GPU
increases the worker VM startup time by a few minutes.
count (int):
How many accelerators of this type to attach.
"""
type_ = proto.Field(
proto.STRING,
number=1,
)
count = proto.Field(
proto.INT64,
number=2,
)
class Network(proto.Message):
r"""VM networking options.
Attributes:
network (str):
The network name to attach the VM's network interface to.
The value will be prefixed with ``global/networks/`` unless
it contains a ``/``, in which case it is assumed to be a
fully specified network resource URL.
If unspecified, the global default network is used.
use_private_address (bool):
If set to true, do not attach a public IP
address to the VM. Note that without a public IP
address, additional configuration is required to
allow the VM to access Google services.
See https://cloud.google.com/vpc/docs/configure-
private-google-access for more information.
subnetwork (str):
If the specified network is configured for custom subnet
creation, the name of the subnetwork to attach the instance
to must be specified here.
The value is prefixed with ``regions/*/subnetworks/`` unless
it contains a ``/``, in which case it is assumed to be a
fully specified subnetwork resource URL.
If the ``*`` character appears in the value, it is replaced
with the region that the virtual machine has been allocated
in.
"""
network = proto.Field(
proto.STRING,
number=1,
)
use_private_address = proto.Field(
proto.BOOL,
number=2,
)
subnetwork = proto.Field(
proto.STRING,
number=3,
)
class Disk(proto.Message):
r"""Carries information about a disk that can be attached to a VM.
See https://cloud.google.com/compute/docs/disks/performance for more
information about disk type, size, and performance considerations.
Specify either [``Volume``][google.cloud.lifesciences.v2beta.Volume]
or [``Disk``][google.cloud.lifesciences.v2beta.Disk], but not both.
Attributes:
name (str):
A user-supplied name for the disk. Used when
mounting the disk into actions. The name must
contain only upper and lowercase alphanumeric
characters and hyphens and cannot start with a
hyphen.
size_gb (int):
The size, in GB, of the disk to attach. If the size is not
specified, a default is chosen to ensure reasonable I/O
performance.
If the disk type is specified as ``local-ssd``, multiple
local drives are automatically combined to provide the
requested size. Note, however, that each physical SSD is
375GB in size, and no more than 8 drives can be attached to
a single instance.
type_ (str):
The Compute Engine disk type. If unspecified,
``pd-standard`` is used.
source_image (str):
An optional image to put on the disk before
attaching it to the VM.
"""
name = proto.Field(
proto.STRING,
number=1,
)
size_gb = proto.Field(
proto.INT32,
number=2,
)
type_ = proto.Field(
proto.STRING,
number=3,
)
source_image = proto.Field(
proto.STRING,
number=4,
)
class Volume(proto.Message):
r"""Carries information about storage that can be attached to a VM.
Specify either [``Volume``][google.cloud.lifesciences.v2beta.Volume]
or [``Disk``][google.cloud.lifesciences.v2beta.Disk], but not both.
Attributes:
volume (str):
A user-supplied name for the volume. Used when mounting the
volume into
[``Actions``][google.cloud.lifesciences.v2beta.Action]. The
name must contain only upper and lowercase alphanumeric
characters and hyphens and cannot start with a hyphen.
persistent_disk (google.cloud.lifesciences_v2beta.types.PersistentDisk):
Configuration for a persistent disk.
existing_disk (google.cloud.lifesciences_v2beta.types.ExistingDisk):
            Configuration for an existing disk.
nfs_mount (google.cloud.lifesciences_v2beta.types.NFSMount):
Configuration for an NFS mount.
"""
volume = proto.Field(
proto.STRING,
number=1,
)
persistent_disk = proto.Field(
proto.MESSAGE,
number=2,
oneof='storage',
message='PersistentDisk',
)
existing_disk = proto.Field(
proto.MESSAGE,
number=3,
oneof='storage',
message='ExistingDisk',
)
nfs_mount = proto.Field(
proto.MESSAGE,
number=4,
oneof='storage',
message='NFSMount',
)
class PersistentDisk(proto.Message):
r"""Configuration for a persistent disk to be attached to the VM.
See https://cloud.google.com/compute/docs/disks/performance for
more information about disk type, size, and performance
considerations.
Attributes:
size_gb (int):
The size, in GB, of the disk to attach. If the size is not
specified, a default is chosen to ensure reasonable I/O
performance.
If the disk type is specified as ``local-ssd``, multiple
local drives are automatically combined to provide the
requested size. Note, however, that each physical SSD is
375GB in size, and no more than 8 drives can be attached to
a single instance.
type_ (str):
The Compute Engine disk type. If unspecified,
``pd-standard`` is used.
source_image (str):
An image to put on the disk before attaching
it to the VM.
"""
size_gb = proto.Field(
proto.INT32,
number=1,
)
type_ = proto.Field(
proto.STRING,
number=2,
)
source_image = proto.Field(
proto.STRING,
number=3,
)
class ExistingDisk(proto.Message):
r"""Configuration for an existing disk to be attached to the VM.
Attributes:
disk (str):
If ``disk`` contains slashes, the Cloud Life Sciences API
assumes that it is a complete URL for the disk. If ``disk``
does not contain slashes, the Cloud Life Sciences API
assumes that the disk is a zonal disk and a URL will be
generated of the form ``zones/<zone>/disks/<disk>``, where
``<zone>`` is the zone in which the instance is allocated.
The disk must be ext4 formatted.
If all ``Mount`` references to this disk have the
``read_only`` flag set to true, the disk will be attached in
``read-only`` mode and can be shared with other instances.
Otherwise, the disk will be available for writing but cannot
be shared.
"""
disk = proto.Field(
proto.STRING,
number=1,
)
class NFSMount(proto.Message):
r"""Configuration for an ``NFSMount`` to be attached to the VM.
Attributes:
target (str):
            A target NFS mount. The target must be specified as
            ``address:/mount``.
"""
target = proto.Field(
proto.STRING,
number=1,
)
class Metadata(proto.Message):
r"""Carries information about the pipeline execution that is
returned in the long running operation's metadata field.
Attributes:
pipeline (google.cloud.lifesciences_v2beta.types.Pipeline):
The pipeline this operation represents.
labels (Sequence[google.cloud.lifesciences_v2beta.types.Metadata.LabelsEntry]):
The user-defined labels associated with this
operation.
events (Sequence[google.cloud.lifesciences_v2beta.types.Event]):
The list of events that have happened so far
during the execution of this operation.
create_time (google.protobuf.timestamp_pb2.Timestamp):
The time at which the operation was created
by the API.
start_time (google.protobuf.timestamp_pb2.Timestamp):
The first time at which resources were
allocated to execute the pipeline.
end_time (google.protobuf.timestamp_pb2.Timestamp):
The time at which execution was completed and
resources were cleaned up.
pub_sub_topic (str):
The name of the Cloud Pub/Sub topic where
notifications of operation status changes are
sent.
"""
pipeline = proto.Field(
proto.MESSAGE,
number=1,
message='Pipeline',
)
labels = proto.MapField(
proto.STRING,
proto.STRING,
number=2,
)
events = proto.RepeatedField(
proto.MESSAGE,
number=3,
message='Event',
)
create_time = proto.Field(
proto.MESSAGE,
number=4,
message=timestamp_pb2.Timestamp,
)
start_time = proto.Field(
proto.MESSAGE,
number=5,
message=timestamp_pb2.Timestamp,
)
end_time = proto.Field(
proto.MESSAGE,
number=6,
message=timestamp_pb2.Timestamp,
)
pub_sub_topic = proto.Field(
proto.STRING,
number=7,
)
class Event(proto.Message):
r"""Carries information about events that occur during pipeline
execution.
Attributes:
timestamp (google.protobuf.timestamp_pb2.Timestamp):
The time at which the event occurred.
description (str):
A human-readable description of the event. Note that these
strings can change at any time without notice. Any
application logic must use the information in the
``details`` field.
delayed (google.cloud.lifesciences_v2beta.types.DelayedEvent):
See
[google.cloud.lifesciences.v2beta.DelayedEvent][google.cloud.lifesciences.v2beta.DelayedEvent].
worker_assigned (google.cloud.lifesciences_v2beta.types.WorkerAssignedEvent):
See
[google.cloud.lifesciences.v2beta.WorkerAssignedEvent][google.cloud.lifesciences.v2beta.WorkerAssignedEvent].
worker_released (google.cloud.lifesciences_v2beta.types.WorkerReleasedEvent):
See
[google.cloud.lifesciences.v2beta.WorkerReleasedEvent][google.cloud.lifesciences.v2beta.WorkerReleasedEvent].
pull_started (google.cloud.lifesciences_v2beta.types.PullStartedEvent):
See
[google.cloud.lifesciences.v2beta.PullStartedEvent][google.cloud.lifesciences.v2beta.PullStartedEvent].
pull_stopped (google.cloud.lifesciences_v2beta.types.PullStoppedEvent):
See
[google.cloud.lifesciences.v2beta.PullStoppedEvent][google.cloud.lifesciences.v2beta.PullStoppedEvent].
container_started (google.cloud.lifesciences_v2beta.types.ContainerStartedEvent):
See
[google.cloud.lifesciences.v2beta.ContainerStartedEvent][google.cloud.lifesciences.v2beta.ContainerStartedEvent].
container_stopped (google.cloud.lifesciences_v2beta.types.ContainerStoppedEvent):
See
[google.cloud.lifesciences.v2beta.ContainerStoppedEvent][google.cloud.lifesciences.v2beta.ContainerStoppedEvent].
container_killed (google.cloud.lifesciences_v2beta.types.ContainerKilledEvent):
See
[google.cloud.lifesciences.v2beta.ContainerKilledEvent][google.cloud.lifesciences.v2beta.ContainerKilledEvent].
unexpected_exit_status (google.cloud.lifesciences_v2beta.types.UnexpectedExitStatusEvent):
See
[google.cloud.lifesciences.v2beta.UnexpectedExitStatusEvent][google.cloud.lifesciences.v2beta.UnexpectedExitStatusEvent].
failed (google.cloud.lifesciences_v2beta.types.FailedEvent):
See
[google.cloud.lifesciences.v2beta.FailedEvent][google.cloud.lifesciences.v2beta.FailedEvent].
"""
timestamp = proto.Field(
proto.MESSAGE,
number=1,
message=timestamp_pb2.Timestamp,
)
description = proto.Field(
proto.STRING,
number=2,
)
delayed = proto.Field(
proto.MESSAGE,
number=17,
oneof='details',
message='DelayedEvent',
)
worker_assigned = proto.Field(
proto.MESSAGE,
number=18,
oneof='details',
message='WorkerAssignedEvent',
)
worker_released = proto.Field(
proto.MESSAGE,
number=19,
oneof='details',
message='WorkerReleasedEvent',
)
pull_started = proto.Field(
proto.MESSAGE,
number=20,
oneof='details',
message='PullStartedEvent',
)
pull_stopped = proto.Field(
proto.MESSAGE,
number=21,
oneof='details',
message='PullStoppedEvent',
)
container_started = proto.Field(
proto.MESSAGE,
number=22,
oneof='details',
message='ContainerStartedEvent',
)
container_stopped = proto.Field(
proto.MESSAGE,
number=23,
oneof='details',
message='ContainerStoppedEvent',
)
container_killed = proto.Field(
proto.MESSAGE,
number=24,
oneof='details',
message='ContainerKilledEvent',
)
unexpected_exit_status = proto.Field(
proto.MESSAGE,
number=25,
oneof='details',
message='UnexpectedExitStatusEvent',
)
failed = proto.Field(
proto.MESSAGE,
number=26,
oneof='details',
message='FailedEvent',
)
class DelayedEvent(proto.Message):
r"""An event generated whenever a resource limitation or
transient error delays execution of a pipeline that was
otherwise ready to run.
Attributes:
cause (str):
A textual description of the cause of the
delay. The string can change without notice
because it is often generated by another service
(such as Compute Engine).
metrics (Sequence[str]):
If the delay was caused by a resource shortage, this field
lists the Compute Engine metrics that are preventing this
operation from running (for example, ``CPUS`` or
``INSTANCES``). If the particular metric is not known, a
single ``UNKNOWN`` metric will be present.
"""
cause = proto.Field(
proto.STRING,
number=1,
)
metrics = proto.RepeatedField(
proto.STRING,
number=2,
)
class WorkerAssignedEvent(proto.Message):
r"""An event generated after a worker VM has been assigned to run
the pipeline.
Attributes:
zone (str):
The zone the worker is running in.
instance (str):
The worker's instance name.
machine_type (str):
The machine type that was assigned for the
worker.
"""
zone = proto.Field(
proto.STRING,
number=1,
)
instance = proto.Field(
proto.STRING,
number=2,
)
machine_type = proto.Field(
proto.STRING,
number=3,
)
class WorkerReleasedEvent(proto.Message):
r"""An event generated when the worker VM that was assigned to
the pipeline has been released (deleted).
Attributes:
zone (str):
The zone the worker was running in.
instance (str):
The worker's instance name.
"""
zone = proto.Field(
proto.STRING,
number=1,
)
instance = proto.Field(
proto.STRING,
number=2,
)
class PullStartedEvent(proto.Message):
r"""An event generated when the worker starts pulling an image.
Attributes:
image_uri (str):
The URI of the image that was pulled.
"""
image_uri = proto.Field(
proto.STRING,
number=1,
)
class PullStoppedEvent(proto.Message):
r"""An event generated when the worker stops pulling an image.
Attributes:
image_uri (str):
The URI of the image that was pulled.
"""
image_uri = proto.Field(
proto.STRING,
number=1,
)
class ContainerStartedEvent(proto.Message):
r"""An event generated when a container starts.
Attributes:
action_id (int):
The numeric ID of the action that started
this container.
port_mappings (Sequence[google.cloud.lifesciences_v2beta.types.ContainerStartedEvent.PortMappingsEntry]):
The container-to-host port mappings installed for this
container. This set will contain any ports exposed using the
``PUBLISH_EXPOSED_PORTS`` flag as well as any specified in
the ``Action`` definition.
ip_address (str):
The public IP address that can be used to
connect to the container. This field is only
populated when at least one port mapping is
present. If the instance was created with a
private address, this field will be empty even
if port mappings exist.
"""
action_id = proto.Field(
proto.INT32,
number=1,
)
port_mappings = proto.MapField(
proto.INT32,
proto.INT32,
number=2,
)
ip_address = proto.Field(
proto.STRING,
number=3,
)
class ContainerStoppedEvent(proto.Message):
r"""An event generated when a container exits.
Attributes:
action_id (int):
The numeric ID of the action that started
this container.
exit_status (int):
The exit status of the container.
stderr (str):
The tail end of any content written to standard error by the
container. If the content emits large amounts of debugging
noise or contains sensitive information, you can prevent the
content from being printed by setting the
``DISABLE_STANDARD_ERROR_CAPTURE`` flag.
Note that only a small amount of the end of the stream is
captured here. The entire stream is stored in the
``/google/logs`` directory mounted into each action, and can
be copied off the machine as described elsewhere.
"""
action_id = proto.Field(
proto.INT32,
number=1,
)
exit_status = proto.Field(
proto.INT32,
number=2,
)
stderr = proto.Field(
proto.STRING,
number=3,
)
class UnexpectedExitStatusEvent(proto.Message):
r"""An event generated when the execution of a container results in a
non-zero exit status that was not otherwise ignored. Execution will
continue, but only actions that are flagged as ``ALWAYS_RUN`` will
be executed. Other actions will be skipped.
Attributes:
action_id (int):
The numeric ID of the action that started the
container.
exit_status (int):
The exit status of the container.
"""
action_id = proto.Field(
proto.INT32,
number=1,
)
exit_status = proto.Field(
proto.INT32,
number=2,
)
class ContainerKilledEvent(proto.Message):
r"""An event generated when a container is forcibly terminated by
the worker. Currently, this only occurs when the container
outlives the timeout specified by the user.
Attributes:
action_id (int):
The numeric ID of the action that started the
container.
"""
action_id = proto.Field(
proto.INT32,
number=1,
)
class FailedEvent(proto.Message):
r"""An event generated when the execution of a pipeline has
failed. Note that other events can continue to occur after this
event.
Attributes:
code (google.rpc.code_pb2.Code):
The Google standard error code that best
describes this failure.
cause (str):
The human-readable description of the cause
of the failure.
"""
code = proto.Field(
proto.ENUM,
number=1,
enum=code_pb2.Code,
)
cause = proto.Field(
proto.STRING,
number=2,
)
__all__ = tuple(sorted(__protobuf__.manifest))
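
# Usage sketch (illustrative only): proto-plus message classes accept their fields
# as keyword arguments, so a request could be assembled roughly like this.
#     action = Action(image_uri='bash', commands=['-c', 'echo hello'])
#     pipeline = Pipeline(actions=[action], resources=Resources(regions=['us-central1']))
#     request = RunPipelineRequest(parent='projects/PROJECT/locations/us-central1', pipeline=pipeline)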
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.2
# Email : [email protected]
###################################################################
from dayu_widgets.label import MLabel
from dayu_widgets.tool_button import MToolButton
from dayu_widgets.avatar import MAvatar
from dayu_widgets.divider import MDivider
from dayu_widgets import dayu_theme
from dayu_widgets.mixin import hover_shadow_mixin, cursor_mixin
from dayu_widgets.qt import QWidget, QVBoxLayout, QHBoxLayout, QLabel, QFormLayout, QSize, Qt
@hover_shadow_mixin
@cursor_mixin
class MCard(QWidget):
def __init__(self, title=None, image=None, size=None, extra=None, type=None, parent=None):
super(MCard, self).__init__(parent=parent)
self.setAttribute(Qt.WA_StyledBackground)
self.setProperty('border', False)
size = size or dayu_theme.default_size
map_label = {
dayu_theme.large: (MLabel.H2Level, 20),
dayu_theme.medium: (MLabel.H3Level, 15),
dayu_theme.small: (MLabel.H4Level, 10),
}
self._title_label = MLabel(text=title)
self._title_label.set_dayu_level(map_label.get(size)[0])
padding = map_label.get(size)[-1]
self._title_layout = QHBoxLayout()
self._title_layout.setContentsMargins(padding, padding, padding, padding)
if image:
self._title_icon = MAvatar()
self._title_icon.set_dayu_image(image)
self._title_icon.set_dayu_size(size)
self._title_layout.addWidget(self._title_icon)
self._title_layout.addWidget(self._title_label)
self._title_layout.addStretch()
        # Keep a stable attribute so get_more_button() returns None when `extra` is falsy.
        self._extra_button = None
        if extra:
            self._extra_button = MToolButton().icon_only().svg('more.svg')
            self._title_layout.addWidget(self._extra_button)
self._content_layout = QVBoxLayout()
self._main_lay = QVBoxLayout()
self._main_lay.setSpacing(0)
self._main_lay.setContentsMargins(1, 1, 1, 1)
if title:
self._main_lay.addLayout(self._title_layout)
self._main_lay.addWidget(MDivider())
self._main_lay.addLayout(self._content_layout)
self.setLayout(self._main_lay)
def get_more_button(self):
return self._extra_button
def set_widget(self, widget):
self._content_layout.addWidget(widget)
def border(self):
self.setProperty('border', True)
self.style().polish(self)
return self
@hover_shadow_mixin
@cursor_mixin
class MMeta(QWidget):
def __init__(self, cover=None, avatar=None, title=None, description=None, extra=False,
parent=None):
super(MMeta, self).__init__(parent)
self.setAttribute(Qt.WA_StyledBackground)
self._cover_label = QLabel()
self._avatar = MAvatar()
self._title_label = MLabel().h4()
self._description_label = MLabel().secondary()
self._description_label.setWordWrap(True)
self._description_label.set_elide_mode(Qt.ElideRight)
self._title_layout = QHBoxLayout()
self._title_layout.addWidget(self._title_label)
self._title_layout.addStretch()
self._extra_button = MToolButton(parent=self).icon_only().svg('more.svg')
self._title_layout.addWidget(self._extra_button)
self._extra_button.setVisible(extra)
content_lay = QFormLayout()
content_lay.setContentsMargins(5, 5, 5, 5)
content_lay.addRow(self._avatar, self._title_layout)
content_lay.addRow(self._description_label)
self._button_layout = QHBoxLayout()
main_lay = QVBoxLayout()
main_lay.setSpacing(0)
main_lay.setContentsMargins(1, 1, 1, 1)
main_lay.addWidget(self._cover_label)
main_lay.addLayout(content_lay)
main_lay.addLayout(self._button_layout)
main_lay.addStretch()
self.setLayout(main_lay)
self._cover_label.setFixedSize(QSize(200, 200))
# self.setFixedWidth(200)
def get_more_button(self):
return self._extra_button
def setup_data(self, data_dict):
if data_dict.get('title'):
self._title_label.setText(data_dict.get('title'))
self._title_label.setVisible(True)
else:
self._title_label.setVisible(False)
if data_dict.get('description'):
self._description_label.setText(data_dict.get('description'))
self._description_label.setVisible(True)
else:
self._description_label.setVisible(False)
if data_dict.get('avatar'):
self._avatar.set_dayu_image(data_dict.get('avatar'))
self._avatar.setVisible(True)
else:
self._avatar.setVisible(False)
if data_dict.get('cover'):
            fixed_width = self._cover_label.width()
            self._cover_label.setPixmap(
                data_dict.get('cover').scaledToWidth(fixed_width, Qt.SmoothTransformation))
self._cover_label.setVisible(True)
else:
self._cover_label.setVisible(False)
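

# Usage sketch (illustrative only; a QApplication must exist before creating widgets):
#     card = MCard(title='Settings', extra=True).border()
#     card.set_widget(MLabel(text='card body'))
#     card.show()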
|
python
|
input = "hello my name is sparta"
def find_max_occurred_alphabet(string):
alphabet_occurrence_array = [0] * 26
for char in string:
if not char.isalpha():
continue
arr_index = ord(char) - ord("a")
alphabet_occurrence_array[arr_index] += 1
result = find_max_occurred_alphabet(input)
print(result)
|
python
|
import itertools
import numpy as np
from sklearn.preprocessing import scale
import matplotlib.pyplot as plt
from matplotlib import collections as mc
# TODO: document it and wrap it as pip package, make ipynb example
def get_cmap(n, name='hsv'):
"""
Returns a function that maps each index in 0, 1, ..., n-1 to a distinct
RGB color; the keyword argument name must be a standard mpl colormap name.
"""
return plt.cm.get_cmap(name, n + 1) # +1 otherwise last color is almost like first one
def get_y_min_max(nparr):
"""
    returns the (min, max) of a numpy array, each padded outward by 5% of the (max - min) range
"""
ymin, ymax = np.amin(nparr), np.amax(nparr)
length = ymax - ymin
ymin -= 0.05 * length
ymax += 0.05 * length
return ymin, ymax
def get_paracoord_plot(values, labels=None, color_dict=None, save_path=None, format='png', dim=(100, 50), linewidths=1, set_legend=False, box=False, show_vertical_axis=True, ylims=None, do_scale=None, show=True):
"""
build parallel coordinates image corresponding to `values`
:param values: 2-dimensional numpy array
:param labels: optional, array containing labels for each row of `values`
    :param color_dict: dict, optional, ignored if `labels` is not provided. {label -> color} dict.
    If `labels` is provided but not `color_dict`, the color of each label will be chosen automatically
    :param save_path: path to the file where the resulting image will be stored.
    If not provided, the image will not be stored
    :param format: str. Format of the saved image (if saved); must belong to ['png', 'jpg', 'svg']
    :param dim: (int, int), dimension (in pixels) of the resulting image (for some reason, the persisted image will not have exactly this size)
    :param linewidths: int, width (in px) of the plotted line(s)
    :param set_legend: boolean, optional, ignored if `labels` is not provided. Whether to add a color legend for the labels
    :param box: boolean. Whether to draw a frame (x-axis, y-axis, etc.) around the resulting image
    :param show_vertical_axis: boolean. Whether to plot the vertical axes of the coordinates
    :param ylims: (ymin, ymax). If not provided, will be set to the result of `get_y_min_max(values)`
    :param do_scale: boolean. If True, `ylims` is ignored and `values` are scaled (vertically) to zero mean and unit standard deviation
    :param show: boolean. Whether to show the image even when it is saved. If the image is not saved, it is shown regardless.
:return: parallel coordinates image corresponding to `values`
"""
dpi = 100
figsize = (dim[0] / dpi, dim[1] / dpi)
fig, ax = plt.subplots(figsize=figsize, dpi=dpi)
segments = [[(i, values[j, i]), (i + 1, values[j, i + 1])] for j in range(values.shape[0])
for i in range(values.shape[1] - 1)]
if labels is not None:
labels = np.array(labels)
distinct_labels = list(set(labels))
        assert labels.shape[0] == values.shape[0], 'there must be as many labels as rows in values, ' \
            'here: {} labels for {} rows in values'.format(labels.shape[0], values.shape[0])
if color_dict is not None:
assert set(list(labels)) == set(color_dict.keys()), 'the keys of color_dict and the labels must be the same'
else:
cmap = get_cmap(len(distinct_labels))
color_dict = {distinct_labels[i]: cmap(i) for i in range(len(distinct_labels))}
colors = list(itertools.chain.from_iterable([[color_dict[l]] * (values.shape[1] - 1) for l in list(labels)]))
lcs = []
for color_value in color_dict.values():
# Divide segments by color
segments_color = [segments[i] for i in range(len(segments)) if colors[i] == color_value]
lc = mc.LineCollection(segments_color, linewidths=linewidths, colors=color_value)
ax.add_collection(lc)
lcs.append(lc)
if set_legend:
ax.legend(lcs, distinct_labels, bbox_to_anchor=(1, 1))
else:
lc = mc.LineCollection(segments, linewidths=linewidths, colors='b')
ax.add_collection(lc)
ax.autoscale()
if do_scale:
values = scale(values, axis=0, copy=True)
if ylims is None or do_scale:
ymin, ymax = get_y_min_max(values)
else:
ymin, ymax = ylims[0], ylims[1]
if show_vertical_axis:
for i in range(values.shape[1]):
ax.axvline(x=i, ymin=ymin, ymax=ymax, color='k')
if not box:
plt.tick_params(top='off', bottom='off', left='off', right='off', labelleft='off', labelbottom='off')
plt.box(False)
plt.xlim(0, values.shape[1])
plt.ylim(ymin, ymax)
if save_path is not None:
assert format in ['png', 'jpg', 'svg'], 'format must belong to [\'png\', \'jpg\', \'svg\']'
plt.savefig(save_path, bbox_inches='tight', format=format, pad_inches=0)
if show:
plt.show()
else:
plt.show()
# Clear the current axes.
plt.cla()
# Clear the current figure.
plt.clf()
# Closes all the figure windows.
plt.close('all')
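# Minimal usage sketch (the values, labels and dimensions below are made up for
# illustration; assumes a matplotlib version where plt.cm.get_cmap is available):
if __name__ == '__main__':
    demo_values = np.random.rand(6, 4)
    demo_labels = ['a', 'a', 'a', 'b', 'b', 'b']
    get_paracoord_plot(demo_values, labels=demo_labels, dim=(800, 400),
                       set_legend=True, show=True)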
|
python
|
import numpy as np
import pandas as pd
import time
import os
import sys
from copy import copy
from fastdtw import fastdtw
from scipy import interpolate
from scipy.stats import levy, zscore, mode
from sklearn.metrics import silhouette_score
from sklearn.metrics.pairwise import *
from scipy.spatial.distance import *
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import DBSCAN
def pairwise_fastdtw(X, **kwargs):
X = [list(enumerate(pattern)) for pattern in X]
triu = [fastdtw(X[i], X[j], **kwargs)[0] if i != j else 0 for i in range(len(X)) for j in range(i, len(X))]
matrix = np.zeros([len(X)] * 2)
matrix[np.triu_indices(len(X))] = triu
matrix += np.tril(matrix.T, -1)
return matrix
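# Shape note: for n input series, pairwise_fastdtw returns a symmetric n x n
# matrix of fastdtw distances with zeros on the diagonal, suitable for
# DBSCAN(metric='precomputed') as used in genshapelet.evaluate below.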
class individual:
def __init__(self, start: list = None, slen: list = None):
if start is None:
start = []
if slen is None:
slen = []
self.start = start
self.slen = slen
self.cluster = None
class genshapelet:
def __init__(self, ts_path: 'path to file', nsegments, min_support, smin, smax, output_folder=''):
self.ts = pd.read_csv(ts_path, header=None)
self.ts_path = ts_path
self.nsegments = nsegments
if self.nsegments is None:
self.nsegments = int(len(self.ts) / (2 * smax) + 1)
if self.nsegments < 2:
sys.exit('nsegments must be at least 2 for computing clustering quality')
self.min_support = min_support
self.smin = smin
self.smax = smax
if os.path.exists(output_folder):
pass
elif os.access(output_folder, os.W_OK):
pass
else:
sys.exit('output_folder not createable.')
self.output_folder = output_folder
self.probability = 2 / self.nsegments
self.random_walk = False
def run(self, popsize: dict(type=int, help='> 3, should be odd'), sigma: dict(type=float, help='mutation factor'),
t_max, pairwise_distmeasures=[
(pairwise_distances, {'metric': 'cosine'}),
(pairwise_distances, {'metric': 'chebyshev'}),
(pairwise_distances, {'metric': 'euclidean'}),
(pairwise_fastdtw, {'dist': euclidean})],
fusion=True, notes=''):
# For pairwise_distances
        # From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2', 'manhattan'].
# From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev', 'correlation', 'dice', 'hamming',
# 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
# 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
print('-->')
# print('working with ' + str(self.nsegments) + ' windows')
t_max *= 60
t_start = time.time()
population, fitness = [], []
for i in range(0, popsize):
population.append(self.make_individual())
fitness.append(self.evaluate(population[i], pairwise_distmeasures, fusion))
fitness_curve = []
best_fit = -np.inf
iterations = 0
t_elapsed = time.time() - t_start
while(t_elapsed < t_max):
order = np.argsort(fitness)[::-1]
ix_maxfitness = order[0]
if(fitness[ix_maxfitness] > best_fit):
best_fit = fitness[ix_maxfitness]
fitness_curve.append((t_elapsed, iterations, best_fit))
# print((t_elapsed, iterations, best_fit))
# if(iterations % 500) == 0:
# print((t_elapsed, iterations, best_fit))
new_population = []
new_population.append(population[ix_maxfitness]) # elite
fitness[0] = fitness[ix_maxfitness]
if self.random_walk:
for i in range(1, popsize):
new_population.append(self.make_individual())
fitness[i] = self.evaluate(population[i], pairwise_distmeasures, fusion)
else:
for i in range(1, int(popsize / 2), 2):
new_population.append(population[order[i]])
new_population.append(population[order[i + 1]])
self.crossover(new_population[i], new_population[i + 1])
self.mutate(new_population[i], sigma)
self.mutate(new_population[i + 1], sigma)
fitness[i] = self.evaluate(new_population[i], pairwise_distmeasures, fusion)
fitness[i + 1] = self.evaluate(new_population[i + 1], pairwise_distmeasures, fusion)
for i in range(int(popsize / 2), popsize):
new_population.append(self.make_individual())
fitness[i] = self.evaluate(new_population[i], pairwise_distmeasures, fusion)
population = new_population
iterations += 1
t_elapsed = time.time() - t_start
ix_maxfitness = np.argmax(fitness)
fitness_curve.append((t_max, iterations, fitness[ix_maxfitness]))
# print('t_elapsed: ' + str(t_elapsed))
# print('iterations: ' + str(iterations))
# print('fitness: ' + str(fitness[ix_maxfitness]))
name = self.make_filename(popsize, sigma, t_max, notes)
self.write_shapelets(population[ix_maxfitness], name)
self.write_fitness(fitness_curve, name)
# print(population[ix_maxfitness].start)
# print(population[ix_maxfitness].slen)
# print(population[ix_maxfitness].cluster)
# print(self.evaluate(population[ix_maxfitness], pairwise_distmeasures, fusion))
print('--<')
return 0
def evaluate(self, x: individual, pairwise_distmeasures, fusion):
# get patterns from individual
patterns, classlabels = [], []
for i in range(len(x.start)):
df = self.ts.loc[x.start[i]:x.start[i] + x.slen[i] - 1, :]
classlabels.append(mode(df.loc[:, [0]])[0][0][0]) # xD
df = df.loc[:, [1]].apply(zscore).fillna(0) # consider extending for multivariate ts
upsampled_ix = np.linspace(0, len(df) - 1, self.smax) # upsampling
new_values = interpolate.interp1d(np.arange(len(df)), np.array(df).flatten(), kind='cubic')(upsampled_ix)
patterns.append(new_values)
patterns = np.array(patterns)
classlabels = np.array(classlabels)
# print('patterns\n' + str(patterns)) # DEBUG
# print('classlabels ' + str(classlabels)) # DEBUG
distances = {}
cols = len(patterns)
for measure, params in pairwise_distmeasures:
distances[str(measure) + str(params)] = measure(patterns, **params)[np.triu_indices(cols)]
distances = pd.DataFrame(distances)
if fusion:
clf = LogisticRegression()
different_class = np.zeros([cols] * 2)
different_class[classlabels[:, None] != classlabels] = 1
different_class = different_class[np.triu_indices(cols)]
if 1 in different_class:
clf.fit(distances, different_class)
combined_distance = clf.predict_proba(distances)[:, 1]
else:
return -np.inf
dist_matrix = np.zeros([cols] * 2)
dist_matrix[np.triu_indices(cols)] = combined_distance
dist_matrix += np.tril(dist_matrix.T, -1)
else:
measure, params = pairwise_distmeasures[0]
dist_matrix = measure(patterns, **params)
# print('dist_matrix\n' + str(dist_matrix)) # DEBUG
# epsilon! consider: eps=dist_matrix.mean()/1.5
db = DBSCAN(eps=dist_matrix.mean(), min_samples=self.min_support, metric='precomputed', n_jobs=-1).fit(dist_matrix)
x.cluster = db.labels_
try:
fitness = silhouette_score(dist_matrix, x.cluster)
except Exception as e:
fitness = -np.inf
# print(fitness) # DEBUG
return fitness
def validate(self, x):
order = np.argsort(x.start)
for i in range(len(order)):
for j in range(1, len(order) - i):
if(x.start[order[i + j]] - x.start[order[i]] > self.smax):
break
if(x.start[order[i]] + x.slen[order[i]] > x.start[order[i + j]]):
return False
return True
def mutate(self, x, sigma):
for i in range(len(x.start)):
if(np.random.uniform() < self.probability):
tmp_start, tmp_slen = copy(x.start[i]), copy(x.slen[i])
x.slen[i] += int(sigma * (self.smax + 1 - self.smin) * levy.rvs())
x.slen[i] = (x.slen[i] - self.smin) % (self.smax + 1 - self.smin) + self.smin
x.start[i] = (x.start[i] + int(sigma * len(self.ts) * levy.rvs())) % (len(self.ts) - x.slen[i])
if not self.validate(x):
x.start[i], x.slen[i] = copy(tmp_start), copy(tmp_slen)
return 0
def crossover(self, x, y):
for i in range(min(len(x.start), len(y.start))):
if(np.random.uniform() < self.probability):
tmp_start_x, tmp_slen_x = copy(x.start[i]), copy(x.slen[i])
tmp_start_y, tmp_slen_y = copy(y.start[i]), copy(y.slen[i])
x.start[i], y.start[i] = y.start[i], x.start[i]
x.slen[i], y.slen[i] = y.slen[i], x.slen[i]
if not self.validate(x):
x.start[i], x.slen[i] = copy(tmp_start_x), copy(tmp_slen_x)
if not self.validate(y):
y.start[i], y.slen[i] = copy(tmp_start_y), copy(tmp_slen_y)
return 0
def write_fitness(self, x: 'fitness curve', filename):
df = pd.DataFrame(x)
df.to_csv(self.output_folder + '/' + filename + '.fitness.csv', index=False, header=False)
def write_shapelets(self, x: individual, filename):
out = {}
out['start'] = [start for start in x.start]
out['slen'] = [slen for slen in x.slen]
out['cluster'] = [cluster for cluster in x.cluster] if x.cluster is not None else [-2] * len(x.start)
df = pd.DataFrame(out, columns=['start', 'slen', 'cluster'])
df.sort_values('cluster', inplace=True) # unordered indizes .reset_index(inplace=True, drop=True)
df.to_csv(self.output_folder + '/' + filename + '.shapelets.csv', index=False)
return 0
def make_filename(self, popsize, sigma, t_max, notes):
filename = os.path.splitext(os.path.basename(self.ts_path))[0] # get name without path and extension
motifs = str(self.nsegments) + 'x' + str(self.min_support) + 'motifs'
window_length = str(self.smin) + '-' + str(self.smax) + 'window'
hyperparameter = str(popsize) + '_' + str(sigma) + '_' + str(t_max / 60) + '_' + str(notes)
return 'genshapelet_' + filename + '_' + motifs + '_' + window_length + '_' + hyperparameter
def make_individual(self):
x = individual()
for i in range(self.nsegments):
x.slen.append(np.random.randint(self.smin, self.smax + 1))
x.start.append(np.random.randint(0, len(self.ts) - x.slen[i]))
valid = False
            attempts = 5  # arbitrary retry limit; the overall placement should stay random anyway ¯\_(ツ)_/¯
while(not valid and attempts > 0):
valid = True
for j in range(i):
if((x.start[i] + x.slen[i] <= x.start[j]) or (x.start[j] + x.slen[j] <= x.start[i])):
continue
else:
valid = False
attempts -= 1
x.start[i] = np.random.randint(0, len(self.ts) - x.slen[i])
break
if (attempts == 0):
# print('The individual isn\'t complete. Check nsegments and smax parameter.')
x.slen.pop()
x.start.pop()
break
return x
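# Minimal usage sketch (the CSV path and hyper-parameters are placeholders; the
# file is expected to have no header, with column 0 holding a class label and
# column 1 the series value, as read in evaluate()):
if __name__ == '__main__':
    gs = genshapelet('example_series.csv', nsegments=None, min_support=2,
                     smin=10, smax=50, output_folder='.')
    gs.run(popsize=9, sigma=0.1, t_max=1)  # t_max is given in minutes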
|
python
|
import cv2
import math
import time
import numpy as np
mmRatio = 0.1479406021
scale = 2
frameWidth = 2304
frameHeight = 1536
frameCroopY = [650,950]
windowsName = 'Window Name'
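# Note on the constants above (inferred from how they are used below): mmRatio
# converts bounding-box pixels to millimetres for the measurement overlay,
# scale shrinks the combined output window, and frameCroopY is the vertical
# crop applied to each captured frame before processing.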
def playvideo():
vid = cv2.VideoCapture(0)
vid.set(cv2.CAP_PROP_FRAME_WIDTH, frameWidth)
vid.set(cv2.CAP_PROP_FRAME_HEIGHT, frameHeight)
while(True):
ret, frame = vid.read()
if not ret:
vid.release()
print('release')
break
frame = processFrame(frame)
cv2.namedWindow(windowsName)
cv2.startWindowThread()
cv2.imshow(windowsName, frame)
k = cv2.waitKey(1)
if k == 27:
break
cv2.destroyAllWindows()
def processFrame(frame):
frame = frame[frameCroopY[0]:frameCroopY[1], 0:frameWidth]
liveFrame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
# liveFrame = cv2.medianBlur(liveFrame, 1)
# frame, kernel, x, y
# liveFrame = cv2.GaussianBlur(liveFrame, (9, 9), 0)
    # frame, d (pixel neighborhood diameter), sigmaColor, sigmaSpace
    liveFrame = cv2.bilateralFilter(liveFrame, 10, 50, 50)
# _, liveFrame = cv2.threshold(liveFrame, 220, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
liveFrame = cv2.Canny(liveFrame, 75, 150, 9)
# cv2.goodFeaturesToTrack(img,maxCorners,qualityLevel, minDistance, corners, mask, blockSize, useHarrisDetector)
corners = cv2.goodFeaturesToTrack(liveFrame, 2000, 0.01, 10)
if corners is not None:
corners = np.int0(corners)
for i in corners:
x, y = i.ravel()
cv2.rectangle(liveFrame, (x - 1, y - 1), (x + 1, y + 1), (255, 255, 255), -100)
# cv2.circle(liveFrame, (x, y), 3, 255, -1)
_, cnts, _ = cv2.findContours(
liveFrame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in cnts:
# detect aproximinated contour
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.04 * peri, True)
# cv2.drawContours(frame, [approx], 0, (255, 0, 0), 1)
x, y, w, h = cv2.boundingRect(c)
# draw a green rectangle to visualize the bounding rect
cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
if len(approx) == 4:
# calculate area
area = cv2.contourArea(approx)
cv2.drawContours(frame, [approx], 0, (0, 0, 255), 1)
if (area >= 1000):
cv2.drawContours(frame, [approx], 0, (255, 0, 0), 2)
difference = abs(round(cv2.norm(approx[0], approx[2]) - cv2.norm(approx[1], approx[3])))
if (difference < 30):
# use [c] insted [approx] for precise detection line
# c = c.astype("float")
# c *= ratio
# c = c.astype("int")
# cv2.drawContours(image, [c], 0, (0, 255, 0), 3)
# (x, y, w, h) = cv2.boundingRect(approx)
# ar = w / float(h)
# draw detected object
cv2.drawContours(frame, [approx], 0, (0, 255, 0), 3)
# draw detected data
M = cv2.moments(c)
if (M["m00"] != 0):
cX = int((M["m10"] / M["m00"]))
cY = int((M["m01"] / M["m00"]))
# a square will have an aspect ratio that is approximately
# equal to one, otherwise, the shape is a rectangle
# shape = "square" if ar >= 0.95 and ar <= 1.05 else "rectangle"
(x, y, w, h) = cv2.boundingRect(approx)
ar = w / float(h)
# calculate width and height
width = w * mmRatio
height = h * mmRatio
messurment = '%0.2fmm * %0.2fmm | %s' % (width, height, difference)
# draw text
cv2.putText(frame, messurment, (approx[0][0][0], approx[0][0][1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
liveFrame = cv2.cvtColor(liveFrame, cv2.COLOR_GRAY2BGR)
combined = np.vstack((liveFrame, frame))
height, width = combined.shape[:2]
return cv2.resize(combined, (int(width/scale), int(height/scale)))
playvideo()
|
python
|
import os
from setuptools import setup, find_packages
import lendingblock
def read(name):
filename = os.path.join(os.path.dirname(__file__), name)
with open(filename) as fp:
return fp.read()
meta = dict(
version=lendingblock.__version__,
description=lendingblock.__doc__,
name='lb-py',
author='Luca Sbardella',
author_email="[email protected]",
maintainer_email="[email protected]",
url="https://github.com/lendingblock/lb-py",
license="BSD",
long_description=read('readme.md'),
packages=find_packages(exclude=['tests', 'tests.*']),
include_package_data=True,
zip_safe=False,
install_requires=['aiohttp'],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: JavaScript',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Topic :: Utilities'
]
)
if __name__ == '__main__':
setup(**meta)
|
python
|
# pylint: disable=import-error
from importlib import import_module
from flask_restplus import Api
# pylint: disable=no-name-in-module
from utils.configmanager import ConfigManager
from app import resources
def make_api(api_config):
api = Api(
prefix=api_config["prefix"],
title=api_config["title"],
version=api_config["version"],
catch_all_404s=True,
)
for module_name in api_config["resources"]:
module = import_module("." + module_name, "app.resources")
namespace = getattr(module, "api")
api.add_namespace(namespace)
return api
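# Hypothetical api_config sketch (the keys match what make_api reads above; the
# resource module names are placeholders, not real modules):
# api_config = {
#     'prefix': '/api/v1',
#     'title': 'Example API',
#     'version': '1.0',
#     'resources': ['health', 'users'],  # imported from app.resources.<name>
# }
# api = make_api(api_config)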
|
python
|
import os
import sys
import random
import time
import traceback
import torch
import torch.optim as optim
from configs import g_conf, set_type_of_process, merge_with_yaml
from network import CoILModel, Loss, adjust_learning_rate_auto
from input import CoILDataset, Augmenter, select_balancing_strategy
from logger import coil_logger
from coilutils.checkpoint_schedule import is_ready_to_save, get_latest_saved_checkpoint, \
check_loss_validation_stopped
# The main training function; it could probably be given a default name.
def execute(gpu, exp_batch, exp_alias, suppress_output=True, number_of_workers=12):
"""
The main training function. This functions loads the latest checkpoint
for a given, exp_batch (folder) and exp_alias (experiment configuration).
With this checkpoint it starts from the beginning or continue some training.
Args:
gpu: The GPU number
exp_batch: the folder with the experiments
exp_alias: the alias, experiment name
suppress_output: if the output are going to be saved on a file
number_of_workers: the number of threads used for data loading
Returns:
None
"""
try:
# We set the visible cuda devices to select the GPU
os.environ["CUDA_VISIBLE_DEVICES"] = gpu
g_conf.VARIABLE_WEIGHT = {}
# At this point the log file with the correct naming is created.
# You merge the yaml file with the global configuration structure.
merge_with_yaml(os.path.join('configs', exp_batch, exp_alias + '.yaml'))
set_type_of_process('train')
# Set the process into loading status.
coil_logger.add_message('Loading', {'GPU': gpu})
# Put the output to a separate file if it is the case
if suppress_output:
if not os.path.exists('_output_logs'):
os.mkdir('_output_logs')
sys.stdout = open(os.path.join('_output_logs', exp_alias + '_' +
g_conf.PROCESS_NAME + '_' + str(os.getpid()) + ".out"), "a",
buffering=1)
sys.stderr = open(os.path.join('_output_logs',
exp_alias + '_err_'+g_conf.PROCESS_NAME + '_'
+ str(os.getpid()) + ".out"),
"a", buffering=1)
# Preload option
if g_conf.PRELOAD_MODEL_ALIAS is not None:
checkpoint = torch.load(os.path.join('_logs', g_conf.PRELOAD_MODEL_BATCH,
g_conf.PRELOAD_MODEL_ALIAS,
'checkpoints',
str(g_conf.PRELOAD_MODEL_CHECKPOINT)+'.pth'))
# Get the latest checkpoint to be loaded
# returns none if there are no checkpoints saved for this model
checkpoint_file = get_latest_saved_checkpoint()
if checkpoint_file is not None:
checkpoint = torch.load(os.path.join('_logs', exp_batch, exp_alias,
'checkpoints', str(get_latest_saved_checkpoint())))
iteration = checkpoint['iteration']
best_loss = checkpoint['best_loss']
best_loss_iter = checkpoint['best_loss_iter']
else:
iteration = 0
best_loss = 10000.0
best_loss_iter = 0
        # Define the dataset. This structure has __getitem__ redefined so that
        # positions can be accessed from the root directory as in a vector.
full_dataset = os.path.join(os.environ["COIL_DATASET_PATH"], g_conf.TRAIN_DATASET_NAME)
# By instantiating the augmenter we get a callable that augment images and transform them
# into tensors.
augmenter = Augmenter(g_conf.AUGMENTATION)
# Instantiate the class used to read a dataset. The coil dataset generator
# can be found
dataset = CoILDataset(full_dataset, transform=augmenter,
preload_name=str(g_conf.NUMBER_OF_HOURS)
+ 'hours_' + g_conf.TRAIN_DATASET_NAME)
print ("Loaded dataset")
data_loader = select_balancing_strategy(dataset, iteration, number_of_workers)
model = CoILModel(g_conf.MODEL_TYPE, g_conf.MODEL_CONFIGURATION)
model.cuda()
optimizer = optim.Adam(model.parameters(), lr=g_conf.LEARNING_RATE)
if checkpoint_file is not None or g_conf.PRELOAD_MODEL_ALIAS is not None:
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
accumulated_time = checkpoint['total_time']
loss_window = coil_logger.recover_loss_window('train', iteration)
else: # We accumulate iteration time and keep the average speed
accumulated_time = 0
loss_window = []
print ("Before the loss")
criterion = Loss(g_conf.LOSS_FUNCTION)
# Loss time series window
for data in data_loader:
            # In this mode of execution we validate every X steps; if the validation
            # loss goes up 3 times in a row, a stop file is added to the _logs folder
            # and picked up here to end training.
if g_conf.FINISH_ON_VALIDATION_STALE is not None and \
check_loss_validation_stopped(iteration, g_conf.FINISH_ON_VALIDATION_STALE):
break
"""
####################################
Main optimization loop
####################################
"""
iteration += 1
if iteration % 1000 == 0:
adjust_learning_rate_auto(optimizer, loss_window)
# get the control commands from float_data, size = [120,1]
capture_time = time.time()
controls = data['directions']
# The output(branches) is a list of 5 branches results, each branch is with size [120,3]
model.zero_grad()
branches = model(torch.squeeze(data['rgb'].cuda()),
dataset.extract_inputs(data).cuda())
loss_function_params = {
'branches': branches,
'targets': dataset.extract_targets(data).cuda(),
'controls': controls.cuda(),
'inputs': dataset.extract_inputs(data).cuda(),
'branch_weights': g_conf.BRANCH_LOSS_WEIGHT,
'variable_weights': g_conf.VARIABLE_WEIGHT
}
loss, _ = criterion(loss_function_params)
loss.backward()
optimizer.step()
"""
####################################
Saving the model if necessary
####################################
"""
if is_ready_to_save(iteration):
state = {
'iteration': iteration,
'state_dict': model.state_dict(),
'best_loss': best_loss,
'total_time': accumulated_time,
'optimizer': optimizer.state_dict(),
'best_loss_iter': best_loss_iter
}
torch.save(state, os.path.join('_logs', exp_batch, exp_alias
, 'checkpoints', str(iteration) + '.pth'))
"""
################################################
Adding tensorboard logs.
Making calculations for logging purposes.
These logs are monitored by the printer module.
#################################################
"""
coil_logger.add_scalar('Loss', loss.data, iteration)
coil_logger.add_image('Image', torch.squeeze(data['rgb']), iteration)
if loss.data < best_loss:
best_loss = loss.data.tolist()
best_loss_iter = iteration
# Log a random position
position = random.randint(0, len(data) - 1)
output = model.extract_branch(torch.stack(branches[0:4]), controls)
error = torch.abs(output - dataset.extract_targets(data).cuda())
accumulated_time += time.time() - capture_time
coil_logger.add_message('Iterating',
{'Iteration': iteration,
'Loss': loss.data.tolist(),
'Images/s': (iteration * g_conf.BATCH_SIZE) / accumulated_time,
'BestLoss': best_loss, 'BestLossIteration': best_loss_iter,
'Output': output[position].data.tolist(),
'GroundTruth': dataset.extract_targets(data)[
position].data.tolist(),
'Error': error[position].data.tolist(),
'Inputs': dataset.extract_inputs(data)[
position].data.tolist()},
iteration)
loss_window.append(loss.data.tolist())
coil_logger.write_on_error_csv('train', loss.data)
print("Iteration: %d Loss: %f" % (iteration, loss.data))
coil_logger.add_message('Finished', {})
except KeyboardInterrupt:
coil_logger.add_message('Error', {'Message': 'Killed By User'})
except RuntimeError as e:
coil_logger.add_message('Error', {'Message': str(e)})
except:
traceback.print_exc()
coil_logger.add_message('Error', {'Message': 'Something Happened'})
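# Hypothetical call sketch (the batch/alias names are placeholders; they must
# match a YAML file under configs/<exp_batch>/<exp_alias>.yaml and the
# COIL_DATASET_PATH environment variable must be set):
# execute(gpu='0', exp_batch='sample_batch', exp_alias='sample_experiment')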
|
python
|
import json
import urllib.request
from urllib.error import URLError
from django.shortcuts import render, get_object_or_404, redirect
from django.core.cache import cache
from django.http import HttpResponse, Http404
from django.template import Context, Template, RequestContext
from django.db.models import Q, Prefetch, Count, OuterRef
from django.utils import timezone
from django.contrib import messages
from django.apps import apps
from django.http import HttpResponse
from collections import OrderedDict
import apps.common.functions as commonfunctions
from apps.objects.models import Node, User
from .models import Page, School, Department, Board, BoardSubPage, News, NewsYear, SubPage, BoardMeetingYear, DistrictCalendarYear,SuperintendentMessage,SuperintendentMessageYear, Announcement
from apps.taxonomy.models import Location, City, State, Zipcode, Language, BoardPrecinct, BoardPolicySection, SchoolType, SchoolOption, SchoolAdministratorType, SubjectGradeLevel
from apps.images.models import Thumbnail, NewsThumbnail, ContentBanner, ProfilePicture, DistrictLogo
from apps.directoryentries.models import (
Staff,
SchoolAdministrator,
Administrator,
BoardMember,
StudentBoardMember,
BoardPolicyAdmin,
SchoolAdministration,
SchoolStaff,
SchoolFaculty,
SchoolCommunityCouncilMember,
)
from apps.links.models import ResourceLink, ActionButton, ClassWebsite
from apps.documents.models import (
Document,
BoardPolicy,
Policy,
AdministrativeProcedure,
SupportingDocument,
DisclosureDocument,
SchoolCommunityCouncilMeetingAgenda,
SchoolCommunityCouncilMeetingMinutes,
)
from apps.files.models import File, AudioFile, VideoFile
from apps.events.models import BoardMeeting, DistrictCalendarEvent, SchoolCommunityCouncilMeeting
from apps.users.models import Employee
from apps.contactmessages.forms import ContactMessageForm
def updates_school_reg_dates():
reg_locations = {
10: 'Online',
20: 'Online/On-Site',
30: 'On-Site',
}
reg_audience = {
105: '1 - 5th Grade',
6: '6th Grade',
7: '7th Grade',
8: '8th Grade',
9: '9th Grade',
199: 'All Students',
99: 'All Unregistered Students',
0: 'Kindergarten',
13: 'New Students',
21: 'Returning Students',
}
try:
response = urllib.request.urlopen('https://apex.slcschools.org/apex/slcsd-apps/regcalendars/')
except URLError:
return
jsonalldates = response.read()
alldates = json.loads(jsonalldates.decode("utf-8"))
groupeddates = {}
for date in alldates['items']:
if date['location'] in reg_locations:
date['location'] = reg_locations[date['location']]
if date['audience'] in reg_audience:
date['audience'] = reg_audience[date['audience']]
if date['school'] not in groupeddates:
groupeddates[date['school']] = []
groupeddates[date['school']].append(date)
return groupeddates
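# Return shape sketch: {school_id: [{'location': ..., 'audience': ..., ...}, ...]};
# keys other than 'location' and 'audience' come straight from the registration feed.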
def set_template(request, node):
if request.site.domain == 'www.slcschools.org':
if request.path == '/' or request.path == '/home/':
return 'cmstemplates/{0}/pagelayouts/{1}'.format(
request.site.dashboard_general_site.template.namespace,
'home.html',
)
if request.path == '/employees/':
return 'cmstemplates/{0}/pagelayouts/{1}'.format(
request.site.dashboard_general_site.template.namespace,
'page-wide.html',
)
if request.path == '/schools/school-handbooks/':
return 'cmstemplates/{0}/pagelayouts/{1}'.format(
request.site.dashboard_general_site.template.namespace,
'page-wide.html',
)
if request.path == '/schools/district-demographics/':
return 'cmstemplates/{0}/pagelayouts/{1}'.format(
request.site.dashboard_general_site.template.namespace,
'page-wide.html',
)
if request.path == '/search/':
return 'cmstemplates/{0}/pagelayouts/{1}'.format(
request.site.dashboard_general_site.template.namespace,
'page-wide.html',
)
if request.path == '/departments/department-structure/':
return 'cmstemplates/www_slcschools_org/pagelayouts/departmentstructure.html'
if request.path == '/departments/superintendents-office/downloads/':
return 'cmstemplates/{0}/pagelayouts/{1}'.format(
request.site.dashboard_general_site.template.namespace,
'page-wide.html',
)
if request.path == '/calendars/guidelines-for-developing-calendar-options/':
return 'cmstemplates/{0}/pagelayouts/{1}'.format(
request.site.dashboard_general_site.template.namespace,
'page-wide.html',
)
if node.content_type == 'board' or node.content_type == 'boardsubpage':
return 'cmstemplates/www_slcschools_org/pagelayouts/boarddetail.html'
if node.content_type == 'newsyear':
return 'cmstemplates/www_slcschools_org/pagelayouts/newsyeararchive.html'
if node.content_type == 'news':
return 'cmstemplates/www_slcschools_org/pagelayouts/articledetail.html'
if request.path == '/schools/':
return 'cmstemplates/www_slcschools_org/pagelayouts/main-school-directory.html'
if request.path == '/schools/elementary-schools/':
return 'cmstemplates/www_slcschools_org/pagelayouts/school-directory.html'
if request.path == '/schools/k-8-schools/':
return 'cmstemplates/www_slcschools_org/pagelayouts/school-directory.html'
if request.path == '/schools/middle-schools/':
return 'cmstemplates/www_slcschools_org/pagelayouts/school-directory.html'
if request.path == '/schools/high-schools/':
return 'cmstemplates/www_slcschools_org/pagelayouts/school-directory.html'
if request.path == '/schools/charter-schools/':
return 'cmstemplates/www_slcschools_org/pagelayouts/school-directory.html'
if request.path == '/schools/community-learning-centers/':
return 'cmstemplates/www_slcschools_org/pagelayouts/school-directory.html'
if node.content_type == 'school':
return 'cmstemplates/www_slcschools_org/pagelayouts/schooldetail.html'
if request.path == '/departments/':
return 'cmstemplates/www_slcschools_org/pagelayouts/department-directory.html'
if node.content_type == 'superintendentmessageyear':
return 'cmstemplates/www_slcschools_org/pagelayouts/supermessageyeararchive.html'
if node.content_type == 'department':
return 'cmstemplates/www_slcschools_org/pagelayouts/departmentdetail.html'
if node.content_type == 'superintendentmessage':
return 'cmstemplates/www_slcschools_org/pagelayouts/supermessagedetail.html'
if request.path == '/directory/':
return 'cmstemplates/www_slcschools_org/pagelayouts/directory.html'
if request.path.startswith('/directory/last-name-'):
return 'cmstemplates/www_slcschools_org/pagelayouts/directory-letter.html'
if node.content_type == 'districtcalendaryear':
return 'cmstemplates/www_slcschools_org/pagelayouts/districtcalendaryears.html'
if node.content_type == 'boardmeetingyear':
return 'cmstemplates/www_slcschools_org/pagelayouts/boardmeetingyears.html'
if request.path == '/contact-us/':
return 'cmstemplates/www_slcschools_org/pagelayouts/contact-us.html'
if request.path == '/contact-us/inline/':
return 'cmstemplates/www_slcschools_org/blocks/contact-us-inline.html'
if request.path == '/schools/school-registration-dates/':
return 'cmstemplates/www_slcschools_org/pagelayouts/school-registration-dates.html'
if node.node_type == 'documents':
if node.content_type == 'document':
return 'cmstemplates/www_slcschools_org/pagelayouts/document.html'
if node.content_type == 'policy':
return 'cmstemplates/www_slcschools_org/pagelayouts/document.html'
if node.content_type == 'administrativeprocedure':
return 'cmstemplates/www_slcschools_org/pagelayouts/document.html'
if node.content_type == 'supportingdocument':
return 'cmstemplates/www_slcschools_org/pagelayouts/document.html'
if node.content_type == 'boardmeetingagenda':
return 'cmstemplates/www_slcschools_org/pagelayouts/document.html'
if node.content_type == 'boardmeetingminutes':
return 'cmstemplates/www_slcschools_org/pagelayouts/document.html'
if node.content_type == 'boardmeetingaudio':
return 'cmstemplates/www_slcschools_org/pagelayouts/audio.html'
if node.content_type == 'boardmeetingvideo':
return 'cmstemplates/www_slcschools_org/pagelayouts/video.html'
return 'cmstemplates/{0}/pagelayouts/{1}'.format(
request.site.dashboard_general_site.template.namespace,
'page.html',
)
else:
return 'cmstemplates/{0}/pagelayouts/{1}'.format(
request.site.dashboard_general_site.template.namespace,
node.pagelayout.namespace,
)
def redirect_request(request):
currentyear = commonfunctions.currentyear()
if request.path == '/board-of-education/board-meetings/':
try:
year = BoardMeetingYear.objects.get(title=currentyear['currentyear']['long'], site=request.site)
except BoardMeetingYear.DoesNotExist:
meeting, created = BoardMeeting.objects.get_or_create(startdate=timezone.now(), site=request.site)
if created:
meeting.save()
meeting.delete()
meeting.delete()
year = BoardMeetingYear.objects.get(title=currentyear['currentyear']['long'], site=request.site)
return redirect(year.url)
if request.path == '/calendars/' and request.site.domain == 'www.slcschools.org':
try:
year = DistrictCalendarYear.objects.get(title=currentyear['currentyear']['long'], site=request.site)
except DistrictCalendarYear.DoesNotExist:
event, created = DistrictCalendarEvent.objects.get_or_create(startdate=timezone.now(), site=request.site)
if created:
event.save()
event.delete()
event.delete()
year = DistrictCalendarYear.objects.get(title=currentyear['currentyear']['long'], site=request.site)
return redirect(year.url)
if request.path == '/news/':
try:
year = NewsYear.objects.get(title=currentyear['currentyear']['long'], site=request.site)
except NewsYear.DoesNotExist:
news, created = News.objects.get_or_create(title='tempnews', site=request.site)
if created:
news.save()
news.delete()
news.delete()
year = NewsYear.objects.get(title=currentyear['currentyear']['long'], site=request.site)
return redirect(year.url)
if request.path == '/departments/superintendents-office/superintendents-message/':
try:
year = SuperintendentMessageYear.objects.get(title=currentyear['currentyear']['long'], site=request.site)
except SuperintendentMessageYear.DoesNotExist:
message, created = SuperintendentMessage.objects.get_or_create(author_date=timezone.now(), site=request.site)
if created:
message.save()
message.delete()
message.delete()
year = SuperintendentMessageYear.objects.get(title=currentyear['currentyear']['long'], site=request.site)
return redirect(year.url)
return None
def prefetch_building_location_detail(qs):
prefetchqs = (
Location
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'street_address',
'location_city',
'location_state',
'location_zipcode',
'google_place',
)
.prefetch_related(
Prefetch(
'location_city',
queryset=(
City
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
),
Prefetch(
'location_state',
queryset=(
State
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
),
Prefetch(
'location_zipcode',
queryset=(
Zipcode
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
)
)
)
return qs.prefetch_related(
Prefetch(
'building_location',
queryset=prefetchqs,
)
)
def prefetch_boardmembers_detail(qs):
prefetchqs = (
BoardMember
.objects
.filter(deleted=0)
.filter(published=1)
.filter(employee__is_active=True)
.filter(employee__is_staff=True)
.order_by('precinct__title')
.only(
'employee',
'is_president',
'is_vicepresident',
'precinct',
'phone',
'street_address',
'city',
'state',
'zipcode',
'term_ends',
'related_node',
)
.prefetch_related(
Prefetch(
'employee',
queryset=(
Employee
.objects
.filter(is_active=1)
.filter(is_staff=1)
.only(
'last_name',
'first_name',
'email',
)
.prefetch_related(
Prefetch(
'images_profilepicture_node',
ProfilePicture.objects
.filter(deleted=0)
.filter(published=1)
.only(
'image_file',
'alttext',
'related_node_id',
)
)
)
)
),
Prefetch(
'precinct',
queryset=(
BoardPrecinct
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
)
.prefetch_related('files_precinctmap_node')
.order_by('title')
)
),
Prefetch(
'city',
queryset=(
City
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
)
)
),
Prefetch(
'state',
queryset=(
State
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
)
)
),
Prefetch(
'zipcode',
queryset=(
Zipcode
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
)
)
)
)
)
return qs.prefetch_related(
Prefetch(
'directoryentries_boardmember_node',
queryset=prefetchqs,
)
)
def prefetch_studentboardmember_detail(qs):
prefetchqs = (
StudentBoardMember
.objects
.filter(deleted=0)
.filter(published=1)
.order_by('title')
.only(
'first_name',
'last_name',
'phone',
'building_location',
'related_node',
)
.prefetch_related(
Prefetch(
'building_location',
queryset=(
Location
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'street_address',
'location_city',
'location_state',
'location_zipcode',
'google_place',
)
.prefetch_related(
Prefetch(
'location_city',
queryset=(
City
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
),
Prefetch(
'location_state',
queryset=(
State
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
),
Prefetch(
'location_zipcode',
queryset=(
Zipcode
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
)
)
)
),
Prefetch(
'images_profilepicture_node',
queryset=(
ProfilePicture
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'image_file',
'alttext',
'related_node_id',
)
)
)
)
)
return qs.prefetch_related(
Prefetch(
'directoryentries_studentboardmember_node',
queryset=prefetchqs,
)
)
def prefetch_schooladministrators_detail(qs):
prefetchqs = (SchoolAdministrator
.objects
.filter(deleted=False)
.filter(published=True)
.filter(employee__is_active=True)
.filter(employee__is_staff=True)
.order_by('inline_order')
.only(
'pk',
'employee',
'schooladministratortype',
'inline_order',
'related_node',
)
.prefetch_related(
Prefetch(
'employee',
queryset=(
Employee
.objects
.filter(is_active=True)
.filter(is_staff=True)
.only(
'pk',
'last_name',
'first_name',
'email',
'job_title',
)
.prefetch_related(
Prefetch(
'images_profilepicture_node',
queryset=(
ProfilePicture
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
'image_file',
'alttext',
'related_node',
)
),
)
)
),
),
)
.prefetch_related(
Prefetch(
'schooladministratortype',
queryset=(
SchoolAdministratorType
.objects
.filter(deleted=False)
.filter(published=True)
.only(
'pk',
'title',
)
),
),
)
)
return qs.prefetch_related(
Prefetch(
'directoryentries_schooladministrator_node',
queryset=prefetchqs,
)
)
def prefetch_administrators_detail(qs):
prefetchqs = (
Administrator
.objects
.filter(deleted=False)
.filter(published=True)
.filter(employee__is_active=True)
.filter(employee__is_staff=True)
.order_by('inline_order')
.only(
'pk',
'employee',
'job_title',
'inline_order',
'related_node',
)
.prefetch_related(
Prefetch(
'employee',
queryset=(
Employee
.objects
.filter(is_active=True)
.filter(is_staff=True)
.only(
'pk',
'last_name',
'first_name',
'email',
'job_title',
)
.prefetch_related(
Prefetch(
'images_profilepicture_node',
queryset=(
ProfilePicture
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
'image_file',
'alttext',
'related_node',
)
)
)
)
)
)
)
)
return qs.prefetch_related(
Prefetch(
'directoryentries_administrator_node',
queryset=prefetchqs,
)
)
def prefetch_staff_detail(qs):
prefetchqs = (
Staff
.objects
.filter(deleted=False)
.filter(published=True)
.filter(employee__is_active=True)
.filter(employee__is_staff=True)
.order_by('inline_order')
.only(
'pk',
'employee',
'job_title',
'inline_order',
'related_node',
)
.prefetch_related(
Prefetch(
'employee',
queryset=(
Employee
.objects
.filter(is_active=True)
.filter(is_staff=True)
.only(
'pk',
'last_name',
'first_name',
'email',
'job_title',
)
)
)
)
)
return qs.prefetch_related(
Prefetch(
'directoryentries_staff_node',
queryset=prefetchqs,
)
)
def prefetch_documents_detail(qs):
prefetchqs = (
Document
.objects
.filter(deleted=0)
.filter(published=1)
.order_by('inline_order')
.only(
'pk',
'title',
'inline_order',
'related_node'
)
.prefetch_related(
Prefetch(
'files_file_node',
queryset=(
File
.objects
.filter(deleted=0)
.filter(published=1)
.order_by(
'file_language__lft',
'file_language__title',
)
.only(
'title',
'file_file',
'file_language',
'related_node',
)
.prefetch_related(
Prefetch(
'file_language',
queryset=(
Language
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
)
)
)
)
)
)
return qs.prefetch_related(
Prefetch(
'documents_document_node',
queryset=prefetchqs,
)
)
def prefetch_disclosuredocuments_detail(qs):
prefetchqs = (
DisclosureDocument
.objects
.filter(deleted=0)
.filter(published=1)
.annotate(
file_count=Count(
'files_file_node',
filter=Q(
files_file_node__published=1,
files_file_node__deleted=0,
)
)
)
.filter(file_count__gt=0)
.order_by('inline_order')
.only(
'pk',
'title',
'inline_order',
'related_node'
)
.prefetch_related(
Prefetch(
'files_file_node',
queryset=(
File
.objects
.filter(deleted=0)
.filter(published=1)
.order_by(
'file_language__lft',
'file_language__title',
)
.only(
'title',
'file_file',
'file_language',
'related_node',
)
.prefetch_related(
Prefetch(
'file_language',
queryset=(
Language
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
)
)
)
)
)
)
return qs.prefetch_related(
Prefetch(
'documents_disclosuredocument_node',
queryset=prefetchqs,
)
)
def prefetch_schoolcommunitycouncilmembers_detail(qs):
prefetchqs = (
SchoolCommunityCouncilMember
.objects
.filter(deleted=0)
.filter(published=1)
.order_by('inline_order')
)
qs = qs.prefetch_related(
Prefetch(
'directoryentries_schoolcommunitycouncilmember_node',
queryset=prefetchqs,
)
)
return qs
def prefetch_schoolcommunitycouncilmeetings_detail(qs):
prefetchqs = (
SchoolCommunityCouncilMeeting
.objects
.filter(deleted=0)
.filter(published=1)
.order_by('startdate')
)
prefetchqs = prefetch_schoolcommunitycouncilmeetingagenda_detail(prefetchqs)
prefetchqs = prefetch_schoolcommunitycouncilmeetingminutes_detail(prefetchqs)
qs = qs.prefetch_related(
Prefetch(
'events_schoolcommunitycouncilmeeting_node',
queryset=prefetchqs,
)
)
return qs
def prefetch_schoolcommunitycouncilmeetingagenda_detail(qs):
prefetchqs = (
SchoolCommunityCouncilMeetingAgenda
.objects
.filter(deleted=0)
.filter(published=1)
.annotate(
file_count=Count(
'files_file_node',
filter=Q(
files_file_node__published=1,
files_file_node__deleted=0,
)
)
)
.filter(file_count__gt=0)
.order_by('inline_order')
.only(
'pk',
'title',
'inline_order',
'related_node'
)
.prefetch_related(
Prefetch(
'files_file_node',
queryset=(
File
.objects
.filter(deleted=0)
.filter(published=1)
.order_by(
'file_language__lft',
'file_language__title',
)
.only(
'title',
'file_file',
'file_language',
'related_node',
)
.prefetch_related(
Prefetch(
'file_language',
queryset=(
Language
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
)
)
)
)
)
)
qs = qs.prefetch_related(
Prefetch(
'documents_schoolcommunitycouncilmeetingagenda_node',
queryset=prefetchqs,
)
)
return qs
def prefetch_schoolcommunitycouncilmeetingminutes_detail(qs):
prefetchqs = (
SchoolCommunityCouncilMeetingMinutes
.objects
.filter(deleted=0)
.filter(published=1)
.annotate(
file_count=Count(
'files_file_node',
filter=Q(
files_file_node__published=1,
files_file_node__deleted=0,
)
)
)
.filter(file_count__gt=0)
.order_by('inline_order')
.only(
'pk',
'title',
'inline_order',
'related_node'
)
.prefetch_related(
Prefetch(
'files_file_node',
queryset=(
File
.objects
.filter(deleted=0)
.filter(published=1)
.order_by(
'file_language__lft',
'file_language__title',
)
.only(
'title',
'file_file',
'file_language',
'related_node',
)
.prefetch_related(
Prefetch(
'file_language',
queryset=(
Language
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
)
)
)
)
)
)
qs = qs.prefetch_related(
Prefetch(
'documents_schoolcommunitycouncilmeetingminutes_node',
queryset=prefetchqs,
)
)
return qs
def prefetch_contentbanner_detail(qs):
prefetchqs = (
ContentBanner
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'image_file',
'alttext',
'related_node_id',
)
.order_by('inline_order')
)
return qs.prefetch_related(
Prefetch(
'images_contentbanner_node',
queryset=prefetchqs,
)
)
def prefetch_actionbuttons_detail(qs):
prefetchqs = (
ActionButton
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
'link_url',
'inline_order',
'related_node',
)
.order_by('inline_order')
)
return qs.prefetch_related(
Prefetch(
'links_actionbutton_node',
queryset=prefetchqs,
)
)
def prefetch_resourcelinks_detail(qs):
prefetchqs = (
ResourceLink
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
'link_url',
'inline_order',
'related_node',
)
.order_by('inline_order')
)
return qs.prefetch_related(
Prefetch(
'links_resourcelink_node',
queryset=prefetchqs,
)
)
def prefetch_classwebsite_detail(qs):
prefetchqs = (
ClassWebsite
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
'link_url',
'inline_order',
'related_node',
)
)
return qs.prefetch_related(
Prefetch(
'links_classwebsite_node',
queryset=prefetchqs,
)
)
def prefetch_announcement_detail(qs):
prefetchqs = (
Announcement
.objects
.filter(deleted=0)
.filter(published=1)
.order_by('inline_order')
)
return qs.prefetch_related(
Prefetch(
'pages_announcement_node',
queryset=prefetchqs,
)
)
def prefetch_subjectgradelevel_detail(qs):
activesubjects = []
page = qs[0]
for person in page.directoryentries_schoolfaculty_node.all():
if person.primary_subject.pk not in activesubjects:
activesubjects.append(person.primary_subject.pk)
prefetchqs = (
SubjectGradeLevel
.objects
.filter(deleted=0)
.filter(published=1)
.filter(pk__in=activesubjects)
.order_by('inline_order')
)
return qs.prefetch_related(
Prefetch(
'taxonomy_subjectgradelevel_node',
queryset=prefetchqs,
)
)
def prefetch_schooladministration_detail(qs):
prefetchqs = (
SchoolAdministration
.objects
.filter(deleted=0)
.filter(published=1)
.filter(employee__is_active=True)
.filter(employee__is_staff=True)
.order_by('inline_order')
.prefetch_related(
Prefetch(
'employee',
queryset=(
Employee
.objects
.filter(is_active=True)
.filter(is_staff=True)
.only(
'pk',
'last_name',
'first_name',
'email',
'job_title',
)
)
)
)
)
return qs.prefetch_related(
Prefetch(
'directoryentries_schooladministration_node',
queryset=prefetchqs,
)
)
def prefetch_schoolstaff_detail(qs):
prefetchqs = (
SchoolStaff
.objects
.filter(deleted=0)
.filter(published=1)
.filter(employee__is_active=True)
.filter(employee__is_staff=True)
.order_by('inline_order')
.prefetch_related(
Prefetch(
'employee',
queryset=(
Employee
.objects
.filter(is_active=True)
.filter(is_staff=True)
.only(
'pk',
'last_name',
'first_name',
'email',
'job_title',
)
)
)
)
)
return qs.prefetch_related(
Prefetch(
'directoryentries_schoolstaff_node',
queryset=prefetchqs,
)
)
def prefetch_schoolfaculty_detail(qs):
prefetchqs = (
SchoolFaculty
.objects
.filter(deleted=0)
.filter(published=1)
.filter(employee__is_active=True)
.filter(employee__is_staff=True)
.prefetch_related(
Prefetch(
'employee',
queryset=(
Employee
.objects
.filter(is_active=True)
.filter(is_staff=True)
.only(
'pk',
'last_name',
'first_name',
'email',
'job_title',
)
)
)
)
.order_by(
'employee__first_name',
'employee__last_name',
)
)
return qs.prefetch_related(
Prefetch(
'directoryentries_schoolfaculty_node',
queryset=prefetchqs,
)
)
def prefetch_subpage_detail(qs):
prefetchqs = (
SubPage
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'title',
'url',
'inline_order',
'related_node_id',
)
)
return qs.prefetch_related(
Prefetch(
'pages_subpage_node',
queryset=prefetchqs,
)
)
def add_additional_context(request, context, node):
if request.path == '/' or request.path == '/home/':
context['supermessage'] = (
SuperintendentMessage
.objects
.filter(deleted=0)
.filter(published=1)
.order_by('-author_date')
.only(
'title',
'author_date',
'summary',
'url',
)[:1]
)
context['news'] = (
News
.objects
.filter(deleted=0)
.filter(published=1)
.order_by(
'-pinned',
'-author_date',
)
.only(
'title',
'author_date',
'summary',
'url',
)
.prefetch_related(
Prefetch(
'images_newsthumbnail_node',
queryset=(
NewsThumbnail
.objects
.only(
'image_file',
'alttext',
'related_node_id',
)
)
)
)[0:5]
)
if request.path == '/departments/department-structure/':
context['departments'] = (
prefetch_building_location_detail(
Department
.objects
.filter(deleted=0)
.filter(published=1)
.order_by('lft')
)
)
if request.path == '/board-of-education/policies/':
district_policies = (
BoardPolicy
.objects
.filter(deleted=0)
.filter(published=1)
            .order_by('section__lft', 'index')
            .only('pk', 'policy_title', 'index', 'section', 'related_node')
            .prefetch_related(
                Prefetch('section', queryset=BoardPolicySection.objects.filter(deleted=0).filter(published=1).only('pk', 'section_prefix', 'description')),
                Prefetch('directoryentries_boardpolicyadmin_node', queryset=BoardPolicyAdmin.objects.filter(deleted=0).filter(published=1).order_by('title').only('pk', 'employee', 'related_node').prefetch_related(Prefetch('employee', queryset=Employee.objects.filter(is_active=1).filter(is_staff=1).only('pk', 'last_name', 'first_name')))),
                Prefetch('documents_policy_node', queryset=Policy.objects.filter(deleted=0).filter(published=1).only('pk', 'related_node').prefetch_related(Prefetch('files_file_node', queryset=File.objects.filter(deleted=0).filter(published=1).order_by('file_language__lft', 'file_language__title').only('title', 'file_file', 'file_language', 'related_node').prefetch_related(Prefetch('file_language', queryset=Language.objects.filter(deleted=0).filter(published=1).only('title')))))),
                Prefetch('documents_administrativeprocedure_node', queryset=AdministrativeProcedure.objects.filter(deleted=0).filter(published=1).only('pk', 'related_node').prefetch_related(Prefetch('files_file_node', queryset=File.objects.filter(deleted=0).filter(published=1).order_by('file_language__lft', 'file_language__title').only('title', 'file_file', 'file_language', 'related_node').prefetch_related(Prefetch('file_language', queryset=Language.objects.filter(deleted=0).filter(published=1).only('title')))))),
                Prefetch('documents_supportingdocument_node', queryset=SupportingDocument.objects.filter(deleted=0).filter(published=1).only('pk', 'document_title', 'related_node').prefetch_related(Prefetch('files_file_node', queryset=File.objects.filter(deleted=0).filter(published=1).order_by('file_language__lft', 'file_language__title').only('title', 'file_file', 'file_language', 'related_node').prefetch_related(Prefetch('file_language', queryset=Language.objects.filter(deleted=0).filter(published=1).only('title')))))),
            )
)
board_policies = []
community_policies = []
financial_policies = []
general_policies = []
instructional_policies = []
personnel_policies = []
student_policies = []
for policy in district_policies:
if policy.section.title == 'Board Policies':
board_policies.append(policy)
if policy.section.title == 'Community Policies':
community_policies.append(policy)
if policy.section.title == 'Financial Policies':
financial_policies.append(policy)
if policy.section.title == 'General Policies':
general_policies.append(policy)
if policy.section.title == 'Instructional Policies':
instructional_policies.append(policy)
if policy.section.title == 'Personnel Policies':
personnel_policies.append(policy)
if policy.section.title == 'Student Policies':
student_policies.append(policy)
context['board_policies'] = board_policies
context['community_policies'] = community_policies
context['financial_policies'] = financial_policies
context['general_policies'] = general_policies
context['instructional_policies'] = instructional_policies
context['personnel_policies'] = personnel_policies
context['student_policies'] = student_policies
if request.path == '/board-of-education/policies/policy-review-schedule/':
context['policy_review'] = OrderedDict()
policy_review = (
BoardPolicy
.objects
.filter(deleted=0)
.filter(published=1)
.exclude(subcommittee_review=None)
.exclude(boardmeeting_review=None)
.order_by(
'subcommittee_review',
'section__lft',
'index')
.only(
'pk',
'policy_title',
'index',
'section',
'subcommittee_review',
'boardmeeting_review',
'last_approved',
'related_node',
)
.prefetch_related(
Prefetch(
'section',
queryset=(
BoardPolicySection
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'section_prefix',
)
)
)
)
)
for policy in policy_review:
strdate = '{0}{1}'.format(
policy.subcommittee_review.strftime('%Y%m%d'),
policy.boardmeeting_review.strftime('%Y%m%d'),
)
if strdate not in context['policy_review']:
context['policy_review'][strdate] = {}
context['policy_review'][strdate]['subcommittee_review'] = (
policy.subcommittee_review.strftime('%m/%d/%Y')
)
context['policy_review'][strdate]['boardmeeting_review'] = (
policy.boardmeeting_review.strftime('%m/%d/%Y')
)
context['policy_review'][strdate]['policies'] = []
context['policy_review'][strdate]['policies'].append(policy)
if node.content_type == 'newsyear':
context['newsyears'] = NewsYear.objects.all().order_by('-yearend')
context['news'] = (
            News
            .objects
.filter(parent__url=request.path)
.filter(deleted=0)
.filter(published=1)
.only(
'title',
'author_date',
'summary',
'url',
)
.prefetch_related(
Prefetch(
'images_newsthumbnail_node',
queryset=(
NewsThumbnail
.objects
.only(
'image_file',
'alttext',
'related_node_id',
)
)
)
)
)
newsmonths = [
{'month': 'June', 'news': [], },
{'month': 'May', 'news': [], },
{'month': 'April', 'news': [], },
{'month': 'March', 'news': [], },
{'month': 'February', 'news': [], },
{'month': 'January', 'news': [], },
{'month': 'December', 'news': [], },
{'month': 'November', 'news': [], },
{'month': 'October', 'news': [], },
{'month': 'September', 'news': [], },
{'month': 'August', 'news': [], },
{'month': 'July', 'news': [], },
]
for item in context['news']:
if item.author_date.month == 6:
newsmonths[0]['news'].append(item)
if item.author_date.month == 5:
newsmonths[1]['news'].append(item)
if item.author_date.month == 4:
newsmonths[2]['news'].append(item)
if item.author_date.month == 3:
newsmonths[3]['news'].append(item)
if item.author_date.month == 2:
newsmonths[4]['news'].append(item)
if item.author_date.month == 1:
newsmonths[5]['news'].append(item)
if item.author_date.month == 12:
newsmonths[6]['news'].append(item)
if item.author_date.month == 11:
newsmonths[7]['news'].append(item)
if item.author_date.month == 10:
newsmonths[8]['news'].append(item)
if item.author_date.month == 9:
newsmonths[9]['news'].append(item)
if item.author_date.month == 8:
newsmonths[10]['news'].append(item)
if item.author_date.month == 7:
newsmonths[11]['news'].append(item)
context['newsmonths'] = newsmonths
if request.path == '/schools/' or request.path == '/schools/school-registration-dates/':
schools = (
School
.objects
.filter(deleted=0)
.filter(published=1)
.order_by('title')
.only(
'pk',
'title',
'school_number',
'building_location',
'schooltype',
'schooloptions',
'website_url',
'scc_url',
'calendar_url',
'donate_url',
'boundary_map',
'url',
'main_phone',
)
.prefetch_related(
Prefetch(
'schooltype',
queryset=(
SchoolType
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
)
)
),
Prefetch(
'schooloptions',
queryset=(
SchoolOption
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
)
)
),
Prefetch(
'building_location',
queryset=(
Location
.objects
.only(
'street_address',
'location_city',
'location_state',
'location_zipcode',
'google_place',
)
.prefetch_related(
Prefetch(
'location_city',
queryset=(
City
.objects
.only('title')
)
),
Prefetch(
'location_state',
queryset=(
State
.objects
.only('title')
)
),
Prefetch(
'location_zipcode',
queryset=(
Zipcode
.objects
.only('title')
)
)
)
)
),
Prefetch(
'images_thumbnail_node',
queryset=(
Thumbnail
.objects
.only(
'image_file',
'alttext',
'related_node_id',
)
)
)
)
)
context['elementary_schools_directory'] = []
context['k8_schools_directory'] = []
context['middle_schools_directory'] = []
context['high_schools_directory'] = []
context['charter_schools_directory'] = []
context['community_learning_centers_directory'] = []
for school in schools:
if school.schooltype.title == 'Elementary Schools':
context['elementary_schools_directory'].append(school)
if school.schooltype.title == 'K-8 Schools':
context['k8_schools_directory'].append(school)
if school.schooltype.title == 'Middle Schools':
context['middle_schools_directory'].append(school)
if school.schooltype.title == 'High Schools':
context['high_schools_directory'].append(school)
if school.schooltype.title == 'Charter Schools':
context['charter_schools_directory'].append(school)
if school.schooltype.title == 'Community Learning Centers':
context['community_learning_centers_directory'].append(school)
context['learningoptions'] = SchoolOption.objects.filter(deleted=0).filter(published=1).order_by('title')
        # Pass the callable (not its result) so the feed is only fetched on a cache miss.
        context['school_reg_dates'] = cache.get_or_set('school_reg_dates', updates_school_reg_dates, 120)
if request.path == '/schools/elementary-schools/':
schools = (
School
.objects
.filter(deleted=0)
.filter(published=1)
.order_by('title')
.only(
'pk',
'title',
'building_location',
'schooltype',
'schooloptions',
'website_url',
'scc_url',
'calendar_url',
'donate_url',
'boundary_map',
'url',
'main_phone',
)
.prefetch_related(
Prefetch(
'schooltype',
queryset=(
SchoolType
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
)
)
),
Prefetch(
'schooloptions',
queryset=(
SchoolOption
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
)
)
),
Prefetch(
'building_location',
queryset=(
Location
.objects
.only(
'street_address',
'location_city',
'location_state',
'location_zipcode',
'google_place',
)
.prefetch_related(
Prefetch(
'location_city',
queryset=(
City
.objects
.only('title')
)
),
Prefetch(
'location_state',
queryset=(
State
.objects
.only('title')
)
),
Prefetch(
'location_zipcode',
queryset=(
Zipcode
.objects
.only('title')
)
)
)
)
),
Prefetch(
'images_thumbnail_node',
queryset=(
Thumbnail
.objects
.only(
'image_file',
'alttext',
'related_node_id',
)
)
)
)
)
context['schools'] = []
for school in schools:
if school.schooltype.title == 'Elementary Schools':
context['schools'].append(school)
context['learningoptions'] = SchoolOption.objects.filter(deleted=0).filter(published=1).order_by('title')
if request.path == '/schools/k-8-schools/':
schools = (
School
.objects
.filter(deleted=0)
.filter(published=1)
.order_by('title')
.only(
'pk',
'title',
'building_location',
'schooltype',
'schooloptions',
'website_url',
'scc_url',
'calendar_url',
'donate_url',
'boundary_map',
'url',
'main_phone',
)
.prefetch_related(
Prefetch(
'schooltype',
queryset=(
SchoolType
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
)
)
),
Prefetch(
'schooloptions',
queryset=(
SchoolOption
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
)
)
),
Prefetch(
'building_location',
queryset=(
Location
.objects
.only(
'street_address',
'location_city',
'location_state',
'location_zipcode',
'google_place',
)
.prefetch_related(
Prefetch(
'location_city',
queryset=(
City
.objects
.only('title')
)
),
Prefetch(
'location_state',
queryset=(
State
.objects
.only('title')
)
),
Prefetch(
'location_zipcode',
queryset=(
Zipcode
.objects
.only('title')
)
)
)
)
),
Prefetch(
'images_thumbnail_node',
queryset=(
Thumbnail
.objects
.only(
'image_file',
'alttext',
'related_node_id',
)
)
)
)
)
context['schools'] = []
for school in schools:
if school.schooltype.title == 'K-8 Schools':
context['schools'].append(school)
context['learningoptions'] = SchoolOption.objects.filter(deleted=0).filter(published=1).order_by('title')
if request.path == '/schools/middle-schools/':
schools = (
School
.objects
.filter(deleted=0)
.filter(published=1)
.order_by('title')
.only(
'pk',
'title',
'building_location',
'schooltype',
'schooloptions',
'website_url',
'scc_url',
'calendar_url',
'donate_url',
'boundary_map',
'url',
'main_phone',
)
.prefetch_related(
Prefetch(
'schooltype',
queryset=(
SchoolType
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
)
)
),
Prefetch(
'schooloptions',
queryset=(
SchoolOption
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
)
)
),
Prefetch(
'building_location',
queryset=(
Location
.objects
.only(
'street_address',
'location_city',
'location_state',
'location_zipcode',
'google_place',
)
.prefetch_related(
Prefetch(
'location_city',
queryset=(
City
.objects
.only('title')
)
),
Prefetch(
'location_state',
queryset=(
State
.objects
.only('title')
)
),
Prefetch(
'location_zipcode',
queryset=(
Zipcode
.objects
.only('title')
)
)
)
)
),
Prefetch(
'images_thumbnail_node',
queryset=(
Thumbnail
.objects
.only(
'image_file',
'alttext',
'related_node_id',
)
)
)
)
)
context['schools'] = []
for school in schools:
if school.schooltype.title == 'Middle Schools':
context['schools'].append(school)
context['learningoptions'] = SchoolOption.objects.filter(deleted=0).filter(published=1).order_by('title')
if request.path == '/schools/high-schools/':
schools = (
School
.objects
.filter(deleted=0)
.filter(published=1)
.order_by('title')
.only(
'pk',
'title',
'building_location',
'schooltype',
'schooloptions',
'website_url',
'scc_url',
'calendar_url',
'donate_url',
'boundary_map',
'url',
'main_phone',
)
.prefetch_related(
Prefetch(
'schooltype',
queryset=(
SchoolType
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
)
)
),
Prefetch(
'schooloptions',
queryset=(
SchoolOption
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
)
)
),
Prefetch(
'building_location',
queryset=(
Location
.objects
.only(
'street_address',
'location_city',
'location_state',
'location_zipcode',
'google_place',
)
.prefetch_related(
Prefetch(
'location_city',
queryset=(
City
.objects
.only('title')
)
),
Prefetch(
'location_state',
queryset=(
State
.objects
.only('title')
)
),
Prefetch(
'location_zipcode',
queryset=(
Zipcode
.objects
.only('title')
)
)
)
)
),
Prefetch(
'images_thumbnail_node',
queryset=(
Thumbnail
.objects
.only(
'image_file',
'alttext',
'related_node_id',
)
)
)
)
)
context['schools'] = []
for school in schools:
if school.schooltype.title == 'High Schools':
context['schools'].append(school)
context['learningoptions'] = SchoolOption.objects.filter(deleted=0).filter(published=1).order_by('title')
if request.path == '/schools/charter-schools/':
schools = (
School
.objects
.filter(deleted=0)
.filter(published=1)
.order_by('title')
.only(
'pk',
'title',
'building_location',
'schooltype',
'schooloptions',
'website_url',
'scc_url',
'calendar_url',
'donate_url',
'boundary_map',
'url',
'main_phone',
)
.prefetch_related(
Prefetch(
'schooltype',
queryset=(
SchoolType
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
)
)
),
Prefetch(
'schooloptions',
queryset=(
SchoolOption
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
)
)
),
Prefetch(
'building_location',
queryset=(
Location
.objects
.only(
'street_address',
'location_city',
'location_state',
'location_zipcode',
'google_place',
)
.prefetch_related(
Prefetch(
'location_city',
queryset=(
City
.objects
.only('title')
)
),
Prefetch(
'location_state',
queryset=(
State
.objects
.only('title')
)
),
Prefetch(
'location_zipcode',
queryset=(
Zipcode
.objects
.only('title')
)
)
)
)
),
Prefetch(
'images_thumbnail_node',
queryset=(
Thumbnail
.objects
.only(
'image_file',
'alttext',
'related_node_id',
)
)
)
)
)
context['schools'] = []
for school in schools:
if school.schooltype.title == 'Charter Schools':
context['schools'].append(school)
context['learningoptions'] = SchoolOption.objects.filter(deleted=0).filter(published=1).order_by('title')
if request.path == '/schools/community-learning-centers/':
schools = (
School
.objects
.filter(deleted=0)
.filter(published=1)
.order_by('title')
.only(
'pk',
'title',
'building_location',
'schooltype',
'schooloptions',
'website_url',
'scc_url',
'calendar_url',
'donate_url',
'boundary_map',
'url',
'main_phone',
)
.prefetch_related(
Prefetch(
'schooltype',
queryset=(
SchoolType
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
)
)
),
Prefetch(
'schooloptions',
queryset=(
SchoolOption
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'pk',
'title',
)
)
),
Prefetch(
'building_location',
queryset=(
Location
.objects
.only(
'street_address',
'location_city',
'location_state',
'location_zipcode',
'google_place',
)
.prefetch_related(
Prefetch(
'location_city',
queryset=(
City
.objects
.only('title')
)
),
Prefetch(
'location_state',
queryset=(
State
.objects
.only('title')
)
),
Prefetch(
'location_zipcode',
queryset=(
Zipcode
.objects
.only('title')
)
)
)
)
),
Prefetch(
'images_thumbnail_node',
queryset=(
Thumbnail
.objects
.only(
'image_file',
'alttext',
'related_node_id',
)
)
)
)
)
context['schools'] = []
for school in schools:
if school.schooltype.title == 'Community Learning Centers':
context['schools'].append(school)
context['learningoptions'] = SchoolOption.objects.filter(deleted=0).filter(published=1).order_by('title')
if request.path == '/departments/':
all_departments = (
Department
.objects
.filter(deleted=0)
.filter(published=1)
.order_by('title')
.only(
'title',
'building_location',
'url',
'main_phone',
'short_description',
'is_department',
)
.prefetch_related(
Prefetch(
'building_location',
queryset=(
Location
.objects
.only(
'street_address',
'location_city',
'location_state',
'location_zipcode',
'google_place',
)
.prefetch_related(
Prefetch(
'location_city',
queryset=(
City
.objects
.only('title')
)
),
Prefetch(
'location_state',
queryset=(
State
.objects
.only('title')
)
),
Prefetch(
'location_zipcode',
queryset=(
Zipcode
.objects
.only('title')
)
)
)
)
)
)
)
departments = {
'departments': [],
'programs': [],
}
for department in all_departments:
if department.is_department:
departments['departments'].append(department)
else:
departments['programs'].append(department)
context['departments'] = departments
if node.content_type == 'superintendentmessageyear':
context['messageyears'] = SuperintendentMessageYear.objects.all().order_by('-yearend')
context['superintendent_messages'] = (
SuperintendentMessage
.objects
.filter(parent__url=request.path)
.filter(deleted=0)
.filter(published=1)
.only(
'title',
'author_date',
'summary',
'url',
)
.prefetch_related(
Prefetch(
'images_newsthumbnail_node',
queryset=(
NewsThumbnail
.objects
.only(
'image_file',
'alttext',
'related_node_id',
)
)
)
)
)
messagemonths = [
{'month': 'June', 'message': [], },
{'month': 'May', 'message': [], },
{'month': 'April', 'message': [], },
{'month': 'March', 'message': [], },
{'month': 'February', 'message': [], },
{'month': 'January', 'message': [], },
{'month': 'December', 'message': [], },
{'month': 'November', 'message': [], },
{'month': 'October', 'message': [], },
{'month': 'September', 'message': [], },
{'month': 'August', 'message': [], },
{'month': 'July', 'message': [], },
]
        for item in context['superintendent_messages']:
            # Same June..July ordering as the news archive; map month number to index.
            messagemonths[(6 - item.author_date.month) % 12]['message'].append(item)
context['messagemonths'] = messagemonths
if request.path == '/departments/communications-and-community-relations/district-logo/':
all_logos = DistrictLogo.objects.filter(deleted=0).filter(published=1).order_by('district_logo_group__lft','district_logo_style_variation__lft')
districtlogos = {
'primary':[],
'primaryrev':[],
'secondary':[],
'secondaryrev':[],
'wordmark':[],
}
for logo in all_logos:
if logo.district_logo_group.title == 'Primary Logo':
districtlogos['primary'].append(logo)
if logo.district_logo_group.title == 'Primary Logo Reversed':
districtlogos['primaryrev'].append(logo)
if logo.district_logo_group.title == 'Secondary Logo':
districtlogos['secondary'].append(logo)
if logo.district_logo_group.title == 'Secondary Logo Reversed':
districtlogos['secondaryrev'].append(logo)
if logo.district_logo_group.title == 'Wordmark':
districtlogos['wordmark'].append(logo)
context['districtlogos'] = districtlogos
if node.content_type == 'department':
context['department_children'] = (
Department
.objects
.filter(deleted=0)
.filter(published=1)
.filter(parent__url=request.path)
.order_by('title')
.only(
'pk',
'title',
'short_description',
'main_phone',
'building_location',
'content_type',
'menu_title',
'url',
)
.prefetch_related(
Prefetch(
'building_location',
queryset=(
Location
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'street_address',
'location_city',
'location_state',
'location_zipcode',
'google_place',
)
.prefetch_related(
Prefetch(
'location_city',
queryset=(
City
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
),
Prefetch(
'location_state',
queryset=(
State
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
),
Prefetch(
'location_zipcode',
queryset=(
Zipcode
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
)
)
)
)
)
)
if request.path == '/directory/':
context['people'] = (
Employee
.objects
.filter(is_active=1)
.filter(is_staff=1)
.filter(in_directory=1)
.order_by('last_name')
.only(
'pk',
'last_name',
'first_name',
'job_title',
'email',
'department',
)
.prefetch_related(
Prefetch(
'department',
queryset=(
Node
.objects
.only(
'node_title',
'url',
)
)
)
)
)
if request.path.startswith('/directory/last-name-'):
letter = request.path[-2]
context['people'] = (
Employee
.objects
.filter(is_active=1)
.filter(is_staff=1)
.filter(in_directory=1)
.filter(last_name__istartswith=letter)
.order_by('last_name')
.only(
'pk',
'last_name',
'first_name',
'job_title',
'email',
'department',
)
.prefetch_related(
Prefetch(
'department',
queryset=(
Node
.objects
.only(
'node_title',
'url',
)
)
)
)
)
if node.content_type == 'districtcalendaryear':
context['districtcalendaryears'] = (
DistrictCalendarYear
.objects
.filter(deleted=0)
.filter(published=1)
.order_by('-yearend')
)
context['districtcalendarevents'] = (
DistrictCalendarEvent
.objects
.filter(deleted=0)
.filter(published=1)
.filter(parent__url=request.path)
)
if node.content_type == 'boardmeetingyear':
context['board_meeting_years'] = (
BoardMeetingYear
.objects
.filter(deleted=0)
.filter(published=1)
.order_by('-yearend')
)
context['board_meetings'] = (
BoardMeeting
.objects
.filter(deleted=0)
.filter(published=1)
.filter(parent__url=request.path)
.order_by('-startdate')
)
if (
request.method != 'POST' and
(
request.path == '/contact-us/' or
request.path == '/contact-us/inline/'
)
):
context['form'] = contactmessage_get(request)
context['from_page'] = (
commonfunctions
.nodefindobject(
Node.objects
.get(pk=context['form'].fields['parent'].initial)
)
)
try:
context['in_this_section'] = (
node
.get_ancestors(ascending=True)
.filter(
deleted=0,
published=1,
pagelayout__namespace='site-section.html'
).first()
.get_children()
.filter(
node_type='pages',
content_type='page',
published=1,
deleted=0,
)
.order_by('page__page__inline_order')
)
if not commonfunctions.is_siteadmin(request):
context['in_this_section'] = context['in_this_section'].filter(section_page_count__gte=1)
except AttributeError:
pass
return context
def process_post(request):
if (
request.method == 'POST' and
(
request.path == '/contact-us/' or
request.path == '/contact-us/inline/'
)
):
post = contactmessage_post(request)
return post.parent.url
def contactmessage_post(request):
form = ContactMessageForm(request.POST)
if form.is_valid():
if request.user.is_anonymous:
user = User.objects.get(username='AnonymousUser')
else:
user = User.objects.get(pk=request.user.pk)
post = form.save(commit=False)
message_parent = Node.objects.get(pk=post.parent.pk)
if post.primary_contact == '':
if message_parent.primary_contact:
post.primary_contact = message_parent.primary_contact
else:
post.primary_contact = request.site.dashboard_general_site.primary_contact
post.create_user = user
post.update_user = user
post.site = request.site
post.searchable = False
post.remote_addr = request.META['HTTP_X_FORWARDED_FOR']
post.user_agent = request.META['HTTP_USER_AGENT']
post.http_headers = json.dumps(request.META, default=str)
if not post.our_message:
post.save()
messages.success(
request,
'Thank you for contacting us. '
'Someone will get back to you shortly.')
else:
messages.error(
request,
'Something was wrong with your message. Please try again.')
return post
def contactmessage_get(request):
form = ContactMessageForm()
try:
if request.GET['pid']:
form.fields['parent'].initial = request.GET['pid']
except:
form.fields['parent'].initial = commonfunctions.get_contactpage(request)
try:
if request.GET['cid']:
form.fields['primary_contact'].initial = request.GET['cid']
except:
try:
form.fields['primary_contact'].initial = str(Node.objects.get(pk=form.fields['parent'].initial).primary_contact.pk)
except:
try:
form.fields['primary_contact'].initial = str(request.site.dashboard_general_site.primary_contact.pk)
except:
form.fields['primary_contact'].initial = str(User.objects.get(username='[email protected]').pk)
try:
message_to = User.objects.get(
pk=form.fields['primary_contact'].initial
)
except User.DoesNotExist:
message_to = User.objects.get(
username='[email protected]',
)
form.fields['message_to'].initial = '{0} {1}'.format(
message_to.first_name,
message_to.last_name,
)
form.fields['message_to'].disabled = True
return form
# def contact(request):
# template = 'cmstemplates/www_slcschools_org/pagelayouts/contact-us.html'
# context = {}
# context['page'] = get_object_or_404(Page, url=request.path)
# context['pageopts'] = context['page']._meta
# if request.method == "POST":
# post = contactmessage_post(request)
# return redirect(post.parent.url)
# else:
# context['form'] = contactmessage_get(request)
# context['from_page'] = commonfunctions.nodefindobject(Node.objects.get(pk=context['form'].fields['parent'].initial))
# return render(request, template, context)
# def contact_inline(request):
# template = 'cmstemplates/www_slcschools_org/blocks/contact-us-inline.html'
# context = {}
# context['page'] = get_object_or_404(Page, url=request.path)
# context['pageopts'] = context['page']._meta
# if request.method == "POST":
# post = contactmessage_post(request)
# return redirect(post.parent.url)
# else:
# context['form'] = contactmessage_get(request)
# context['from_page'] = commonfunctions.nodefindobject(Node.objects.get(pk=context['form'].fields['parent'].initial))
# return render(request, template, context)
def node_lookup(request):
if redirect_request(request) is not None:
return redirect_request(request)
try:
if request.path == '/':
node = Node.objects.get(url='/home/', site=request.site)
else:
node = Node.objects.get(url=request.path, site=request.site)
except Node.DoesNotExist:
raise Http404('Page not found.')
Model = apps.get_model(node.node_type, node.content_type)
if node.pagelayout.namespace == 'site-section.html':
first_child = (
node
.get_children()
.filter(
node_type='pages',
content_type='page',
deleted=0,
published=1
)
.exclude(
pagelayout__namespace='site-section.html'
)
.order_by('page__page__inline_order')
.first()
)
if first_child:
return redirect(first_child.url)
else:
if not commonfunctions.is_siteadmin(request):
raise Http404('Page not found.')
if node.pagelayout.namespace == 'disclosure-document.html':
item = (
Model
.objects
.filter(pk=node.pk)
.prefetch_related(
Prefetch(
'files_file_node',
queryset=(
File
.objects
.filter(deleted=0)
.filter(published=1)
.order_by(
'file_language__lft',
'file_language__title',
)
.only(
'title',
'file_file',
'file_language',
'related_node',
)
.prefetch_related(
Prefetch(
'file_language',
queryset=(
Language
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
)
)
)
)
)
)
item = item.first()
if item.files_file_node.all().count() == 1:
return redirect(item.files_file_node.first().url)
template = set_template(request, node)
context = {}
context['page'] = item
context['pageopts'] = context['page']._meta
return render(request, template, context)
if node.pagelayout.namespace == 'school-community-council-meeting-agenda.html':
item = (
Model
.objects
.filter(pk=node.pk)
.prefetch_related(
Prefetch(
'files_file_node',
queryset=(
File
.objects
.filter(deleted=0)
.filter(published=1)
.order_by(
'file_language__lft',
'file_language__title',
)
.only(
'title',
'file_file',
'file_language',
'related_node',
)
.prefetch_related(
Prefetch(
'file_language',
queryset=(
Language
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
)
)
)
)
)
)
item = item.first()
if item.files_file_node.all().count() == 1:
return redirect(item.files_file_node.first().url)
template = set_template(request, node)
context = {}
context['page'] = item
context['pageopts'] = context['page']._meta
return render(request, template, context)
if node.pagelayout.namespace == 'school-community-council-meeting-minutes.html':
item = (
Model
.objects
.filter(pk=node.pk)
.prefetch_related(
Prefetch(
'files_file_node',
queryset=(
File
.objects
.filter(deleted=0)
.filter(published=1)
.order_by(
'file_language__lft',
'file_language__title',
)
.only(
'title',
'file_file',
'file_language',
'related_node',
)
.prefetch_related(
Prefetch(
'file_language',
queryset=(
Language
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
)
)
)
)
)
)
item = item.first()
if item.files_file_node.all().count() == 1:
return redirect(item.files_file_node.first().url)
template = set_template(request, node)
context = {}
context['page'] = item
context['pageopts'] = context['page']._meta
return render(request, template, context)
if node.node_type == 'pages':
if request.method == 'POST':
return redirect(process_post(request))
template = set_template(request, node)
context = {}
context['page'] = (Model
.objects
.filter(pk=node.pk)
)
fields = context['page'].model._meta.get_fields(include_hidden=True)
# Add prefetch function calls here
        # get_fields() returns Field objects, so compare against their names.
        if 'building_location' in [field.name for field in fields]:
context['page'] = (
prefetch_building_location_detail(context['page'])
)
context['page'] = prefetch_contentbanner_detail(context['page'])
context['page'] = prefetch_actionbuttons_detail(context['page'])
context['page'] = prefetch_boardmembers_detail(context['page'])
context['page'] = prefetch_studentboardmember_detail(context['page'])
context['page'] = prefetch_schooladministrators_detail(context['page'])
context['page'] = prefetch_administrators_detail(context['page'])
context['page'] = prefetch_staff_detail(context['page'])
context['page'] = prefetch_resourcelinks_detail(context['page'])
context['page'] = prefetch_documents_detail(context['page'])
context['page'] = prefetch_disclosuredocuments_detail(context['page'])
context['page'] = prefetch_subpage_detail(context['page'])
context['page'] = prefetch_announcement_detail(context['page'])
context['page'] = prefetch_schooladministration_detail(context['page'])
context['page'] = prefetch_schoolstaff_detail(context['page'])
context['page'] = prefetch_schoolfaculty_detail(context['page'])
context['page'] = prefetch_subjectgradelevel_detail(context['page'])
context['page'] = prefetch_schoolcommunitycouncilmeetings_detail(context['page'])
context['page'] = prefetch_schoolcommunitycouncilmembers_detail(context['page'])
# Add additional context here
context = add_additional_context(request, context, node)
# Change Queryset into object
context['page'] = context['page'].first()
context['pageopts'] = context['page']._meta
context['section'] = context['page'].get_ancestors(ascending=True).filter(
deleted=0,
published=1,
pagelayout__namespace='site-section.html'
).first()
if context['section']:
context['section'] = commonfunctions.nodefindobject(context['section'])
context['sectionopts'] = context['section']._meta
return render(request, template, context)
if node.node_type == 'documents':
if node.content_type == 'document':
item = (
Model
.objects
.filter(pk=node.pk)
.prefetch_related(
Prefetch(
'files_file_node',
queryset=(
File
.objects
.filter(deleted=0)
.filter(published=1)
.order_by(
'file_language__lft',
'file_language__title',
)
.only(
'title',
'file_file',
'file_language',
'related_node',
)
.prefetch_related(
Prefetch(
'file_language',
queryset=(
Language
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
)
)
)
)
)
)
item = item.first()
if item.files_file_node.all().count() == 1:
return redirect(item.files_file_node.first().url)
template = set_template(request, node)
context = {}
context['page'] = item
context['pageopts'] = context['page']._meta
return render(request, template, context)
if node.content_type == 'boardpolicy':
return HttpResponse(status=200)
if node.content_type == 'policy':
item = (
Model
.objects
.filter(pk=node.pk)
.prefetch_related(
Prefetch(
'files_file_node',
queryset=(
File
.objects
.filter(deleted=0)
.filter(published=1)
.order_by(
'file_language__lft',
'file_language__title',
)
.only(
'title',
'file_file',
'file_language',
'related_node',
)
.prefetch_related(
Prefetch(
'file_language',
queryset=(
Language
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
)
)
)
)
)
)
item = item.first()
if item.files_file_node.all().count() == 1:
return redirect(item.files_file_node.first().url)
template = set_template(request, node)
context = {}
context['page'] = item
context['pageopts'] = context['page']._meta
return render(request, template, context)
if node.content_type == 'administrativeprocedure':
item = (
Model
.objects
.filter(pk=node.pk)
.prefetch_related(
Prefetch(
'files_file_node',
queryset=(
File
.objects
.filter(deleted=0)
.filter(published=1)
.order_by(
'file_language__lft',
'file_language__title',
)
.only(
'title',
'file_file',
'file_language',
'related_node',
)
.prefetch_related(
Prefetch(
'file_language',
queryset=(
Language
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
)
)
)
)
)
)
item = item.first()
if item.files_file_node.all().count() == 1:
return redirect(item.files_file_node.first().url)
template = set_template(request, node)
context = {}
context['page'] = item
context['pageopts'] = context['page']._meta
return render(request, template, context)
if node.content_type == 'supportingdocument':
item = (
Model
.objects
.filter(pk=node.pk)
.prefetch_related(
Prefetch(
'files_file_node',
queryset=(
File
.objects
.filter(deleted=0)
.filter(published=1)
.order_by(
'file_language__lft',
'file_language__title',
)
.only(
'title',
'file_file',
'file_language',
'related_node',
)
.prefetch_related(
Prefetch(
'file_language',
queryset=(
Language
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
)
)
)
)
)
)
item = item.first()
if item.files_file_node.all().count() == 1:
return redirect(item.files_file_node.first().url)
template = set_template(request, node)
context = {}
context['page'] = item
context['pageopts'] = context['page']._meta
return render(request, template, context)
if node.content_type == 'boardmeetingagenda':
item = (
Model
.objects
.filter(pk=node.pk)
.prefetch_related(
Prefetch(
'files_file_node',
queryset=(
File
.objects
.filter(deleted=0)
.filter(published=1)
.order_by(
'file_language__lft',
'file_language__title',
)
.only(
'title',
'file_file',
'file_language',
'related_node',
)
.prefetch_related(
Prefetch(
'file_language',
queryset=(
Language
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
)
)
)
)
)
)
item = item.first()
if item.files_file_node.all().count() == 1:
return redirect(item.files_file_node.first().url)
template = set_template(request, node)
context = {}
context['page'] = item
context['pageopts'] = context['page']._meta
return render(request, template, context)
if node.content_type == 'boardmeetingminutes':
item = (
Model
.objects
.filter(pk=node.pk)
.prefetch_related(
Prefetch(
'files_file_node',
queryset=(
File
.objects
.filter(deleted=0)
.filter(published=1)
.order_by(
'file_language__lft',
'file_language__title',
)
.only(
'title',
'file_file',
'file_language',
'related_node',
)
.prefetch_related(
Prefetch(
'file_language',
queryset=(
Language
.objects
.filter(deleted=0)
.filter(published=1)
.only('title')
)
)
)
)
)
)
)
item = item.first()
if item.files_file_node.all().count() == 1:
return redirect(item.files_file_node.first().url)
template = set_template(request, node)
context = {}
context['page'] = item
context['pageopts'] = context['page']._meta
return render(request, template, context)
if node.content_type == 'boardmeetingaudio':
item = (
Model
.objects
.filter(pk=node.pk)
.prefetch_related(
Prefetch(
'files_audiofile_node',
queryset=(
AudioFile
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'title',
'file_file',
'related_node',
)
)
)
)
)
item = item.first()
template = set_template(request, node)
context = {}
context['page'] = item
context['pageopts'] = context['page']._meta
return render(request, template, context)
if node.content_type == 'boardmeetingvideo':
item = (
Model
.objects
.filter(pk=node.pk)
.prefetch_related(
Prefetch(
'files_videofile_node',
queryset=(
VideoFile
.objects
.filter(deleted=0)
.filter(published=1)
.only(
'title',
'file_file',
'related_node',
)
)
)
)
)
item = item.first()
template = set_template(request, node)
context = {}
context['page'] = item
context['pageopts'] = context['page']._meta
return render(request, template, context)
if node.content_type == 'boardmeetingexhibit':
return HttpResponse(status=200)
if node.content_type == 'boardmeetingagendaitem':
return HttpResponse(status=200)
if node.node_type == 'files':
item = (
Model
.objects
.get(pk=node.pk)
)
response = HttpResponse()
response['Content-Type'] = ''
response['X-Accel-Redirect'] = item.file_file.url
response['Content-Disposition'] = 'filename={0}'.format(
item.file_name()
)
return response
if node.node_type == 'images':
item = (
Model
.objects
.get(pk=node.pk)
)
response = HttpResponse()
response['Content-Type'] = ''
response['X-Accel-Redirect'] = item.image_file.url
response['Content-Disposition'] = 'filename={0}'.format(
item.file_name()
)
return response
if node.node_type == 'directoryentries':
if node.content_type == 'schoolfaculty':
template = set_template(request, node)
context = {}
context['page'] = (
Model
.objects
.filter(deleted=0)
.filter(published=1)
.filter(pk=node.pk)
)
context['page'] = prefetch_disclosuredocuments_detail(context['page'])
context['page'] = prefetch_classwebsite_detail(context['page'])
context = add_additional_context(request, context, node)
context['page'] = context['page'].first()
context['pageopts'] = context['page']._meta
context['section'] = context['page'].get_ancestors(ascending=True).filter(
deleted=0,
published=1,
pagelayout__namespace='site-section.html'
).first()
if context['section']:
context['section'] = commonfunctions.nodefindobject(context['section'])
context['sectionopts'] = context['section']._meta
return render(request, template, context)
return HttpResponse(status=404)
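# The six school-directory branches in add_additional_context above repeat the same School
# queryset with identical .only()/.prefetch_related() chains. A minimal sketch of a shared
# helper, assuming the models already imported by this module (School, SchoolType,
# SchoolOption); it is illustration only, not part of the original views, and uses
# string-based prefetch_related rather than the nested Prefetch(... .only(...)) objects above.
def school_directory_queryset():
    return (
        School
        .objects
        .filter(deleted=0)
        .filter(published=1)
        .order_by('title')
        .only(
            'pk', 'title', 'school_number', 'building_location', 'schooltype',
            'schooloptions', 'website_url', 'scc_url', 'calendar_url',
            'donate_url', 'boundary_map', 'url', 'main_phone',
        )
        .prefetch_related(
            'schooltype',
            'schooloptions',
            'building_location',
            'images_thumbnail_node',
        )
    )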
|
python
|
"""
After applying DIP (the Dependency Inversion Principle),
the data source (database or CSV) is injected into the Products class as a repository abstraction.
A factory creates the concrete Database or CSV repository.
"""
class ProductRepository:
@staticmethod
def select_products():
raise NotImplementedError
class DatabaseProductRepository(ProductRepository):
@staticmethod
def select_products():
"""Mock data retrieval from a database."""
return ['Laptop', 'Car']
class CSVProductRepository(ProductRepository):
@staticmethod
def select_products():
"""Mock data retrieval from a CSV file."""
return ['TV', 'Radio']
class ProductFactory:
@staticmethod
def create(repo_type):
if repo_type == 'DB':
return DatabaseProductRepository()
else:
return CSVProductRepository()
class Products:
def __init__(self, repo: ProductRepository):
self._products = []
self.repo = repo
def get_products(self):
self._products = self.repo.select_products()
@property
def products(self):
return self._products
if __name__ == '__main__':
product_repo = ProductFactory.create('DB')
products = Products(product_repo)
products.get_products()
print(products.products)
product_repo = ProductFactory.create('CSV')
products = Products(product_repo)
products.get_products()
print(products.products)
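# A hedged extension sketch (not part of the original example): because Products depends only
# on the ProductRepository abstraction, a new data source can be added without modifying
# Products or the existing repositories. The in-memory repository below is hypothetical.
class InMemoryProductRepository(ProductRepository):
    def __init__(self, items):
        self._items = list(items)

    def select_products(self):
        """Return the products held in memory."""
        return self._items

# Usage, mirroring the factory-driven calls above:
#     products = Products(InMemoryProductRepository(['Phone', 'Tablet']))
#     products.get_products()
#     print(products.products)  # ['Phone', 'Tablet']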
|
python
|
##
import os
from setuptools import setup, find_namespace_packages
from pkg_resources import get_distribution, DistributionNotFound
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup (
name = "namespace.HeardBox",
use_scm_version = True,
setup_requires=['setuptools_scm'],
version = "0.0.1",
author = "Conor Edwards",
author_email = "[email protected]",
description = ("Interface the Heard Proteomic Database with Python"),
url = "http://wwww.github.com/ConorEd/HeardBox",
# packages['HeardBox', 'tests'],
#license = "BSD",
keywords = "Uniprot Excel Interface, Bioinformatics, Quality of life improvement, BLAST, ALIGN, GO, Excel",
long_description=read("README.txt"),
classifiers=[
"Development Sttus :: 2 - Pre-Alpha",
"Topic :: Science",
"License :: OSI Approved :: BSD License",
],
package_dir={'': 'src'},
packages=find_namespace_packages(where='src'),
install_requires=['xlwings', 'biopython', 'uniprot_tools'],
entry_points={
'console_scripts': [
'main = HeardBox.main:main_func',
#'ext_mod = HeardBox._mod:some_func',
]
}
)
#├── setup.py
#├── src
#│ └── namespace
#│ └── mypackage
#│ ├── __init__.py
#│ └── mod1.py
#└── tests
# └── test_mod1.py
|
python
|
"""Regression."""
from ._linear_regression import LinearRegression
from ._neighbors_regression import (
KNeighborsRegressor,
RadiusNeighborsRegressor,
)
|
python
|
#!/usr/bin/python
#-*- coding: utf-8 -*-
# >.>.>.>.>.>.>.>.>.>.>.>.>.>.>.>.
# Licensed under the Apache License, Version 2.0 (the "License")
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# --- File Name: parser.py
# --- Creation Date: 24-02-2020
# --- Last Modified: Tue 25 Feb 2020 16:26:55 AEDT
# --- Author: Xinqi Zhu
# .<.<.<.<.<.<.<.<.<.<.<.<.<.<.<.<
"""
Parser for VP metrics
"""
import argparse
def init_parser():
parser = argparse.ArgumentParser(description='VP metrics.')
parser.add_argument('--result_dir',
help='Results directory.',
type=str,
default='/mnt/hdd/repo_results/VP-metrics-pytorch')
parser.add_argument('--data_dir',
help='Dataset directory.',
type=str,
default='/mnt/hdd/Datasets/test_data')
parser.add_argument('--no_gpu',
help='Do not use GPUs.',
action='store_true')
parser.add_argument('--in_channels',
help='Num channels for model input.',
type=int,
default=6)
parser.add_argument('--out_dim',
help='Num output dimension.',
type=int,
default=7)
parser.add_argument('--lr',
help='Learning rate.',
type=float,
default=0.01)
parser.add_argument('--batch_size',
help='Batch size.',
type=int,
default=32)
parser.add_argument('--epochs',
help='Num epochs to train.',
type=int,
default=60)
parser.add_argument('--input_mode',
help='Input mode for model.',
type=str,
default='concat',
choices=['concat', 'diff'])
parser.add_argument('--test_ratio',
help='Test set ratio.',
type=float,
default=0.5)
parser.add_argument('--workers', help='Num workers.', type=int, default=4)
return parser
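# Typical usage of the parser above (a minimal sketch, not part of the original file):
#     args = init_parser().parse_args()
#     print(args.result_dir, args.batch_size, args.input_mode)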
|
python
|
# Generated by Django 3.2.6 on 2021-08-27 19:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('university', '0006_representative'),
]
operations = [
migrations.AddField(
model_name='course',
name='slug_unimi',
field=models.CharField(max_length=200, null=True, unique=True),
),
migrations.AlterField(
model_name='department',
name='name',
field=models.CharField(max_length=128, unique=True, verbose_name='name'),
),
migrations.AlterUniqueTogether(
name='degree',
unique_together={('name', 'type')},
),
]
|
python
|
from utils import hashable_boundaries, predicted_len
from objects import Database
from collections import defaultdict
from math import ceil
import gen_spectra
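# Overview (descriptive comments inferred from the code below, added for readability):
#   1. modified_make_database_set generates b/y ion spectra (charge 1 and 2) for prefix,
#      sliding-window, and suffix kmers of each protein up to max_len, keyed by m/z.
#   2. handle_sorting_keys flattens the per-ion dictionaries into one kmer list, which is
#      then sorted by mass.
#   3. modified_merge sweeps the mass-sorted kmer list against the boundary intervals with
#      two pointers, collecting kmers whose mass falls inside each (lower, upper) boundary.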
def modified_sort_masses_in_sorted_keys_b(db_dict_b,mz,kmer_list_b):
kmers = db_dict_b[mz]
kmer_list_b += kmers
def modified_sort_masses_in_sorted_keys_y(db_dict_y,mz,kmer_list_y):
kmers = db_dict_y[mz]
kmer_list_y += kmers
def handle_sorting_keys(db_dict_b, db_dict_y, kmer_list):
sorted_b_keys = sorted(db_dict_b.keys())
sorted_y_keys = sorted(db_dict_y.keys())
for mz in sorted_b_keys:
modified_sort_masses_in_sorted_keys_b(db_dict_b,mz,kmer_list)
for mz in sorted_y_keys:
modified_sort_masses_in_sorted_keys_y(db_dict_y,mz,kmer_list)
def modified_add_all(kmer, prot_name,db_dict_b,db_dict_y,kmer_set,start_location,end_location,protein_number):
for ion in 'by':
for charge in [1, 2]:
pre_spec = gen_spectra.gen_spectrum(kmer, ion=ion, charge=charge)
spec = pre_spec
if isinstance(pre_spec,dict):
spec = pre_spec.get('spectrum')
for i, mz in enumerate(spec):
start_position = start_location if ion == 'b' else end_location
end_position = start_position + i if ion == 'b' else end_location - i
kmer_to_add = kmer[:i+1] if ion == 'b' else kmer[-i-1:]
r_d = db_dict_b if ion == 'b' else db_dict_y
# r_d[mz].add(kmer_to_add)
if ion == 'b':
r_d[mz].add((mz, protein_number, kmer_to_add, str(start_position) + '-' + str(end_position), ion, charge))
else:
r_d[mz].add((mz, protein_number, kmer_to_add, str(end_position) + '-' + str(start_position), ion, charge))
kmer_set[kmer_to_add].append(prot_name)
def make_database_set_for_protein(i,plen,max_len,prot_entry,prot_name,db_dict_b,db_dict_y,kmer_set):
print(f'\rOn protein {i+1}/{plen} [{int((i+1) * 100 / plen)}%]', end='')
start = 1
stop = max_len
for j in range(start, stop):
kmer = prot_entry.sequence[:j]
start_position = 1
end_position = j
modified_add_all(kmer, prot_name, db_dict_b,db_dict_y,kmer_set, start_position, end_position, i)
start = 0
stop = len(prot_entry.sequence) - max_len
for j in range(start, stop):
kmer = prot_entry.sequence[j:j+max_len]
start_position = j + 1
end_position = j + max_len
modified_add_all(kmer, prot_name, db_dict_b,db_dict_y,kmer_set,start_position, end_position, i)
start = len(prot_entry.sequence) - max_len
stop = len(prot_entry.sequence)
for j in range(start, stop):
kmer = prot_entry.sequence[j:]
start_position = j+1
end_position = len(prot_entry.sequence)
modified_add_all(kmer, prot_name,db_dict_b,db_dict_y,kmer_set,start_position, end_position, i)
def make_database_set_for_proteins(proteins,max_len,db_dict_b,db_dict_y,kmer_set):
plen = len(proteins)
for i, (prot_name, prot_entry) in enumerate(proteins):
make_database_set_for_protein(i,plen,max_len,prot_entry,prot_name,db_dict_b,db_dict_y,kmer_set)
def modified_make_database_set(proteins: list, max_len: int):
db_dict_b = defaultdict(set)
db_dict_y = defaultdict(set)
kmer_set = defaultdict(list)
make_database_set_for_proteins(proteins,max_len,db_dict_b,db_dict_y,kmer_set)
print('\nSorting the set of protein masses...')
kmer_list = []
handle_sorting_keys(db_dict_b, db_dict_y, kmer_list)
kmer_list = sorted(kmer_list, key=lambda x: x[0])
print('Sorting the set of protein masses done')
return kmer_list, kmer_set
def in_bounds(int1, interval):
if int1 >= interval[0] and int1 <= interval[1]:
return True
else:
return False
def modified_merge(kmers, boundaries: dict):
matched_masses_b, matched_masses_y = defaultdict(list), defaultdict(list)
#Goal: b and y dictionaries mapping mz values to lists of kmers that have a mass within the tolerance
# kmers = make_database_set(db.proteins, max_len)
mz_mapping = dict()
for i,mz in enumerate(boundaries):
mz_mapping[i] = boundaries[mz]
boundary_index, kmer_index, starting_point = 0,0,0
while (boundary_index < len(boundaries)) and (kmer_index < len(kmers)):
#idea is to increment kmer index when mass is too small for boundaries[0] and then stop when mass is too big for boundaries[1]
target_kmer = kmers[kmer_index]
target_boundary = mz_mapping[boundary_index]
if in_bounds(target_kmer[0], target_boundary):
if target_kmer[4] == 'b':
hashable_boundary = hashable_boundaries(target_boundary)
matched_masses_b[hashable_boundary].append(target_kmer)
kmer_index = kmer_index + 1
if target_kmer[4] == 'y':
hashable_boundary = hashable_boundaries(target_boundary)
matched_masses_y[hashable_boundary].append(target_kmer)
kmer_index = kmer_index + 1
elif target_kmer[0] < target_boundary[0]:
kmer_index = kmer_index + 1
starting_point = starting_point + 1
else: #target_kmer > target_boundary[1]
boundary_index = boundary_index + 1
kmer_index = starting_point
return matched_masses_b, matched_masses_y
# def modified_add_matched_to_matched_set(matched_masses_b_batch,matched_masses_b,kmer_set,batch_kmer_set,matched_masses_y_batch,matched_masses_y):
# for k, v in matched_masses_b_batch.items():
# matched_masses_b[k] += v
# for kmer in v:
# kmer_set[kmer] += batch_kmer_set[kmer]
# for k, v in matched_masses_y_batch.items():
# matched_masses_y[k] += v
# for kmer in v:
# kmer_set[kmer] += batch_kmer_set[kmer]
def modified_match_masses_per_protein(kv_prots,max_len,boundaries,kmer_set):
extended_kv_prots = [(k, entry) for (k, v) in kv_prots for entry in v]
kmers, kmer_set = modified_make_database_set(extended_kv_prots, max_len)
# check_for_y_kmers(kmers)
matched_masses_b, matched_masses_y = modified_merge(kmers, boundaries)
# modified_add_matched_to_matched_set(matched_masses_b,kmer_set,kmers,matched_masses_y)
return matched_masses_b, matched_masses_y, kmer_set
def modified_match_masses(boundaries: dict, db: Database, max_pep_len: int):
# matched_masses_b, matched_masses_y, kmer_set = defaultdict(list), defaultdict(list), defaultdict(list) #Not sure this is needed
max_boundary = max(boundaries.keys())
estimated_max_len = ceil(boundaries[max_boundary][1] / 57.021464)
max_len = min(estimated_max_len, max_pep_len)
kv_prots = [(k, v) for k, v in db.proteins.items()]
matched_masses_b, matched_masses_y, kmer_set = modified_match_masses_per_protein(kv_prots,max_len,boundaries,db)
return (matched_masses_b, matched_masses_y, kmer_set)
def check_for_y_kmers(kmers):
for i, kmer in enumerate(kmers):
if kmer[4] == 'y':
print("FOUND at:", i, kmer)
|
python
|
"""This module contains the support resources for the two_party_negotiation protocol."""
|
python
|
"""
Manages updating info-channels (currently only the `log` channel).
"""
from typing import Union
import discord
from discord.ext import commands
class InfoChannels(commands.Cog):
def __init__(self, bot: commands.Bot):
print('Loading InfoChannels module...', end='')
self.bot = bot
self.guild_config_cog = bot.get_cog('GuildConfigCog')
self.allowed_mentions = discord.AllowedMentions(everyone=False, users=False, roles=False)
print(' Done')
@commands.Cog.listener()
async def on_member_ban(self, guild: discord.Guild, user: Union[discord.User, discord.Member]):
guild_config = await self.guild_config_cog.get_guild(guild)
log_channel = await guild_config.get_log_channel()
if log_channel:
await log_channel.send('{} was banned'.format(str(user)))
@commands.Cog.listener()
async def on_member_unban(self, guild: discord.Guild, user: discord.User):
guild_config = await self.guild_config_cog.get_guild(guild)
log_channel = await guild_config.get_log_channel()
if log_channel:
await log_channel.send('{} was unbanned'.format(str(user)))
def setup(bot: commands.Bot):
bot.add_cog(InfoChannels(bot))
|
python
|
print("""
042) Refaça o DESAFIO 035 dois triângulos, acrescentando o recurso de mostrar que
tipo de triângulo será formado:
- Equilátero: todos os lados iguais
- Isósceles: dois lados iguais
- Escaleno: todos os lados diferentes
""")
print("""
Este programa recebe o valor de três retas e verifica se, com elas,
é possível formar um triângulo. Para isso é importante saber que a
a soma de quaisquer dois lados de um triângulo deve, SEMPRE, ser
maior que o terceiro lado restante.
""")
L1 = float(input('Digite o comprimento da primeira reta: '))
L2 = float(input('Digite o comprimento da segunda reta: '))
L3 = float(input('Digite o comprimento da terceira reta: '))
if L1 < L2 + L3 and L2 < L1 + L3 and L3 < L1 + L2:
print('Os segmentos formam um triângulo: ')
if L1 == L2 == L3:
print('O triângulo possui os três lados iguais, portanto ele é EQUILÁTERO')
elif L1 == L2 or L1 == L3 or L2 == L3:
print('O triângulo possui dois lados iguais, portanto ele é ISÓSCELES')
else:
print('O triângulo possui dos três lados diferentes, porém ele é ESCALENO')
else:
print('Os segmentos não formam um triângulo')
|
python
|
import unittest
import hcl2
from checkov.common.models.enums import CheckResult
from checkov.terraform.checks.resource.aws.AppLoadBalancerTLS12 import check
class TestAppLoadBalancerTLS12(unittest.TestCase):
def test_failure(self):
resource_conf = {'load_balancer_arn': ['${aws_lb.examplea.arn}'], 'port': ['443'], 'protocol': ['HTTPS'], 'ssl_policy': ["ELBSecurityPolicy-2016-08"],
'default_action': [{'type': ['forward'], 'target_group_arn': ['${aws_lb_target_group.examplea.arn}'] }]}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success(self):
resource_conf = {
'load_balancer_arn': [
'${aws_lb.examplea.arn}'
],
'port': ['443'],
'protocol': ['HTTPS'],
'ssl_policy': ["ELBSecurityPolicy-TLS-1-2-2017-01"],
'default_action': [
{
'type': ['forward'],
'target_group_arn': [
'${aws_lb_target_group.examplea.arn}'
]
}
]
}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_nlb_tls_success(self):
resource_conf = {
'load_balancer_arn': [
'${aws_lb.example.arn}'
],
'port': ['443'],
'protocol': ['TLS'],
'ssl_policy': ["ELBSecurityPolicy-FS-1-2-Res-2019-08"],
'default_action': [
{
'type': ['forward'],
'target_group_arn': [
'${aws_lb_target_group.example.arn}'
]
}
]
}
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_redirect(self):
hcl_res = hcl2.loads("""
resource "aws_lb_listener" "http" {
load_balancer_arn = aws_lb.public.arn
port = "80"
protocol = "HTTP"
default_action {
redirect {
port = "443"
protocol = "HTTPS"
status_code = "HTTP_301"
}
type = "redirect"
}
}
""")
resource_conf = hcl_res['resource'][0]['aws_lb_listener']['http']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
unittest.main()
|
python
|
class Solution:
def longestValidParenthesesSlow(self, s: str) -> int:
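        # Stack of indices of unmatched characters (seeded with -1): when a ')' matches the
        # '(' on top, the new stack top marks the start boundary of the valid substring ending at i.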
stack, ans = [-1], 0
for i in range(len(s)):
if s[i] == ")" and len(stack) > 1 and s[stack[-1]] == "(":
stack.pop()
ans = max(ans, i - stack[-1])
else:
stack.append(i)
return ans
def longestValidParenthesesFast(self, s: str) -> int:
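        # Two-pass counting: scan left-to-right and then right-to-left, counting '(' and ')';
        # whenever the counts are equal, a valid span of length 2 * count has been found, and
        # the counters reset when the scan direction's closing side gets ahead.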
left, right, ans = 0, 0, 0
for c in s:
if c == "(":
left += 1
else:
right += 1
if left == right:
ans = max(ans, right * 2)
elif left < right:
left, right = 0, 0
left, right = 0, 0
for c in s[::-1]:
if c == "(":
left += 1
else:
right += 1
if left == right:
ans = max(ans, left * 2)
elif left > right:
left, right = 0, 0
return ans
# TESTS
for s, expected in [
("(()", 2),
(")()())", 4),
("", 0),
("()(()", 2),
("()(())", 6),
("((()()", 4),
("((())", 4),
("))(())", 4),
("()(()()", 4),
]:
sol = Solution()
actual1 = sol.longestValidParenthesesSlow(s)
actual2 = sol.longestValidParenthesesFast(s)
print("Longest valid parentheses in", s, "->", actual1)
assert actual1 == expected and actual2 == expected
|
python
|
from flask import Flask, redirect
from flask_fileupload import FlaskFileUpload
from flask_login import LoginManager, UserMixin, login_user, logout_user
app = Flask(__name__)
app.config.from_object("config")
lm = LoginManager(app)
fue = FlaskFileUpload(app)
class User(UserMixin):
def __init__(self, user_id):
self.id = user_id
@lm.user_loader
def load_user(user_id):
return User(user_id)
@app.route("/login/")
def login():
user = User("testuser")
login_user(user)
return redirect("/upload")
@app.route("/logout/")
def logout():
logout_user()
return redirect("/")
|
python
|
"""
This module implements graph/sparse matrix partitioning inspired by Gluon.
The goal of this module is to allow simple algorithms to be written against the current sequential implementation,
and eventually scale (without changes) to parallel or even distributed partitioning.
As such, many functions guarantee much less than they currently provide
(e.g., `~PartitioningAlgorithm.get_edge_master` is only guaranteed to see its source and destination in `~PartitioningAlgorithm.vertex_masters`, even though the initial sequential implementation actually provides all masters.)
The partitioner uses two functions, `get_vertex_master` and `get_edge_master`, similar to Gluon's getVertexMaster and getEdgeMaster, but it also provides access to vertex attributes like position.
The `~PartitioningAlgorithm.get_vertex_master` function selects the master based on vertex attributes or more typical graph properties.
The `~PartitioningAlgorithm.get_edge_master` function selects the master based on edge properties and the masters selected for the endpoints.
The partitioner also takes a `~PartitioningAlgorithm.neighborhood_size` parameter which specifies how far away from each vertex proxies are needed.
Edge proxies are included for all edges between vertices present on each node (either as master or as a proxy).
This module will work just like normal Gluon if neighborhood size is 1.
For vertex position based partitioning, we can just assign the node masters based on position and set an appropriate neighborhood.
For your sweeps algorithm, set neighborhood size to 2 and assign masters as needed.
"""
from abc import abstractmethod, ABCMeta, abstractproperty
from collections import namedtuple
from typing import Sequence, Set
# FIXME: This load of numpy will cause problems if it needs to be multiloaded
# from parla import multiload
# with multiload():
import numpy as np
import scipy.sparse
__all__ = [
"VertexID",
"PartitionID",
"GraphProperties",
"PartitioningAlgorithm"
]
VertexID = int
PartitionID = int
class GraphProperties:
def __init__(self, A: scipy.sparse.spmatrix):
"""
Compute the graph properties of `A`. This is called by the `PartitioningAlgorithm` framework.
"""
        assert A.shape[0] == A.shape[1], "Parla only supports partitioning homogeneous graphs with square edge matrices."
self.A = A
"""
The edge matrix of the graph.
"""
self.n_vertices = A.shape[0]
"""
The number of vertices.
"""
self.n_edges = A.count_nonzero()
"""
The number of edges.
"""
nonzeros = (A != 0)
# TODO: There MUST be a better way to do this.
self.in_degree = nonzeros.sum(0).A.flatten()
"""
A dense array containing the in degree of each vertex.
"""
self.out_degree = nonzeros.sum(1).A.flatten()
"""
A dense array containing the out degree of each vertex.
"""
class Partition(namedtuple("Partition", ("edges", "vertex_global_ids", "vertex_masters", "edge_masters"))):
"""
An instance of `Partition` contains all of the data available to a specific partition.
"""
edges: scipy.sparse.spmatrix
"""
A sparse matrix containing all edges in this partition (both master copies and proxies).
"""
vertex_global_ids: np.ndarray
"""
A dense array of the global IDs of each vertex which is available locally (as a master copy or a proxy).
    In other words, this array is a mapping from local ID to global ID for all vertices that exist locally.
The global IDs are always in ascending order.
"""
vertex_masters: np.ndarray
"""
An array of the master partitions for every vertex.
"""
edge_masters: scipy.sparse.spmatrix
"""
A sparse matrix of the master partitions for all locally available edges.
The structure of this sparse matrix is identical to `edges`.
"""
class PartitioningAlgorithm(metaclass=ABCMeta):
graph_properties: GraphProperties
"""
    The `GraphProperties` of the graph being partitioned.
What data is available in it depends on the context in which it is accessed.
"""
vertex_masters: np.ndarray
"""
The vertex masters that have already been assigned.
    This data structure is not sequentially consistent.
See `get_vertex_master` and `get_edge_master` for information about what elements are guaranteed to be up to date during those calls.
"""
# ... user-defined state ...
@abstractproperty
def n_partitions(self) -> int:
"""
        :return: The number of partitions this partitioner will create. (All partition IDs must satisfy 0 <= id < `n_partitions`.)
"""
pass
@abstractproperty
def neighborhood_size(self) -> int:
"""
:return: The number of neighboring proxy vertices to include in each partition.
Must be >= 0. A value of 0 will result in no proxies at all.
"""
pass
@abstractmethod
def get_vertex_master(self, vertex_id: VertexID) -> PartitionID:
"""
Compute the master partition ID for a vertex.
This function may use `graph_properties` and the metadata of the specific vertex.
:param vertex_id: The global ID of the vertex.
:return: The master partition ID for the vertex.
"""
pass
@abstractmethod
def get_edge_master(self, src_id: VertexID, dst_id: VertexID) -> PartitionID:
"""
Compute the master partition ID for the specified edge.
This function may use `vertex_masters`, but the only elements guaranteed to be present are `src_id` and `dst_id`.
This function may use `graph_properties` freely.
:param src_id: The global ID of the source vertex
:param dst_id: The global ID of the target vertex
:return: The master partition ID for the edge.
"""
pass
def partition(self, A: scipy.sparse.spmatrix, edge_matrix_type=scipy.sparse.csr_matrix) -> Sequence[Partition]:
"""
Partition `A`.
This operation mutates `self` and hence is not thread-safe.
Some implementation of this may be internally parallel.
:param A: The complete sparse edge matrix.
:param edge_matrix_type: The type of edge matrix to build for each partition.
This is used for both `Partition.edges` and `Partition.edge_masters`.
:return: A sequence of `Partition` objects in ID order.
"""
n_parts = self.n_partitions
neighborhood_size = self.neighborhood_size
self.graph_properties = GraphProperties(A)
self.vertex_masters = np.empty(shape=(self.graph_properties.n_vertices,), dtype=int)
self.vertex_masters[:] = -1
edge_masters = scipy.sparse.csr_matrix(A.shape, dtype=int)
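        # NOTE: edge_masters is allocated here but never populated below, so every returned
        # Partition currently carries this empty matrix (see the neighborhood TODO further down).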
partition_vertices: Sequence[Set[int]] = [set() for _ in range(n_parts)]
# partition_n_edges = np.zeros(shape=(n_parts,), dtype=int)
n, m = A.shape
        assert n == m, "Parla only supports partitioning homogeneous graphs with square edge matrices."
# Assign vertex masters
for i in range(n):
master = self.get_vertex_master(i)
assert master >= 0 and master < n_parts, f"partition {master} is invalid ({n_parts} partitions)"
self.vertex_masters[i] = master
# TODO: This does not yet implement neighborhood > 1
# Assign edge owners
# TODO:PERFORMANCE: Iterate values without building index lists?
for (i, j) in zip(*A.nonzero()):
owner = self.get_edge_master(i, j)
assert owner >= 0 and owner < n_parts, f"partition {owner} is invalid ({n_parts} partitions)"
# partition_n_edges[owner] += 1
partition_vertices[owner].add(i)
partition_vertices[owner].add(j)
# Build id maps
partition_global_ids = [np.array(sorted(vs)) for vs in partition_vertices]
# Construct in a efficiently updatable form (LiL)
# TODO:PERFORMANCE: It would be more efficient to build directly in CSR or the appropriate output format.
partition_edges = [scipy.sparse.lil_matrix((m.shape[0], m.shape[0])) for m in partition_global_ids]
for (i, j) in zip(*A.nonzero()):
owner = self.get_edge_master(i, j)
assert owner >= 0 and owner < n_parts, f"partition {owner} is invalid ({n_parts} partitions)"
global_ids = partition_global_ids[owner]
            # TODO:PERFORMANCE: Use a reverse index?
partition_edges[owner][global_ids.searchsorted(i), global_ids.searchsorted(j)] = A[i, j]
# Convert to compressed form
return [Partition(edge_matrix_type(edges), global_ids, self.vertex_masters, edge_masters) for edges, global_ids
in
zip(partition_edges, partition_global_ids)]
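# A minimal example partitioner (a hedged sketch, not part of the original module): vertex
# masters are assigned round-robin by vertex ID, and each edge is owned by its source's master.
class RoundRobinPartitioner(PartitioningAlgorithm):
    def __init__(self, n_parts: int):
        self._n_parts = n_parts

    @property
    def n_partitions(self) -> int:
        return self._n_parts

    @property
    def neighborhood_size(self) -> int:
        return 1

    def get_vertex_master(self, vertex_id: VertexID) -> PartitionID:
        return vertex_id % self._n_parts

    def get_edge_master(self, src_id: VertexID, dst_id: VertexID) -> PartitionID:
        # vertex_masters is guaranteed to contain the masters of src_id and dst_id here.
        return int(self.vertex_masters[src_id])

# Usage sketch:
#     A = scipy.sparse.random(100, 100, density=0.05, format="csr")
#     parts = RoundRobinPartitioner(4).partition(A)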
|
python
|
## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
## -----------------------------------
## PubNub 3.1 Real-time Push Cloud API
## -----------------------------------
import sys
import datetime
import time
import math
from pubnub import PubnubTwisted as Pubnub
## -----------------------------------------------------------------------
## Configuration
## -----------------------------------------------------------------------
publish_key = len(sys.argv) > 1 and sys.argv[1] or 'demo'
subscribe_key = len(sys.argv) > 2 and sys.argv[2] or 'demo'
secret_key = len(sys.argv) > 3 and sys.argv[3] or 'demo'
cipher_key = len(sys.argv) > 4 and sys.argv[4] or 'demo'
ssl_on = len(sys.argv) > 5 and bool(sys.argv[5]) or False
origin = len(sys.argv) > 6 and sys.argv[6] or 'pubsub.pubnub.com'
origin = '184.72.9.220'
## -----------------------------------------------------------------------
## Analytics
## -----------------------------------------------------------------------
analytics = {
'publishes': 0, # Total Send Requests
'received': 0, # Total Received Messages (Deliveries)
'queued': 0, # Total Unreceived Queue (UnDeliveries)
'successful_publishes': 0, # Confirmed Successful Publish Request
'failed_publishes': 0, # Confirmed UNSuccessful Publish Request
'failed_deliveries': 0, # (successful_publishes - received)
'deliverability': 0 # Percentage Delivery
}
trips = {
'last': None,
'current': None,
'max': 0,
'avg': 0
}
## -----------------------------------------------------------------------
## Initiate Class
## -----------------------------------------------------------------------
channel = 'deliverability-' + str(time.time())
pubnub = Pubnub(
publish_key,
subscribe_key,
secret_key=secret_key,
cipher_key=cipher_key,
ssl_on=ssl_on,
origin=origin
)
## -----------------------------------------------------------------------
## BENCHMARK
## -----------------------------------------------------------------------
def publish_sent(info=None):
    if info and info[0]:
        analytics['successful_publishes'] += 1
    else:
        analytics['failed_publishes'] += 1
    analytics['publishes'] += 1
    analytics['queued'] += 1
    pubnub.timeout(send, 0.1)


def send():
    if analytics['queued'] > 100:
        analytics['queued'] -= 10
        return pubnub.timeout(send, 10)
    pubnub.publish({
        'channel': channel,
        'callback': publish_sent,
        'message': "1234567890"
    })
def received(message):
    analytics['queued'] -= 1
    analytics['received'] += 1
    current_trip = trips['current'] = str(datetime.datetime.now())[0:19]
    last_trip = trips['last'] = str(
        datetime.datetime.now() - datetime.timedelta(seconds=1)
    )[0:19]
    ## New Trip Span (1 Second)
    if current_trip not in trips:
        trips[current_trip] = 0
    ## Average
    if last_trip in trips:
        trips['avg'] = (trips['avg'] + trips[last_trip]) / 2
    ## Increment Trip Counter
    trips[current_trip] = trips[current_trip] + 1
    ## Update Max
    if trips[current_trip] > trips['max']:
        trips['max'] = trips[current_trip]
def show_status():
    ## Update Failed Deliveries
    analytics['failed_deliveries'] = \
        analytics['successful_publishes'] \
        - analytics['received']
    ## Update Deliverability
    analytics['deliverability'] = (
        float(analytics['received']) /
        float(analytics['successful_publishes'] or 1.0)
    ) * 100.0
    ## Print Display
    print((
        "max:%(max)03d/sec " +
        "avg:%(avg)03d/sec " +
        "pubs:%(publishes)05d " +
        "received:%(received)05d " +
        "spub:%(successful_publishes)05d " +
        "fpub:%(failed_publishes)05d " +
        "failed:%(failed_deliveries)05d " +
        "queued:%(queued)03d " +
        "delivery:%(deliverability)03f%% " +
        ""
    ) % {
        'max': trips['max'],
        'avg': trips['avg'],
        'publishes': analytics['publishes'],
        'received': analytics['received'],
        'successful_publishes': analytics['successful_publishes'],
        'failed_publishes': analytics['failed_publishes'],
        'failed_deliveries': analytics['failed_deliveries'],
        'deliverability': analytics['deliverability'],
        'queued': analytics['queued']
    })
    pubnub.timeout(show_status, 1)
def connected():
    show_status()
    pubnub.timeout(send, 1)
print("Connected: %s\n" % origin)
pubnub.subscribe({
'channel': channel,
'connect': connected,
'callback': received
})
## -----------------------------------------------------------------------
## IO Event Loop
## -----------------------------------------------------------------------
pubnub.start()
|
python
|
# Configuration file for the Sphinx documentation builder.
#
# For a full list of configuration options, see the documentation:
# http://www.sphinx-doc.org/en/master/usage/configuration.html
# Project information
# --------------------------------------------------
project = 'Apollo'
version = '0.2.0'
release = ''
copyright = '2018, Georgia Power Company'
author = 'Chris Barrick, Zach Jones, Fred Maier'
# Configuration
# --------------------------------------------------
needs_sphinx = '1.7' # v1.7.0 was released 2018-02-12
master_doc = 'index'
language = 'en'
pygments_style = 'sphinx'
templates_path = ['_templates']
source_suffix = ['.rst']
exclude_patterns = ['_build', '_static', 'Thumbs.db', '.DS_Store']
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.githubpages',
'sphinx.ext.ifconfig',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
# Theme
# --------------------------------------------------
html_theme = 'sphinx_rtd_theme'
html_logo = '_static/logo/apollo-logo-text-color.svg'
html_static_path = ['_static']
html_css_files = ['css/overrides.css']
# Theme specific,
# see https://sphinx-rtd-theme.readthedocs.io/en/latest/configuring.html
html_theme_options = {
'logo_only': True,
'display_version': True,
'style_nav_header_background': '#EEEEEE',
# Sidebar
'collapse_navigation': False,
'sticky_navigation': True,
'navigation_depth': 3,
'includehidden': True,
'titles_only': False
}
# Extension: sphinx.ext.intersphinx
# --------------------------------------------------
# A mapping: id -> (target, inventory)
# where target is the base URL of the target documentation,
# and inventory is the name of the inventory file, or None for the default.
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'matplotlib': ('https://matplotlib.org/', None),
'xarray': ('http://xarray.pydata.org/en/stable/', None),
'sklearn': ('http://scikit-learn.org/stable', None),
}
# Extension: sphinx.ext.autodoc
# --------------------------------------------------
autodoc_default_options = {
'members': True,
}
# Extension: sphinx.ext.autosummary
# --------------------------------------------------
autosummary_generate = True
autosummary_generate_overwrite = True
# Extension: sphinx.ext.napoleon
# --------------------------------------------------
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_use_param = False
napoleon_use_rtype = False
# Extension: sphinx.ext.todo
# --------------------------------------------------
# Toggle output for ..todo:: and ..todolist::
todo_include_todos = True
# Path setup
# --------------------------------------------------
# All extensions and modules to document with autodoc must be in sys.path.
def add_path(path):
    '''Add a directory to the import path, relative to the documentation root.
    '''
    import os
    import sys
    path = os.path.abspath(path)
    sys.path.insert(0, path)
add_path('..') # The root of the repo, puts the `apollo` package on the path.
|
python
|
from bson.objectid import ObjectId
from pymongo import MongoClient


def readValue(value):
    # Try the numeric interpretations first (int before float, so integer strings
    # stay integers), then fall back to returning the raw string.
    try:
        return int(value)
    except ValueError:
        pass
    try:
        return float(value)
    except ValueError:
        pass
    return value


def run(host=None, db=None, coll=None, key=None, prop=None, value=None):
    # Connect to the Mongo collection
    client = MongoClient(host)
    db = client[db]
    graph = db[coll]
    value = readValue(value)
    # Set data.<prop> on the document with the given ObjectId.
    graph.update({"_id": ObjectId(key)}, {"$set": {"data.%s" % (prop): value}})
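# Hypothetical invocation (the host, database, collection, and id below are made-up
# placeholders, not values from this project):
#
#   run(host='mongodb://localhost:27017', db='mydb', coll='graph',
#       key='5f2b6c9e8f1b2a0001234567', prop='weight', value='0.75')
#
# which would set data.weight = 0.75 on the matching document.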
|
python
|
from flask import Flask, render_template, Blueprint
from modules.BBCScraper import BBCScraper
from modules.MaanScraper import MaanHealthScraper
from modules.MOHScraper import CovidPalestine
from modules.MaanScraper import MaanNewsScraper
import time
import concurrent.futures
from modules.CovidScraper import BBCCovidScraper, WhoCovidScraper

covid = Blueprint('covid', __name__)


@covid.route('/covid19')
def load_covid():
    start = time.perf_counter()
    with concurrent.futures.ThreadPoolExecutor() as executor:
        # f1 = executor.submit(MaanNewsScraper.get_covid_status)
        f2 = executor.submit(BBCCovidScraper.get_content)
        f3 = executor.submit(WhoCovidScraper.get_content)
        # palestine_summary = f1.result()
        bbc_corona_articles = f2.result()
        who_corona_articles = f3.result()
    finish = time.perf_counter()  # end timer
    print(f"Finished in {round(finish - start, 2)} seconds")
    return render_template("covid/covid19.html",
                           bbc_corona_articles=bbc_corona_articles,
                           who_corona_articles=who_corona_articles
                           )
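# For completeness, a minimal sketch of how this blueprint might be registered on the
# application object (the module name and app factory here are assumptions, not taken
# from this project):
#
#   from flask import Flask
#   from covid_views import covid  # wherever this blueprint module lives
#
#   app = Flask(__name__)
#   app.register_blueprint(covid)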
|
python
|
from fractions import Fraction


def answer(pegs):
    arrLength = len(pegs)
    if (not pegs) or arrLength == 1:
        return [-1, -1]
    even = True if (arrLength % 2 == 0) else False
    total = (- pegs[0] + pegs[arrLength - 1]) if even else (- pegs[0] - pegs[arrLength - 1])
    # print(total)
    if arrLength > 2:
        for index in range(1, arrLength - 1):
            total += 2 * (-1) ** (index + 1) * pegs[index]
    # print(total)
    FirstGearRadius = Fraction(2 * (float(total) / 3 if even else total)).limit_denominator()
    print(FirstGearRadius)
    currentRadius = FirstGearRadius
    for index in range(0, arrLength - 2):
        CenterDistance = pegs[index + 1] - pegs[index]
        NextRadius = CenterDistance - currentRadius
        if currentRadius < 1 or NextRadius < 1:
            return [-1, -1]
        else:
            currentRadius = NextRadius
    return [FirstGearRadius.numerator, FirstGearRadius.denominator]


if __name__ == "__main__":
    l = list(map(int, input().split()))
    print(answer(l))
    # print(answer([4, 9, 17, 31, 40]))
    # print(answer([4, 30, 50]))
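# Sketch of the algebra behind answer() (my reading of the code above, not an
# authoritative derivation): with peg positions p_0..p_{n-1} and gear radii r_0..r_{n-1},
# adjacent gears must satisfy r_k + r_{k+1} = p_{k+1} - p_k, and the puzzle requires
# r_0 = 2 * r_{n-1}. Telescoping that chain expresses r_0 as an alternating sum of the
# peg positions, which is what `total` accumulates; the factor of 2 (and the division by
# 3 when the peg count is even) comes from substituting r_{n-1} = r_0 / 2 into the last
# link of the chain. The final loop then walks the chain to confirm every intermediate
# radius stays >= 1.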
|
python
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
# In[2]:
df=pd.read_csv('car_data.csv')
# In[3]:
df.head()
# In[5]:
df.shape
# In[6]:
print(df['Seller_Type'].unique())
# In[26]:
print(df['Transmission'].unique())
print(df['Owner'].unique())
print(df['Fuel_Type'].unique())
# In[8]:
# check missing or null values
df.isnull().sum()
# In[9]:
df.describe()
# In[11]:
df.columns
# In[12]:
final_dataset=df[['Year', 'Selling_Price', 'Present_Price', 'Kms_Driven','Fuel_Type', 'Seller_Type', 'Transmission', 'Owner']].copy()  # .copy() avoids SettingWithCopyWarning on later assignments
# In[13]:
final_dataset.head()
# In[14]:
final_dataset['Current_Year']=2020
# In[15]:
final_dataset.head()
# In[16]:
final_dataset['no_of_year']=final_dataset['Current_Year']-final_dataset['Year']
# In[17]:
final_dataset.head()
# In[19]:
final_dataset.drop(['Year'],axis=1,inplace=True)
# In[20]:
final_dataset.head()
# In[21]:
final_dataset.drop(['Current_Year'],axis=1,inplace=True)
# In[22]:
final_dataset.head()
# In[30]:
final_dataset=pd.get_dummies(final_dataset,drop_first=True)
# In[31]:
final_dataset.head()
# In[32]:
final_dataset.corr()
# In[33]:
import seaborn as sns
# In[34]:
sns.pairplot(final_dataset)
# In[35]:
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# In[37]:
corrmat=final_dataset.corr()
top_corr_features=corrmat.index
plt.figure(figsize=(20,20))
#plot heat map
g=sns.heatmap(final_dataset[top_corr_features].corr(),annot=True,cmap="RdYlGn")
# In[59]:
final_dataset.head()
# In[60]:
# independent and dependent features
X=final_dataset.iloc[:,1:]
y=final_dataset.iloc[:,0]
# In[61]:
X.head()
# In[62]:
y.head()
# In[63]:
# ordering of features importance
from sklearn.ensemble import ExtraTreesRegressor
model=ExtraTreesRegressor()
model.fit(X,y)
# In[64]:
print(model.feature_importances_)
# In[65]:
# plot graph of feature importance for visualization
feat_importances=pd.Series(model.feature_importances_,index=X.columns)
feat_importances.nlargest(5).plot(kind='barh')
plt.show()
# In[67]:
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2)
# In[69]:
X_train.shape
# In[70]:
from sklearn.ensemble import RandomForestRegressor
rf_random = RandomForestRegressor()
# In[78]:
# Hyperparameters
# Randomized Search CV
# Number Of trees in random forest
import numpy as np
n_estimators=[int(x) for x in np.linspace(start = 100,stop = 1200,num = 12)]
#Number of features to consider at every split
max_features=['auto','sqrt']
# Maximum number of levels in a tree
max_depth =[int(x) for x in np.linspace(5, 30,num =6)]
# max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split =[2,5,10,15,100]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1,2,5,10]
# In[79]:
from sklearn.model_selection import RandomizedSearchCV
# In[80]:
# create random grid
random_grid = {'n_estimators':n_estimators,
'max_features':max_features,
'max_depth':max_depth,
'min_samples_split':min_samples_split,
'min_samples_leaf':min_samples_leaf}
print(random_grid)
# In[83]:
# use random grid to search for best hyperparameters
# first create the base model to tune
rf=RandomForestRegressor()
# In[85]:
rf_random = RandomizedSearchCV(estimator = rf,param_distributions = random_grid,scoring ='neg_mean_squared_error',n_iter=10,cv=5,verbose=2,random_state=42,n_jobs =1)
# In[86]:
rf_random.fit(X_train,y_train)
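# (Addition, not from the original notebook.) RandomizedSearchCV keeps the winning
# hyperparameter combination and its cross-validated score; printing them shows what
# the search actually chose.
print(rf_random.best_params_)
print(rf_random.best_score_)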
# In[87]:
predictions = rf_random.predict(X_test)
# In[88]:
predictions
# In[89]:
sns.distplot(y_test-predictions)
# In[90]:
plt.scatter(y_test,predictions)
# In[92]:
import pickle
# open a file where you want to store data and dump the fitted model into it;
# the with-block closes the file once the pickle is written
with open('random_forest_regression_model.pkl', 'wb') as f:
    pickle.dump(rf_random, f)
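# (Addition, not in the original notebook.) Reload the pickled model and sanity-check it
# on a few held-out rows:
with open('random_forest_regression_model.pkl', 'rb') as f:
    loaded_model = pickle.load(f)
print(loaded_model.predict(X_test[:5]))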
|
python
|
from PIL import Image
import numpy as np
import time

MENU = ("1-Load an image\n2-Convert the image to black and white\n"
        "3-Flip the image\n4-Exit\nEnter your choice: ")
INVALID = "Please enter one of the listed options."


def renk_degisimi():
    # Convert the loaded picture to grayscale with the standard luminosity weights
    # and display the result.
    h = pic.shape[0]
    w = pic.shape[1]
    newpic = np.zeros((h, w))
    for i in range(h):
        for j in range(w):
            newpic[i][j] += pic[i][j][0] * 0.2989 + pic[i][j][1] * 0.5870 + pic[i][j][2] * 0.1140
    Image.fromarray(newpic.astype(np.uint8)).show()


def kirpma():
    # Crop the picture (only the x1 coordinate is read; the rest is not implemented).
    x1 = int(input("Enter the x1 value: "))


def terscevir():
    # Flip the loaded picture upside down and display it.
    tersfoto = pic.copy()
    tersfotos = tersfoto[::-1]
    son = Image.fromarray(tersfotos)
    son.show()


secim = input(MENU)
while secim != "1" and secim != "2" and secim != "3" and secim != "4":
    time.sleep(1)
    print()
    print(INVALID)
    print()
    time.sleep(1)
    secim = input(MENU)
print()
while secim == "2" or secim == "3":
    time.sleep(0.5)
    print("You need to load an image before performing this operation.\n")
    time.sleep(1)
    secim = input(MENU)
    while secim != "1" and secim != "2" and secim != "3" and secim != "4":
        print(INVALID)
        secim = input(MENU)
while True:
    while secim == "1":
        picture = input("Please enter the name of the image you want to load (e.g. image.jpg): ")
        print()
        time.sleep(1)
        print("Image {} has been loaded".format(picture))
        print()
        time.sleep(1)
        picture_open = Image.open(picture)
        pic = np.array(picture_open)
        secim = input(MENU)
        while secim != "1" and secim != "2" and secim != "3" and secim != "4":
            print(INVALID)
            secim = input(MENU)
        print()
    while secim == "2":
        renk_degisimi()
        secim = input(MENU)
        while secim != "1" and secim != "2" and secim != "3" and secim != "4":
            print(INVALID)
            secim = input(MENU)
        print()
    while secim == "3":
        terscevir()
        secim = input(MENU)
        while secim != "1" and secim != "2" and secim != "3" and secim != "4":
            print(INVALID)
            secim = input(MENU)
        print()
    while secim == "4":
        print("Exiting.")
        time.sleep(1)
        quit()
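# (Illustrative addition, not part of the original program.) If the unfinished crop
# option (kirpma) were completed, a NumPy-slicing version could look like this; the
# names below are assumptions, not originals:
#
#   def kirpma_ornek(goruntu, x1, y1, x2, y2):
#       kesit = goruntu[y1:y2, x1:x2]   # rows are the y axis, columns the x axis
#       Image.fromarray(kesit).show()
#       return kesit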
|
python
|
from django.shortcuts import render, redirect
from django.contrib.auth import login, authenticate,logout
from django.http import HttpResponse
from account.forms import AccountAuthenticationForm, RegistrationForm,AccountUpdateForm
from django.contrib import messages
from django.conf import settings
from django.contrib.auth.models import User
from account.models import Account
from posts.models import Post
from posts.forms import PostModelForm
import random
from django.views.generic import (
DetailView
)
'''
def profiles(request):
data =Post.objects.all()
paginate_by = 2
ordering = ['post_date']
return render(request,'account/account.html',{'data':data})
'''
def Userfeed(request):
allposts=Post.objects.all()
# details=Post.objects.get(id=id)
# posts_form = PostModelForm()
# if request.method == 'POST':
# posts_form = PostModelForm(request.POST )
# if posts_form.is_valid():
# content = request.POST.get('content')
# # image = request.POST.get('image')
# # comment = PostModelForm.objects.create(post = Post, user = request.user, content = content)
# posts_form .save()
# return redirect("baseapp:details" )
# else:
# posts_form = PostModelForm()
# initials
p_form = PostModelForm()
post_added = False
# profile = Account.objects.get(user=request.user)
# profile= self.request.usersettings.AUTH_USER_MODEL
profile = request.user
if 'submit_p_form' in request.POST:
print(request.POST)
p_form = PostModelForm(request.POST, request.FILES)
if p_form.is_valid():
instance = p_form.save(commit=False)
instance.writer = profile
instance.save()
p_form = PostModelForm()
post_added = True
# ppp= Account.objects.all()
profiles = []
all_posts = list(Account.objects.all())
# random_post_number = post_number - len(profiles)
# random_posts = random.sample(all_posts, random_post_number)
    random_posts = random.sample(all_posts, min(3, len(all_posts)))  # avoid ValueError when fewer than 3 accounts exist
for random_post in random_posts:
profiles.append(random_post)
# return post_objects
context = {
'allposts': allposts,
'p_form': p_form,
'profiles': profiles,
'post_added': post_added,
}
return render(request, 'users/userfeed.html', context)
class ProfileDetailView(DetailView):
model = Account
context_object_name = 'my_profile'
template_name = 'account/account.html'
def get_object(self,**kwargs):
pk= self.kwargs.get('pk')
view_profile = Account.objects.get(pk=pk)
return view_profile
def edit_account_view(request, *args, **kwargs):
if not request.user.is_authenticated:
return redirect("login")
user_id = kwargs.get("user_id")
account = Account.objects.get(pk=user_id)
if account.pk != request.user.pk:
        return HttpResponse("You cannot edit someone else's profile.")
context = {}
if request.POST:
form = AccountUpdateForm(request.POST, request.FILES, instance=request.user)
if form.is_valid():
form.save()
new_username = form.cleaned_data['username']
return redirect("account:profile-page")
else:
form = AccountUpdateForm(request.POST, instance=request.user,
initial={
"id": account.pk,
"email": account.email,
"username": account.username,
"profile_image": account.profile_image,
"hide_email": account.hide_email,
"bio": account.bio,
"full_name": account.full_name,
}
)
context['form'] = form
else:
form = AccountUpdateForm(
initial={
"id": account.pk,
"email": account.email,
"username": account.username,
"profile_image": account.profile_image,
"bio": account.bio,
"full_name": account.full_name,
}
)
context['form'] = form
context['DATA_UPLOAD_MAX_MEMORY_SIZE'] = settings.DATA_UPLOAD_MAX_MEMORY_SIZE
return render(request, "account/edit_account.html", context)
def registration_view(request, *args, **kwargs):
context = {}
if request.POST:
form = RegistrationForm(request.POST)
if form.is_valid():
form.save()
# email = form.cleaned_data.get('email')
# raw_password = form.cleaned_data.get('password1')
# accounts = authenticate(email=email, password=raw_password)
# login(request, accounts)
return redirect('account:login')
else:
context['registration_form'] = form
else: #GET request
form = RegistrationForm()
context['registration_form'] = form
return render(request, 'account/register.html', context)
def login_view(request, *args, **kwargs):
context = {}
user = request.user
if user.is_authenticated:
return redirect("account:profile-page")
if request.POST:
form = AccountAuthenticationForm(request.POST)
if form.is_valid():
email = request.POST['email']
password = request.POST['password']
user = authenticate(email=email, password=password)
if user:
login(request, user)
return redirect("account:profile-page")
else:
form = AccountAuthenticationForm()
context['login_form'] = form
return render(request, 'account/login.html', context)
def logout_view(request, *args, **kwargs):
logout(request)
return render(request,'account/logout.html')
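# A plausible account/urls.py wiring for the views above (sketch only: the route names
# 'login' and 'profile-page' are taken from the redirects in this file, everything else
# is an assumption about this project's URL layout):
#
#   from django.urls import path
#   from account import views
#
#   app_name = 'account'
#   urlpatterns = [
#       path('register/', views.registration_view, name='register'),
#       path('login/', views.login_view, name='login'),
#       path('logout/', views.logout_view, name='logout'),
#       path('<int:user_id>/edit/', views.edit_account_view, name='edit'),
#       path('<int:pk>/', views.ProfileDetailView.as_view(), name='profile-page'),
#   ]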
|
python
|
import numpy as np
import cv2
from skimage import segmentation
def runGrabCut(_image, boxes, indices):
imgs = []
image = _image.copy()
    # ensure at least one detection exists; note this uses the box count and
    # effectively ignores the `indices` argument passed by the caller
    n_boxes = len(boxes)
    if n_boxes > 0:
        # loop over the detections we are keeping
        for i in range(0, n_boxes):
image = _image.copy()
mask = np.zeros(_image.shape[:2], np.uint8)
bgdModel = np.zeros((1, 65), np.float64)
fgbModel = np.zeros((1, 65), np.float64)
# extract the bounding box coordinates
rect = (int(boxes[i][0]), int(boxes[i][1]), int(boxes[i][2]), int(boxes[i][3]))
# outline = segmentation.slic(_image, n_segments=100,enforce_connectivity=False)
print('rect',rect)
# print(boxes)
# apply GrabCut
cv2.grabCut(image, mask, rect, bgdModel, fgbModel, 3, cv2.GC_INIT_WITH_RECT)
            # treat mask values 0 and 2 (background / probable background) as background
            grab_mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
            # use the mask to keep only the foreground region
            image = image * grab_mask[:, :, np.newaxis]
# regions = outline*grab_mask
# segmented = np.unique(regions)
# print('segmented',segmented)
# cv2.imshow('segmented',segmented)
# segmented = segmented[1:len(segmented)]
# pxtotal = np.bincount(outline.flatten())
# pxseq = np.bincount(regions.flatten())
#
#
# pxseg = np.bincount(regions.flatten())
# seg_mask = np.zeros(_image.shape[:2], np.uint8)
# label = (pxseg[segmented] / pxtotal[segmented].astype(float)) < 0.75
# for j in range(0, len(label)):
# if label[j] == 0:
# temp = outline == segmented[j]
# seg_mask = seg_mask + temp
# mask = seg_mask > 0
# mask = np.where((mask == 1), 255, 0).astype("uint8")
# mask = cv2.bitwise_not(mask)
# cv2.imshow('mask', mask)
# cv2.waitKey()
imgs.append(image)
# imgs = image|image
# cv2.bitwise_xor(image, image)
return imgs
if __name__ == '__main__':
    import argparse

    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True,
                    help="path to input image")
    ap.add_argument("-y", "--yolo", required=True,
                    help="base path to YOLO directory")
    ap.add_argument("-c", "--confidence", type=float, default=0.25,
                    help="minimum probability to filter weak detections")
    ap.add_argument("-t", "--threshold", type=float, default=0.45,
                    help="threshold when applying non-maxima suppression")
    args = vars(ap.parse_args())

    import yolo
    img, boxes, idxs = yolo.runYOLOBoundingBoxes(args)
    images = runGrabCut(img, boxes, idxs)
    # show the output images
    # cv.namedWindow("Image", cv.WINDOW_NORMAL)
    # cv.resizeWindow("image", 1920, 1080)
    for i in range(len(images)):
        cv2.imshow("Image{}".format(i), images[i])
        cv2.imwrite("grabcut{}.jpg".format(i), images[i])
    cv2.waitKey(0)
|