seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt
---|---|---|---|---|---|---|---|---|---|---|---|---
16995335370
|
# find GCD/HCF of two numbers
def fun(a, b):
res = min(a, b)
ans = None
for i in range(1, res + 1):
        if a % i == 0 and b % i == 0:
ans = i
return ans
# using recursion
def funrecursive(a, b):
if b == 0:
return a
    return funrecursive(b, a % b)
if __name__ == '__main__':
print(fun(30, 60))
print(funrecursive(30, 60))
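# Added illustration (an assumption, not part of the original file): cross-check
# both helpers against the standard library's math.gcd.
import math
assert fun(30, 60) == funrecursive(30, 60) == math.gcd(30, 60) == 30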
|
pvr30/DSA
|
Recursion/GCD_or_HCF.py
|
GCD_or_HCF.py
|
py
| 376 |
python
|
en
|
code
| 2 |
github-code
|
6
|
6713722350
|
from __future__ import print_function, division
import time
from floor_plans.visualize import View
from floor_plans.floorplan import FloorPlan
from floor_plans import polygon
from floor_plans.math_util import dist
from floor_plans import floorplan_statistics as fps
names = [
'SPEECH', 'ST', 'COMP LAB', 'PK',
'SUPER', 'FINANCE', 'ASST', 'SPED', 'BOILER',
'ART', 'PK', 'PRINCIPLE', 'AP ASSISTANT PRINCIPLE', 'CONFERENCE',
'G', 'G', 'WORK', 'DIS', 'VOL', 'STORAGE',
'TOILET', 'CONF', 'CURR', 'WORK', 'SEC',
'RECEPT', 'TOILET', 'hallway', 'STORAGE', 'STAGE',
'KITCHEN', 'AUTISM', 'STORAGE', 'LIFE SKILLS', 'TOILET',
'FACULTY', 'hallway', 'PROJ', 'CONF', 'LIT',
'MATH', 'K', 'K', 'hallway', '2',
'2', '2', '2', '1', '2',
'1', '1', '1', '1', 'hallway',
'hallway', 'TEAM', 'K', 'TITLE 1', 'TEST',
'hallway', 'K', 'K', 'hallway', 'JANITORIAL',
'TOILET', 'RR', 'hallway', 'STORAGE', 'NONE',
'hallway', 'hallway', 'hallway', 'RESOURCE',
'hallway', 'WORK', 'hallway', 'hallway', 'TOILET',
'NURSE', 'WAIT', 'hallway', 'PK', 'LIBRARY',
'EXAM', 'hallway', 'hallway', 'WORK', 'RECORDS',
'EVAL', 'EVAL', 'TOILET', 'hallway', 'LOUNGE',
'GYM STORAGE', 'hallway',
'hallway',# 'entrance',
'KILN', 'ART STORAGE',
'MUSIC STORAGE', 'OT/PT', 'NONE', 'ELECTRICAL', 'MUSIC',
'hallway', 'TOILET', 'GYM', 'hallway', 'CAFETERIA',
'hallway', 'TOILET', 'hallway', 'TOILET','CONFERENCE3',
'RECEPT2', 'CUSTODIAL', 'RECYCLE', 'hallway', 'hallway',
'hallway', 'hallway', 'BLDG/EQUIP STORAGE', 'hallway', 'hallway',
'hallway'
]
names = [n.lower() for n in names]
doors = [
(43, 44, .5),
(44, 45, .5),
(45, 46, .5),
(46, 47, .5),
(47, 48, .5),
(48, 49, .1),
(19, 20, .5),
(20, 21, .5),
(21, 22, .5),
(22, 23, .5),
(23, 24, .5),
(29, 30, .5),
(65, 78, .5),
(66, 65, .5),
(67, 66, .5),
(51, 44, .1),
(20, 13, .1),
(33, 29, .5),
(35, 33, .1),
(60, 64, .5),
(59, 60, .5),
(57, 59, .5),
(39, 57, .5),
(24, 69, .5),
(248, 250, .5),
(250, 251, .5),
(49, 251, .5),
(68, 77, .5),
(64, 63, .5),
(246, 247, .5),
(70, 65, .1),
(71, 66, 1),
(29, 31, .1),
(63, 37, .5),
(76, 245, .5),
(76, 246, .5),
(25, 235, .5),
(27, 236, .5),
(74, 2, .9),
(2, 3, .5),
(2, 0, .5),
(26, 28, .5),
(26, 18, .5),
(8, 10, .5),
(10, 1, .5),
(4, 9, .5),
(23, 16, .5),
(4, 253, .5),
(28, 142, .5),
(238, 240, .5),
(243, 242, .5),
(242, 84, .5),
(208, 148, .9),
(155, 213, .9),
(207, 155, .5),
(138, 223, .5),
(214, 216, .5),
(201, 138, .5),
(216, 218, .5),
(220, 203, .5),
(221, 203, .5),
(221, 255, .5),
(139, 137, .1),
(139, 137, .9),
(152, 93, .5),
(89, 91, .1),
(89, 87, .5),
(234, 232, .1),
(234, 232, .9),
(142, 144, .5),
(53, 46, .1),
(54, 47, .5),
(21, 14, .5),
(154, 145, .5),
(153, 149, .5),
(146, 139, .5),
(150, 98, .5),
(161, 163, .5),
(161, 162, .5),
(136, 137, .5),
(178, 182, .5),
(107, 106, .5),
(157, 133, .5), # Entrance.
]
entrances = [352, 311, 299, 283, 376, 333, 365]
obj_path = 'school/floor1.obj'
bounds = (750, 750)
view = View(*bounds, scale=2)
source_upf = (39.645008 - 32.433640) / 24. # units per foot
# source_ppm = ppf / 0.3048 # pixels per meter
target_upf = 1.
scale = target_upf / source_upf
floor = FloorPlan.from_obj(obj_path, names, doors, scale, entrances)
def merge_rooms(name, room_ids, verts):
room_ids = set(room_ids)
new_room_id = max(floor.rooms.keys())+1
# Delete interior room nodes.
for rid in room_ids:
        if floor.room_centers[rid] in floor:  # , floor.room_names[rid]
floor.remove_node(floor.room_centers[rid])
for vid in floor.rooms[rid]:
if vid not in verts and vid in floor:
floor.remove_node(vid)
for ID in room_ids:
del floor.rooms[ID]
del floor.room_names[ID]
del floor.room_centers[ID]
for vid, rooms in floor.vert_to_room.items():
if len(rooms.intersection(room_ids)):
rooms -= room_ids
rooms.add(new_room_id)
poly = [floor.vertices[vi] for vi in verts]
center = polygon.center(poly)
center_i = len(floor.vertices)
floor.vertices.append(center)
floor.rooms[new_room_id] = verts
floor.room_names[new_room_id] = name
floor.room_centers[new_room_id] = center_i
floor.room_sizes[new_room_id] = polygon.area(poly)
return new_room_id
""" Merge administration room clusters together.
"""
# admin1_id = len(floor.rooms) # create new room ID.
# # IDs of rooms to merge
admin1_rooms = [113, 87, 88, 89, 90, 93, 92, 117, 114, 112, 4, 5, 6, 7, 22, 21, 23]
admin1_verts = [156, 170, 172, 174, 176, 178, 179, 183, 182, 180, 157, 168,
167, 187, 229, 159, 185, 200, 198, 196, 194, 192, 190, 158,
204, 254, 166, 169]
admin2_rooms = [25, 16, 17, 18, 19, 20, 80, 79, 84, 15, 14, 13, 12, 11, 24, 86]
admin2_verts = [133, 128, 129, 130, 165, 225, 131, 121, 107, 227, 226, 104,
108, 122, 114, 113, 112, 111, 110, 109, 134, 230, 135, 132]
admin1_id = merge_rooms('administration', admin1_rooms, admin1_verts)
admin2_id = merge_rooms('administration', admin2_rooms, admin2_verts)
floor.add_edge(floor.room_centers[admin1_id], floor.room_centers[119], outside=False, inner=True, width=10)
floor.add_edge(floor.room_centers[admin2_id], floor.room_centers[81], outside=False, inner=True, width=10)
# try:
# print(fps.calculate_all(floor))
for k, v in fps.calculate_all(floor).items():
print(k, v)
# except Exception as e:
# print(e)
view.draw_floorplan(floor)
view.hold()
|
joel-simon/evo_floorplans
|
test.py
|
test.py
|
py
| 5,851 |
python
|
en
|
code
| 84 |
github-code
|
6
|
41589623453
|
class MyError(Exception):
def __init__(self, stri):
self.stri = stri
def process(self):
if len(self.stri) < 5:
print("字符串长度小于5")
else:
print("咕咕咕")
try:
MEr = MyError("waaaa")
MEr.process()
except MyError as error:
print(error)
|
Wainemo/PythonPractice
|
定义类继承Exception,判断输入字符串的长度是否小于5.py
|
定义类继承Exception,判断输入字符串的长度是否小于5.py
|
py
| 317 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5387953203
|
from telegram.ext import Updater, MessageHandler,Filters
from Adafruit_IO import Client
import os
aio = Client('adeebsheriff', os.getenv('adeebsheriff'))
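# Note (added comment): with use_context=True in python-telegram-bot v12+, handlers are
# called as callback(update, context), so the first parameter (named 'bot' below) actually
# receives the Update and the second (named 'update') receives the CallbackContext.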
def demo1(bot,update):
chat_id = bot.message.chat_id
path = 'https://cdn3.vectorstock.com/i/1000x1000/87/22/i-am-fine-lettering-typography-calligraphy-overlay-vector-15208722.jpg'
bot.message.reply_text('I am fine')
update.bot.sendPhoto(chat_id=chat_id,photo=path)
def demo2(bot,update):
chat_id = bot.message.chat_id
path = 'https://static.scientificamerican.com/sciam/cache/file/2B38DE31-C1D3-4339-8808D61972976EE4.jpg'
bot.message.reply_text('Light is turned ON')
aio.send('bedroom-light', 1)
data1 = aio.receive('bedroom-light')
print(f'Received value: {data1.value}')
update.bot.sendPhoto(chat_id=chat_id,photo=path)
def demo3(bot,update):
chat_id = bot.message.chat_id
path = 'https://image.shutterstock.com/image-photo/light-bulb-turned-off-over-260nw-320485652.jpg'
bot.message.reply_text('Light is turned OFF')
aio.send('bedroom-light', 0)
data1 = aio.receive('bedroom-light')
print(f'Received value: {data1.value}')
update.bot.sendPhoto(chat_id=chat_id,photo=path)
def demo4(bot,update):
chat_id = bot.message.chat_id
path = 'https://cdn.frontdoorhome.com/ahs/blog/prod/static/cs/ahs/image/running-fan.jpg'
bot.message.reply_text('Fan is turned ON')
aio.send('bedroom-fan', 1)
data2 = aio.receive('bedroom-fan')
print(f'Received value: {data2.value}')
update.bot.sendPhoto(chat_id=chat_id,photo=path)
def demo5(bot,update):
chat_id = bot.message.chat_id
path = 'https://www.destinationlighting.com/fliptheswitch/wp-content/uploads/sites/2/2018/05/zudio-casablanca.jpg'
bot.message.reply_text('Fan is turned OFF')
aio.send('bedroom-fan', 0)
data2 = aio.receive('bedroom-fan')
print(f'Received value: {data2.value}')
update.bot.sendPhoto(chat_id=chat_id,photo=path)
def main(bot,update):
a = bot.message.text.lower()
print(a)
if a == "how are you?":
demo1(bot,update)
elif a =="light on" or a=="turn on light":
demo2(bot,update)
elif a =="light off" or a=="turn off light":
demo3(bot,update)
elif a =="switch on fan" or a=="turn on fan":
demo4(bot,update)
elif a =="switch off fan" or a=="turn off fan":
demo5(bot,update)
else:
bot.message.reply_text('Invalid Text')
BOT_TOKEN = os.getenv('BOT_TOKEN')
u = Updater(BOT_TOKEN,use_context=True)
dp = u.dispatcher
dp.add_handler(MessageHandler(Filters.text,main))
u.start_polling()
u.idle()
|
adeebsheriff/telegramiotchatbot
|
app.py
|
app.py
|
py
| 2,506 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9020012724
|
from multiprocessing import Pool
import math
from functools import partial
import numpy as np
from pyfaidx import Fasta, Faidx
import subprocess
import pysam
from liftoff import aligned_seg, liftoff_utils
from os import path
def align_features_to_target(ref_chroms, target_chroms, args, feature_hierarchy, liftover_type, unmapped_features):
if args.subcommand == "polish":
sam_files = [args.dir + "/polish.sam"]
else:
target_fasta_dict = split_target_sequence(target_chroms, args.target, args.dir)
genome_size = get_genome_size(target_fasta_dict)
threads_per_alignment = max(1, math.floor(int(args.p) / len(ref_chroms)))
sam_files = []
pool = Pool(int(args.p))
print("aligning features")
func = partial(align_single_chroms, ref_chroms, target_chroms, threads_per_alignment, args, genome_size,
liftover_type)
for result in pool.imap_unordered(func, np.arange(0, len(target_chroms))):
sam_files.append(result)
pool.close()
pool.join()
return parse_all_sam_files(feature_hierarchy, unmapped_features, liftover_type, sam_files)
def split_target_sequence(target_chroms, target_fasta_name, inter_files):
Faidx(target_fasta_name)
target_fasta_dict = Fasta(target_fasta_name, key_function=lambda x: x.split()[0])
for chrm in target_chroms:
if chrm != target_fasta_name:
out = open(inter_files + "/" + chrm + ".fa", 'w')
out.write(">" + chrm + "\n" + str(target_fasta_dict[chrm]))
return target_fasta_dict
def get_genome_size(target_fasta_dict):
genome_size = 0
for value in target_fasta_dict.values():
genome_size += len(value)
return genome_size
def align_single_chroms(ref_chroms, target_chroms, threads, args, genome_size, liftover_type, index):
max_single_index_size = 4000000000
features_file, features_name = get_features_file(ref_chroms, args, liftover_type, index)
target_file, output_file = get_target_file_and_output_file(liftover_type, target_chroms, index, features_name, args)
threads_arg = str(threads)
minimap2_path = get_minimap_path(args)
target_prefix = get_target_prefix_name(target_chroms, index, args, liftover_type)
if genome_size > max_single_index_size:
split_prefix = args.dir + "/" + features_name + "_to_" + target_prefix + "_split"
command = [minimap2_path, '-o', output_file, target_file, features_file] + args.mm2_options.split(" ") + [
"--split-prefix", split_prefix, '-t', threads_arg]
subprocess.run(command)
else:
minimap2_index = build_minimap2_index(target_file, args, threads_arg, minimap2_path)
command = [minimap2_path, '-o', output_file, minimap2_index, features_file] + args.mm2_options.split(" ") + [
'-t', threads_arg]
subprocess.run(command)
return output_file
def get_features_file(ref_chroms, args, liftover_type, index):
if ref_chroms[index] == args.reference and (liftover_type == "chrm_by_chrm" or liftover_type == "copies"):
features_name = 'reference_all'
elif liftover_type == "unmapped":
features_name = "unmapped_to_expected_chrom"
elif liftover_type == "unplaced":
features_name = "unplaced"
else:
features_name = ref_chroms[index]
return args.dir + "/" + features_name + "_genes.fa", features_name
def get_target_file_and_output_file(liftover_type, target_chroms, index, features_name, args):
if liftover_type != "chrm_by_chrm" or target_chroms[0] == args.target:
target_file = args.target
out_file_target = "target_all"
else:
target_file = args.dir + "/" + target_chroms[index] + ".fa"
out_file_target = target_chroms[index]
output_file = args.dir + "/" + features_name + "_to_" + out_file_target + ".sam"
return target_file, output_file
def get_minimap_path(args):
if args.m is None:
minimap2 = "minimap2"
else:
minimap2 = args.m
return minimap2
def get_target_prefix_name(target_chroms, index, args, liftover_type):
if liftover_type != "chrm_by_chrm" or target_chroms[0] == args.target:
prefix = "target_all"
else:
prefix = target_chroms[index]
return prefix
def build_minimap2_index(target_file, args, threads, minimap2_path):
if path.exists(target_file + ".mmi") is False:
subprocess.run(
[minimap2_path, '-d', target_file + ".mmi", target_file] + args.mm2_options.split(" ") + ['-t',
threads ])
return target_file + ".mmi"
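# Added note: minimap2's '-d' option writes a minimizer index (.mmi) to disk so later
# alignment runs against the same target can reuse it instead of re-indexing.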
def parse_all_sam_files(feature_hierarchy, unmapped_features, liftover_type, sam_files):
aligned_segments_dict = {}
for file in sam_files:
aligned_segments = parse_alignment(file, feature_hierarchy, unmapped_features, liftover_type)
aligned_segments_dict.update(aligned_segments)
return aligned_segments_dict
def parse_alignment(file, feature_hierarchy, unmapped_features, search_type):
all_aligned_blocks = {}
sam_file = pysam.AlignmentFile(file, 'r', check_sq=False, check_header=False)
sam_file_iter = sam_file.fetch()
aln_id = 0
name_dict = {}
align_count_dict = {}
for ref_seq in sam_file_iter:
if ref_seq.is_unmapped is False:
aln_id = add_alignment(ref_seq, align_count_dict, search_type, name_dict,aln_id, feature_hierarchy,
all_aligned_blocks)
else:
unmapped_features.append(feature_hierarchy.parents[ref_seq.query_name])
remove_alignments_without_children(all_aligned_blocks, unmapped_features, feature_hierarchy)
return all_aligned_blocks
def add_alignment(ref_seq, align_count_dict, search_type, name_dict, aln_id, feature_hierarchy,
all_aligned_blocks):
ref_seq.query_name = edit_name(search_type, ref_seq, name_dict)
aln_id += 1
if ref_seq.query_name in align_count_dict:
align_count = align_count_dict[ref_seq.query_name] + 1
else:
align_count = 0
align_count_dict[ref_seq.query_name] = align_count
aligned_blocks = get_aligned_blocks(ref_seq, aln_id, feature_hierarchy, search_type)
if ref_seq.query_name in all_aligned_blocks:
all_aligned_blocks[ref_seq.query_name].extend(aligned_blocks)
else:
all_aligned_blocks[ref_seq.query_name] = aligned_blocks
return aln_id
def edit_name(search_type, ref_seq, name_dict):
if search_type != "copies":
return ref_seq.query_name + "_0"
else:
if ref_seq.query_name not in name_dict:
name_dict[ref_seq.query_name] = 0
name_dict[ref_seq.query_name] += 1
return ref_seq.query_name + "_" + str(name_dict[ref_seq.query_name])
def get_aligned_blocks(alignment, aln_id, feature_hierarchy, search_type):
cigar_operations = get_cigar_operations()
cigar = alignment.cigar
parent = feature_hierarchy.parents[liftoff_utils.convert_id_to_original(alignment.query_name)]
query_start, query_end = get_query_start_and_end(alignment, cigar, cigar_operations)
children = feature_hierarchy.children[liftoff_utils.convert_id_to_original(alignment.query_name)]
end_to_end = is_end_to_end_alignment(parent, query_start, query_end)
if search_type == "copies" and end_to_end is False:
return []
reference_block_start, reference_block_pos = alignment.reference_start, alignment.reference_start
query_block_start, query_block_pos = query_start, query_start
new_blocks, mismatches = [], []
merged_children_coords = liftoff_utils.merge_children_intervals(children)
for operation, length in cigar:
if base_is_aligned(operation, cigar_operations):
query_block_pos, reference_block_pos = add_aligned_base(operation, query_block_pos, reference_block_pos,
length, cigar_operations, mismatches)
if query_block_pos == query_end:
add_block(query_block_pos, reference_block_pos, aln_id, alignment, query_block_start,
reference_block_start, mismatches, new_blocks, merged_children_coords, parent)
break
elif is_alignment_gap(operation, cigar_operations):
add_block(query_block_pos, reference_block_pos, aln_id, alignment, query_block_start, reference_block_start,
mismatches, new_blocks, merged_children_coords, parent)
mismatches, query_block_start, reference_block_start, query_block_pos, reference_block_pos = \
end_block_at_gap(
operation, query_block_pos, reference_block_pos, length, cigar_operations)
return new_blocks
def get_cigar_operations():
return {"insertion": 1, "deletion": 2, "hard_clip": 5, "match": 7, "mismatch": 8}
def get_query_start_and_end(alignment, cigar, cigar_operations):
query_start = alignment.query_alignment_start
query_end = alignment.query_alignment_end
if cigar[0][0] == cigar_operations["hard_clip"]:
query_start += cigar[0][1]
query_end += cigar[0][1]
return query_start, query_end
def is_end_to_end_alignment(parent, query_start, query_end):
return parent.end - parent.start + 1 == query_end - query_start
def base_is_aligned(operation, cigar_operations):
return operation == cigar_operations["match"] or operation == cigar_operations["mismatch"]
def add_aligned_base(operation, query_block_pos, reference_block_pos, length, cigar_operations, mismatches):
if operation == cigar_operations["mismatch"]:
for i in range(query_block_pos, query_block_pos + length):
mismatches.append(i)
query_block_pos, reference_block_pos = adjust_position(operation, query_block_pos, reference_block_pos,
length, cigar_operations)
return query_block_pos, reference_block_pos
def adjust_position(operation, query_block_pos, reference_block_pos, length, cigar_operations):
if operation == cigar_operations["match"] or operation == cigar_operations["mismatch"] or operation == \
cigar_operations["insertion"]:
query_block_pos += length
if operation == cigar_operations["match"] or operation == cigar_operations["mismatch"] or operation == \
cigar_operations["deletion"]:
reference_block_pos += length
return query_block_pos, reference_block_pos
def add_block(query_block_pos, reference_block_pos, aln_id, alignment, query_block_start, reference_block_start,
mismatches, new_blocks, merged_children_coords, parent):
query_block_end = query_block_pos - 1
reference_block_end = reference_block_pos - 1
new_block = aligned_seg.aligned_seg(aln_id, alignment.query_name, alignment.reference_name, query_block_start,
query_block_end,
reference_block_start, reference_block_end, alignment.is_reverse,
np.array(mismatches).astype(int))
overlapping_children = find_overlapping_children(new_block, merged_children_coords, parent)
if overlapping_children != []:
new_blocks.append(new_block)
def find_overlapping_children(aln, children_coords, parent):
overlapping_children = []
for child_interval in children_coords:
relative_start = liftoff_utils.get_relative_child_coord(parent, child_interval[0], aln.is_reverse)
relative_end = liftoff_utils.get_relative_child_coord(parent, child_interval[1], aln.is_reverse)
child_start, child_end = min(relative_start, relative_end), max(relative_start, relative_end)
overlap = liftoff_utils.count_overlap(child_start, child_end, aln.query_block_start, aln.query_block_end)
if overlap > 0:
overlapping_children.append(child_start)
overlapping_children.append(child_end)
return overlapping_children
def is_alignment_gap(operation, cigar_operations):
return operation == cigar_operations["insertion"] or operation == cigar_operations["deletion"]
def end_block_at_gap(operation, query_block_pos, reference_block_pos, length, cigar_operations):
mismatches = []
query_block_pos, reference_block_pos = adjust_position(operation, query_block_pos, reference_block_pos,
length, cigar_operations)
query_block_start = query_block_pos
reference_block_start = reference_block_pos
return mismatches, query_block_start, reference_block_start, query_block_pos, reference_block_pos
def remove_alignments_without_children(all_aligned_blocks, unmapped_features, feature_hierarchy):
features_to_remove = []
for seq in all_aligned_blocks:
if all_aligned_blocks[seq] == []:
features_to_remove.append(seq)
unmapped_features.append(feature_hierarchy.parents[liftoff_utils.convert_id_to_original(seq)])
for feature in features_to_remove:
del all_aligned_blocks[feature]
return all_aligned_blocks
|
agshumate/Liftoff
|
liftoff/align_features.py
|
align_features.py
|
py
| 13,119 |
python
|
en
|
code
| 360 |
github-code
|
6
|
39830016444
|
# Sorting: arranging data in order according to a given criterion
# 1. Selection sort: repeatedly pick the smallest element among the unprocessed data and swap it with the front element - O(N^2)
array = [7, 5, 9, 0, 3, 1, 6, 2, 4, 8]
print(array)
for i in range(len(array)):
min_idx = i
for j in range(i + 1, len(array)):
if array[min_idx] > array[j]:
min_idx = j
array[i], array[min_idx] = array[min_idx], array[i]
print(array)
print()
# 2. Insertion sort: take each unprocessed element and insert it into its proper position - O(N^2), but approaches O(N) when the data is already nearly sorted
array = [7, 5, 9, 0, 3, 1, 6, 2, 4, 8]
print(array)
for i in range(1, len(array)):
for j in range(i, 0, -1):
if array[j] < array[j - 1]:
array[j], array[j - 1] = array[j - 1], array[j]
else:
break
print(array)
print()
# 3. Quick sort: choose a pivot element and repeatedly swap elements larger and smaller than the pivot around it
array = [5, 7, 9, 0, 3, 1, 6, 2, 4, 8]
print(array)
def quick_sort(array):
if len(array) <= 1:
return array
pivot = array[0]
tail = array[1:]
left_side = [x for x in tail if x <= pivot]
right_side = [x for x in tail if x > pivot]
return quick_sort(left_side) + [pivot] + quick_sort(right_side)
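# Added note: this quick sort is O(N log N) on average but degrades to O(N^2) in the
# worst case (e.g. already-sorted input, since the pivot is always the first element).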
print(quick_sort(array))
# 4. Counting sort: a sorting algorithm that works only under certain conditions but runs very fast - O(N + K)
#    usable when the range of data values is limited and the values can be represented as integers
#    effective when the same value appears multiple times in the data
array = [7, 5, 9, 0, 3, 1, 6, 2, 9, 1, 4, 8, 0, 5, 2]
count = [0] * (max(array) + 1)
for i in range(len(array)):
count[array[i]] += 1
for i in range(len(count)):
for j in range(count[i]):
print(i, end=' ')
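# Added illustration (an assumption, not part of the original file): the same counting
# idea expressed with collections.Counter produces the identical sorted output.
from collections import Counter
counts = Counter(array)
print(' '.join(str(v) for v in range(max(array) + 1) for _ in range(counts[v])))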
|
omg7152/CodingTestPractice
|
Algorithm/Sort.py
|
Sort.py
|
py
| 1,977 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
7722787082
|
from spynnaker.pyNN import exceptions
from spynnaker.pyNN.models.neural_projections.connectors.abstract_connector \
import AbstractConnector
from spynnaker.pyNN.models.neural_properties.synaptic_list import SynapticList
from spynnaker.pyNN.models.neural_properties.synapse_row_info \
import SynapseRowInfo
import logging
import numpy
logger = logging.getLogger(__name__)
class FromListConnector(AbstractConnector):
"""
Make connections according to a list.
:param `list` conn_list:
a list of tuples, one tuple for each connection. Each
tuple should contain::
(pre_idx, post_idx, weight, delay)
where pre_idx is the index (i.e. order in the Population,
not the ID) of the presynaptic neuron, and post_idx is
the index of the postsynaptic neuron.
"""
def __init__(self, conn_list=None, safe=True, verbose=False):
"""
Creates a new FromListConnector.
"""
if not safe:
logger.warn("the modification of the safe parameter will be "
"ignored")
if verbose:
logger.warn("the modification of the verbose parameter will be "
"ignored")
if conn_list is None:
conn_list = []
self._conn_list = conn_list
def generate_synapse_list(
self, presynaptic_population, postsynaptic_population, delay_scale,
weight_scale, synapse_type):
prevertex = presynaptic_population._get_vertex
postvertex = postsynaptic_population._get_vertex
# Convert connection list into numpy record array
conn_list_numpy = numpy.array(
self._conn_list, dtype=[("source", "uint32"), ("target", "uint32"),
("weight", "float"), ("delay", "float")])
if (conn_list_numpy["target"] >= postvertex.n_atoms).any():
raise exceptions.ConfigurationException("Target atom out of range")
# Sort by pre-synaptic neuron
conn_list_numpy = numpy.sort(conn_list_numpy, order="source")
# Apply weight and delay scaling
conn_list_numpy["weight"] *= weight_scale
conn_list_numpy["delay"] *= delay_scale
# Count number of connections per pre-synaptic neuron
pre_counts = numpy.histogram(
conn_list_numpy["source"], numpy.arange(prevertex.n_atoms + 1))[0]
# Take cumulative sum of these counts to get start and end indices of
# the blocks of connections coming from each pre-synaptic neuron
pre_end_idxs = numpy.cumsum(pre_counts)
pre_start_idxs = numpy.append(0, pre_end_idxs[:-1])
# Loop through slices of connections
synaptic_rows = []
for _, (start, end) in enumerate(zip(pre_start_idxs, pre_end_idxs)):
# Get slice
pre_conns = conn_list_numpy[start:end]
# Repeat synapse type correct number of times
synapse_type_row = numpy.empty(len(pre_conns), dtype="uint32")
synapse_type_row.fill(synapse_type)
# Combine post-synaptic neuron ids, weights, delays
# and synapse types together into synaptic row
synaptic_rows.append(
SynapseRowInfo(pre_conns["target"],
pre_conns["weight"],
pre_conns["delay"],
synapse_type_row))
# Return full synaptic list
return SynapticList(synaptic_rows)
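# Minimal usage sketch (added; the connection tuples below are illustrative assumptions,
# not taken from the original file). Each tuple is (pre_idx, post_idx, weight, delay)
# as described in the class docstring.
example_conn_list = [(0, 1, 0.5, 1.0), (2, 3, 0.25, 2.0)]
example_connector = FromListConnector(example_conn_list)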
|
ominux/sPyNNaker
|
spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py
|
from_list_connector.py
|
py
| 3,525 |
python
|
en
|
code
| null |
github-code
|
6
|
12791911896
|
from django.contrib.auth.models import User
from django.http import JsonResponse, Http404
from django.shortcuts import redirect, get_object_or_404
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.generic import DetailView, CreateView, View, TemplateView, DeleteView
from blog.decorators import authorized_only
from blog.forms import PostForm
from blog.models import Post, Subscription, PostRead
@method_decorator(authorized_only, name='dispatch')
class SubscribeView(View):
def post(self, request):
try:
user_id = request.POST.get('pk')
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
            return JsonResponse({'status': 'error', 'error': 'User does not exist'})
sub, created = Subscription.objects.get_or_create(user=user)
        # if the user is already subscribed, remove them from the subscription; otherwise add them
if request.user in sub.subscribers.all():
sub.subscribers.remove(request.user.id)
subscribed = False
PostRead.objects.filter(user_id=request.user.id, post__user_id=user_id).delete()
else:
sub.subscribers.add(request.user.id)
subscribed = True
sub.save()
return JsonResponse({'status': 'ok', 'subscribed': subscribed})
class BlogView(DetailView):
model = User
template_name = 'blog/user.html'
def get_context_data(self, **kwargs):
context = super(BlogView, self).get_context_data(**kwargs)
context['posts'] = Post.objects.filter(user_id=self.object.id).prefetch_related('user')
return context
class PostCreateView(CreateView):
model = Post
form_class = PostForm
template_name = 'blog/create.html'
def form_valid(self, form):
post = form.save(commit=False)
post.user = self.request.user
post.save()
return redirect(reverse('blog:detail', args=(post.user_id, post.id)))
class PostDetailView(DetailView):
model = Post
template_name = 'blog/detail.html'
class HomeView(TemplateView):
template_name = 'home.html'
def get_context_data(self, **kwargs):
context = super(HomeView, self).get_context_data(**kwargs)
        # two queries to fetch all posts:
        # get the list of user ids the current user is subscribed to,
        # then fetch all posts from those ids
subscription = list(self.request.user.subscription.values_list('user_id', flat=True))
subscription.append(self.request.user.id)
context['posts'] = Post.objects.filter(user_id__in=subscription).prefetch_related('user').distinct()
return context
class PostReadView(View):
def post(self, request):
try:
post = Post.objects.exclude(user_id=request.user.id).get(pk=request.POST.get('post_id'))
except Post.DoesNotExist:
            return JsonResponse({'status': 'error', 'error': 'Post not found'})
PostRead.objects.get_or_create(user=request.user, post=post)
return JsonResponse({'status': 'ok'})
class PostDeleteView(DeleteView):
model = Post
def get_object(self, queryset=None):
return get_object_or_404(Post, user_id=self.request.user.id, pk=self.kwargs.get('pk'))
def get_success_url(self):
return reverse('blog:home')
|
skazancev/NeKidaem
|
project/blog/views.py
|
views.py
|
py
| 3,548 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75241434428
|
__all__ = ["save", "load", "load_state_dict", "arange", "cat", "cos", "clamp", "Device", "from_numpy", "flatten",
"LongTensor", "matmul", "mm", "normal", "ones", "x2ms_pow", "sin", "tanh", "x2ms_tensor", "Tensor",
"split", 'as_tensor', 'argmax', 'Generator', 'sigmoid', 'rand', 'floor', 'bernoulli', 'equal', 'var_mean',
'randperm', 'sqrt', 'stack', 'log', 'exp', 'typename', 'is_tensor', 'randn', 'FloatTensor', 'x2ms_max',
'x2ms_min', 'bmm', 'x2ms_abs', 'square', 'squeeze', 'unsqueeze', 'transpose', 'repeat_interleave', 'div',
'ones_like', 'where', 'tensordot', 'meshgrid', 'roll', 'linspace', 'full', 'empty', 'x2ms_sum',
'multinomial', 'gather', 'sort', 'topk', 'x2ms_all', 'cumsum', 'einsum', 'full_like', 'masked_select',
'x2ms_mean', 'mul', 'isfinite', 'diag', 'acos', 'add', 'argsort', 'asin', 'atan2', 'bincount',
'broadcast_tensors', 'chunk', 'conj', 'cosh', 'cross', 'cumprod', 'diagflat', 'x2ms_diagonal', 'eq',
'zeros_like', 'atan', 'unique', 'triu', 'nonzero', 'log2', 'cdist', 'erf', 'softmax', 'eye', 'prod', 'norm',
'zeros', 'lt', 'ge', 'ne', 'le', 'reshape', 'reminder', 'result_type', 'real', 'reciprocal', 'neg', 'isinf',
'isnan', 'argmin', 'floor_divide', 'fmod', 'empty_like', 'erfc', 'erfinv', 'expm1', 'flip', 'gt',
'bitwise_and', 'bitwise_or', 'bitwise_xor', 'bartlett_window', 'blackman_window', 'hamming_window', 'histc',
'imag', 'ceil', 'lerp', 'log1p', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'var', 'unbind',
'trunc', 'true_divide', 'triu_indices', 'triu', 'tril', 'trapz', 'trapezoid', 'trace', 'tan', 'take', 'lt',
'ge', 'ne', 'le', 'reshape', 'reminder', 'result_type', 'real', 'reciprocal', 'neg',
'minimum', 'hann_window', 'dot', 'scatter', 'ger', 'addmm', 'BoolTensor', 'finfo', 'IntTensor',
'get_rng_state', 'set_rng_state', 'randint', 'randn_like', 'ByteTensor', 'index_select', 'allclose', 't',
'vstack', 'rsqrt', 'x2ms_round', 'acosh', 'addcmul', 'addcdiv', 'asinh', 'atanh', 'amax', 'amin',
'cummax', 'cummin', 'logsumexp', 'renorm', 'xlogy', 'sign', 'sinh', 'less', 'narrow', 'tensor_zeros_like',
'Size', 'DoubleTensor', 'cosine_similarity', 'block_diag', 'cholesky_solve', 'lu_solve', 'x2ms_any',
'greater', 'greater_equal', 'less_equal', 'not_equal', 'multiply', 'logspace', 'tril_indices', 'vander',
'atleast_1d', 'atleast_2d', 'atleast_3d', 'column_stack', 'rad2deg', 'outer', 'negative', 'log10',
'count_nonzero', 'signbit', 'isposinf', 'isin', 'isneginf', 'copysign', 'deg2rad', 'diff', 'gcd',
'heaviside']
import numbers
import numpy as np
import mindspore
import mindspore.default_config
import mindspore.nn
import mindspore.numpy
import mindspore.dataset.transforms
import mindspore.ops.functional as F
from mindspore.nn import DistributedGradReducer
from mindspore.ops import composite
from mindspore.parallel._utils import (_get_device_num, _get_gradients_mean, _get_parallel_mode)
from mindspore.common.parameter import ParameterTuple
from mindspore.context import ParallelMode
from .core.decorator import x2ms_func_decorator
from .torch_api.save_load import save, load, load_state_dict
from .torch_api import tensor_api
from .torch_api import autograd
from .third_party_adapter import math_api
from .torch_api.nn_api import nn_cell
from .torch_api.nn_api import nn_init
from .torch_api.nn_api import nn_functional
from .core.context import x2ms_context
from .utils.util_api import logger
from .torch_api.torch_base_api import arange, cat, cos, clamp, Device, from_numpy, flatten, \
LongTensor, matmul, mm, normal, ones, x2ms_pow, sin, tanh, x2ms_tensor, Tensor, zeros, split, as_tensor, dot, \
x2ms_sum, argmax, Generator, sigmoid, rand, floor, bernoulli, equal, randperm, var_mean, sqrt, stack, log, exp, \
typename, is_tensor, randn, FloatTensor, x2ms_max, x2ms_min, bmm, x2ms_abs, square, squeeze, unsqueeze, \
transpose, repeat_interleave, div, ones_like, where, tensordot, meshgrid, roll, linspace, full, empty, \
multinomial, gather, sort, topk, x2ms_all, cumsum, einsum, full_like, masked_select, x2ms_mean, mul, isfinite, \
diag, acos, add, argsort, asin, atan2, bincount, broadcast_tensors, chunk, conj, cosh, cross, cumprod, \
diagflat, x2ms_diagonal, zeros_like, atan, unique, nonzero, log2, cdist, erf, softmax, eye, prod, norm, \
lt, ge, eq, ne, le, reshape, reminder, result_type, real, reciprocal, neg, isinf, isnan, argmin, floor_divide, \
fmod, empty_like, erfc, erfinv, expm1, flip, gt, bitwise_and, bitwise_or, bitwise_xor, bartlett_window, \
blackman_window, hamming_window, histc, imag, ceil, lerp, log1p, logical_and, logical_not, logical_or, \
logical_xor, var, unbind, trunc, true_divide, triu_indices, triu, tril, trapz, trapezoid, trace, tan, take, \
minimum, hann_window, scatter, ger, addmm, BoolTensor, finfo, IntTensor, get_rng_state, set_rng_state, randint, \
randn_like, ByteTensor, index_select, allclose, t, vstack, rsqrt, x2ms_round, acosh, addcmul, addcdiv, asinh, \
atanh, amax, amin, cummax, cummin, logsumexp, renorm, xlogy, sign, sinh, less, narrow, tensor_zeros_like, Size, \
DoubleTensor, cosine_similarity, block_diag, cholesky_solve, lu_solve, x2ms_any, greater, greater_equal, \
less_equal, not_equal, multiply, logspace, tril_indices, vander, atleast_1d, atleast_2d, atleast_3d, \
column_stack, rad2deg, outer, negative, log10, count_nonzero, signbit, isposinf, isin, isneginf, copysign, \
deg2rad, diff, gcd, heaviside
# overwrite Magic methods
mindspore.Tensor.__and__ = tensor_api.tensor_and
mindspore.Tensor.__or__ = tensor_api.tensor_or
mindspore.Tensor.__format__ = tensor_api.tensor_format
mindspore.Tensor.__getitem__ = tensor_api.tensor_getitem
mindspore.Tensor.__matmul__ = tensor_api.matmul
mindspore.Tensor.__setitem__ = tensor_api.tensor_setitem
mindspore.Tensor.T = tensor_api.transpose_
mindspore.Tensor.__float__ = lambda t: float(t.asnumpy())
mindspore.Tensor.__int__ = lambda t: int(t.asnumpy())
mindspore.Parameter.__iadd__ = tensor_api.parameter_iadd
mindspore.Parameter.__isub__ = tensor_api.parameter_isub
mindspore.Parameter.__imul__ = tensor_api.parameter_imul
mindspore.Parameter.__idiv__ = tensor_api.parameter_idiv
# overwrite properties
mindspore.Tensor.is_cuda = tensor_api.is_cuda
mindspore.Tensor.data = tensor_api.property_data
mindspore.Tensor.device = tensor_api.property_device
mindspore.Parameter.grad = tensor_api.grad
mindspore.Parameter.grad = tensor_api.set_grad
@property
def parameter_data(self):
return self
@parameter_data.setter
def set_data(self, new_data):
self.set_data(new_data)
mindspore.Parameter.data = parameter_data
mindspore.Parameter.data = set_data
def _get_calculate_shape(obj, other):
if not isinstance(other, mindspore.Tensor):
return obj.shape
return np.broadcast_shapes(obj.shape, other.shape)
def _replace_tensor_calculate_func(origin_func_name, output_type=None):
origin_func = getattr(mindspore.Tensor, origin_func_name)
def new_func(obj, other):
if obj.dtype == mindspore.float64:
obj = obj.astype(mindspore.float32)
if isinstance(other, np.ndarray):
other = mindspore.Tensor(other, obj.dtype)
if obj.size == 0 or (isinstance(other, mindspore.Tensor) and other.size == 0):
if output_type is None:
return mindspore.ops.Zeros()(_get_calculate_shape(obj, other), obj.dtype)
else:
return mindspore.ops.Zeros()(_get_calculate_shape(obj, other), output_type)
return origin_func(obj, other)
setattr(mindspore.Tensor, origin_func_name, new_func)
for func_name in ("__add__", "__sub__", "__mul__", "__truediv__", "__mod__", "__pow__"):
_replace_tensor_calculate_func(func_name)
for func_name in ("__lt__", "__gt__", "__le__", "__ge__", "__eq__", "__ne__"):
_replace_tensor_calculate_func(func_name, mindspore.bool_)
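# Added note: after the two loops above, arithmetic and comparison dunders on
# mindspore.Tensor return an all-zero tensor of the broadcast shape (bool-typed for
# comparisons) whenever either operand is empty, instead of calling the original operator.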
class GraphTrainStep(mindspore.nn.TrainOneStepCell):
def __init__(self, network, optimizer):
super(GraphTrainStep, self).__init__(network, optimizer)
def call_construct(self, *inputs):
new_input = list(mindspore.Tensor(value, dtype=mindspore.float32)
if not isinstance(value, mindspore.Tensor)
else value
for value in inputs)
return self.__call__(*new_input)
def construct(self, *inputs):
output = self.network(*inputs)
loss = output[0]
model_output = output[1:]
sens = (F.fill(loss.dtype, loss.shape, self.sens),)
for output_value in model_output:
sens += self._get_sens(output_value)
grads = self.grad(self.network, self.weights)(*inputs, sens)
grads = self.grad_reducer(grads)
F.depend(loss, self.optimizer(grads))
return output
@staticmethod
def _get_sens(value):
if isinstance(value, mindspore.Tensor):
return (F.fill(value.dtype, value.shape, 0),)
if isinstance(value, list):
sens = []
for tensor in value:
sens.append(F.fill(tensor.dtype, tensor.shape, 0))
return (sens,)
if isinstance(value, tuple):
sens = ()
for tensor in value:
sens += (F.fill(tensor.dtype, tensor.shape, 0),)
return (sens,)
return (0,)
def add_module(obj, name, module):
setattr(obj, name, module)
classic_cell_init = mindspore.nn.Cell.__init__
def new_cell_init(self, auto_prefix=True, flags=None):
classic_cell_init(self, auto_prefix, flags)
self.training = True
# same name and inherit subclass api
mindspore.nn.Cell.add_module = add_module
mindspore.nn.Cell.__init__ = new_cell_init
mindspore.nn.Cell._modules = nn_cell._modules
@property
def is_floating_point(self):
return self in (mindspore.float16, mindspore.float32, mindspore.float64)
mindspore.dtype.typing.Number.is_floating_point = is_floating_point
def cuda_set_device(device):
pass
def is_cuda_available():
"""
Stub function for torch.cuda.is_available.
get the info from default_config.
"""
return True
def memory_cached():
return 0.0
def memory_reserved():
return 0.0
def max_memory_reserved(device=None):
return 0.0
def max_memory_allocated(device=None):
return 0.0
def memory_allocated(device=None):
return 0.0
def get_device():
return mindspore.context.get_context('device_target')
@x2ms_func_decorator(mindspore.nn.Cell)
def parameters(obj, *args, **kwargs):
return get_cell_params(obj, *args, **kwargs)
def get_cell_params(cell, recurse=True):
return iter(cell.trainable_params(recurse) + cell.untrainable_params(recurse))
@x2ms_func_decorator(mindspore.nn.Cell)
def named_parameters(model, prefix='', recurse=True):
return list(param for param in model.parameters_and_names(prefix, recurse))
@x2ms_func_decorator(mindspore.nn.Cell)
def named_modules(model, prefix=''):
return model.cells_and_names(prefix)
@x2ms_func_decorator(mindspore.nn.Cell)
def graph_forward(obj, *args, **kwargs):
return obj(*args, **kwargs)
@x2ms_func_decorator(mindspore.nn.Cell)
def forward(obj, *args, **kwargs):
return obj.construct(*args, **kwargs)
@x2ms_func_decorator(mindspore.nn.Cell)
def x2ms_train(obj, *args, **kwargs):
if len(obj.trainable_params()) > 0:
if obj not in x2ms_context.amp_model:
x2ms_context.amp_model.append(obj)
return obj.set_train(*args, **kwargs)
@x2ms_func_decorator(mindspore.nn.Cell)
def x2ms_eval(obj, *args, **kwargs):
return obj.set_train(False)
class TrainCellWithoutOptimizer(mindspore.nn.Cell):
def __init__(self, network, sens=1.0):
super(TrainCellWithoutOptimizer, self).__init__(auto_prefix=False)
self.network = network
self.network.set_grad()
self.model_weights = ParameterTuple(parameters(network))
self.grad = composite.GradOperation(get_by_list=True, sens_param=True)
self.sens = sens
self.parallel_flag = False
self.grad_reducer = F.identity
self.parallel_mode = _get_parallel_mode()
self.parallel_flag = self.parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL)
if self.parallel_flag:
self.grads_mean = _get_gradients_mean()
self.device_num = _get_device_num()
self.grad_reducer = DistributedGradReducer(self.model_weights, self.grads_mean, self.device_num)
def construct(self, *inputs):
train_loss = self.network(*inputs)
train_sens = F.fill(train_loss.dtype, train_loss.shape, self.sens)
grads = self.grad(self.network, self.model_weights)(*inputs, train_sens)
grads = self.grad_reducer(grads)
for i, parameter in enumerate(self.model_weights):
parameter.grad = grads[i]
return train_loss
_wrapped_model_dict = {}
def train_one_step_cell(model, optimizer=None):
key = id(model)
if key in _wrapped_model_dict.keys():
return _wrapped_model_dict.get(key)
if x2ms_context.amp_opt_level is None or x2ms_context.amp_model is None:
if optimizer is None:
wrapped_model = TrainCellWithoutOptimizer(model)
else:
wrapped_model = mindspore.nn.TrainOneStepCell(model, optimizer)
else:
if isinstance(x2ms_context.loss_scale, numbers.Number) and x2ms_context.amp_opt_level != "O2":
wrapped_model = mindspore.amp.build_train_network(model, optimizer, level=x2ms_context.amp_opt_level,
loss_scale_manager=mindspore.FixedLossScaleManager(
x2ms_context.loss_scale))
else:
wrapped_model = mindspore.amp.build_train_network(model, optimizer, level=x2ms_context.amp_opt_level)
_wrapped_model_dict[key] = wrapped_model
return wrapped_model
def graph_train_one_step_cell(model, optimizer):
key = id(model)
if key in _wrapped_model_dict.keys():
return _wrapped_model_dict.get(key)
if x2ms_context.amp_opt_level is None or x2ms_context.amp_model is None:
wrapped_model = GraphTrainStep(model, optimizer)
else:
raise NotImplementedError("Graph mode does not currently support Mixed precision")
_wrapped_model_dict[key] = wrapped_model
return wrapped_model
def load_state_dict_from_url(url, model_dir=None, map_location=None, progress=True, check_hash=False):
"""
    The 'model_dir', 'map_location', 'progress' and 'check_hash' parameters are currently not supported.
"""
logger.warning("Not support load_state_dict_from_url now")
return {}
def to(obj, *args, **kwargs):
if isinstance(obj, mindspore.nn.Cell):
return _cell_to(obj, *args, **kwargs)
elif isinstance(obj, mindspore.Tensor):
return _tensor_to(obj, *args, **kwargs)
else:
return obj.to(*args, **kwargs)
def _cell_to(obj, *args, **kwargs):
if args:
param = args[0]
if isinstance(param, mindspore.Type) and param in (mindspore.float16, mindspore.float32):
return obj.to_float(dst_type=param)
if isinstance(param, mindspore.Tensor) and param.dtype in (mindspore.float16, mindspore.float32):
return obj.to_float(dst_type=param.dtype)
if len(args) > 1:
param = args[1]
if param in (mindspore.float16, mindspore.float32):
return obj.to_float(dst_type=param)
if 'dtype' in kwargs.keys() and kwargs['dtype'] in (mindspore.float16, mindspore.float32):
return obj.to_float(dst_type=kwargs['dtype'])
if 'other' in kwargs.keys() and kwargs['other'].dtype in (mindspore.float16, mindspore.float32):
return obj.to_float(dst_type=kwargs['other'])
return obj
def _tensor_to(obj, *args, **kwargs):
if args:
param = args[0]
if isinstance(param, mindspore.common.Type):
return obj.astype(dtype=param)
if isinstance(param, mindspore.Tensor):
return obj.astype(dtype=param.dtype)
if len(args) > 1:
return obj.astype(dtype=args[1])
if 'dtype' in kwargs.keys():
return obj.astype(dtype=kwargs['dtype'])
if 'other' in kwargs.keys():
return obj.astype(dtype=kwargs['other'].dtype)
return obj
def get_device_properties(device):
return CUDADeviceProperty()
def convert_sync_batchnorm(module, process_group=None):
return module
class CUDADeviceProperty:
def __init__(self):
device_target = mindspore.context.get_context('device_target')
device_id = mindspore.context.get_context('device_id')
self.name = f'{device_target}:{device_id}'
self.total_memory = 0
|
Gufrannn/W-MAE
|
MindSpore/x2ms_adapter/__init__.py
|
__init__.py
|
py
| 16,986 |
python
|
en
|
code
| 12 |
github-code
|
6
|
17908940134
|
# Chapter 1
spam_amount = 0
print(spam_amount)
spam_amount += 4
if spam_amount > 0 :
print("But I don't want Any spam!")
viking_song = "spam " * spam_amount # spam spam spam spam
#viking_song = "spam " + spam_amount
print(viking_song)
print(spam_amount*4) # 16
print(float(str(spam_amount)*4)*4) #17776.0
print(type(spam_amount))
print(5/2) # 2.5
print(5//2) # 2
print(5%2) # 1
print(5**2) # 25
print(min(1,2,3,4,5)) #1
print(max(1,2,3,4,5)) #5
print(abs(-32)) #32
a = [1,2,3]
b = [3,2,1]
temp=a; a=b; b=temp
print(a)
a,b = b,a # crazy (tuple swap)
print(a)
#Chapter 2 : help & function
help(round)
help(round(-2.01)) #help(int)
help(print)
def least_difference(a, b, c) :
""" Return the smallest diferrence between any two numbers
among a, b, and c.
>>> least_difference(1,5,-5)
4
"""
diff1 = abs(a-b);
diff2 = abs(b-c);
diff3 = abs(a-c);
return min(diff1, diff2, diff3)
print(
least_difference(1,10,100),
least_difference(1,10,10),
least_difference(5,6,7),
)
help(least_difference)
print(1,2,3, sep=' < ') # default sep = ' '
def greet(who="Colin") :
print("Hello, ", who)
greet() #default who="Colin"
greet(who="Kaggle")
greet("world")
def multy_by_five(x) :
return 5 * x
def call(fn, arg) :
"""Call fn on arg"""
return fn(arg)
def squared_call(fn,arg) :
return fn(fn(arg))
print (
call(multy_by_five, 1),
squared_call(multy_by_five, 1),
sep='\n',
)
def mod_5(x) :
"""Return the remainder of x after dividing by 5"""
return x % 5
print (
max(100,51,14), # 100
    max(100,51,14, key=mod_5), # the value whose mod_5 result is largest: 14
sep='\n',
)
print(round(511123,-2)) # rounds to the nearest hundred (511100)
from time import time
t=time()
print(t,"seconds since the Epoch")
from time import sleep
duration =5
print("Getting sleepy. See you in",duration, "seconds")
sleep(duration)
print("I'm back")
|
data-droid/study
|
kaggleLearn/python.py
|
python.py
|
py
| 1,826 |
python
|
en
|
code
| 6 |
github-code
|
6
|
35413864969
|
import multiprocessing
from multiprocessing import Process
from cleanup import TwitterCleanuper
from preprocessing import TwitterData
from word2vec import Word2VecProvider
import numpy as np
import nltk  # nltk and numpy are used by TwitterData.build_final_model below
import pandas as pd
def preprocess(results, data_path, is_testing, data_name, min_occurrences=5, cache_output=None):
twitter_data = TwitterData()
twitter_data.initialize(data_path, is_testing)
twitter_data.build_features()
twitter_data.cleanup(TwitterCleanuper())
twitter_data.tokenize()
twitter_data.build_wordlist(min_occurrences=min_occurrences)
twitter_data.build_data_model()
# twitter_data.build_ngrams()
# twitter_data.build_ngram_model()
# twitter_data.build_data_model(with_ngram=2)
word2vec = Word2VecProvider()
word2vec.load("/home/mike/Desktop/glove.twitter.27B.200d.txt")
twitter_data.build_word2vec_model(word2vec)
if cache_output is not None:
twitter_data.data_model.to_csv(cache_output, index_label="idx", float_format="%.6f")
results[data_name] = twitter_data.data_model
def preprare_data(min_occurrences):
import os
training_data = None
testing_data = None
print("Loading data...")
test_data_file_name = "data/processed_test_word2vec_bow_" + str(min_occurrences) + ".csv"
train_data_file_name = "data/processed_train_word2vec_bow_" + str(min_occurrences) + ".csv"
use_cache = os.path.isfile(train_data_file_name) and os.path.isfile(
test_data_file_name)
if use_cache:
training_data = TwitterData()
training_data.initialize(None, from_cached=train_data_file_name)
training_data = training_data.data_model
testing_data = TwitterData()
testing_data.initialize(None, from_cached=test_data_file_name)
testing_data = testing_data.data_model
print("Loaded from cached files...")
else:
print("Preprocessing data...")
with multiprocessing.Manager() as manager:
results = manager.dict()
preprocess_training = Process(target=preprocess, args=(
results, "data/train.csv", False, "train", min_occurrences, train_data_file_name,))
preprocess_testing = Process(target=preprocess, args=(
results, "data/train.csv", True, "test", min_occurrences, test_data_file_name,))
preprocess_training.start()
preprocess_testing.start()
print("Multiple processes started...")
preprocess_testing.join()
print("Preprocessed testing data...")
preprocess_training.join()
print("Preprocessed training data...")
training_data = results["train"]
testing_data = results["test"]
print("Data preprocessed & cached...")
return training_data, testing_data
class TwitterData( TwitterData_ExtraFeatures ):
def build_final_model (self, word2vec_provider, stopwords=nltk.corpus.stopwords.words( "english" )):
whitelist = self.whitelist
stopwords = list( filter( lambda sw: sw not in whitelist, stopwords ) )
extra_columns = [col for col in self.processed_data.columns if col.startswith( "number_of" )]
similarity_columns = ["bad_similarity", "good_similarity", "information_similarity"]
label_column = []
if not self.is_testing:
label_column = ["label"]
columns = label_column + ["original_id"] + extra_columns + similarity_columns + list(
map( lambda i: "word2vec_{0}".format( i ), range( 0, word2vec_provider.dimensions ) ) ) + list(
map( lambda w: w + "_bow", self.wordlist ) )
labels = []
rows = []
for idx in self.processed_data.index:
current_row = []
if not self.is_testing:
# add label
current_label = self.processed_data.loc[idx, "emotion"]
labels.append( current_label )
current_row.append( current_label )
current_row.append( self.processed_data.loc[idx, "id"] )
for _, col in enumerate( extra_columns ):
current_row.append( self.processed_data.loc[idx, col] )
# average similarities with words
tokens = self.processed_data.loc[idx, "tokenized_text"]
for main_word in map( lambda w: w.split( "_" )[0], similarity_columns ):
current_similarities = [abs( sim ) for sim in
map( lambda word: word2vec_provider.get_similarity( main_word, word.lower() ),
tokens ) if
sim is not None]
if len( current_similarities ) <= 1:
current_row.append( 0 if len( current_similarities ) == 0 else current_similarities[0] )
continue
max_sim = max( current_similarities )
min_sim = min( current_similarities )
current_similarities = [((sim - min_sim)/(max_sim - min_sim)) for sim in
current_similarities] # normalize to <0;1>
current_row.append( np.array( current_similarities ).mean() )
# add word2vec vector
tokens = self.processed_data.loc[idx, "tokenized_text"]
current_word2vec = []
for _, word in enumerate( tokens ):
vec = word2vec_provider.get_vector( word.lower() )
if vec is not None:
current_word2vec.append( vec )
averaged_word2vec = list( np.array( current_word2vec ).mean( axis=0 ) )
current_row += averaged_word2vec
# add bag-of-words
tokens = set( self.processed_data.loc[idx, "text"] )
for _, word in enumerate( self.wordlist ):
current_row.append( 1 if word in tokens else 0 )
rows.append( current_row )
self.data_model = pd.DataFrame( rows, columns=columns )
self.data_labels = pd.Series( labels )
return self.data_model, self.data_labels
def log(text):
print(text)
with open("log.txt", "a") as log_file:
log_file.write(str(text) + "\n")
if __name__ == "__main__":
def main():
for m in range( 3, 4 ):
print("Preparing data with min_occurrences=" + str( m ))
training_data, testing_data = preprare_data( m )
log( "********************************************************" )
log( "Validating for {0} min_occurrences:".format( m ) )
# drop idx & id columns
# if training_data.columns[0] == "idx":
# training_data = training_data.iloc[:, 1:]
#
# if testing_data.columns[0] == "idx":
# testing_data = testing_data.iloc[:, 1:]
#
# if "original_id" in training_data.columns:
# training_data.drop( "original_id", axis=1, inplace=True )
#
# if "original_id" in testing_data.columns:
# testing_data.drop( "original_id", axis=1, inplace=True )
td = TwitterData()
td.initialize( "data\\train.csv" )
td.build_features()
td.cleanup( TwitterCleanuper() )
td.tokenize()
td.stem()
td.build_wordlist()
td.build_final_model( word2vec )
td.data_model.head( 5 )
print("Done!")
main()
|
michal0janczyk/information_diffusion
|
fuzzy_logic/word_2_vectors/main.py
|
main.py
|
py
| 7,484 |
python
|
en
|
code
| 1 |
github-code
|
6
|
9373814970
|
'''
The following uses the Carseats data set from the ISLR package.
Using as training data the rows whose Sales value is not an outlier,
compute the standard deviation of Age.
(An outlier is a value more than 1.5 standard deviations below or above the mean.)
'''
import pandas as pd
data = 'Carseats.csv'
df = pd.read_csv(data)
#print(df)
#print(df.info())
#print(df.describe())
chk = df.Sales.std()
#print(chk)
train = df[(df.Sales < (df.Sales.mean() + chk*1.5)) & (df.Sales > (df.Sales.mean() - chk*1.5))]
ans = train.Age.std()
print(ans)
'''
The following uses the Cars93 data set from the MASS package.
After replacing the missing values of Luggage.room with the median,
compute the difference between the means before and after the replacement.
'''
import pandas as pd
data = 'Cars93.csv'
df = pd.read_csv(data)
#print(df)
#print(df.info())
#print(df.describe())
before = df['Luggage.room'].mean()
df.loc[df['Luggage.room'].isnull(), 'Luggage.room'] = df['Luggage.room'].fillna(df['Luggage.room'].median())
after = df['Luggage.room'].mean()
ans = abs(before - after)
print(ans)
'''
The following uses the TimeAge data set from Covid19.
Compute the difference between the mean number of confirmed cases
for the 20s age group and for the 50s age group.
'''
import pandas as pd
data = 'TimeAge.csv'
df = pd.read_csv(data)
#print(df)
#print(df.info())
#print(df.describe())
chk = df.groupby('age').mean().reset_index()
#print(chk)
ans = chk[chk.age == '20s'].confirmed.values[0] - chk[chk.age == '50s'].confirmed.values[0]
print(ans)
|
JoinNova/PracticeBigdata_Python
|
0143.py
|
0143.py
|
py
| 1,594 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
71844650429
|
from django.conf import settings
from django.urls import NoReverseMatch, reverse
def get_slug_or_pk(object, slug_field=None):
res = dict()
field = slug_field if hasattr(object, slug_field) else "pk"
if object:
param = "slug" if hasattr(object, slug_field) else "pk"
res.update({param: getattr(object, field)})
return res
def get_urls_of_site(site, object=None, user=None):
urls = {}
app = site.model._meta.app_label
model = site.model._meta.model_name
kwargs = get_slug_or_pk(object, slug_field=site.slug_field)
for action, perm in (("list", "view"), ("create", "add")):
try:
url_name = site.get_url_name(action)
if not user:
urls.update({action: reverse(url_name)})
elif user.has_perm(f"{app}.{perm}_{model}"):
urls.update({action: reverse(url_name)})
except NoReverseMatch:
if settings.DEBUG:
print("DEBUG: Url not found: %s" % url_name)
if not kwargs:
return urls
for action, perm in (
("update", "change"),
("detail", "view"),
("delete", "delete"),
):
try:
url_name = site.get_url_name(action)
if not user:
urls.update({action: reverse(url_name, kwargs=kwargs)})
elif user.has_perm(f"{app}.{perm}_{model}"):
urls.update({action: reverse(url_name, kwargs=kwargs)})
except NoReverseMatch:
if settings.DEBUG:
print("DEBUG: Url not found: %s" % url_name)
return urls
|
dbsiavichay/faclab
|
viewpack/shortcuts.py
|
shortcuts.py
|
py
| 1,595 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3539820868
|
"""
Name : portfolio_optimizer.py
Author : Yinsen Miao
Contact : [email protected]
Time : 7/21/2021
Desc: Solve mean-variance optimization
"""
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from gerber import gerber_cov_stat1, gerber_cov_stat2
from ledoit import ledoit
def set_eps_wgt_to_zeros(in_array, eps=1e-4):
# set small weights to 0 and return a list
out_array = np.array(in_array)
out_array[np.abs(in_array) < eps] = 0
out_array = np.array(out_array) / np.sum(out_array)
return out_array
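# Added illustration (an assumption, not part of the original file):
# set_eps_wgt_to_zeros([0.5, 1e-6, 0.5]) -> array([0.5, 0. , 0.5])
# (weights below eps are zeroed, then the remainder is renormalized to sum to 1)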
class portfolio_optimizer:
def __init__(self, min_weight: float = 0., max_weight: float = 1.0,
cov_function: str = "HC",
freq: str = "monthly",
gs_threshold: float = 0.5):
"""
:param min_weight:
:param max_weight:
:param cov_function: can be one of the HC (historical covariance matrix), GS1 (Gerber Stat1), GS2 (Gerber Stat2)
:param freq: frequency of the returns series either daily or monthly
:param gs_threshold: threshold of Gerber statistics between 0 and 1
"""
# check arguments
assert cov_function in ['HC', 'GS1', 'GS2', 'SM'], "The covariance function must be one from HC, SM, GS1, and GS2"
assert freq in ['daily', 'monthly'], "The return series can only be either daily or monthly"
assert 1 > min_weight >= 0, "The minimal weight shall be in [0, 1)"
assert 1 >= max_weight > 0, "The maximum weight shall be in (0, 1]"
assert 1 >= gs_threshold > 0, "The Gerber shrinkage threshold shall be in (0, 1]"
self.min_weight = min_weight
self.max_weight = max_weight
self.factor = 252 if freq == "daily" else 12 # annual converter
self.cov_function = cov_function # covariance function can be one of HC, GS1, GS2
self.freq = freq # freq of return series can be either daily or monthly
self.init_weights = None # initial portfolio weights
self.covariance = None
self.returns_df = None
self.negative_returns_df = None
self.covariance_neg = None # covariance matrix of only negative returns for sortino ratio
self.obj_function = None
self.by_risk = None
self.gs_threshold = gs_threshold
def set_returns(self, returns_df: pd.DataFrame):
"""
pass the return series to the class
:param returns_df: pd.DataFrame of historical daily or monthly returns
"""
self.returns_df = returns_df.copy(deep=True)
self.negative_returns_df = returns_df[returns_df < 0].fillna(0) # keep only the negative returns
def optimize(self, obj_function: str,
target_std: float = None,
target_return: float = None,
prev_weights: np.array = None,
init_weights: np.array = None,
cost: float = None) -> np.array:
"""
Perform portfolio optimization given a series of returns
        :param obj_function: one of equalWeighting, maxReturn, minVariance, meanVariance, maxSharpe, maxSortino
        :param target_std: targeted annualized portfolio standard deviation (std)
        :param target_return: targeted annualized portfolio return
        :param prev_weights: previous portfolio weights
        :param init_weights: initial weights used as a warm start for the optimizer
:param cost: cost of transaction fee and slippage in bps or 0.01%
:return: an array of portfolio weights p x 1
"""
n, p = self.returns_df.shape # n is number of observations, p is number of assets
if init_weights is None:
self.init_weights = np.array(p * [1. / p]) # initialize weights: equal weighting
else:
self.init_weights = init_weights # otherwise use the nearby weights as hot start for MVO
self.obj_function = obj_function
# get covariance matrix
if self.cov_function == "HC":
self.covariance = self.returns_df.cov().to_numpy() # convert to numpy
self.covariance_neg = self.negative_returns_df.cov().to_numpy() # convert to numpy
elif self.cov_function == "SM":
self.covariance, _ = ledoit(self.returns_df.values)
self.covariance_neg, _ = ledoit(self.negative_returns_df.values)
elif self.cov_function == "GS1":
self.covariance, _ = gerber_cov_stat1(self.returns_df.values, threshold=self.gs_threshold)
self.covariance_neg, _ = gerber_cov_stat1(self.negative_returns_df.values, threshold=self.gs_threshold)
elif self.cov_function == "GS2":
self.covariance, _ = gerber_cov_stat2(self.returns_df.values, threshold=self.gs_threshold)
self.covariance_neg, _ = gerber_cov_stat2(self.negative_returns_df.values, threshold=self.gs_threshold)
# set objective function
if obj_function == "equalWeighting":
self.init_weights = np.array(p * [1. / p]) # initialize weights: equal weighting
return self.init_weights
# set the bounds of each asset holding from 0 to 1
bounds = tuple((self.min_weight, self.max_weight) for k in range(p))
constraints = [{'type': 'eq', 'fun': lambda x: np.sum(x) - 1.0}] # fully invest
if obj_function == 'meanVariance':
if target_std is not None:
self.by_risk = True
# optimize under risk constraint
constraints.append({'type': 'eq', 'fun': lambda weights: \
self.calc_annualized_portfolio_std(weights) - target_std})
else:
# optimize under return constraint
self.by_risk = False
constraints.append({'type': 'eq', 'fun': lambda weights: \
self.calc_annualized_portfolio_return(weights) - target_return})
if prev_weights is not None and cost is not None:
# cost function with transaction fee
cost_fun = lambda weights: self.object_function(weights) +\
np.abs(weights - prev_weights).sum() * cost / 10000.
else:
# cost function without any transaction fee
cost_fun = lambda weights: self.object_function(weights)
# trust-constr, SLSQP, L-BFGS-B
try:
opt = minimize(cost_fun, x0=self.init_weights, bounds=bounds, constraints=constraints, method="SLSQP")
except:
# if SLSQP fails then switch to trust-constr
opt = minimize(cost_fun, x0=self.init_weights, bounds=bounds, constraints=constraints, method="trust-constr")
return set_eps_wgt_to_zeros(opt['x']) # pull small values to zeros
def object_function(self, weights: np.array) -> float:
"""
:param weights: current weights to be optimized
"""
if self.obj_function == "maxReturn":
f = self.calc_annualized_portfolio_return(weights)
return -f
elif self.obj_function == "minVariance":
f = self.calc_annualized_portfolio_std(weights)
return f
elif self.obj_function == "meanVariance" and self.by_risk:
f = self.calc_annualized_portfolio_return(weights) # maximize target return level
return -f
elif self.obj_function == "meanVariance" and not self.by_risk:
f = self.calc_annualized_portfolio_std(weights) # minimize target risk or std level
return f
elif self.obj_function == "maxSharpe":
f = self.calc_annualized_portfolio_sharpe_ratio(weights)
return -f
elif self.obj_function == "maxSortino":
f = self.calc_annualized_sortino_ratio(weights)
return -f
elif self.obj_function == 'riskParity':
f = self.calc_risk_parity_func(weights)
return f
else:
raise ValueError("Object function shall be one of the equalWeighting, maxReturn, minVariance, " +
"meanVariance, maxSharpe, maxSortino or riskParity")
def calc_annualized_portfolio_return(self, weights: np.array) -> float:
# calculate the annualized standard returns
annualized_portfolio_return = float(np.sum(self.returns_df.mean() * self.factor * weights))
#float(np.sum(((1 + self.returns_df.mean()) ** self.factor - 1) * weights))
return annualized_portfolio_return
def calc_annualized_portfolio_std(self, weights: np.array) -> float:
if self.obj_function == "equalWeighting":
# if equal weight then set the off diagonal of covariance matrix to zero
annualized_portfolio_std = np.sqrt(np.dot(weights.T, np.dot(np.diag(self.covariance.diagonal()) * self.factor, weights)))
else:
temp = np.dot(weights.T, np.dot(self.covariance * self.factor, weights))
if temp <= 0:
temp = 1e-20 # set std to a tiny number
annualized_portfolio_std = np.sqrt(temp)
if annualized_portfolio_std <= 0:
            raise ValueError(f'annualized_portfolio_std cannot be zero. Weights: {weights}')
return annualized_portfolio_std
def calc_annualized_portfolio_neg_std(self, weights: np.array) -> float:
if self.obj_function == "equalWeighting":
# if equal weight then set the off diagonal of covariance matrix to zero
annualized_portfolio_neg_std = np.sqrt(np.dot(weights.T, np.dot(np.diag(self.covariance_neg.diagonal()) * self.factor, weights)))
else:
annualized_portfolio_neg_std = np.sqrt(np.dot(weights.T, np.dot(self.covariance_neg * self.factor, weights)))
if annualized_portfolio_neg_std == 0:
            raise ValueError(f'annualized_portfolio_neg_std cannot be zero. Weights: {weights}')
return annualized_portfolio_neg_std
def calc_annualized_portfolio_moments(self, weights: np.array) -> tuple:
# calculate the annualized portfolio returns as well as its standard deviation
return self.calc_annualized_portfolio_return(weights), self.calc_annualized_portfolio_std(weights)
def calc_annualized_portfolio_sharpe_ratio(self, weights: np.array) -> float:
# calculate the annualized Sharpe Ratio
return self.calc_annualized_portfolio_return(weights) / self.calc_annualized_portfolio_std(weights)
def calc_annualized_sortino_ratio(self, weights: np.array) -> float:
# calculate the annualized Sortino Ratio
return self.calc_annualized_portfolio_return(weights) / self.calc_annualized_portfolio_neg_std(weights)
def calc_risk_parity_func(self, weights):
# Spinu formulation of risk parity portfolio
assets_risk_budget = self.init_weights
portfolio_volatility = self.calc_annualized_portfolio_std(weights)
x = weights / portfolio_volatility
risk_parity = (np.dot(x.T, np.dot(self.covariance * self.factor, x)) / 2.) - np.dot(assets_risk_budget.T, np.log(x + 1e-10))
return risk_parity
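    # Note (added for clarity): calc_risk_parity_func implements Spinu's convex
    # risk-parity objective f(x) = 0.5 * x' Sigma x - b' log(x), where b are the
    # risk budgets (here the initial weights); the 1e-10 term only guards against log(0).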
def calc_relative_risk_contributions(self, weights):
# calculate the relative risk contributions for each asset given returns and weights
rrc = weights * np.dot(weights.T, self.covariance) / np.dot(weights.T, np.dot(self.covariance, weights))
return rrc
# unitest the code
if __name__ == "__main__":
bgn_date = "2016-01-01"
end_date = "2020-01-01"
file_path = "../data/prcs.csv"
rets_df = pd.read_csv(file_path, parse_dates=['Date'], index_col=["Date"]).pct_change()[bgn_date: end_date]
rets = rets_df.values
# test objective function list
obj_function_list = ['equalWeighting', 'minVariance', 'maxReturn', 'maxSharpe', 'maxSortino', 'riskParity']
cov_function_list = ["HC", "SM", "GS1", "GS2"]
for cov_fun in cov_function_list:
print("MVO based on %s covariance function ..." % cov_fun)
port_opt = portfolio_optimizer(min_weight=0, max_weight=1, cov_function=cov_fun, freq="monthly")
port_opt.set_returns(returns_df=rets_df)
# run MVO under various optimization goals
for obj_fun_str in obj_function_list:
weights = port_opt.optimize(obj_fun_str)
ret, std = port_opt.calc_annualized_portfolio_moments(weights=weights)
sharpe = port_opt.calc_annualized_portfolio_sharpe_ratio(weights=weights)
sortino = port_opt.calc_annualized_sortino_ratio(weights=weights)
print("%20s: ret %.3f, std %.3f, Sharpe %.3f, Sortino %.3f" % (obj_fun_str, ret, std, sharpe, sortino))
obj_fun_str = "meanVariance"
# optimize for target std levels
target_stds = [3, 6, 9, 12, 15]
for target_std in target_stds:
weights = port_opt.optimize(obj_fun_str, target_std / 100.)
# print(weights)
ret, std = port_opt.calc_annualized_portfolio_moments(weights=weights)
sharpe = port_opt.calc_annualized_portfolio_sharpe_ratio(weights=weights)
sortino = port_opt.calc_annualized_sortino_ratio(weights=weights)
print("%20s (%02d%%): ret %.3f, std %.3f, Sharpe %.3f, Sortino %.3f" % (
obj_fun_str, target_std, ret, std, sharpe, sortino))
|
yinsenm/gerber
|
src/portfolio_optimizer.py
|
portfolio_optimizer.py
|
py
| 13,193 |
python
|
en
|
code
| 49 |
github-code
|
6
|
37283667870
|
# Importing modules
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import joblib
import findspark
findspark.init()
from pyspark.sql import SparkSession
from pyspark.ml import PipelineModel
from pyspark.sql.functions import *
# Configure spark session
spark = SparkSession\
.builder\
.master('local[2]')\
.appName('quake_etl')\
    .config('spark.jars.packages', 'org.mongodb.spark:mongo-spark-connector_2.12:2.4.1')\
.getOrCreate()
from bokeh.io import output_notebook, output_file
from bokeh.plotting import figure, show, ColumnDataSource
from bokeh.models.tools import HoverTool
import math
from math import pi
from bokeh.palettes import Category20c
from bokeh.transform import cumsum
from bokeh.tile_providers import CARTODBPOSITRON, get_provider, Vendors
from bokeh.themes import built_in_themes
from bokeh.io import curdoc
import warnings
warnings.filterwarnings('ignore')
from pyspark.sql.functions import desc
df=joblib.load('./joblibs/df.joblib')
data=spark.createDataFrame(df)
df_quake_freq=joblib.load('./visualization_files/df_quake_freq.joblib')
df_pred=joblib.load('./visualization_files/rffpred.joblib')
def svm(a):
clf=joblib.load('./joblibs/svmModel.joblib')
a=np.array([a])
y_pred_svm=clf.predict(a)
return y_pred_svm[0]/10
def dt(a):
a=np.array([a])
dc=joblib.load('./joblibs/dtModel.joblib')
y_pred_dc=dc.predict(a)
return y_pred_dc[0]
def rf(a):
pipe=PipelineModel.load("./joblibs/rfmodel.model_v0")
ip = pd.DataFrame(np.array([a]))
ip.columns=['Latitude','Longitude','Depth']
dip=spark.createDataFrame(ip)
pred_results_RF = pipe.transform(dip)
return pred_results_RF.collect()[0][4]
def knn(a):
from sklearn.neighbors import KNeighborsRegressor
    #Separating X and y
X=df[['Year','Latitude','Longitude','Depth']]
y=df[['Magnitude']]
kneigh=KNeighborsRegressor(n_neighbors = 5)
kneigh.fit(X, y.values.ravel())
a=np.array([a])
y_pred_knn=kneigh.predict(a)
return y_pred_knn[0]
def style(p):
p.title.align='center'
p.title.text_font_size = '20pt'
p.title.text_font = 'serif'
p.xaxis.axis_label_text_font_size = '14pt'
p.xaxis.axis_label_text_font_style= 'bold'
p.yaxis.axis_label_text_font_size = '14pt'
p.yaxis.axis_label_text_font_style= 'bold'
p.xaxis.major_label_text_font_size = '12pt'
p.yaxis.major_label_text_font_size = '12pt'
p.legend.location = 'top_left'
return p
def showmap():
    # Earthquake Map Representation
df_quakes_2016 = data[data['Year']==2016]
df_quakes_2016=df_quakes_2016.toPandas()
def plotMap():
lat = df_quakes_2016['Latitude'].values.tolist()
lon = df_quakes_2016['Longitude'].values.tolist()
pred_lat = df_pred['Latitude'].values.tolist()
pred_lon = df_pred['Longitude'].values.tolist()
lst_lat = []
lst_lon = []
lst_pred_lat = []
lst_pred_lon = []
i=0
j=0
for i in range (len(lon)):
r_major = 6378137.000
x = r_major * math.radians(lon[i])
scale = x/lon[i]
y = 180.0/math.pi * math.log(math.tan(math.pi/4.0 +
lat[i] * (math.pi/180.0)/2.0)) * scale
lst_lon.append(x)
lst_lat.append(y)
i += 1
for j in range (len(pred_lon)):
r_major = 6378137.000
x = r_major * math.radians(pred_lon[j])
scale = x/pred_lon[j]
y = 180.0/math.pi * math.log(math.tan(math.pi/4.0 +
pred_lat[j] * (math.pi/180.0)/2.0)) * scale
lst_pred_lon.append(x)
lst_pred_lat.append(y)
j += 1
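        # The two loops above apply the spherical Web Mercator projection
        # (x = R * lon_rad, y = R * ln(tan(pi/4 + lat_rad / 2)), R = 6378137 m)
        # so the quake coordinates line up with the CARTODBPOSITRON map tiles used below.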
df_quakes_2016['coords_x'] = lst_lat
df_quakes_2016['coords_y'] = lst_lon
df_pred['coords_x'] = lst_pred_lat
df_pred['coords_y'] = lst_pred_lon
df_quakes_2016['Mag_Size'] = df_quakes_2016['Magnitude'] * 4
df_pred['Mag_Size'] = df_pred['Pred_Magnitude'] * 4
lats = df_quakes_2016['coords_x'].tolist()
longs = df_quakes_2016['coords_y'].tolist()
mags = df_quakes_2016['Magnitude'].tolist()
years = df_quakes_2016['Year'].tolist()
mag_size = df_quakes_2016['Mag_Size'].tolist()
pred_lats = df_pred['coords_x'].tolist()
pred_longs = df_pred['coords_y'].tolist()
pred_mags = df_pred['Pred_Magnitude'].tolist()
pred_year = df_pred['Year'].tolist()
pred_mag_size = df_pred['Mag_Size'].tolist()
cds = ColumnDataSource(
data=dict(
lat=lats,
lon=longs,
mag=mags,
year=years,
mag_s=mag_size
)
)
pred_cds = ColumnDataSource(
data=dict(
pred_lat=pred_lats,
pred_long=pred_longs,
pred_mag=pred_mags,
year=pred_year,
pred_mag_s=pred_mag_size
)
)
TOOLTIPS = [( "Year", "@year"), ("Magnitude", "@mag"),("Predicted Magnitude", "@pred_mag")
]
p = figure(title = 'Earthquake Map',
plot_width=2300, plot_height=450,
x_range=(-2000000, 6000000),
y_range=(-1000000, 7000000),
tooltips=TOOLTIPS)
p.circle(x='lon', y='lat', size='mag_s', fill_color='#cc0000', fill_alpha=0.7,
source=cds, legend='Quakes 2016')
        p.circle(x='pred_long', y='pred_lat', size='pred_mag_s', fill_color='#ccff33', fill_alpha=0.7,
source=pred_cds, legend='Predicted Quakes 2017')
tile_provider = get_provider(Vendors.CARTODBPOSITRON)
p.add_tile(tile_provider)
p.title.align='center'
p.title.text_font_size='20pt'
p.title.text_font='serif'
p.legend.location='bottom_right'
p.legend.background_fill_color='black'
p.legend.background_fill_alpha=0.8
p.legend.click_policy='hide'
p.legend.label_text_color='white'
p.xaxis.visible=False
p.yaxis.visible=False
p.axis.axis_label=None
p.axis.visible=False
p.grid.grid_line_color=None
show(p)
df_quakes_2016['Magnitude']
plotMap()
def freqgraph():
#Frequency of Earthquake By Year
def plotBar():
        cds = ColumnDataSource(data=dict(
            yrs=df_quake_freq['Year'].values.tolist(),
            numQuakes=df_quake_freq['Counts'].values.tolist()))
TOOLTIPS =[ ('Number of earthquakes','@numQuakes'),('Year','@yrs')]
barChart = figure(title='Frequency of Earthquakes by Year',
plot_height=400,
plot_width=1150,
x_axis_label='Years',
                y_axis_label='Number of Occurrences',
x_minor_ticks=2,
y_range=(0, df_quake_freq['Counts'].max() +100),
toolbar_location=None,
tooltips=TOOLTIPS)
print(cds)
barChart.vbar (x='yrs', bottom=0, top='numQuakes',
color='#cc0000', width=0.75,
legend='Year', source=cds)
barChart = style(barChart)
show(barChart)
return barChart
plotBar()
def maggraph():
def plotMagnitude():
        cds = ColumnDataSource(data=dict(
            yrs=df_quake_freq['Year'].sort_values().values.tolist(),
            avg_mag=df_quake_freq['avg(Magnitude)'].round(1).values.tolist(),
            max_mag=df_quake_freq['max(Magnitude)'].values.tolist()))
TOOLTIPS = [('Year', '@yrs'),('avg(Magnitude)', '@avg_mag'),('max(Magnitude)','@max_mag')]
mp = figure(title='Maximum and Average Magnitude by Year',
plot_width=1150,
plot_height=400,
x_axis_label='Years',
y_axis_label='Magnitude', y_range=(5, df_quake_freq[ 'max(Magnitude)'].max() + 1),
x_minor_ticks=2,
toolbar_location=None,
tooltips= TOOLTIPS)
mp.line(x='yrs', y='max_mag', color='#cc0000', line_width=2, legend= 'Max Magnitude', source=cds)
mp.circle(x='yrs', y='max_mag', color='#cc0000', size=8, fill_color='#cc0000', source=cds)
mp.line(x='yrs', y='avg_mag', color='yellow', line_width=2, legend = 'Avg Magnitude', source=cds)
mp.circle(x='yrs', y='avg_mag', color='yellow', size=8, fill_color='yellow', source=cds)
mp =style(mp)
show(mp)
return mp
plotMagnitude()
|
ramsundar07/Earthquake-Detetection-Analysis-using-Machine-Learning-Algorithms
|
GUI/gui_algorithm.py
|
gui_algorithm.py
|
py
| 8,670 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28969789667
|
import machine
import picoweb
import ujson
import ulogging as logging
import ure as re
import utime
from common import config
app = picoweb.WebApp(__name__)
hooks = {}
CONFIGURE_DEVICE_HOOK = 'CONFIGURE_WIFI'
CONFIGURE_AWS_HOOK = 'CONFIGURE_AWS'
CONFIGURE_SENSOR_HOOK = "CONFIGURE_SENSOR"
GET_STATUS_HOOK = 'GET_STATUS'
# API helpers
def create_success_response(data: dict):
return _create_common_response(data=data, status=0, status_text='ok')
def _create_common_response(data, status: int, status_text: str):
response_dict = {
'data': data,
'status': status,
'status_text': status_text,
}
encoded = ujson.dumps(response_dict)
return encoded
def create_failed_response(resp, status_text: str, status: int = 500):
return _create_common_response(data=None, status=status, status_text=status_text)
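# Every handler below wraps its payload in the same JSON envelope, e.g. (field order
# may vary with the ujson implementation):
#   create_success_response({"value": 42})
#   -> '{"data": {"value": 42}, "status": 0, "status_text": "ok"}'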
# modified picoweb's req.read_form_data:
def parse_post_body(req):
size = int(req.headers[b"Content-Length"])
data = yield from req.reader.readexactly(size)
data_txt = data.decode('utf-8')
return ujson.loads(data_txt)
# Requests handling
@app.route("/status")
def get_status(req, resp):
data = {"timestamp": utime.time()}
status = hooks[GET_STATUS_HOOK]()
for key in status.keys():
data[key] = status[key]
encoded = create_success_response(data=data)
yield from picoweb.start_response(resp, content_type="application/json")
yield from resp.awrite(encoded)
@app.route("/measurement")
def get_last_measurement(req, resp):
value = hooks['get_measurement_hook']()
data = {"value": value}
encoded = create_success_response(data=data)
yield from picoweb.start_response(resp, content_type="application/json")
yield from resp.awrite(encoded)
@app.route("/battery")
def get_battery(req, resp):
assert req.method == 'GET'
try:
battery_v = machine.ADC(machine.Pin(config.cfg.battery_voltage_pin))
battery_v.atten(machine.ADC.ATTN_11DB)
ADC_11DB_TO_VOLT = 0.000805664
voltage = battery_v.read() * ADC_11DB_TO_VOLT
voltage_divider_ratio = config.cfg.voltage_divider_r2_k / \
(config.cfg.voltage_divider_r1_k + config.cfg.voltage_divider_r2_k)
voltage = voltage / voltage_divider_ratio
except:
logging.info("Error reading battery voltage!")
voltage = 'ERROR'
data = {"voltage": voltage}
encoded = create_success_response(data=data)
yield from picoweb.start_response(resp, content_type="application/json")
yield from resp.awrite(encoded)
@app.route("/")
def index(req, resp):
print("route /")
headers = {"Location": "/web_pages/index.html"}
yield from picoweb.start_response(resp, status="303", headers=headers)
@app.route("/config")
def set_config(req, resp):
assert req.method == 'POST'
data = yield from parse_post_body(req)
print(data)
if 'wifi' in data.keys():
print(data['wifi'])
hooks[CONFIGURE_DEVICE_HOOK](data['wifi'])
if 'aws' in data.keys():
hooks[CONFIGURE_AWS_HOOK](data['aws'])
if 'sensor' in data.keys():
hooks[CONFIGURE_SENSOR_HOOK](data['sensor'])
config.cfg.save()
response_data = {'result': 'ok'}
encoded = create_success_response(data=response_data)
yield from picoweb.start_response(resp, content_type="application/json")
yield from resp.awrite(encoded)
@app.route(re.compile("/web_pages/(.+)"))
def get_static_file(req, resp):
print("Get static call")
file_path = '/web_server/web_pages/' + req.url_match.group(1)
logging.info('About to send file: ' + file_path)
yield from app.sendfile(resp, file_path)
@app.route(re.compile("/start_test_data_acquisition"))
def start_test_data_acquisition(req, resp):
hooks['start_test_data_acquisition']()
response_data = {'result': 'ok'}
encoded = create_success_response(data=response_data)
yield from picoweb.start_response(resp, content_type="application/json")
yield from resp.awrite(encoded)
@app.route(re.compile("/start_data_acquisition"))
def start_data_acquisition(req, resp):
hooks['start_data_acquisition']()
response_data = {'result': 'ok'}
encoded = create_success_response(data=response_data)
yield from picoweb.start_response(resp, content_type="application/json")
yield from resp.awrite(encoded)
# setup and run
def setup(get_measurement_hook=None,
configure_device_hook=None,
configure_aws_hook=None,
configure_sensor_hook=None,
get_status_hook=None,
start_test_data_acquisition=None,
start_data_acquisition=None):
global hooks
hooks['get_measurement_hook'] = get_measurement_hook
hooks[CONFIGURE_DEVICE_HOOK] = configure_device_hook
hooks[CONFIGURE_AWS_HOOK] = configure_aws_hook
hooks[CONFIGURE_SENSOR_HOOK] = configure_sensor_hook
hooks[GET_STATUS_HOOK] = get_status_hook
hooks['start_test_data_acquisition'] = start_test_data_acquisition
hooks['start_data_acquisition'] = start_data_acquisition
def run():
global app
global hooks
if not hooks:
raise Exception('Please setup server with hooks first!')
logging.info('About to start server...')
app.run(debug=1, port=80, host='0.0.0.0')
def stop_server():
app.stop_server()
|
wizzdev-pl/iot-starter
|
MicroPython/src/web_server/web_app.py
|
web_app.py
|
py
| 5,324 |
python
|
en
|
code
| 7 |
github-code
|
6
|
43224823277
|
from javax import swing
from java.awt import BorderLayout, Dimension
from java.awt.event import KeyEvent
from javax.swing import JFrame, JScrollPane, JPanel, JTable, JList, ListSelectionModel
from javax.swing.event import ListSelectionListener
from javax.swing.table import DefaultTableModel
class TableApp:
def make_ui(self):
frame = JFrame("Table demo")
frame.setDefaultCloseOperation(JFrame.DISPOSE_ON_CLOSE)
frame.setLayout(BorderLayout())
panel = JPanel()
scrollPane = JScrollPane()
self.table = self.createTable()
jlist = JList(range(1, 4))
panel.add(jlist)
panel.add(self.table)
scrollPane.setColumnHeaderView(self.table.getTableHeader())
scrollPane.getViewport().setView(panel)
class MyListSelectionListener(ListSelectionListener):
def valueChanged(listenerSelf, event):
if event.getValueIsAdjusting():
return
self.table.addRowSelectionInterval(event.getFirstIndex(), event.getFirstIndex())
for column in range(self.table.getColumnCount()):
self.table.changeSelection(event.getFirstIndex(), column, False, True)
jlist.addListSelectionListener(MyListSelectionListener())
scrollPane.setRowHeaderView(jlist)
panel.setOpaque(True)
frame.getContentPane().add(scrollPane, BorderLayout.CENTER)
frame.setPreferredSize(Dimension(300,100))
frame.pack()
frame.setVisible(True)
def createTable(self):
data = [ ['Tom'] * 20, ['Dick'] * 20, ['Harry'] * 20 ]
columns = tuple([ "Name" + str(i) for i in range(1, 21) ])
model = DefaultTableModel(data, columns)
table = JTable(model)
table.setSelectionMode(ListSelectionModel.MULTIPLE_INTERVAL_SELECTION)
table.setCellSelectionEnabled(True)
return table
def createList(self):
return JList(range(1, 4))
@staticmethod
def main():
app = TableApp()
app.make_ui()
TableApp.main()
|
texttest/storytext-selftest
|
swing/tables/row_header_list_select_all_cells/target_ui.py
|
target_ui.py
|
py
| 2,132 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7268607481
|
def serialize_categories(categories) -> list:
output = []
for category in categories:
categories_data = {
"id": category.id,
"name": category.name,
"description": category.description,
"tasks": []
}
for task in category.tasks:
task_data = {
"id": task.id,
"name": task.name,
"description": task.description,
"duration": task.duration,
"classification": task.type
}
categories_data["tasks"].append(task_data)
output.append(categories_data)
return output
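# Illustrative output shape (values are made up, not from the application's data):
# [{"id": 1, "name": "Work", "description": "...",
#   "tasks": [{"id": 3, "name": "Report", "description": "...",
#              "duration": 40, "classification": "important"}]}]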
|
Kenzie-Academy-Brasil-Developers/q3-sprint5-matriz-eisenhower-RobsonMT
|
app/services/serialize_categories_service.py
|
serialize_categories_service.py
|
py
| 691 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11702399982
|
import md5
import random
#from settings import SOLRSOLR_HOST, SOLR_PORT
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'j*zdirg7yy9@q1k=c*q!*kovfsd#$FDFfsdfkae#id04pyta=yz@w34m6rvwfe'
def generate_hash():
hash = md5.new()
hash.update("".join(map(lambda i: chr(random.randint(0, 255)), range(16))))
hash.update(SECRET_KEY)
key = hash.hexdigest()
return key
import urllib
import urllib2
url = 'http://127.0.0.1:8000/api/insert'
institutionnames = ["Hospital", "Medical Department", "Research Center", "Pharmacy", "Hopital", "Ziekenhuis" ]
databasenames =["heartattack", "cardiomyopathy", "Coronary heart disease", "Valvular heart disease", "Peripheral arterial disease"]
location = ["Paris", "Roterdao",
"Porto", "Aveiro", "Lisboa", "Faro", "Portimao", "Brussels", "London",
"Barcelona", "Heildeberg", "Stuttgard", "Lens"]
data = {
'contact_technical_t':'IEETA',
'created_t':'2013-04-17 12:09:32.334053',
'location_t':'Aveiro',
'institution_name_t':'IEETA',
'contact_scientific_t':'[email protected]',
'contact_administrative_t':'[email protected]',
'type_t':'researchcohorts',
'id':'a10815736f733d04d8e0aa65fe37',
'user_t' :'bastiao',
'text_t': 'ieeta ieeta bastiao emif cardiomyopathy Coronary heart attack',
'total_number_subjects_t': '20000',
'ethical_committee_t': '[No]',
'publically_doc_procedure_t': '[No]',
'number_active_patients_jan2012_t': '200',
'average_time_follow_up_t': '130',
'assess_prevalence_prevalence_t': 'Brufen Beneron',
'literature_papers_t': "Luis A. Bastiao Silva, Carlos Costa, Jose Luis Olveira. A Secure PACS Cloud Archive in CARS 2011, Berlin, Germany ",
'population_description_t':'Fat, Headcache'}
import requests
requests.post("http://127.0.0.1:8000/api/insert", data)
#import pysolr
#import random
#solr = pysolr.Solr('http://' + SOLRSOLR_HOST + ':' + SOLR_PORT+ '/solr', timeout=10)
#for i in range(10):
# index_db = random.randint(1, len(databasenames))
# index_institutionnames = random.randint(1, len(institutionnames))
# index_locations = random.randint(1, len(location))
# data['database_name_t'] = institutionnames[index_institutionnames-1] + " " + location[index_locations-1] + " " +databasenames[index_db-1]
# data['location_t'] = location[index_locations]
# data['id'] = generate_hash()
# solr.add([data])
# solr.optimize()
#curl -v -H "Accept: application/json" -H "Content-type: application/json" -X POST -d ' {"user":{"first_name":"firstname","last_name":"lastname","email":"[email protected]","password":"app123","password_confirmation":"app123"}}' http://127.0.0.1:8000/api/insert
|
bioinformatics-ua/montra
|
emif/insert_script.py
|
insert_script.py
|
py
| 2,771 |
python
|
en
|
code
| 7 |
github-code
|
6
|
21981363351
|
# -*- coding: utf-8 -*-
import unittest
import os
import time
os.environ["TESTING_LDT"] = "TRUE"
import ldt
from ldt.helpers.ignore import ignore_warnings
class Tests(unittest.TestCase):
'''
The tests in this block inspect the MetaDictionary functionality:
combining WordNet and Wiktionary data.
'''
@classmethod
@ignore_warnings
def setUpClass(cls):
"""Setting up the test variables."""
cls.test_dict_fr = ldt.dicts.morphology.meta.MorphMetaDict(
language="French", cache=False)
cls.test_dict_en = ldt.dicts.morphology.meta.MorphMetaDict(
language="English", cache=False, custom_base="wiktionary")
@classmethod
def tearDownClass(cls):
"""Clearning up the test variables."""
cls.test_dict_fr = None
        cls.test_dict_en = None
@ignore_warnings
def test_metadictionary_initialization(self):
self.assertEqual(self.test_dict_fr.wiktionary.language, "fr")
@ignore_warnings
def test_metadictionary_initialization_wn(self):
with self.assertRaises(AttributeError):
self.test_dict_fr.wn.language
@ignore_warnings
def test_metadictionary_wn(self):
self.assertEqual(self.test_dict_en.wordnet._language, "en")
@ignore_warnings
def test_metadictionary_order(self):
self.assertEqual(self.test_dict_en._order[0], "wordnet")
@ignore_warnings
def test_metadictionary_minimal(self):
self.assertEqual(self.test_dict_en.is_a_word("cat", minimal=True),
["wordnet"])
@ignore_warnings
def test_metadictionary_get_pos(self):
test_dict = ldt.dicts.morphology.meta.MorphMetaDict(order=[
"wordnet"], custom_base="wordnet")
res = test_dict.get_pos("nouned")
self.assertEqual(res, ["verb"])
@ignore_warnings
    def test_metadictionary_lemmatize_wordnet(self):
test_dict = ldt.dicts.morphology.meta.MorphMetaDict(order=[
"wordnet"], custom_base="wordnet")
res = test_dict.lemmatize("nouned")
self.assertEqual(res, ["noun"])
@ignore_warnings
def test_metadictionary_lemmatize(self):
test_dict = ldt.dicts.morphology.meta.MorphMetaDict(order=["wordnet",
"wiktionary"], custom_base="wiktionary")
res = test_dict.lemmatize("GPUs")
self.assertEqual(res, ["GPU"])
#
# @ignore_warnings
# def test_metadictionary_lemmatize(self):
# self.assertEqual(test_dict_en.is_a_word("cat", minimal=True), ["wordnet"])
# @ignore_warnings
# def test_metadictionary_max(self):
# res = test_dict_en.is_a_word("cat", minimal=False)
# self.assertTrue(len(res) > 1)
#
# @ignore_warnings
# def test_metadictionary_is_a_word(self):
# time.sleep(0.5)
# self.assertTrue(test_dict_fr.is_a_word("chatte"))
#
# @ignore_warnings
# def test_metadictionary_relations(self):
# time.sleep(0.5)
# res = test_dict_en.get_relations("white", relations="main")
# worked = "unclean" in res["antonyms"] and "nonwhite" in res["antonyms"]
# self.assertTrue(worked)
#
# @ignore_warnings
# def test_metadictionary_relation(self):
# time.sleep(0.5)
# res = test_dict_en.get_relation("white", relation="antonyms")
# worked = "unclean" in res and "nonwhite" in res
# self.assertTrue(worked)
if __name__ == '__main__':
unittest.main()
|
annargrs/ldt
|
ldt/tests/dicts/morphology/test_meta.py
|
test_meta.py
|
py
| 3,709 |
python
|
en
|
code
| 16 |
github-code
|
6
|
72568366267
|
#!/usr/bin/env python
import seaborn
import numpy as np
import os
from collections import OrderedDict
import pandas as pd
import matplotlib.pyplot as plt
import sys
import json
from termcolor import cprint
# Load data
# Global vars for tracking and labeling data at load time.
exp_idx = 0
label_parser_dict = None
smooth_factor = 10
leg_size = 30
subsample_step = 1
load_subsample_step = 50
default_colors = ["blue","orange","green","magenta", "brown", "red",'black',"grey",u'#ff7f0e',
"cyan", "pink",'purple', u'#1f77b4',
"darkorchid","sienna","lightpink", "indigo","mediumseagreen",'aqua',
'deeppink','silver','khaki','goldenrod','y','y','y','y','y','y','y','y','y','y','y','y' ] + ['y']*50
def get_all_runs(logdir, load_subsample_step=1):
"""
Recursively look through logdir for output files produced by
Assumes that any file "progress.txt" is a valid hit.
"""
global exp_idx
global units
datasets = []
for root, _, files in os.walk(logdir):
if 'log.csv' in files:
run_name = root[8:]
exp_name = None
# try to load a config file containing hyperparameters
config = None
try:
config_path = open(os.path.join(root,'config.json'))
config = json.load(config_path)
if 'exp_name' in config:
exp_name = config['exp_name']
except:
print('No file named config.json')
exp_idx += 1
# load progress data
try:
print(os.path.join(root,'log.csv'))
exp_data = pd.read_csv(os.path.join(root,'log.csv'))
except:
raise ValueError("CSV {} faulty".format(os.path.join(root, 'log.csv')))
exp_data = exp_data[::load_subsample_step]
data_dict = exp_data.to_dict("list")
data_dict['config'] = config
nb_epochs = len(data_dict['frames'])
print('{} -> {}'.format(run_name, nb_epochs))
datasets.append(data_dict)
return datasets
def get_datasets(rootdir, load_only="", load_subsample_step=1, ignore_pattern="ignore"):
_, models_list, _ = next(os.walk(rootdir))
print(models_list)
for dir_name in models_list.copy():
# add "ignore" in a directory name to avoid loading its content
if ignore_pattern in dir_name or load_only not in dir_name:
models_list.remove(dir_name)
for expe_name in list(labels.keys()):
if expe_name not in models_list:
del labels[expe_name]
# setting per-model type colors
for i,m_name in enumerate(models_list):
for m_type, m_color in per_model_colors.items():
if m_type in m_name:
colors[m_name] = m_color
print("extracting data for {}...".format(m_name))
m_id = m_name
models_saves[m_id] = OrderedDict()
models_saves[m_id]['data'] = get_all_runs(rootdir+m_name, load_subsample_step=load_subsample_step)
print("done")
if m_name not in labels:
labels[m_name] = m_name
"""
retrieve all experiences located in "data to vizu" folder
"""
labels = OrderedDict()
per_model_colors = OrderedDict()
# per_model_colors = OrderedDict([('ALP-GMM',u'#1f77b4'),
# ('hmn','pink'),
# ('ADR','black')])
# LOAD DATA
models_saves = OrderedDict()
colors = OrderedDict()
static_lines = {}
# get_datasets("storage/",load_only="RERUN_WizardGuide")
# get_datasets("storage/",load_only="RERUN_WizardTwoGuides")
try:
figure_id = eval(sys.argv[1])
except:
figure_id = sys.argv[1]
print("fig:", figure_id)
if figure_id == 0:
# train change
env_type = "No_NPC_environment"
fig_type = "train"
get_datasets("storage/", "RERUN_WizardGuide_lang64_mm", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardGuide_lang64_deaf_no_explo", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardGuide_lang64_no_explo", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardGuide_lang64_curr_dial", load_subsample_step=load_subsample_step)
top_n = 16
elif figure_id == 1:
# arch change
env_type = "No_NPC_environment"
fig_type = "arch"
get_datasets("storage/", "RERUN_WizardGuide_lang64_mm", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardGuide_lang64_bow", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardGuide_lang64_no_mem", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardGuide_lang64_bigru", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardGuide_lang64_attgru", load_subsample_step=load_subsample_step)
top_n = 16
elif figure_id == 2:
# train change FULL
env_type = "FULL_environment"
fig_type = "train"
get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_mm", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_deaf_no_explo", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_no_explo", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_curr_dial", load_subsample_step=load_subsample_step)
top_n = 16
elif figure_id == 3:
# arch change FULL
env_type = "FULL_environment"
fig_type = "arch"
get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_mm", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_bow", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_no_mem", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_bigru", load_subsample_step=load_subsample_step)
get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_attgru", load_subsample_step=load_subsample_step)
top_n = 16
elif str(figure_id) == "ShowMe":
get_datasets("storage/", "20-05_NeurIPS_ShowMe_ABL_CEB", load_subsample_step=load_subsample_step, ignore_pattern="tanh_0.3")
get_datasets("storage/", "20-05_NeurIPS_ShowMe_NO_BONUS_ABL", load_subsample_step=load_subsample_step)
get_datasets("storage/", "20-05_NeurIPS_ShowMe_CEB", load_subsample_step=load_subsample_step, ignore_pattern="tanh_0.3")
get_datasets("storage/", "20-05_NeurIPS_ShowMe_NO_BONUS_env", load_subsample_step=load_subsample_step)
label_parser_dict = {
"20-05_NeurIPS_ShowMe_ABL_CEB" : "ShowMe_exp_bonus_no_social_skills_required",
"20-05_NeurIPS_ShowMe_NO_BONUS_ABL" : "ShowMe_no_bonus_no_social_skills_required",
"20-05_NeurIPS_ShowMe_CEB" : "ShowMe_exp_bonus",
"20-05_NeurIPS_ShowMe_NO_BONUS_env" : "ShowMe_no_bonus",
}
env_type = str(figure_id)
fig_type = "test"
top_n = 16
elif str(figure_id) == "Help":
# env_type = "Bobo"
# get_datasets("storage/", "Bobo")
get_datasets("storage/", "24-05_NeurIPS_Help", load_subsample_step=load_subsample_step, ignore_pattern="ABL")
# get_datasets("storage/", "26-05_NeurIPS_gpu_Help_NoSocial_NO_BONUS_ABL", load_subsample_step=load_subsample_step)
get_datasets("storage/", "26-05_NeurIPS_gpu_Help_NoSocial_NO_BONUS_env", load_subsample_step=load_subsample_step)
label_parser_dict = {
"Help_NO_BONUS_env": "PPO",
"Help_BONUS_env": "PPO+Explo",
# "Help_NO_BONUS_ABL_env": "ExiterRole_no_bonus_no_NPC",
# "Help_BONUS_ABL_env": "ExiterRole_bonus_no_NPC",
"26-05_NeurIPS_gpu_Help_NoSocial_NO_BONUS_env": "Unsocial PPO",
# "26-05_NeurIPS_gpu_Help_NoSocial_NO_BONUS_ABL": "ExiterRole_Insocial_ABL"
}
static_lines = {
"PPO (helper)": (0.12, 0.05, "#1f77b4"),
"PPO+Explo (helper)": (0.11, 0.04, "indianred"),
# "Help_exp_bonus": (0.11525, 0.04916 , default_colors[2]),
# "HelperRole_ABL_no_exp_bonus": (0.022375, 0.01848, default_colors[3]),
"Unsocial PPO (helper)": (0.15, 0.06, "grey"),
# "HelperRole_ABL_Insocial": (0.01775, 0.010544, default_colors[4]),
}
env_type = str(figure_id)
fig_type = "test"
top_n = 16
elif str(figure_id) == "TalkItOut":
print("You mean Polite")
exit()
elif str(figure_id) == "TalkItOutPolite":
# env_type = "TalkItOut"
# get_datasets("storage/", "ORIENT_env_MiniGrid-TalkItOut")
# env_type = "GuideThief"
# get_datasets("storage/", "GuideThief")
# env_type = "Bobo"
# get_datasets("storage/", "Bobo")
get_datasets("storage/", "20-05_NeurIPS_TalkItOutPolite", load_subsample_step=load_subsample_step)
# get_datasets("storage/", "21-05_NeurIPS_small_bonus_TalkItOutPolite")
get_datasets("storage/", "26-05_NeurIPS_gpu_TalkItOutPolite_NoSocial_NO_BONUS_env", load_subsample_step=load_subsample_step)
get_datasets("storage/", "26-05_NeurIPS_gpu_TalkItOutPolite_NoSocial_NO_BONUS_NoLiar", load_subsample_step=load_subsample_step)
label_parser_dict = {
"TalkItOutPolite_NO_BONUS_env": "PPO",
"TalkItOutPolite_e": "PPO+Explo",
"TalkItOutPolite_NO_BONUS_NoLiar": "PPO (no liar)",
"TalkItOutPolite_NoLiar_e": "PPO+Explo (no liar)",
"26-05_NeurIPS_gpu_TalkItOutPolite_NoSocial_NO_BONUS_env": "Unsocial PPO",
"26-05_NeurIPS_gpu_TalkItOutPolite_NoSocial_NO_BONUS_NoLiar": "Unsocial PPO (no liar)",
}
env_type = str(figure_id)
fig_type = "test"
top_n = 16
elif str(figure_id) == "DiverseExit":
get_datasets("storage/", "24-05_NeurIPS_DiverseExit", load_subsample_step=load_subsample_step)
get_datasets("storage/", "26-05_NeurIPS_gpu_DiverseExit", load_subsample_step=load_subsample_step)
label_parser_dict = {
"DiverseExit_NO_BONUS": "No_bonus",
"DiverseExit_BONUS": "BOnus",
"gpu_DiverseExit_NoSocial": "No_social",
}
env_type = str(figure_id)
fig_type = "test"
top_n = 16
else:
get_datasets("storage/", str(figure_id), load_subsample_step=load_subsample_step)
env_type = str(figure_id)
fig_type = "test"
top_n = 8
#### get_datasets("storage/", "RERUN_WizardGuide_lang64_nameless")
#### get_datasets("storage/", "RERUN_WizardTwoGuides_lang64_nameless")
if per_model_colors: # order runs for legend order as in per_models_colors, with corresponding colors
ordered_labels = OrderedDict()
for teacher_type in per_model_colors.keys():
for k,v in labels.items():
if teacher_type in k:
ordered_labels[k] = v
labels = ordered_labels
else:
print('not using per_model_color')
for k in models_saves.keys():
labels[k] = k
def plot_with_shade(subplot_nb, ax,x,y,err,color,shade_color,label,
y_min=None,y_max=None, legend=False, leg_size=30, leg_loc='best', title=None,
ylim=[0,100], xlim=[0,40], leg_args={}, leg_linewidth=13.0, linewidth=10.0, ticksize=20,
zorder=None, xlabel='perf',ylabel='env steps'):
#plt.rcParams.update({'font.size': 15})
ax.locator_params(axis='x', nbins=4)
ax.locator_params(axis='y', nbins=3)
ax.tick_params(axis='both', which='major', labelsize=ticksize)
ax.plot(x,y, color=color, label=label,linewidth=linewidth,zorder=zorder)
ax.fill_between(x,y-err,y+err,color=shade_color,alpha=0.2)
if legend:
leg = ax.legend(loc=leg_loc, **leg_args) #34
for legobj in leg.legendHandles:
legobj.set_linewidth(leg_linewidth)
ax.set_xlabel(xlabel, fontsize=30)
if subplot_nb == 0:
ax.set_ylabel(ylabel, fontsize=30,labelpad=-4)
ax.set_xlim(xmin=xlim[0],xmax=xlim[1])
ax.set_ylim(bottom=ylim[0],top=ylim[1])
if title:
ax.set_title(title, fontsize=22)
# Plot utils
def plot_with_shade_grg(subplot_nb, ax,x,y,err,color,shade_color,label,
y_min=None,y_max=None, legend=False, leg_size=30, leg_loc='best', title=None,
ylim=[0,100], xlim=[0,40], leg_args={}, leg_linewidth=13.0, linewidth=10.0, ticksize=20,
zorder=None, xlabel='perf',ylabel='env steps', linestyle="-"):
#plt.rcParams.update({'font.size': 15})
ax.locator_params(axis='x', nbins=4)
ax.locator_params(axis='y', nbins=3)
ax.tick_params(axis='both', which='major', labelsize=ticksize)
ax.plot(x, y, color=color, label=label,linewidth=linewidth,zorder=zorder, linestyle=linestyle)
ax.fill_between(x, y-err, y+err,color=shade_color,alpha=0.2)
if legend:
leg = ax.legend(loc=leg_loc, **leg_args) #34
for legobj in leg.legendHandles:
legobj.set_linewidth(leg_linewidth)
ax.set_xlabel(xlabel, fontsize=30)
if subplot_nb == 0:
ax.set_ylabel(ylabel, fontsize=30, labelpad=-4)
ax.set_xlim(xmin=xlim[0],xmax=xlim[1])
ax.set_ylim(bottom=ylim[0],top=ylim[1])
if title:
ax.set_title(title, fontsize=22)
# Metric plot
metric = 'bin_extrinsic_return_mean'
# metric = 'mission_string_observed_mean'
# metric = 'extrinsic_return_mean'
# metric = 'extrinsic_return_max'
# metric = "rreturn_mean"
# metric = 'rreturn_max'
# metric = 'FPS'
f, ax = plt.subplots(1, 1, figsize=(10.0, 6.0))
ax = [ax]
max_y = -np.inf
min_y = np.inf
# hardcoded
min_y, max_y = 0.0, 1.0
max_steps = 0
exclude_patterns = []
include_patterns = []
def label_parser(label, figure_id, label_parser_dict=None):
if label_parser_dict:
if sum([1 for k, v in label_parser_dict.items() if k in label]) != 1:
if label in label_parser_dict:
# see if there is an exact match
return label_parser_dict[label]
else:
print("ERROR multiple curves match a lable and there is no exact match")
print(label)
exit()
for k, v in label_parser_dict.items():
if k in label: return v
else:
# return label.split("_env_")[1]
if figure_id not in [1,2,3,4]:
return label
else:
label_parser_dict = {
"RERUN_WizardGuide_lang64_no_explo": "MH-BabyAI",
"RERUN_WizardTwoGuides_lang64_no_explo": "MH-BabyAI",
"RERUN_WizardGuide_lang64_mm_baby_short_rec_env": "MH-BabyAI-ExpBonus",
"RERUN_WizardTwoGuides_lang64_mm_baby_short_rec_env": "MH-BabyAI-ExpBonus",
"RERUN_WizardGuide_lang64_deaf_no_explo": "Deaf-MH-BabyAI",
"RERUN_WizardTwoGuides_lang64_deaf_no_explo": "Deaf-MH-BabyAI",
"RERUN_WizardGuide_lang64_bow": "MH-BabyAI-ExpBonus-BOW",
"RERUN_WizardTwoGuides_lang64_bow": "MH-BabyAI-ExpBonus-BOW",
"RERUN_WizardGuide_lang64_no_mem": "MH-BabyAI-ExpBonus-no-mem",
"RERUN_WizardTwoGuides_lang64_no_mem": "MH-BabyAI-ExpBonus-no-mem",
"RERUN_WizardGuide_lang64_bigru": "MH-BabyAI-ExpBonus-bigru",
"RERUN_WizardTwoGuides_lang64_bigru": "MH-BabyAI-ExpBonus-bigru",
"RERUN_WizardGuide_lang64_attgru": "MH-BabyAI-ExpBonus-attgru",
"RERUN_WizardTwoGuides_lang64_attgru": "MH-BabyAI-ExpBonus-attgru",
"RERUN_WizardGuide_lang64_curr_dial": "MH-BabyAI-ExpBonus-current-dialogue",
"RERUN_WizardTwoGuides_lang64_curr_dial": "MH-BabyAI-ExpBonus-current-dialogue",
"RERUN_WizardTwoGuides_lang64_mm_baby_short_rec_100M": "MH-BabyAI-ExpBonus-100M"
}
if sum([1 for k, v in label_parser_dict.items() if k in label]) != 1:
print("ERROR multiple curves match a lable")
print(label)
exit()
for k, v in label_parser_dict.items():
if k in label: return v
return label
per_seed=False
for i, m_id in enumerate(models_saves.keys()):
#excluding some experiments
if any([ex_pat in m_id for ex_pat in exclude_patterns]):
continue
if len(include_patterns) > 0:
if not any([in_pat in m_id for in_pat in include_patterns]):
continue
runs_data = models_saves[m_id]['data']
ys = []
# DIRTY FIX FOR FAULTY LOGGING
print("m_id:", m_id)
if runs_data[0]['frames'][1] == 'frames':
runs_data[0]['frames'] = list(filter(('frames').__ne__, runs_data[0]['frames']))
###########################################
# determine minimal run length across seeds
minimum = sorted([len(run['frames']) for run in runs_data if len(run['frames'])])[-top_n]
min_len = np.min([len(run['frames']) for run in runs_data if len(run['frames']) >= minimum])
# min_len = np.min([len(run['frames']) for run in runs_data if len(run['frames']) > 10])
print("min_len:", min_len)
#compute env steps (x axis)
longest_id = np.argmax([len(rd['frames']) for rd in runs_data])
    steps = np.array(runs_data[longest_id]['frames'], dtype=int) / 1000000
steps = steps[:min_len]
for run in runs_data:
data = run[metric]
# DIRTY FIX FOR FAULTY LOGGING (headers in data)
if data[1] == metric:
data = np.array(list(filter((metric).__ne__, data)), dtype=np.float16)
###########################################
if len(data) >= min_len:
if len(data) > min_len:
print("run has too many {} datapoints ({}). Discarding {}".format(m_id, len(data),
len(data)-min_len))
data = data[0:min_len]
ys.append(data)
ys_same_len = ys # RUNS MUST HAVE SAME LEN
# computes stats
n_seeds = len(ys_same_len)
sems = np.std(ys_same_len,axis=0)/np.sqrt(len(ys_same_len)) # sem
stds = np.std(ys_same_len,axis=0) # std
means = np.mean(ys_same_len,axis=0)
color = default_colors[i]
# per-metric adjusments
ylabel=metric
if metric == 'bin_extrinsic_return_mean':
ylabel = "success rate"
if metric == 'duration':
ylabel = "time (hours)"
means = means / 3600
sems = sems / 3600
stds = stds / 3600
#plot x y bounds
curr_max_y = np.max(means)
curr_min_y = np.min(means)
curr_max_steps = np.max(steps)
if curr_max_y > max_y:
max_y = curr_max_y
if curr_min_y < min_y:
min_y = curr_min_y
if curr_max_steps > max_steps:
max_steps = curr_max_steps
if subsample_step:
steps = steps[0::subsample_step]
means = means[0::subsample_step]
stds = stds[0::subsample_step]
sems = sems[0::subsample_step]
ys_same_len = [y[0::subsample_step] for y in ys_same_len]
    # display seeds separately
if per_seed:
for s_i, seed_ys in enumerate(ys_same_len):
seed_c = default_colors[i+s_i]
label = m_id#+"(s:{})".format(s_i)
            plot_with_shade(0, ax[0], steps, seed_ys, stds*0, seed_c, seed_c, label,
                            legend=False, xlim=[0, max_steps], ylim=[min_y, max_y],
                            leg_size=leg_size, xlabel="env steps (millions)", ylabel=ylabel,
                            )
else:
label = label_parser(m_id, figure_id, label_parser_dict=label_parser_dict)
label = label #+"({})".format(n_seeds)
def smooth(x_, n=50):
if type(x_) == list:
x_ = np.array(x_)
return np.array([x_[max(i - n, 0):i + 1].mean() for i in range(len(x_))])
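        # smooth() is a trailing running mean over the last n+1 points; for example
        # smooth(np.array([1., 2., 3., 4.]), n=1) returns array([1., 1.5, 2.5, 3.5]).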
if smooth_factor:
means = smooth(means,smooth_factor)
stds = smooth(stds,smooth_factor)
x_lim = 30
if figure_id == "TalkItOutPolite":
leg_args = {
'ncol': 1,
'columnspacing': 1.0,
'handlelength': 1.0,
'frameon': False,
# 'bbox_to_anchor': (0.00, 0.23, 0.10, .102),
'bbox_to_anchor': (0.55, 0.35, 0.10, .102),
'labelspacing': 0.2,
'fontsize': 27
}
elif figure_id == "Help":
leg_args = {
'ncol': 1,
'columnspacing': 1.0,
'handlelength': 1.0,
'frameon': False,
# 'bbox_to_anchor': (0.00, 0.23, 0.10, .102),
'bbox_to_anchor': (0.39, 0.20, 0.10, .102),
'labelspacing': 0.2,
'fontsize': 27
}
else:
leg_args = {}
color_code = dict([
('PPO+Explo', 'indianred'),
('PPO', "#1f77b4"),
('Unsocial PPO', "grey"),
('PPO (no liar)', "#043252"),
('PPO+Explo (no liar)', "darkred"),
('Unsocial PPO (no liar)', "black"),
('PPO+Explo (helper)', 'indianred'),
('PPO (helper)', "#1f77b4"),
('Unsocial PPO (helper)', "grey")]
)
color = color_code.get(label, np.random.choice(default_colors))
print("C:",color)
plot_with_shade_grg(
0, ax[0], steps, means, stds, color, color, label,
legend=True,
xlim=[0, steps[-1] if not x_lim else x_lim],
ylim=[0, 1.0], xlabel="env steps (millions)", ylabel=ylabel, title=None,
leg_args =leg_args)
#
# plot_with_shade(0, ax[0], steps, means, stds, color, color,label,
# legend=True, xlim=[0, max_steps], ylim=[min_y, max_y],
# leg_size=leg_size, xlabel="Env steps (millions)", ylabel=ylabel, linewidth=5.0, smooth_factor=smooth_factor)
for label, (mean, std, color) in static_lines.items():
plot_with_shade_grg(
0, ax[0], steps, np.array([mean]*len(steps)), np.array([std]*len(steps)), color, color, label,
legend=True,
xlim=[0, max_steps],
ylim=[0, 1.0],
xlabel="env steps (millions)", ylabel=ylabel, linestyle=":",
leg_args=leg_args)
plt.tight_layout()
f.savefig('graphics/{}_results.svg'.format(str(figure_id)))
f.savefig('graphics/{}_results.png'.format(str(figure_id)))
plt.show()
|
flowersteam/social-ai
|
data_analysis_neurips.py
|
data_analysis_neurips.py
|
py
| 22,418 |
python
|
en
|
code
| 5 |
github-code
|
6
|
33038113455
|
"""Config flow for Withings."""
import logging
import voluptuous as vol
from withings_api.common import AuthScope
from homeassistant import config_entries
from homeassistant.components.withings import const
from homeassistant.helpers import config_entry_oauth2_flow
_LOGGER = logging.getLogger(__name__)
@config_entries.HANDLERS.register(const.DOMAIN)
class WithingsFlowHandler(config_entry_oauth2_flow.AbstractOAuth2FlowHandler):
"""Handle a config flow."""
DOMAIN = const.DOMAIN
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
_current_data = None
@property
def logger(self) -> logging.Logger:
"""Return logger."""
return logging.getLogger(__name__)
@property
def extra_authorize_data(self) -> dict:
"""Extra data that needs to be appended to the authorize url."""
return {
"scope": ",".join(
[
AuthScope.USER_INFO.value,
AuthScope.USER_METRICS.value,
AuthScope.USER_ACTIVITY.value,
]
)
}
async def async_oauth_create_entry(self, data: dict) -> dict:
"""Override the create entry so user can select a profile."""
self._current_data = data
return await self.async_step_profile(data)
async def async_step_profile(self, data: dict) -> dict:
"""Prompt the user to select a user profile."""
profile = data.get(const.PROFILE)
if profile:
new_data = {**self._current_data, **{const.PROFILE: profile}}
self._current_data = None
return await self.async_step_finish(new_data)
profiles = self.hass.data[const.DOMAIN][const.CONFIG][const.PROFILES]
return self.async_show_form(
step_id="profile",
data_schema=vol.Schema({vol.Required(const.PROFILE): vol.In(profiles)}),
)
async def async_step_finish(self, data: dict) -> dict:
"""Finish the flow."""
self._current_data = None
return self.async_create_entry(title=data[const.PROFILE], data=data)
|
84KaliPleXon3/home-assistant-core
|
homeassistant/components/withings/config_flow.py
|
config_flow.py
|
py
| 2,112 |
python
|
en
|
code
| 1 |
github-code
|
6
|
16106094065
|
from .color import cm_benj
def setup_args_scanpy(ap):
ap.add_argument("-f", "--figdir", default="figures")
ap.add_argument("--dpi", type=int, default=600)
ap.add_argument("--frameon", dest="frameon", action="store_true")
ap.add_argument("--no-frameon", dest="frameon", action="store_false")
ap.add_argument("--verbosity", type=int, default=2)
ap.set_defaults(frameon=True)
return ap
def setup_scanpy(**args):
import scanpy as sc
if args.get("figdir"):
sc.settings.figdir = args["figdir"]
cm = cm_benj()
if args.get("dpi"):
dpi = args["dpi"]
else:
dpi = 600
frameon = True
if args.get("frameon") and isinstance(args["frameon"], bool):
frameon = args["frameon"]
if args.get("verbosity") and isinstance(args["verbosity"], int):
sc.settings.verbosity = args["verbosity"]
else:
sc.settings.verbosity = 2
sc.set_figure_params(dpi_save=dpi, frameon=frameon)
return args
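# Minimal wiring sketch (assumes the caller builds the parser with argparse):
#   import argparse
#   ap = setup_args_scanpy(argparse.ArgumentParser())
#   args = setup_scanpy(**vars(ap.parse_args()))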
|
KellisLab/benj
|
benj/setup_scanpy.py
|
setup_scanpy.py
|
py
| 986 |
python
|
en
|
code
| 2 |
github-code
|
6
|
4330376900
|
def solution(arr1, arr2):
answer = []
for i in range(len(arr1)):
a1 = arr1.pop(0)
a2 = arr2.pop(0)
num = [x + y for x, y in zip(a1, a2)]
answer.append(num)
return answer
arr1 = [[1, 2], [2, 3]]
arr2 = [[3, 4], [5, 6]]
arr3 = [[1], [2]]
arr4 = [[3], [4]]
print(solution(arr1, arr2))
print(solution(arr3, arr4))
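# An equivalent non-mutating version (sketch) would zip the two matrices directly:
#   def solution(arr1, arr2):
#       return [[x + y for x, y in zip(r1, r2)] for r1, r2 in zip(arr1, arr2)]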
|
wold21/python_algorithm
|
프로그래머스/코딩테스트/Level1/행렬의 덧셈.py
|
행렬의 덧셈.py
|
py
| 359 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40139751051
|
from PIL import Image
img = Image.open("UnoDeck2.png")
i = 0
for y in range(6):
j = 2
k = 3
for x in range(12):
if i == 64:
break
left = 0
top = 0
height = 256
width = 166
box = (width+j)*x, (height+k)*y, width*(x+1)+(j*x), height*(y+1)+(k*y)
area = img.crop(box)
cardName = "Card" + str(i)+ ".png"
area.save((cardName),"PNG")
i += 1
|
CrazyScorcer/ImageCutter
|
imageCut.py
|
imageCut.py
|
py
| 472 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26485545082
|
######## TensorFlow Image Classifier #########
#
# Author: Erik Handeland Date: 12/12/2021
# Description: This program uses a TensorFlow Lite object detection model to
#   perform object detection on an image. It creates a dict containing a
#   list of detected objects and the count for each object. It also saves a copy
#   of the image with boxes and scores drawn around the objects of interest.
#
# This code is based off the TensorFlow Lite image classification example at:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/examples/python/label_image.py
# And the following github repo by Evan Juras:
# https://github.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi
#
# Import packages
import os
from os.path import exists
import cv2
import numpy as np
import importlib.util
from tflite_support import metadata
from from_root import from_root
# Import TensorFlow libraries
# If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow
# If using Coral Edge TPU, import the load_delegate library
pkg = importlib.util.find_spec('tflite_runtime')
if pkg:
from tflite_runtime.interpreter import Interpreter
else:
from tensorflow.lite.python.interpreter import Interpreter
# Extract metadata from the .tflite file
def load_metadata_labels(PATH_TO_MODEL):
label_list = []
try:
displayer = metadata.MetadataDisplayer.with_model_file(PATH_TO_MODEL)
file_name = displayer.get_packed_associated_file_list()[0]
except ValueError:
        # The model does not have metadata.
return label_list
if file_name:
label_map_file = displayer.get_associated_file_buffer(file_name).decode()
label_list = list(filter(len, label_map_file.splitlines()))
return label_list
def load_labels(PATH_TO_GRAPH, PATH_TO_LABELS):
# Load label list from metadata or from labelmap file
label_list = load_metadata_labels(PATH_TO_GRAPH)
if not label_list: # DEPRECATED this is the old way of loading labels, new ML models should have it as metadata
if not exists(PATH_TO_LABELS):
print("No labelmap in metadata and no labelmap.txt found! at path: " + PATH_TO_LABELS)
return {
"error": "No labelmap found",
"vehicles": -1,
"pedestrians": -1,
"confidence-threshold": 0.50,
"objects": [],
}
# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
label_list = [line.strip() for line in f.readlines()]
return label_list
# MODEL_NAME: should be the name of a directory in the models directory
# IMG_PATH: should be the full path to your target image
# COORDS: Whether to return coordinates of detected objects
# MIN_CONF_LEVEL: is the minimum confidence level to be considered a detection 0-1
# PATH_TO_GRAPH & LABELMAP_NAME: Name of the .tflite file and the labelmap file. Defaults should work for most cases
# SAVED_IMG_PATH: Directory to save the image with boxes and scores. If not specified, no image will be saved
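# Example call (model directory and image path are placeholders, not files that
# necessarily ship with this repo):
#   result = objDetection("coco_ssd_mobilenet_v1", "test_images/lot.jpg",
#                         MIN_CONF_LEVEL=0.6, SAVED_IMG_PATH="results")
#   print(result["vehicles"], result["pedestrians"])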
def objDetection(MODEL_NAME: str, IMG_PATH: str, MIN_CONF_LEVEL=0.50,
GRAPH_NAME="detect.tflite", LABELMAP_NAME="labelmap.txt", SAVED_IMG_PATH="", COORDS=False):
objects = []
# Get path to project root
CWD_PATH = str(from_root())
    # Path to .tflite file, which contains the model that is used for object detection
try: # running from pip install - pip install has different path structure that source
PATH_TO_MODEL = os.path.join(CWD_PATH, "models", MODEL_NAME)
PATH_TO_GRAPH = os.path.join(PATH_TO_MODEL, GRAPH_NAME)
PATH_TO_LABELS = os.path.join(PATH_TO_MODEL, LABELMAP_NAME)
if not exists(PATH_TO_GRAPH):
raise FileNotFoundError
except FileNotFoundError: # running from source
PATH_TO_MODEL = os.path.join(CWD_PATH, "obj_detection", "models", MODEL_NAME)
PATH_TO_GRAPH = os.path.join(PATH_TO_MODEL, GRAPH_NAME)
PATH_TO_LABELS = os.path.join(PATH_TO_MODEL, LABELMAP_NAME)
if not exists(PATH_TO_GRAPH):
print("detect.tflite not found! at path: " + PATH_TO_GRAPH)
return {
"error": "Invalid model-metadata path",
"vehicles": -1,
"pedestrians": -1,
"confidence-threshold": MIN_CONF_LEVEL,
"objects": objects,
}
# Load label list from metadata or from labelmap file
labels = load_labels(PATH_TO_GRAPH, PATH_TO_LABELS)
    # Load the TensorFlow Lite model.
interpreter = Interpreter(model_path=PATH_TO_GRAPH)
interpreter.allocate_tensors()
    # Get model details
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
floating_model = (input_details[0]['dtype'] == np.float32)
input_mean = 127.5
input_std = 127.5
# Load image and resize to expected shape [1xHxWx3]
image = cv2.imread(IMG_PATH)
if image is None:
print("Image not found, check path ", IMG_PATH)
return {
"error": "Image not found, check path",
"vehicles": -1,
"pedestrians": -1,
"confidence-threshold": MIN_CONF_LEVEL,
"objects": objects,
}
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
imH, imW, _ = image.shape
image_resized = cv2.resize(image_rgb, (width, height))
input_data = np.expand_dims(image_resized, axis=0)
    # Normalize pixel values if using a floating model (i.e. if the model is non-quantized)
if floating_model:
input_data = (np.float32(input_data) - input_mean) / input_std
    # Perform the actual detection by running the model with the image as input
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
# Retrieve detection results
try:
boxes = interpreter.get_tensor(output_details[0]['index'])[0] # Bounding box coordinates of detected objects
classes = interpreter.get_tensor(output_details[1]['index'])[0] # Class index of detected objects
scores = interpreter.get_tensor(output_details[2]['index'])[0] # Confidence of detected objects
    except Exception:
return {
"error": "Invalid model-metadata output details, probably using model-metadata for JS or Dart",
"vehicles": -1,
"pedestrians": -1,
"confidence-threshold": MIN_CONF_LEVEL,
"objects": objects,
}
# Loop over all detections and draw detection box if confidence is above minimum threshold
for i in range(len(scores)):
if ((scores[i] > MIN_CONF_LEVEL) and (scores[i] <= 1.0)):
            # Get bounding box coordinates and draw the box. The interpreter can
            # return coordinates that fall outside the image dimensions, so force
            # them back inside the image using max() and min().
            ymin = int(max(1, (boxes[i][0] * imH)))
            xmin = int(max(1, (boxes[i][1] * imW)))
            ymax = int(min(imH, (boxes[i][2] * imH)))
            xmax = int(min(imW, (boxes[i][3] * imW)))
            # Corners of the bounding box (image coordinates, y grows downward)
            tl = (xmin, ymin)  # Top left
            tr = (xmax, ymin)  # Top right
            br = (xmax, ymax)  # Bottom right
            bl = (xmin, ymax)  # Bottom left
            # Draw detection box on image
            cv2.rectangle(image, tl, br, (10, 255, 0), 2)
# Draw label
object_name = labels[int(classes[i])] # Look up object name from "labels" array using class index
object_score = int(scores[i] * 100)
label = '%s: %d%%' % (object_name, object_score) # Example: 'person: 72%'
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(image, (xmin, label_ymin - labelSize[1] - 10),
(xmin + labelSize[0], label_ymin + baseLine - 10),
(255, 255, 255), cv2.FILLED) # Draw white box to put label text in
cv2.putText(image, label, (xmin, label_ymin - 7),
cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
# Add object to objects list
obj = {
"name": object_name,
"confidence": scores[i],
"coord": {"top-left": tl, "top-right": tr, "bottom-right": br, "bottom-left": bl} if COORDS else {},
}
objects.append(obj)
# count vehicles and pedestrians
cars = 0
people = 0
for obj in objects:
if obj["name"] == "car" or obj["name"] == "truck":
cars += 1
elif obj["name"] == "person":
people += 1
if SAVED_IMG_PATH:
_, tail = os.path.split(IMG_PATH)
        SAVED_IMG_PATH = os.path.join(SAVED_IMG_PATH, os.path.splitext(tail)[0] + "_box.jpg")
cv2.imwrite(SAVED_IMG_PATH, image)
return {
"error": "",
"vehicles": cars,
"pedestrians": people,
"confidence-threshold": MIN_CONF_LEVEL,
"objects": objects,
}
# Sample function for detecting if object is in a certain area, useful if some parking lots have handicapped or
# oversize parking spaces
# if inArea([tr, tl, br, bl], (100, 400), (800, 600)):
# print("Object detected in area")
def inArea(points, box_start, box_end):
for point in points:
if (box_start[0] < point[0] < box_end[0] and
box_start[1] < point[1] < box_end[1]):
return True
return False
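# Hedged usage sketch (not part of the original module): runs a single detection and
# prints the summary counts. The model directory name and image path below are
# placeholders and must point at real files.
if __name__ == "__main__":
    result = objDetection("sample_model", "test_images/lot.jpg",
                          MIN_CONF_LEVEL=0.6, COORDS=True)
    if result["error"]:
        print("Detection failed:", result["error"])
    else:
        print(result["vehicles"], "vehicles,", result["pedestrians"], "pedestrians")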
|
KB4YG/ml
|
obj_detection/obj_detection.py
|
obj_detection.py
|
py
| 9,890 |
python
|
en
|
code
| 1 |
github-code
|
6
|
32639606030
|
import unittest
from selenium import webdriver
from selenium.webdriver.common.by import By
class AddRemoveElements(unittest.TestCase):
def setUp(self) -> None:
self.driver = webdriver.Chrome(executable_path="chromedriver")
self.driver.get('https://the-internet.herokuapp.com/')
self.driver.find_element(By.LINK_TEXT, 'Add/Remove Elements').click()
def tearDown(self) -> None:
self.driver.quit()
def test_add_remove(self):
elements_added = int(input('How many elements will you add?: '))
elements_removed = int(input('How many elements will you remove?: '))
        total_elements = max(elements_added - elements_removed, 0)
add_button = self.driver.find_element(By.XPATH, '//*[@id="content"]/div/button')
for i in range(elements_added):
add_button.click()
for i in range(elements_removed):
try:
delete_button = self.driver.find_element(By.XPATH, '//*[@id="elements"]/button[3]')
delete_button.click()
except:
print("You're trying to delete more elements the existing")
break
if total_elements > 0:
print(f'There are {total_elements} elements on screen')
else:
print("There 0 are elements on screen")
if __name__ == '__main__':
unittest.main(verbosity=2)
|
yorlysoro/intro_selenium_course
|
test_add_remove.py
|
test_add_remove.py
|
py
| 1,436 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30478333670
|
class Node:
def __init__(self, data):
self.data = data
self.next = None
class MyQueue:
def __init__(self):
self.head = None
self.tail = None
def push(self, item):
item_node = Node(item)
if self.head is None or self.tail is None:
self.head = item_node
self.tail = item_node
else:
self.tail.next = item_node
self.tail = self.tail.next
# self.tail = item_node
def pop(self):
if self.head is None:
return -1
item = self.head.data
self.head = self.head.next
return item
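# Hedged usage sketch (not part of the original file): exercises the linked-list
# queue above; pop() returns -1 when the queue is empty.
if __name__ == "__main__":
    q = MyQueue()
    q.push(1)
    q.push(2)
    print(q.pop())  # 1 (FIFO order)
    print(q.pop())  # 2
    print(q.pop())  # -1 (queue is empty)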
|
prabhat-gp/GFG
|
Stacks and Queues/Queues/2_implement_queue_ll.py
|
2_implement_queue_ll.py
|
py
| 679 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15624457572
|
import numpy as np
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
test_size = 0.2
seed = 42
x_data = np.load('./data/soja_images_150_new.npy', allow_pickle=True)
y_data = np.load('./data/soja_labels_150_new.npy', allow_pickle=True)
x_data = x_data.astype(np.float32)
y_data_cat = to_categorical(y_data)
x_train, x_test, y_train, y_test = train_test_split(
x_data, y_data_cat, test_size=test_size, random_state=seed)
with open('./data/x_train.npy', 'wb') as f:
np.save(f, x_train)
with open('./data/x_test.npy', 'wb') as f:
np.save(f, x_test)
with open('./data/y_train.npy', 'wb') as f:
np.save(f, y_train)
with open('./data/y_test.npy', 'wb') as f:
np.save(f, y_test)
|
nagahamaVH/soybean-image-classif
|
app/src/prepare_data.py
|
prepare_data.py
|
py
| 741 |
python
|
en
|
code
| 1 |
github-code
|
6
|
14894176437
|
import asyncio
import os
import os.path
from flask import Flask, request, send_from_directory, flash, request
from werkzeug.utils import secure_filename
import getpass
import platform
from flask_cors import CORS
from scripts.datascript import Datascript
from scripts.CalcoloScostamentiSenzaIntermedi import ScostamentiSenzaIntermedi
# This file contains all the apis required to upload the datasets and to get the graph images that will be displayed in the flutter frontend
app = Flask(__name__)
# CORS is required to allow other domains to access files and images on the webpage
CORS(app)
# Allowed dataset file extensions
ALLOWED_EXTENSIONS = {'csv', 'xlsx'}
# PATHS
# Raw uploaded datasets folder
# Output Graphs folder
username = getpass.getuser()
if platform.system() == "Windows":
UPLOAD_FOLDER = r"C:\SCGProject\Datasets\RawDatasets"
DATASET_FOLDER = r"C:\SCGProject\Datasets\CsvForGraphing"
if platform.system() == "Darwin" :
if(username == "marcovinciguerra"):
UPLOAD_FOLDER = "/Users/marcovinciguerra/Github/SCGProject/Datasets/RawDatasets"
DATASET_FOLDER = "/Users/marcovinciguerra/Github/SCGProject/Datasets/CsvForGraphing"
elif(username == "davidguzman"):
UPLOAD_FOLDER = "/Users/davidguzman/documents/Github/SCGProject/Datasets/RawDatasets"
DATASET_FOLDER = "/Users/davidguzman/documents/Github/SCGProject/Datasets/CsvForGraphing"
# Check that the uploaded file has a valid format
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
# API to upload the raw dataset
@app.route('/uploadDataset', methods = ['GET', 'POST'])
def uploadDataset():
if request.method == 'POST':
        # Check whether a file is present in the POST request
if 'file' not in request.files:
print("no file selezionato")
flash('No file part')
return "KO"
        # Get the files selected by the user
files = request.files.getlist('file')
print(files)
        # Iterate over the selected files and upload them to the filesystem one by one
for file in files:
            # Check that the user selected at least one file to upload
if file.filename == '':
print('no file selezionato')
return "KO"
            # Check that the file format is valid
if file and allowed_file(file.filename):
                # Save the file to the filesystem
file.save(os.path.join(UPLOAD_FOLDER, file.filename))
return "OK"
# Get csv graphs from folder
@app.route('/get-csvgraph/<filename>')
def get_csv_graph(filename):
return send_from_directory(DATASET_FOLDER, filename)
# Get test data from python scripts. It awaits data from the script
@app.route('/get-scriptdata')
async def get_script_data():
return await ScostamentiSenzaIntermedi.getData()
# Format Datasets Script
# NOTE: FixDatas is used in the route below but is never imported in this file; an
# import such as `from scripts.FixDatas import FixDatas` (assumed module path) is
# required for this endpoint to work.
@app.route('/format-datasets')
async def fix_dataset():
return await FixDatas.runFixDatas()
if __name__ == "__main__":
app.run()
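# Hedged usage sketch (not part of the original file): once the server is running,
# a dataset can be uploaded from another process. Host, port and file name below
# are placeholders.
#
#   import requests
#   with open("dataset.xlsx", "rb") as f:
#       r = requests.post("http://localhost:5000/uploadDataset", files={"file": f})
#   print(r.text)  # "OK" on success, "KO" otherwise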
|
VinciGit00/SCGProject
|
Frontend/flask_code/app.py
|
app.py
|
py
| 3,160 |
python
|
en
|
code
| 2 |
github-code
|
6
|
887825257
|
from django.shortcuts import render, redirect
from django.http import Http404, JsonResponse
from django.views import View
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
# Decorators
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from .decorators import allowerd_users
from .decorators import unauthenticated_user
from datetime import date
import pandas as pd
# Models and Forms
from backend.models import Info, Book, Student, Issue, Reservation, Class
from backend.fields import book_fields, class_fields, student_fields, reservation_fields, issue_fields
from .forms import BookForm, ClassForm, StudentForm, IssueForm, ReservationForm, LoginForm
from .custom import get_fields
# Excel to JSON parser
def parser_view(request):
info = Info.objects.all().first()
data = None
if request.method == "POST":
if 'file' in request.FILES:
dataFrame = pd.read_excel(request.FILES['file'], engine = "openpyxl")
data = dataFrame.to_json(indent = 4, orient = "records", force_ascii = False)
else:
return redirect("parser-view")
context = { "json": data, "school_name": info.school_name }
return render(request, "apis/index.html", context)
@unauthenticated_user
@csrf_exempt
def login_view(request):
form = LoginForm()
info = Info.objects.all().first()
if request.method == 'POST':
form = LoginForm(request, data = request.POST)
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(request, username = username, password = password)
if user is not None:
login(request, user)
return redirect('home-view')
context = { "form": form, "messages": messages, "school_name": info.school_name }
return render(request, 'registration/login.html', context)
def logout_view(request):
logout(request)
return redirect('login-view')
def home_view(request):
info = Info.objects.all().first()
books = Book.objects.all()
tableFields = book_fields()
fields = []
# Model field list
for field in Book._meta.get_fields():
if field.name != "reservation":
fields.append(field.name)
context = {
"querySet": books,
"fields": fields,
"tfields": tableFields[0],
"tlength": len(fields),
"school_name": info.school_name,
}
return render(request, "home.html", context)
def error_view(request):
return render(request, "components/error.html")
def tutorial_view(request):
info = Info.objects.all().first()
context = {
"school_name": info.school_name,
}
return render(request, "tutorial/index.html", context)
class BookGPView(View):
@method_decorator(allowerd_users(["book-editing"]))
def get(self, request):
info = Info.objects.all().first()
books = Book.objects.all()
tableFields = book_fields()
form = BookForm()
fields = []
# Model field list
for field in Book._meta.get_fields():
if field.name != "reservation":
fields.append(field.name)
context = {
"fields": fields,
"querySet": books,
"form": form,
"tfields": tableFields[0],
"tlength": len(fields) + 1,
"school_name": info.school_name,
}
return render(request, "book/index.html", context)
@method_decorator(allowerd_users(["book-editing"]))
def post(self, request):
form = BookForm()
if request.method == "POST":
form = BookForm(request.POST)
if form.is_valid():
form.save()
return redirect("book-view")
else:
return form.errors
class BookPDView(View):
def get_object(self, pk):
try:
return Book.objects.get(id = pk)
except Book.DoesNotExist:
raise Http404
@method_decorator(allowerd_users(["book-editing"]))
def get(self, request, pk):
return self.get_object(pk)
@method_decorator(allowerd_users(["book-editing"]))
def delete(self, request, pk):
book = self.get_object(pk)
error = JsonResponse({"error": "Sve knjige nisu vraćene!"})
if len(Reservation.objects.all()) == 0: # If there is no book's at all
book.delete()
return JsonResponse(dict(code = 204, content = "Knjiga je izbrisana"))
elif not Reservation.objects.get(book = book): # If the selected book is not reservated
book.delete()
return JsonResponse(dict(code = 204, content = "Knjiga je izbrisana"))
else: # If the all books of this type are returned
reservation = Reservation.objects.get(book = book)
if reservation.issued == reservation.returned:
book.delete()
return JsonResponse(dict(code = 204, content = "Knjiga je izbrisana"))
error.status_code = 403
return error
class ClassGPView(View):
@method_decorator(allowerd_users(["class-editing"]))
def get(self, request):
info = Info.objects.all().first()
classes = Class.objects.all()
tableFields = class_fields()
form = ClassForm()
fields = []
# Model field list
for field in Class._meta.get_fields():
if field.name != "student":
fields.append(field.name)
context = {
"fields": fields,
"querySet": classes,
"form": form,
"tfields": tableFields[0],
"tlength": len(fields) + 1,
"school_name": info.school_name,
}
return render(request, "class/index.html", context)
@method_decorator(allowerd_users(["class-editing"]))
def post(self, request):
form = ClassForm()
if request.method == "POST":
form = ClassForm(request.POST)
if form.is_valid():
form.save()
return redirect("class-view")
else:
return form.errors
class ClassPDView(View):
def get_object(self, pk):
try:
return Class.objects.get(id = pk)
except Class.DoesNotExist:
raise Http404
@method_decorator(allowerd_users(["class-editing"]))
def get(self, request, pk):
return self.get_object(pk)
@method_decorator(allowerd_users(["class-editing"]))
def delete(self, request, pk):
classes = self.get_object(pk)
classes.delete()
return JsonResponse(dict(code = 204, content = "Odjeljenje je izbrisano!"))
class StudentGPView(View):
@method_decorator(allowerd_users(["student-editing"]))
def get(self, request):
info = Info.objects.all().first()
students = Student.objects.all()
tableFields = student_fields()
form = StudentForm()
fields = []
# Model fields
for field in Student._meta.get_fields():
if field.name != "issue":
fields.append(field.name)
context = {
"fields": fields,
"querySet": students,
"form": form,
"tfields": tableFields[0],
"tlength": len(fields) + 1,
"school_name": info.school_name,
}
return render(request, "student/index.html", context)
@method_decorator(allowerd_users(["student-editing"]))
def post(self, request):
form = StudentForm()
if request.method == "POST":
form = StudentForm(request.POST)
if form.is_valid():
form.save()
return redirect("student-view")
else:
return form.errors
class StudentPDView(View):
def get_object(self, pk):
try:
return Student.objects.get(id = pk)
except Student.DoesNotExist:
raise Http404
@method_decorator(allowerd_users(["student-editing"]))
def get(self, request, pk):
return self.get_object(pk)
@method_decorator(allowerd_users(["student-editing"]))
def delete(self, request, pk):
student = self.get_object(pk)
student.delete()
return JsonResponse(dict(code = 204, content = "Učenik je izbrisan!"))
class ReservationGPView(View):
@method_decorator(allowerd_users(["reservation-editing"]))
def get(self, request):
info = Info.objects.all().first()
reservation = Reservation.objects.filter(professor = request.user.get_full_name())
tableFields = reservation_fields()
form = ReservationForm()
fields = get_fields(Reservation, "issue")
context = {
"fields": fields,
"querySet": reservation,
"form": form,
"tfields": tableFields[0],
"tlength": len(fields) + 1,
"school_name": info.school_name,
}
return render(request, "reservation/index.html", context)
@method_decorator(allowerd_users(["reservation-editing"]))
def post(self, request):
info = Info.objects.all().first()
reservation = Reservation.objects.all()
form = ReservationForm()
fields = get_fields(Reservation, "issue")
if request.method == "POST":
form = ReservationForm(request.POST)
if form.is_valid():
# Updating the book DB
book = Book.objects.get(id = form.cleaned_data["book"].id)
book.quantity -= form.cleaned_data["quantity"]
book.save()
# Saving the user
data = form.save(commit = False)
data.professor = request.user.get_full_name()
data.save()
return redirect("reservation-view")
context = {
"fields": fields,
"querySet": reservation,
"form": form,
"school_name": info.school_name,
}
return render(request, "reservation/index.html", context)
class ReservationPDView(View):
def get_object(self, pk):
try:
return Reservation.objects.get(id = pk)
except Reservation.DoesNotExist:
raise Http404
@method_decorator(allowerd_users(["reservation-editing"]))
def get(self, request, pk):
return self.get_object(pk)
@method_decorator(allowerd_users(["reservation-editing"]))
def delete(self, request, pk):
reservation = self.get_object(pk)
error = JsonResponse({"error": "Sve knjige nisu vraćene!"})
if request.is_ajax():
if reservation.issued == reservation.returned:
book = Book.objects.get(id = reservation.book.id)
book.quantity += reservation.quantity
book.save()
reservation.delete()
return JsonResponse(dict(code = 204, content = "Rezervacija je izbrisana!"))
error.status_code = 403
return error
class IssueGPView(View):
@method_decorator(allowerd_users(["issue-editing"]))
def get(self, request):
info = Info.objects.all().first()
issues = Issue.objects.all()
tableFields = issue_fields()
form = IssueForm()
fields = [field.name for field in Issue._meta.get_fields()]
context = {
"fields": fields,
"querySet": issues,
"form": form,
"tfields": tableFields[0],
"tlength": len(fields) + 1,
"school_name": info.school_name,
}
return render(request, "issue/index.html", context)
@method_decorator(allowerd_users(["issue-editing"]))
def post(self, request):
info = Info.objects.all().first()
issues = Issue.objects.all()
form = IssueForm()
fields = [field.name for field in Issue._meta.get_fields()]
if request.method == "POST":
form = IssueForm(request.POST)
if form.is_valid():
issue = Reservation.objects.get(id = form.cleaned_data["reservation"].id)
issue.issued += 1
issue.save()
form.save()
return redirect("issue-view")
context = {
"fields": fields,
"querySet": issues,
"form": form,
"school_name": info.school_name,
}
return render(request, "issue/index.html", context)
class IssuePDView(View):
# Getting the Issue object
def get_object(self, pk):
try:
return Issue.objects.get(id = pk)
except Issue.DoesNotExist:
raise Http404
@method_decorator(allowerd_users(["issue-editing"]))
def get(self, request, pk):
return self.get_object(pk)
@method_decorator(allowerd_users(["issue-editing"]))
def put(self, request, pk):
issue = self.get_object(pk)
data = {}
if request.is_ajax():
reservation = issue.reservation
if issue.returnStatus:
# Updating the issues DB to the latest info
issue.returnStatus = False
issue.returnDate = None
issue.debt = 0
reservation.returned -= 1
else:
issue.returnStatus = True
issue.returnDate = date.today()
if date.today() > reservation.endDate:
delta = date.today() - reservation.endDate
issue.debt = delta.days * .5
reservation.returned += 1
# Saving the changes
issue.save()
reservation.save()
# Preparing the data for returning into template
data['id'] = issue.id
data['returnStatus'] = issue.returnStatus
data['returnDate'] = issue.returnDate
data['debt'] = issue.debt
data['content'] = "Uspješno ste izmjenili podatke o knjizi!"
return JsonResponse(data)
@method_decorator(allowerd_users(["issue-editing"]))
def delete(self, request, pk):
issue = self.get_object(pk)
reservation = issue.reservation
reservation.issued -= 1
reservation.returned -=1
reservation.save()
issue.delete()
return JsonResponse(dict(code = 204, content = "Učenik je izbrisan!"))
|
analitika-tech/library
|
system/frontend/views.py
|
views.py
|
py
| 14,674 |
python
|
en
|
code
| 1 |
github-code
|
6
|
16505295497
|
from typing import List, Dict, Tuple
def create_chirp_dictionary(file_name: str) \
-> Dict[int, Tuple[int, str, List[str], List[int], List[int]]]:
"""
Opens the file "file_name" in working directory and reads the content into a
chirp dictionary as defined on Page 2 Functions 2.
Note, some spacing has been added for human readability.
>>> create_chirp_dictionary("chirps.txt")
{100000: (
400,
'Does not want to build a %SnowMan %StopAsking',
['SnowMan', 'StopAsking'],
[100, 200, 300],
[400, 500]),
100001: (
200,
'Make the ocean great again.',
[''],
[],
[400]),
100002: (
500,
"Help I'm being held captive by a beast! %OhNoes",
['OhNoes'],
[400],
[100, 200, 300]),
100003: (
500,
"Actually nm. This isn't so bad lolz :P %StockholmeSyndrome",
['StockholmeSyndrome'],
[400, 100],
[]),
100004: (
300,
'If some random dude offers to %ShowYouTheWorld do yourself a favour and %JustSayNo.',
['ShowYouTheWorld', 'JustSayNo'],
[500, 200],
[400]),
100005: (
400,
'LOLZ BELLE. %StockholmeSyndrome %SnowMan',
['StockholmeSyndrome', 'SnowMan'],
[],
[200, 300, 100, 500])}
"""
#Your code goes here
f = open(file_name, "r")
H = dict()
#Helper for tag
def tag_helper(x):
y = []
if x == '\n':
return []
else:
x = x.split(', ')
for i in x:
y.append(str(i))
return y
#Helper for liked and disliked
def helper(a):
b = []
if a == '\n':
return []
else:
a = a.strip('\n').split(',')
for numbers in a:
b.append(int(numbers))
return b
line = f.readline()
while line:
chirpid = int(line) #10000
userid = int(f.readline()) #400
message = f.readline().strip('\n') #Does not want to build a %SnowMan %StopAsking
tags = f.readline().strip('\n') #SnowMan, StopAsking
likeds = f.readline() #100, 200, 300
dislikeds = f.readline() #400, 500
        separator = f.readline()  # blank separator line before the next record ('\n')
line = f.readline()
tag = tag_helper(tags)
liked = helper(likeds)
disliked = helper(dislikeds)
H[chirpid] = (userid, message, tag, liked, disliked)
    f.close()
    return H
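# Hedged usage sketch (not part of the original file): "chirps.txt" must follow the
# record layout shown in the docstring above (six data lines per chirp followed by a
# blank separator line).
if __name__ == "__main__":
    chirps = create_chirp_dictionary("chirps.txt")
    print(len(chirps), "chirps loaded")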
|
kimber1y-tung/CSC108
|
assignment3/A3-2.py
|
A3-2.py
|
py
| 2,659 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4242214840
|
from predict_utility import load_checkpoint,get_input_args,predict,process_image
from utility_module import label_mapping
import warnings
warnings.filterwarnings("ignore")
from prettytable import PrettyTable
x = PrettyTable()
args = get_input_args()
model = load_checkpoint(args.checkpoint)
top_ps,top_class = predict(image_path=args.path,model=model,topk=args.top_k)
print("\nprediction to the given image of the flower\n")
flower_to_name = label_mapping()
prob = [round(p,5) for p in top_ps]
top_class_name = [flower_to_name[c] for c in top_class]
x.add_column("flower name",top_class_name)
x.add_column("prediction probability", prob)
print(x)
|
rkg-37/ImageClassifier
|
predict.py
|
predict.py
|
py
| 654 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10887477924
|
def bubble_sortiranje(niz):
privremeno = 0
duzina = len(niz)
    for i in range(0, duzina - 1):  # go up to the next-to-last element; there is nothing to the right of the last one
for j in range(0, (duzina - 1) - i):
if(niz[j] > niz[j + 1]):
privremeno = niz[j]
niz[j] = niz[j + 1]
niz[j + 1] = privremeno
def select_sortiranje(niz):
for i in range(0, len(niz)):
indeks = i
for j in range(i + 1, len(niz)):
if(niz[j] < niz[indeks]):
indeks = j
privremeno = niz[indeks]
niz[indeks] = niz[i]
niz[i] = privremeno
def insert_sortiranje(niz):
privremeno = 0
j = 0
for i in range(1, len(niz)):
privremeno = niz[i]
j = i - 1
while(j >= 0 and niz[j] > privremeno):
niz[j + 1] = niz[j]
j -= 1
niz[j + 1] = privremeno
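# Hedged usage sketch (not part of the original file): all three sorts operate
# in place on the list that is passed in.
if __name__ == "__main__":
    data = [5, 2, 9, 1, 7]
    bubble_sortiranje(data)
    print(data)  # [1, 2, 5, 7, 9]

    data = [8, 3, 6]
    insert_sortiranje(data)
    print(data)  # [3, 6, 8]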
|
marko-smiljanic/vezbanje-strukture-podataka
|
vezbanje-strukture-podataka/Domaci-PREDAVANJA/domaci3_sortiranje/test_sort.py
|
test_sort.py
|
py
| 909 |
python
|
bs
|
code
| 0 |
github-code
|
6
|
35083479163
|
dic = ["a", "b", "c", "d" , "e" , "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
chars = ['O','Q','R','S']
z, i, x = 0, 0, 0
if chars[0].istitle():
z = 1
dic = [element.upper() for element in dic]
while chars[x] != dic[i]:
i += 1
for item in chars:
if chars[x] != dic[i]:
if z == 1:
print(dic[i])
else:
print(dic[i].lower())
break
i += 1
x += 1
|
diogodh/codewars_py
|
missing_letter.py
|
missing_letter.py
|
py
| 481 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38978357115
|
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('localhost', 9999))
s.listen(1)
while True:
cli, (remhost, remport) = s.accept()
print("Nhan ket noi tu", remhost)
msg = "Hello %s\n" %remhost
cli.send(msg.encode('ascii'))
cli.close()
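# Hedged client sketch (not part of the original file): the accept loop above never
# returns, so this is left as a comment. It assumes the server is running locally
# on port 9999.
#
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('localhost', 9999))
#   print(c.recv(1024).decode('ascii'))
#   c.close()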
|
DuongHongDoan/CT225_LTPython
|
Buoi3_LTMang/Bai46_HelloServer.py
|
Bai46_HelloServer.py
|
py
| 282 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71484411388
|
import sys
sys.setrecursionlimit(3000)
def check(rs, cs):
table[rs][cs] = 2
if (rs, cs) == (rg, cg): return True
if rs > 0 and table[rs - 1][cs] == 1 and check(rs - 1, cs):
return True
if cs > 0 and table[rs][cs - 1] == 1 and check(rs, cs - 1):
return True
if rs < r - 1 and table[rs + 1][cs] == 1 and check(rs + 1, cs):
return True
if cs < c - 1 and table[rs][cs + 1] == 1 and check(rs, cs + 1):
return True
return False
r, c = map(int, input().split())
table = [[0] * c for _ in range(r)]
rs, cs = map(lambda x:int(x) - 1, input().split())
rg, cg = map(lambda x:int(x) - 1, input().split())
n = int(input())
draw = [list(map(int, input().split())) for _ in range(n)]
for ri, ci, hi, wi in draw:
ri -= 1
ci -= 1
for i in range(ri, ri+hi):
for j in range(ci, ci+wi):
table[i][j] = 1
if table[rs][cs] != 1 or table[rg][cg] != 1:
print('NO')
else:
print('YES' if check(rs, cs) else 'NO')
|
knuu/competitive-programming
|
atcoder/corp/codethxfes2014b_e.py
|
codethxfes2014b_e.py
|
py
| 994 |
python
|
en
|
code
| 1 |
github-code
|
6
|
26039884706
|
from __future__ import annotations
from dataclasses import dataclass
from pants.backend.go.subsystems.golang import GolangSubsystem
from pants.core.util_rules.system_binaries import (
BinaryPath,
BinaryPathRequest,
BinaryPaths,
BinaryPathTest,
)
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.internals.selectors import Get
from pants.engine.rules import collect_rules, rule
@dataclass(frozen=True)
class CGoBinaryPathRequest(EngineAwareParameter):
binary_name: str
binary_path_test: BinaryPathTest | None
def debug_hint(self) -> str | None:
return self.binary_name
@rule
async def find_cgo_binary_path(
request: CGoBinaryPathRequest, golang_env_aware: GolangSubsystem.EnvironmentAware
) -> BinaryPath:
path_request = BinaryPathRequest(
binary_name=request.binary_name,
search_path=golang_env_aware.cgo_tool_search_paths,
test=request.binary_path_test,
)
paths = await Get(BinaryPaths, BinaryPathRequest, path_request)
first_path = paths.first_path_or_raise(
path_request, rationale=f"find the `{request.binary_name}` tool required by CGo"
)
return first_path
def rules():
return collect_rules()
|
pantsbuild/pants
|
src/python/pants/backend/go/util_rules/cgo_binaries.py
|
cgo_binaries.py
|
py
| 1,235 |
python
|
en
|
code
| 2,896 |
github-code
|
6
|
71226682109
|
import os, glob
from fpdf import FPDF
class Pdf_Tool:
def __init__(self, format):
self.pdf = FPDF(format=format)
def save(self, dir, pdf_name):
if not os.path.exists(dir):
os.makedirs(dir)
self.pdf.output(os.path.join(dir, pdf_name), "F")
def create(self, img_path_list, dimen):
for img_path in img_path_list:
self.pdf.add_page()
self.pdf.image(img_path, dimen[0], dimen[1], dimen[2], dimen[3])
if __name__ == "__main__":
root = os.path.join(os.getcwd(), "output")
root = "F:/E-Book/Temp/JPG"
for i in range(1, 4):
no = str(i).zfill(1)
filepath_list = sorted(glob.glob(os.path.join(root, no + "/*.jpg")), key=os.path.basename)
pdf = Pdf_Tool((2040, 1512))
pdf.create(filepath_list, (0, 0, 2040, 1512))
pdf.save(os.path.join(root, no), no + ".pdf")
|
huangzf128/something
|
code/python/image/pdf_tool.py
|
pdf_tool.py
|
py
| 888 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16439872003
|
from flask import Flask
import dash
from dash import dcc
from dash import html
from dash.dependencies import Input, Output
import plotly.graph_objs as go
import pandas as pd
import os
inbodyDf = pd.read_csv(os.path.join(os.path.dirname(__file__), os.pardir, 'data', 'inbody.csv'))
courseDf = pd.read_csv(os.path.join(os.path.dirname(__file__), os.pardir, 'data', 'courses.csv'))
external_stylesheets=['https://codepen.io/chriddyp/pen/bWLwgP.css']
server = Flask(__name__)
app = dash.Dash(__name__, server=server, external_stylesheets=external_stylesheets)
app.title='[Youwon Shin]'
app.layout = html.Div(className='body',children=[
html.Div(className='header',children=[
html.H1(className='h1',children='Welcome to YOUWON\'s WORLD!',style={'color':'white'})
]),
html.Div(className='firstDiv',children=[
html.Div(className='Intro',children=[
html.H1(className='h1',children='Youwon Shin',style={'color':'#8977ad'}),html.Br(),
html.P(className='IntroArticle',children=['Hello, I\'m youwon shin.',html.Br(),'I am currently a M.S. student in ',
html.B(children='Computer Science major'), ' at KAIST and supervised by Prof.Uchin Lee in ',
html.A(className='a',children='ICLab@KAIST', href="http://ic.kaist.ac.kr/wiki/wiki.cgi?Main"), '.', html.Br(),
'I received my B.S. degree in ', html.A(className='a',children='Mechanical and Biomedical Engineering', href="http://mbe.ewha.ac.kr/"),
' from Ewha Womans University in 2021.', html.Br(),html.Br(),html.Br(),
html.B(children='Contact: '),
html.A(className='email',children='[email protected]',href="mailto:[email protected]")])
]),
html.Div(className='Img', children=[
html.Img(className='profimg',src= app.get_asset_url('profile.jpg'), style={'alt':'Profile image'})
])
]),
html.Div(className='secondDiv',children=[
html.Div(className='leftDiv',children=[
html.H2(className='h2',children='My Personality Type'),
html.Div(className='leftChild',children=[
html.Img(className='mbtiImg',src=app.get_asset_url('ENFJ.png'), style={'alt':'ENFJ'}),
html.Span(className='MBTI',children=[
html.Br(),
html.B('E'), 'xtroverted', html.Br(),
'I', html.B('N'), 'tution', html.Br(),
html.B('F'), 'eelings', html.Br(),
html.B('J'), 'udgment'
])
])
]),
html.Div(className='rightDiv',children=[
html.H2(className='h2',children='Inbody Trend'),
html.Div(className='chartbox',children=[
dcc.Dropdown(
id="Value-selector",
options=[{
'label': i,
'value': i
} for i in inbodyDf['Type'].unique()],
value="All",
placeholder="Select Type",
),
dcc.Graph(id='inbody-graph')
]),
],
style={
'width' : '100%',
'min-width':'35rem'
})
]),
html.Div(className='thirdDiv',children=[
html.Div(className='leftDiv',children=[
html.H2(className='h2',children='Course Schedule (Fall, 2021)'),
html.Table(className='table1',children=[
html.Tbody([
html.Tr([
html.Th(style={'background-color':"#9283ad", 'width':'80px'}),
html.Th('MON', style={'background-color':"#9283ad", 'width':'80px'}),
html.Th('TUE', style={'background-color':"#9283ad", 'width':'80px'}),
html.Th('WED', style={'background-color':"#9283ad", 'width':'80px'}),
html.Th('THU', style={'background-color':"#9283ad", 'width':'80px'}),
html.Th('FRI', style={'background-color':"#9283ad", 'width':'80px'})
],style={'height':'35px'}),
html.Tr([
html.Td('9:00-10:30'),html.Td(),html.Td(),html.Td(),html.Td(),html.Td()
]),
html.Tr([
html.Td('10:30-12:00'),html.Td(),html.Td(['Data', html.Br(),'Visualization']),html.Td(),html.Td(['Data', html.Br(),'Visualization']),html.Td()
]),
html.Tr([
html.Td('12:00-13:00'),html.Td('~LUNCH TIME~', colSpan=5,style={'background-color': '#d5c9dd','font-weight':'bold'})
]),
html.Tr([
html.Td('13:00-14:30'),html.Td(['Advanced', html.Br(), 'Data Mining']),html.Td(),html.Td(['Advanced', html.Br(), 'Data Mining']),html.Td(),html.Td()
]),
html.Tr([
html.Td('14:30-16:00'),html.Td(),html.Td('HCI'),html.Td(),html.Td('HCI'),html.Td()
])
])
])
]),
html.Div(className='rightDiv',children=[
html.H2(className='h2',children='How many courses did I take?'),
html.Div(className='chartbox',children=[
dcc.Dropdown(
id="Year-selector",
options=[{
'label': i,
'value': i
} for i in courseDf['Year'].unique()],
value="Year",
placeholder="Select Year"
),
dcc.Graph(id='course-graph')
])
],
style={
'width' : '100%',
'min-width':'35rem'
})
]),
html.Div(className='fourthDiv',children=[
html.Div(className='DivChild',children=[
html.H2(className='h2',children=['Visitors for last 7 days']),
html.Table(className='table2',children=[
html.Tbody([
html.Tr([
html.Th('MON', style={'background-color':"#dbd4e7", 'width':'90px'}),
html.Th('TUE', style={'background-color':"#dbd4e7", 'width':'90px'}),
html.Th('WED', style={'background-color':"#dbd4e7", 'width':'90px'}),
html.Th('THU', style={'background-color':"#dbd4e7", 'width':'90px'}),
html.Th('FRI', style={'background-color':"#dbd4e7", 'width':'90px'}),
html.Th('SAT', style={'background-color':"#dbd4e7", 'width':'90px'}),
html.Th('SUN', style={'background-color':"#dbd4e7", 'width':'90px'})
]),
html.Tr([
html.Td('30', style={'width':"#dbd4e7"}),html.Td('12'),html.Td('23'),html.Td('43'),
html.Td('21'),html.Td('11'),html.Td('34')
])
])
])
])
]),
html.Div(className='footer',children=[
html.B('Interactive Computing Lab, School of Computing,KAIST'),
html.Br(),
html.I('291 Daehak-ro, Yuseong-gu, Daejeon 34141, Republic of Korea')
])
])
@app.callback(
Output(component_id='inbody-graph', component_property='figure'),
[Input(component_id='Value-selector', component_property='value')]
)
def update_inbody_graph(value):
days = ['2021-07-27', '2021-08-03', '2021-08-12', '2021-09-07']
if value == "All":
df = inbodyDf.copy()
else:
df = inbodyDf.loc[inbodyDf['Type']==value]
line1 = go.Scatter(name='Fat', x=days, y=df.loc[df['Type']=='Fat']['Figure'], mode='lines+markers')
line2 = go.Scatter(name='Skeletal muscles', x=days, y=df.loc[df['Type']=='Skeletal muscles']['Figure'],mode='lines+markers')
line3 = go.Scatter(name='BMI', x=days, y=df.loc[df['Type']=='BMI']['Figure'],mode='lines+markers')
line4 = go.Scatter(name='Fat Pect.', x=days, y=df.loc[df['Type']=='Fat Pect.']['Figure'],mode='lines+markers')
return {
'data': [line1,line2,line3,line4],
'layout':
go.Layout(
barmode='stack'
)
}
@app.callback(
Output(component_id='course-graph', component_property='figure'),
[Input(component_id='Year-selector', component_property='value')]
)
def update_course_graph(value):
if value == "Year":
df = courseDf[courseDf['Year']==2021]
else:
df = courseDf[courseDf['Year']==value]
grouped_Df = df.groupby(['Semester','Department']).count()
grouped_Df = grouped_Df.reset_index()
semesters = ['Spring', 'Fall']
bar1 = go.Bar(name='School of Computing', x=semesters, y=grouped_Df.loc[grouped_Df['Department']=='School of Computing']['Course'])
bar2 = go.Bar(name='General Required', x=semesters, y=grouped_Df.loc[grouped_Df['Department']=='General Required']['Course'])
bar3 = go.Bar(name='Electrical Engineering', x=semesters, y=grouped_Df.loc[grouped_Df['Department']=='Electrical Engineering']['Course'])
bar4 = go.Bar(name='Cyber Security', x=semesters, y=grouped_Df.loc[grouped_Df['Department']=='Cyber Security']['Course'])
bar5 = go.Bar(name='Computer Engineering', x=semesters, y=grouped_Df.loc[grouped_Df['Department']=='Computer Engineering']['Course'])
bar6 = go.Bar(name='Mech/BioMed Engineering', x=semesters, y=grouped_Df.loc[grouped_Df['Department']=='Mech/BioMed Engineering']['Course'])
return {
'data': [bar1,bar2,bar3,bar4,bar5,bar6],
'layout':
go.Layout(
barmode='stack'
)
}
if __name__ == '__main__':
app.run_server(debug=True)
|
yuwon-shin/Data_Visualization
|
PR/flask/useDash.py
|
useDash.py
|
py
| 9,696 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28624119358
|
import cartopy
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import numpy as np
def plot_geodetic(location_to_geodetic, edxml_parser):
# get names and geodetics for plotting
locs = np.asarray(list(location_to_geodetic.keys()))
geodetic_coords = np.asarray(list(location_to_geodetic.values()))
geodetic_coords = geodetic_coords[:, [1, 0]]
# remove any locations from geodetic that was not in parser
loc_in_parser = np.isin(locs, edxml_parser.sorted_unique_locs)
locs = locs[loc_in_parser]
geodetic_coords = geodetic_coords[loc_in_parser]
# count occurences of each location in parser
loc_counts = np.asarray([edxml_parser.loc_to_count[loc] for loc in locs])
# set up figure and axes
fig = plt.figure()
ax = plt.axes(projection=ccrs.PlateCarree())
# zoom view around points
min_coord = np.min(geodetic_coords, axis=0) - 5
max_coord = np.max(geodetic_coords, axis=0) + 5
ax.set_extent([min_coord[0], max_coord[0], min_coord[1], max_coord[1]],
crs=ccrs.PlateCarree())
# add imagery
ax.add_feature(cartopy.feature.LAND)
ax.add_feature(cartopy.feature.OCEAN)
# plot points
sc = plt.scatter(geodetic_coords[:, 0], geodetic_coords[:, 1], color='#00000088', marker='o',
s=2*loc_counts, transform=ccrs.PlateCarree())
# create annotation
# code modified from:
# https://stackoverflow.com/questions/7908636/possible-to-make-labels-appear-when-hovering-over-a-point-in-matplotlib
annot = ax.annotate("", xy=(0, 0), xytext=(20, 20), textcoords="offset points",
bbox=dict(boxstyle="round", fc="w"),
arrowprops=dict(arrowstyle="->"))
annot.set_visible(False)
# define func to update annotations
def update_annot(ind):
# get position from first point
pos = sc.get_offsets()[ind["ind"][0]]
annot.xy = pos
# draw box with annotations from all points from event
text = "\n".join([locs[n] for n in ind["ind"]])
annot.set_text(text)
annot.get_bbox_patch().set_alpha(0.4)
# define func to handle clicking
def on_click(event):
vis = annot.get_visible()
if event.inaxes == ax:
cont, ind = sc.contains(event)
# update annotation if point with data clicked
if cont:
update_annot(ind)
annot.set_visible(True)
fig.canvas.draw_idle()
# hide annotation if point without data clicked
else:
if vis:
annot.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect("button_release_event", on_click)
# display plot
plt.show()
|
pnadelofficial/HistoricalLetters
|
plot.py
|
plot.py
|
py
| 2,774 |
python
|
en
|
code
| null |
github-code
|
6
|
17241327801
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import requests
from bs4 import BeautifulSoup
import time
BASE_URL = 'https://www.nowcoder.com'
driver = webdriver.Chrome(executable_path="P:/selenium/chromedriver.exe")
driver.get('https://www.nowcoder.com/discuss/experience?tagId=2656')  # the page to scrape
# wait for the AJAX content to load
wait = WebDriverWait(driver, 10)
wait.until(
EC.presence_of_element_located((By.CLASS_NAME, "js-nc-wrap-link"))
)
def scrollPage(timeout, times):
for i in range(times):
print('next')
        # scroll down once to load more content
driver.execute_script("window.scrollTo(0,Math.max(document.documentElement.scrollHeight,document.body.scrollHeight,document.documentElement.clientHeight));")
time.sleep(timeout)
# scroll down 5 times at 3-second intervals
scrollPage(3, 5)
# tags with class='js-nc-wrap-link' are links to the interview-experience posts
items = driver.find_elements_by_class_name('js-nc-wrap-link')
with open('content.txt', 'w', encoding='utf-8') as f:
    # read the text from each link one by one and write it to the file
for item in items:
print(item.get_attribute('data-href'))
response = requests.get(BASE_URL + item.get_attribute('data-href'))
data = response.text
soup = BeautifulSoup(data, 'html.parser')
words = soup.find('div', {'class': 'post-topic-des nc-post-content'})
f.write(words.get_text())
|
Chunar5354/interview_note
|
experience/spider.py
|
spider.py
|
py
| 1,566 |
python
|
en
|
code
| 1 |
github-code
|
6
|
32693212351
|
'''
Created on Mar 30, 2010
@author: kwadwo
'''
from OutsideLibrary.table_parser import TableParser
from OutsideLibrary import ClientForm
from courseLib import *
import urllib2
def getStuff():
mnemonic = raw_input("input course Mnemonic (ex:'math')")
number = raw_input("input course number")
response = urllib2.urlopen("http://rabi.phys.virginia.edu/mySIS/CS/CS_programs/Class_Search.php?Semester=1108")
forms = ClientForm.ParseResponse (response, backwards_compat=False)
response.close()
form = forms[1]
form["iCMnemonic"] = mnemonic
form["iCNumber"] = number
response1 = urllib2.urlopen(form.click()).read()
p = TableParser()
return [response1, p]
def parseDays(unparsedDays):
dict={"MoWeFr":["Monday","Wednesday","Friday"],"Tu":["Tuesday"],"Th":["Thursday"]}
if(unparsedDays in dict):
return dict[unparsedDays]
return []
def main():
stuff = getStuff()
p = stuff[1]
p.feed(stuff[0])
sections = []
#populating sections list
for i in p.doc[1][2:]:
days,time = i[6].split(" ",1)
days=parseDays(days.strip())
sectDays=[]
for j in days:
sectDays.append(SectionDay(j,time.strip()))
location = i[7].replace("\n", "").replace("\r", "")
instructor = i[5].replace("\n", "").replace("\r", "")
status = i[3]
#checking whether this day is part of a previous section
if i[0] == "":
sections[-1].classDays.append(sectDays)
continue;
sections.append(Section(i[0], sectDays, instructor, location, status))
# print p.doc[1][1][0]
c=Course(p.doc[1][1][1], p.doc[1][1][0], sections)
sched1=Schedule(1)
#get a sample section and add it to the schedule
# for i in c.sections[0].classDays:
# sched1.putSchduledDay(i.name, c, c.sections[0].sectionNumber)
# print secdays
# sched1.putSchduledDay("Monday", c)
# for i in sections:
# print "Section #: ", i.sectionNumber
# print "Days: ", i.classDays.name
# print "instructor: ", i.instructor
# print "status: ", i.status
# print "location: ", i.location
# print "\n"
if __name__ == '__main__':
main()
|
code4ghana/randomPrograms
|
PythonPrograms/SISScheduler/src/CourseLib/Runner.py
|
Runner.py
|
py
| 2,280 |
python
|
en
|
code
| 1 |
github-code
|
6
|
39019051320
|
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription, conditions
from launch.actions import ExecuteProcess,RegisterEventHandler, IncludeLaunchDescription
from launch_ros.actions import Node
from launch.substitutions import LaunchConfiguration,Command
from launch.event_handlers import OnProcessExit
from launch.launch_description_sources import PythonLaunchDescriptionSource
def generate_launch_description():
use_rviz = LaunchConfiguration('use_rviz', default='false')
use_sim_time = LaunchConfiguration('use_sim_time', default='false')
pkg_r2d2_control = get_package_share_directory('r2d2_control')
xacro_file = os.path.join(pkg_r2d2_control,'urdf','r2d2_gazebo_ros2_control.urdf.xacro')
robot_description = {'robot_description' : Command(['xacro', ' ', xacro_file])}
rviz_file = os.path.join(pkg_r2d2_control,'config','r2d2_ros2_control.rviz')
gazebo = IncludeLaunchDescription(
PythonLaunchDescriptionSource([os.path.join(
get_package_share_directory('gazebo_ros'), 'launch'), '/gazebo.launch.py']),
)
node_robot_state_publisher = Node(
package='robot_state_publisher',
executable='robot_state_publisher',
name='robot_state_publisher',
output='screen',
parameters=[{'use_sim_time': use_sim_time}, robot_description])
spawn_entity = Node(
package='gazebo_ros',
executable='spawn_entity.py',
name='spawn_entity',
output='screen',
arguments=[
'-entity', 'r2d2',
'-x', '0',
'-y', '0',
'-z', '1',
'-topic', '/robot_description'
])
# load_joint_state_controller = ExecuteProcess(
# cmd=['ros2', 'control', 'load_controller', '--set-state', 'start',
# 'joint_state_broadcaster'],
# output='screen'
# )
load_joint_state_controller = ExecuteProcess(
cmd=['ros2', 'control', 'load_start_controller', 'joint_state_broadcaster'],
output='screen'
)
# load_joint_diff_drive_controller = ExecuteProcess(
# cmd=['ros2', 'control', 'load_controller', '--set-state', 'start', 'front_back_diff_drive_controller'],
# output='screen'
# )
load_joint_diff_drive_controller = ExecuteProcess(
cmd=['ros2', 'control', 'load_start_controller', 'front_back_diff_drive_controller'],
output='screen'
)
rviz = Node(
package='rviz2',
executable='rviz2',
name='rviz2',
arguments=['-d', rviz_file],
condition=conditions.IfCondition(use_rviz))
return LaunchDescription([
spawn_entity,
RegisterEventHandler(
event_handler=OnProcessExit(
target_action=spawn_entity,
on_exit=[load_joint_state_controller],
)
),
RegisterEventHandler(
event_handler=OnProcessExit(
target_action=load_joint_state_controller,
on_exit=[load_joint_diff_drive_controller],
)
),
gazebo,
node_robot_state_publisher,
rviz
])
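# Hedged usage note (not part of the original file): a launch description like this
# is typically started with, e.g.:
#   ros2 launch r2d2_control r2d2_control6.launch.py use_rviz:=true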
|
kei1107/r2d2_ros2
|
r2d2_control/launch/r2d2_control6.launch.py
|
r2d2_control6.launch.py
|
py
| 3,223 |
python
|
en
|
code
| 1 |
github-code
|
6
|
74050593789
|
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup
import sqlite3
from time import sleep
imgchrList = ['n','a','e','m','i','g','h','v']
DiffList = ['nov','adv','exh','mxm','inf','grv','hvn','vvd']
conn = sqlite3.connect("SDVXRanking.db")
cur = conn.cursor()
for tid in range(164,1412):
print('Loading...' + str(tid))
sql = "insert into TrackList (TrackID) VALUES (?);"
cur.execute(sql,(str(tid+1000),))
for i in range(0,8):
req = Request('http://anzuinfo.me/trackData.html?trackID='+str(tid).zfill(4)+imgchrList[i])
res = urlopen(req)
html = res.read().decode('utf8')
bs = BeautifulSoup(html, 'html.parser')
TrackData = bs.findAll('table', attrs={'class': 'trackData'})
for tracks in TrackData:
findlv = 'lv '+DiffList[i]
TrackLevel = tracks.find('div', attrs={'class': findlv})
if TrackLevel is None:
continue
TrackLevel = TrackLevel.text
TrackDifficulty = DiffList[i].upper()
TrackTitle = tracks.find('td', attrs={'class': 'title'}).text
sql = "update TrackList SET TrackTitle = :Title, "+TrackDifficulty+" = :Lv where TrackID = :ID;"
cur.execute(sql,{'Title': TrackTitle, 'Lv': TrackLevel, 'ID':str(tid+1000)})
conn.commit()
sleep(0.02)
conn.close()
|
limjungho/SDVXRanking
|
ParsingTrackList.py
|
ParsingTrackList.py
|
py
| 1,365 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3999827011
|
import PySimpleGUI as sg # Simple GUI for Python
import core
import guiElements
import constants as const
# Setup the simple window
sg.theme("Black")
layout = [
[guiElements.frameSelectJobAndZone],
[guiElements.frameSelectResource],
[guiElements.frameAssignAKey],
[guiElements.layoutStatusAndStartStopBtns],
]
# Create the Window
window = sg.Window(
title="Wakfu FarmBot 0.1",
layout=layout,
size=(400, 350),
element_justification="c",
element_padding=10,
)
# Event Loop to process "events" and get the "values" of the inputs
while True:
event, values = window.read()
    if event == sg.WIN_CLOSED:  # if the user closes the window
break
if event == "button_start":
core.onClick_Start_Button(event, values, window)
if event == "button_stop":
core.onClick_Stop_Button(event, values, window)
if event == "combo_key":
core.onChange_Key_Combo(event, values, window)
if event == "combo_zone":
core.onChange_Zone_Combo(event, values, window)
if event == "combo_resource":
core.onChange_Resource_Combo(event, values, window)
if event == "combo_job":
core.onChange_Job_Combo(event, values, window)
window.close()
|
jhriverasa/wakfu-farmscript
|
FarmScriptGUI.py
|
FarmScriptGUI.py
|
py
| 1,235 |
python
|
en
|
code
| 5 |
github-code
|
6
|
75316047546
|
import wx
from ..form.general import GeneralDialog
from ..textbox import LayoutDimensions
from ..textbox.textbox import TextInputLayout, TextSmartBox
from ..textbox.floatbox import FloatInputLayout, FloatSmartBox
from ..controller import ChildController
from ..model.bind import BindOjbect
__author__ = 'Joeny'
class FigureSetting(object):
"""
Figure Setting model.
"""
def __init__(self, *args, **kwargs):
"""
Figure Setting Constructor
:param args:
:param kwargs:
:return:
"""
self.title = kwargs.get('title', 'Title')
self.x_title = kwargs.get('x_title', 'X Title')
self.x_subtitle = kwargs.get('x_subtitle', '')
self.y_title = kwargs.get('y_title', 'Y Title')
self.y_subtitle = kwargs.get('y_subtitle', '')
self.x_min = kwargs.get('x_min', None)
self.x_max = kwargs.get('x_max', None)
self.y_min = kwargs.get('y_min', None)
self.y_max = kwargs.get('y_max', None)
self.linewidth = kwargs.get('linewidth', 2)
self.legend = kwargs.get('legend', [])
class FigureSettingPanel(wx.Panel):
"""
Particular Figure Setting
"""
def __init__(self, parent, setting, *args, **kwargs):
"""
:param setting:
:param args:
:param kwargs:
:return:
"""
wx.Panel.__init__(self, parent, *args, **kwargs)
self.layouts = {}
self.bind_objects = {}
self.setting = setting
self.SetSizerAndFit(self.do_layout())
def do_layout(self):
"""
Layout form
:return:
"""
vsizer = wx.BoxSizer(wx.VERTICAL)
layout = LayoutDimensions(top=2, bottom=2, left=4, right=4, interior=2,
widths=(100, 200),
stretch_factor=(0, 1), height=24)
layout.calculate()
self.layouts['title'] = TextInputLayout(self,
name='Title',
layout=layout,
textbox=TextSmartBox(self))
self.layouts['x_title'] = TextInputLayout(self,
name='X Title',
layout=layout,
textbox=TextSmartBox(self))
self.layouts['y_title'] = TextInputLayout(self,
name='Y Title',
layout=layout,
textbox=TextSmartBox(self))
self.layouts['x_min'] = FloatInputLayout(self,
name='X Min',
layout=layout,
textbox=FloatSmartBox(self, signs=True))
self.layouts['x_max'] = FloatInputLayout(self,
name='X Max',
layout=layout,
textbox=FloatSmartBox(self, signs=True))
self.layouts['y_min'] = FloatInputLayout(self,
name='Y Min',
layout=layout,
textbox=FloatSmartBox(self, signs=True))
self.layouts['y_max'] = FloatInputLayout(self,
name='Y Max',
layout=layout,
textbox=FloatSmartBox(self, signs=True))
vsizer.AddSpacer(5)
vsizer.Add(self.layouts['title'], 1, wx.EXPAND | wx.ALL, 0)
vsizer.AddSpacer(5)
vsizer.Add(self.layouts['x_title'], 1, wx.EXPAND | wx.ALL, 0)
vsizer.AddSpacer(5)
vsizer.Add(self.layouts['y_title'], 1, wx.EXPAND | wx.ALL, 0)
vsizer.AddSpacer(5)
vsizer.Add(self.layouts['x_min'], 1, wx.EXPAND | wx.ALL, 0)
vsizer.AddSpacer(5)
vsizer.Add(self.layouts['x_max'], 1, wx.EXPAND | wx.ALL, 0)
vsizer.AddSpacer(5)
vsizer.Add(self.layouts['y_min'], 1, wx.EXPAND | wx.ALL, 0)
vsizer.AddSpacer(5)
vsizer.Add(self.layouts['y_max'], 1, wx.EXPAND | wx.ALL, 0)
vsizer.AddSpacer(5)
return vsizer
def sync_data(self):
"""
Sync textbox data
"""
self.bind_objects['title'] = BindOjbect(self.setting.__dict__,
self.layouts['title'].textbox,
'title')
self.bind_objects['x_title'] = BindOjbect(self.setting.__dict__,
self.layouts['x_title'].textbox,
'x_title')
self.bind_objects['y_title'] = BindOjbect(self.setting.__dict__,
self.layouts['y_title'].textbox,
'y_title')
self.bind_objects['x_min'] = BindOjbect(self.setting.__dict__,
self.layouts['x_min'].textbox,
'x_min')
self.bind_objects['x_max'] = BindOjbect(self.setting.__dict__,
self.layouts['x_max'].textbox,
'x_max')
self.bind_objects['y_min'] = BindOjbect(self.setting.__dict__,
self.layouts['y_min'].textbox,
'y_min')
self.bind_objects['y_max'] = BindOjbect(self.setting.__dict__,
self.layouts['y_max'].textbox,
'y_max')
class FigureSettingDialog(GeneralDialog):
"""
Modify figure setting.
"""
def __init__(self, parent, controller=None, setting=None, local=None, btn_flags=wx.OK | wx.CANCEL, **kwargs):
"""
Figure setting dialog.
:param parent:
:param controller:
:param setting:
:param btn_flags:
:param kwargs:
:return:
"""
self.nb = None
self.pages = {}
if local:
self.local = local
self.local.view = self
else:
self.local = FigureSettingController(parent, self, setting)
GeneralDialog.__init__(self,
parent,
title="Figure Setting",
style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER,
controller=controller,
local=self.local,
btn_flags=btn_flags,
**kwargs)
self.btnsizer.AffirmativeButton.Bind(wx.EVT_BUTTON, self.local.button_ok_click)
def do_layout(self):
"""
Draw layout
:return:
"""
self.nb = wx.Notebook(self)
for index, setting in enumerate(self.local.settings):
# Create Panel.
self.pages[index] = FigureSettingPanel(self.nb, setting)
# Add to tab page.
self.nb.AddPage(self.pages[index], "Plot %d" % (index + 1))
# Sync Data
self.pages[index].sync_data()
return self.nb
class FigureSettingController(ChildController):
"""
Figure Setting Controller
"""
def __init__(self, parent, view, settings):
"""
:param parent:
:param view:
:return:
"""
ChildController.__init__(self, parent, view, settings)
self.settings = settings
def sync_data(self):
"""
Sync Data
:return:
"""
pass
def do_layout(self):
pass
def refresh(self):
pass
def update_layout(self, state):
pass
def button_ok_click(self, event):
"""
Button ok click
:param event:
:return:
"""
error = False
#TODO: Need to bind the textbox with the data.
if error is False:
event.Skip()
else:
if not wx.Validator_IsSilent():
wx.Bell()
def delete_control(self):
pass
|
JoenyBui/boa-gui
|
boaui/chart/dlg.py
|
dlg.py
|
py
| 8,601 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33648598241
|
from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
import re
import string
@dataclass
class Equipment:
title: str
value: str
unit: Optional[str] = None
quantity: Optional[int] = None
_equipment = [
("Backpack", "2gp"),
("Candle", "1cp"),
("Chain, 10'", "30gp"),
("Chalk, 1 piece", "1cp"),
("Chest, empty", "2gp"),
("Crowbar", "2gp"),
("Flask, empty", "3cp"),
("Flint & steel", "15cp"),
("Grappling hook", "1gp"),
("Hammer, small", "5sp"),
("Holy symbol", "25gp"),
("Holy water, 1 vial**", "25gp"),
("Ironspikes, each", "1sp"),
("Lantern", "10gp"),
("Mirror, hand-sized", "10gp"),
("Oil, 1 flask***", "2sp"),
("Pole, 10-foot", "15cp"),
("Rations, per day", "5cp"),
("Rope, 50'", "25cp"),
("Sack, large", "12cp"),
("Sack, small", "8cp"),
("Thieves' tools", "25gp"),
("Torch, each", "1cp"),
("Waterskin", "5sp"),
]
EQUIPMENT = {}
#: Process the initial list of equipment tuples into a dict of Equipment classes
for name, cost in _equipment:
kwargs = {}
title = name.split(", ")[0]
kwargs["title"] = title
kwargs["value"] = cost
if len(name.split(", ")) > 1:
quantity = name.split(", ")[1]
try:
digits = int("".join([c for c in quantity if c.isdigit()]))
        except ValueError:  # quantity string has no digits, e.g. "empty" or "each"
digits = None
if digits:
kwargs["quantity"] = digits
kwargs["unit"] = re.sub(r"[\d+\s]", "", quantity)
EQUIPMENT[title] = Equipment(**kwargs)
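# Hedged usage sketch (not part of the original file): looking up one parsed entry.
# Note that entries sharing a title (e.g. "Sack, large" and "Sack, small") overwrite
# each other, since the dict is keyed on the title alone.
if __name__ == "__main__":
    rope = EQUIPMENT["Rope"]
    print(rope.title, rope.value, rope.quantity, rope.unit)  # Rope 25cp 50 '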
|
sethwoodworth/crawl-classic
|
crawl_classic/equipment.py
|
equipment.py
|
py
| 1,581 |
python
|
en
|
code
| 3 |
github-code
|
6
|
35754868132
|
# Create a list of N elements filled with numbers from the range [-N, N].
# Find the product of the elements at the given indices. The indices are entered on one line, separated by spaces.
# n = 3 [-3, -2, -1, 0, 1, 2, 3] --> 0 2 3
# -3 * -1 * 0 = 0 Output: 0
N = int(input('Введите значение N = '))
position_1 = int(input('Введите номер позиции элемента 1:'))
position_2 = int(input('Введите номер позиции элемента 2:'))
list = []
for i in range(2*N+1):
if i < N:
list.append(-N+i)
elif i > N:
list.append(i-N)
else:
list.append(0)
print(list)
if not (1 <= position_1 <= 2*N + 1) or not (1 <= position_2 <= 2*N + 1):
print('как минимум одно значение позиции элемента за пределами [-N, N]')
else:
print(f"Произведение элементов в указанных позициях = {list[position_1-1]*list[position_2-1]}" )
|
Natalie-4/task2
|
4.py
|
4.py
|
py
| 1,130 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
17651848697
|
# coefficients in Zp
def Z_p(n, co_eff_lst):
co_eff_Zp = []
for lst in co_eff_lst:
co_eff_Zp.append([i % n for i in lst])
return co_eff_Zp
# generating polynomial
# return Tn(x) mod m
def Tn_Zm(n, m, x):
co_eff = co_eff_lst[n-1]
sum = 0
for i in co_eff:
sum += i * x**n
n = n-2
return sum % m
# check whether or not Zm -> Zm
def generating_function(n, m):
lst = []
for x in range(m): # elements in Zm
lst.append(Tn_Zm(n, m, x))
if sorted(lst) == [i for i in range(m)]: return "G"
else: return "N"
|
6taco-cat9/chebyshev_polynomials
|
generating_function.py
|
generating_function.py
|
py
| 580 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3883274800
|
from typing import List

class Solution:
def maxSubArray(self, nums: List[int]) -> int:
if not nums:
return 0
curSum = nums[0]
maxSum = curSum
for num in nums[1:]:
curSum = curSum + num if curSum > 0 else num
maxSum = maxSum if maxSum > curSum else curSum
return maxSum
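# Hedged usage sketch (not part of the original file): classic Kadane's-algorithm example.
if __name__ == "__main__":
    print(Solution().maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # 6
    print(Solution().maxSubArray([-3, -1, -2]))  # -1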
|
fatzero/Leetcode-Problems
|
1-100/53.maximum-subarray.py
|
53.maximum-subarray.py
|
py
| 343 |
python
|
en
|
code
| 0 |
github-code
|
6
|
780574891
|
from nltk.tokenize import TweetTokenizer
class LexiconFeatureExtractor:
def __init__(self, afinn_lexicon_file_path="resources/lexicons/AFINN-en-165.txt",
afinn_emoticon_lexicon_file_path="resources/lexicons/AFINN-emoticon-8.txt",
bing_liu_lexicon_file_path="resources/lexicons/BingLiu.txt",
mpqa_lexicon_file_path="resources/lexicons/mpqa.txt"):
print("Loading AFINN lexicons...")
self.afinn_lexicon = LexiconFeatureExtractor._read_standart_lexicon(afinn_lexicon_file_path)
self.afinn_emoticon_lexicon = LexiconFeatureExtractor._read_standart_lexicon(afinn_emoticon_lexicon_file_path)
print("Loading BingLiu lexicon...")
self.bingliu_lexicon = LexiconFeatureExtractor._read_standart_lexicon(bing_liu_lexicon_file_path)
print("Loading MPQA lexicon...")
self.mpqa_lexicon = LexiconFeatureExtractor._read_standart_lexicon(mpqa_lexicon_file_path)
print("Loading NRC - Hashtag - Emotion - Lexicon")
self.nrc_hash_emo_lexicon = LexiconFeatureExtractor \
._read_labeled_lexicon("resources/lexicons/NRC-Hashtag-Emotion-Lexicon-v0.2.txt")
print("Loading NRC - AffectIntensity - Lexicon")
self.nrc_affect_intensity_lexicon = LexiconFeatureExtractor \
._read_labeled_lexicon("resources/lexicons/NRC-AffectIntensity-Lexicon.txt")
print("Loading SentiStrength EmoticonLookupTable")
self.emoticon_lookup_lexicon = LexiconFeatureExtractor \
._read_standart_lexicon("resources/lexicons/EmoticonLookupTable.txt")
print("Loading SentiStrength EmotionLookupTable")
self.emotion_lookup_lexicon = LexiconFeatureExtractor \
._read_standart_lexicon("resources/lexicons/EmotionLookupTable.txt")
def extract_feature(self, input_txt):
res = [LexiconFeatureExtractor.calculate_score_word_based(self.afinn_lexicon, input_txt),
LexiconFeatureExtractor.calculate_score_word_based(self.afinn_emoticon_lexicon, input_txt),
LexiconFeatureExtractor.calculate_score_word_based(self.bingliu_lexicon, input_txt),
LexiconFeatureExtractor.calculate_score_word_based(self.mpqa_lexicon, input_txt)]
# NRC - Hashtag - Emotion - Lexicon
res += LexiconFeatureExtractor.calculate_score_labeled(self.nrc_hash_emo_lexicon, input_txt)
# NRC - Affect intensity - Lexicon
res += LexiconFeatureExtractor.calculate_multiscore(self.nrc_affect_intensity_lexicon, input_txt)
# SentiStrength - Emoticon - Lexicon
res.append(LexiconFeatureExtractor.calculate_score_word_based(self.emoticon_lookup_lexicon, input_txt))
# SentiStrength - Emotion - Lexicon
res.append(LexiconFeatureExtractor.calculate_score_word_based(self.emotion_lookup_lexicon, input_txt))
return res
@staticmethod
def _read_standart_lexicon(file_path, delimeter="\t"):
res = {}
with(open(file_path, "r")) as f:
for line in f:
columns = line.strip().split(delimeter)
if len(columns) > 1:
res[" ".join(columns[:-1]).strip(" ")] = float(columns[-1])
return res
@staticmethod
def _read_multi_score_lexicon(file_path, delimeter="\t", ):
res = {}
with(open(file_path, "r")) as f:
for line in f:
scores = []
columns = line.strip().split(delimeter)
for i in range(1, len(columns)):
scores.append(float(columns[i]))
res[columns[0]] = scores
return res
@staticmethod
def _read_labeled_lexicon(file_path, delimeter="\t",
label_index=0, feature_index=1, score_index=2):
res = {}
with(open(file_path, "r")) as f:
for line in f:
columns = line.strip().split(delimeter)
if len(columns) > 2:
if columns[label_index] not in res:
res[columns[label_index]] = {}
res[columns[label_index]][columns[feature_index]] = float(columns[score_index])
return res
@staticmethod
def calculate_score_word_based(lexicon, input_txt):
score = 0.0
input_words = [t.encode("utf-8") for t in TweetTokenizer().tokenize(input_txt)]
for k, v in lexicon.items():
if " " not in k and k in input_words:
score += v
elif " " in k and LexiconFeatureExtractor.contains_all(k, input_words):
score += v
return score
@staticmethod
def calculate_multiscore(lexicon, input_txt, score_count=4):
res = [0.0 for _ in range(score_count)]
input_words = [t.encode("utf-8") for t in TweetTokenizer().tokenize(input_txt)]
for label, d in lexicon.items():
for k, v in d.items():
scores = []
if " " not in k and k in input_words:
scores.append(v)
elif " " in k and LexiconFeatureExtractor.contains_all(k, input_words):
scores.append(v)
for i in range(len(scores)):
res[i] += scores[i]
return res
@staticmethod
def calculate_score_labeled(lexicon, input_txt):
res = []
score = 0.0
input_words = [t.encode("utf-8") for t in TweetTokenizer().tokenize(input_txt)]
for label, d in lexicon.items():
for k, v in d.items():
score = 0.0
if " " not in k and k in input_words:
score += v
elif " " in k and LexiconFeatureExtractor.contains_all(k, input_words):
score += v
res.append(score)
return res
@staticmethod
def contains_all(words1, words2):
for w in words1.split():
if w not in words2:
return False
return True
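# Minimal usage sketch (illustrative only; assumes nltk is installed and the lexicon
# files exist at the default paths under resources/lexicons/):
if __name__ == "__main__":
    extractor = LexiconFeatureExtractor()
    features = extractor.extract_feature("I love this so much :)")
    print(len(features), features)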
|
erayyildiz/AffectInTweets
|
src/lexicon_features.py
|
lexicon_features.py
|
py
| 5,989 |
python
|
en
|
code
| 3 |
github-code
|
6
|
12830949060
|
# Definition for a Node.
from collections import deque
from typing import List
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
class Solution:
def preorder(self, root: Node) -> List[int]:
output = []
if root is None:
return output
def dps(node: Node):
output.append(node.val)
if node.children:
for child in node.children:
dps(child)
dps(root)
return output
def preorder_iter(self, root: Node) -> List[int]:
output = []
if root is None:
return output
stack = deque()
stack.append(root)
while stack:
popped_node = stack.pop()
output.append(popped_node.val)
if popped_node.children:
for child in reversed(popped_node.children):
stack.append(child)
return output
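# Small local check (not part of the LeetCode stub): build the tree
#   1 -> [3 -> [5, 6], 2, 4]
# and confirm both traversals return [1, 3, 5, 6, 2, 4].
if __name__ == "__main__":
    root = Node(1, [Node(3, [Node(5), Node(6)]), Node(2), Node(4)])
    s = Solution()
    assert s.preorder(root) == s.preorder_iter(root) == [1, 3, 5, 6, 2, 4]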
|
theRobertSan/LeetCode-Solutions-Python
|
589.py
|
589.py
|
py
| 996 |
python
|
en
|
code
| 1 |
github-code
|
6
|
9264213572
|
import mne
import argparse
import numpy as np
from config import fname
# Handle command line arguments
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('subject', metavar='sub###', type=int, help='The subject to process')
args = parser.parse_args()
subject = args.subject
print('Processing subject:', subject)
report = mne.open_report(fname.report(subject=subject))
# Fit ICA to the continuous data
raw_detrended = mne.io.read_raw_fif(fname.raw_detrend(subject=subject))
ica = mne.preprocessing.ICA(n_components=100).fit(raw_detrended)
# Get ICA components that capture eye blinks and heart beats
eog_epochs = mne.preprocessing.create_eog_epochs(raw_detrended)
_, eog_scores = ica.find_bads_eog(eog_epochs)
eog_bads = list(np.flatnonzero(abs(eog_scores) > 0.2))
ecg_epochs = mne.preprocessing.create_ecg_epochs(raw_detrended)
ecg_bads, ecg_scores = ica.find_bads_ecg(ecg_epochs)
ica.exclude = eog_bads + ecg_bads
print(eog_bads)
print(ecg_bads)
if len(eog_bads) > 0:
report.add_figs_to_section(ica.plot_scores(eog_scores), 'Correlation between ICA components and EOG channel', 'ICA', replace=True)
report.add_figs_to_section(ica.plot_properties(eog_epochs, picks=eog_bads), ['Properties of EOG component %02d' % e for e in eog_bads], 'ICA', replace=True)
if len(ecg_bads) > 0:
report.add_figs_to_section(ica.plot_scores(ecg_scores), 'Correlation between ICA components and ECG channel', 'ICA', replace=True)
report.add_figs_to_section(ica.plot_properties(ecg_epochs, picks=ecg_bads), ['Properties of ECG component %02d' % e for e in ecg_bads], 'ICA', replace=True)
report.add_figs_to_section(ica.plot_overlay(eog_epochs.average()), 'EOG signal removed by ICA', 'ICA', replace=True)
report.add_figs_to_section(ica.plot_overlay(ecg_epochs.average()), 'ECG signal removed by ICA', 'ICA', replace=True)
ica.save(fname.ica(subject=subject))
report.save(fname.report(subject=subject), overwrite=True, open_browser=False)
report.save(fname.report_html(subject=subject), overwrite=True, open_browser=False)
|
wmvanvliet/beamformer_simulation
|
megset/03_ica.py
|
03_ica.py
|
py
| 2,047 |
python
|
en
|
code
| 4 |
github-code
|
6
|
7759954850
|
'''
Check the industry type for a business registration number
'''
import csv
import json
from urllib.request import urlopen
from urllib import parse
import datetime as dt
import pandas as pd
import numpy as np
import re
# 대신정보통신 4088118945
# 대보정보통신 1358119406
bizNo = '2118108009'
# Public Procurement Service (조달청) user information service
'''
> parameters (query conditions)
- bizno : business registration number
> numOfRows must be smaller than totalCount
'''
url = 'http://apis.data.go.kr/1230000/UsrInfoService/getPrcrmntCorpIndstrytyInfo'
queryParams = '?' + parse.urlencode({ parse.quote_plus('serviceKey') : 'B1CsUiO26Y56VDOKIParM6z394FXvTQC0rafsREBzSnOl8Cc1PUFY98LOcqKq5OahD5s2AhvszA2AIIYj0KXvg==',
parse.quote_plus('pageNo') : '1',
parse.quote_plus('numOfRows') : 100,
parse.quote_plus('type') : 'json' ,
parse.quote_plus('bizno') : bizNo
})
# set_API & get_data -> openAPI & parameters
response = urlopen(url + queryParams)
data = response.read()
JSON_object = json.loads(data.decode('utf-8'))
'''
"bizno": "1048118820",
"indstrytyNm": "엔지니어링사업(프로젝트매니지먼트)",
"indstrytyCd": "7309",
"rgstDt": "2011-10-31 00:00:00",
"vldPrdExprtDt": "",
"systmRgstDt": "2014-06-30 16:12:45",
"chgDt": "",
"indstrytyStatsNm": "",
"rprsntIndstrytyYn": "N",
"systmChgDt": "2014-06-30 16:12:45"
'''
result = pd.DataFrame(JSON_object["response"]["body"]["items"], columns = ["bizno",
"indstrytyNm",
"rgstDt",
"vldPrdExprtDt"])
#result
# init Series
s = []
for index, item in result.iterrows():
if len(item['vldPrdExprtDt']) > 0:
print(item['vldPrdExprtDt'] + ' -> ' + str(len(item['vldPrdExprtDt'])))
print(item['indstrytyNm'])
        print(re.sub(r'\(.*?\)', '', item['indstrytyNm']))
        print('\n')
        s.append(re.sub(r'\(.*?\)', '', item['indstrytyNm']))
#s
# using naive method
# to remove duplicated
# from list
res = []
for i in s:
if i not in res:
res.append(i)
#res
#
print('-'.join(res))
|
starrything/openapi-g2b
|
04_g2b_user_service.py
|
04_g2b_user_service.py
|
py
| 2,354 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32010864325
|
def deep_find(data, key):
if key in data:
return data[key]
for k, v in data.items():
if isinstance(v, dict):
item = deep_find(v, key)
if item is not None:
return item
# elif isinstance(v, list):
# for d in v:
# for result in deep_find(d, key):
# return result
my_data = {"id": "abcde",
"key1": "blah",
"key2": "blah blah",
"nestedlist": [
{"id": "qwerty",
"nestednestedlist": [
{"id": "xyz", "keyA": "blah blah blah"},
{"id": "fghi", "keyZ": "blah blah blah"}],
"anothernestednestedlist": [
{"id": "asdf", "keyQ": "blah blah"},
{"id": "yuiop", "keyW": "blah"}]}]}
print(deep_find(my_data, 'id'))
|
1oss1ess/HackBulgaria-Programming101-Python-2018
|
week-8/Graphs/t1.py
|
t1.py
|
py
| 882 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14434842726
|
import numpy as np
m = [[0] * 4 for i in range(4)]
dx, dy = [0, 1, 0, -1], [1, 0, -1, 0]
x, y, c = 0, -1, 1
for i in range(4 + 4 - 2):
for j in range((4 + 4 - i) // 2):
x += dx[i % 4]
y += dy[i % 4]
m[x][y] = c
c += 1
BOARD_LENGTH = 4
GOAL_STATE = np.array(m)
def goal_on_row(num, i):
for j in range(BOARD_LENGTH):
if num == GOAL_STATE[i][j]:
return j
def goal_on_column(num, j):
for i in range(BOARD_LENGTH):
if num == GOAL_STATE[i][j]:
return i
def linear_conflict_heuristic(state):
result = 0
for i in range(BOARD_LENGTH):
for j in range(BOARD_LENGTH):
num = state[i][j]
if num != 0:
position = goal_on_row(num, i)
if position is not None:
if position <= j:
for k in reversed(range(j)):
num2 = state[i][k]
if num2 != 0:
position2 = goal_on_row(num2, i)
if position2 is not None:
if position < position2:
result += 1
else:
for k in range(j + 1, BOARD_LENGTH):
num2 = state[i][k]
if num2 != 0:
position2 = goal_on_row(num2, i)
if position2 is not None:
if position > position2:
result += 1
position = goal_on_column(num, j)
if position is not None:
if position <= i:
for k in reversed(range(i)):
num2 = state[k][j]
if num2 != 0:
position2 = goal_on_column(num2, j)
if position2 is not None:
if position < position2:
result += 1
else:
for k in range(i + 1, BOARD_LENGTH):
num2 = state[k][j]
if num2 != 0:
position2 = goal_on_column(num2, j)
if position2 is not None:
if position > position2:
result += 1
return result
def main():
state = np.array([[11, 3, 4, 2], [14, 8, 12, 9], [5, 0, 13, 6], [7, 15, 1, 10]])
result = linear_conflict_heuristic(state)
print(f"RESULT = {result}")
if __name__ == '__main__':
main()
|
cuzureau/n_puzzle
|
stack.py
|
stack.py
|
py
| 1,922 |
python
|
en
|
code
| 1 |
github-code
|
6
|
41160046527
|
from django import forms
from django.core.mail import EmailMessage
from .models import Householdaccountbook
# class HouseholdaccountbookForm(forms.ModelForm):
# class Meta:
# model = Householdaccountbook
# fields = ["pref","choice",]
class TableCreateForm(forms.ModelForm):
class Meta:
model = Householdaccountbook
fields = ('title', 'choice', 'date', 'genre', 'quanity', 'money', 'content' )
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field in self.fields.values():
field.widget.attrs['class'] = 'form-control'
# self.fields['date'].widget.attrs['class'] = 'form-control'
# self.fields['date'].widget.attrs['id'] = "inputdate"
        self.fields['date'].widget.attrs['placeholder'] = "e.g. 2000-12-05"
|
HaruShim/Sotuken
|
実装/新満/table一覧表示/table/forms.py
|
forms.py
|
py
| 777 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30857567174
|
import pyautogui
import webbrowser as web
import time
msg = input('enter message to send: ')
times = int(input('enter the number of times to send the message: '))
# win_chrome_path = 'C:\Program Files\Google\Chrome\Application\chrome.exe %s'
# web.get(win_chrome_path).open('web.whatsapp.com')
web.open('web.whatsapp.com')
time.sleep(30)
for i in range(times):
for char in msg:
pyautogui.press('space' if char==' ' else char)
pyautogui.press('enter')
|
Abdul-Hannan12/Whatsapp-Automation
|
spam_message.py
|
spam_message.py
|
py
| 471 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5619572961
|
from decimal import Decimal
class Account:
"""
Account class maintain information and balance about each account
******Arguments******
firstname:string
lastname:string
national id number:string with 10 numbers
birthdate:string in the form of ##/##/####
balance:integer greater than or equal to zero
"""
accountNumber = 0
accountsList = []
def __init__(self, firstname, lastname, national_id_number, birthdate, balance):
self.firstname = firstname
self.lastname = lastname
self.national_id_number = national_id_number
self.birthdate = birthdate
self.balance = balance
Account.accountNumber += 1
self.account_number = Account.accountNumber
self.stocks = {} # {stock symbol: [shares, value]}
Account.accountsList.append(self)
def __str__(self):
return f"{self.account_number}) {self.firstname:<9} {self.lastname:<9} Id:[{self.national_id_number}] " \
f" Birthdate:[{self.birthdate}] Balance = {self.balance:,}"
def buy_shares(self, stock, shares):
"""
function to buy some shares from specific stock
:param stock: Stock object
:param shares: positive integer (representing Number of shares purchased)
:return: string (Notify transaction completed successfully)
"""
        if type(shares) != int:
            raise TypeError("Shares must be integer")
        # compute the purchase cost only after the share count is validated as an int
        value = shares * stock.open_val
        if shares > stock.shares_remain:
raise ValueError(f"Shares value must be lower than remaining shares for stock ({stock.shares_remain})")
elif shares < 1:
raise ValueError("Shares must be positive integer")
elif value > self.balance:
raise ValueError("Not enough money for account to buy these shares")
elif stock.symbol not in self.stocks:
self.stocks[stock.symbol] = [shares, value]
else:
self.stocks[stock.symbol][0] += shares
self.stocks[stock.symbol][1] += value
self.balance = int(self.balance - value)
stock.shares_remain -= shares
return f"{self.firstname} {self.lastname} bought {shares} {stock.symbol} shares successfully"
def sell_shares(self, stock, shares):
"""
function to sell some shares from specific stock
:param stock: Stock object
:param shares: positive integer (representing Number of shares sold)
:return: string (Notify transaction completed successfully)
"""
        # validate before any lookups or arithmetic, so the intended errors are raised
        # instead of a KeyError/TypeError from the expressions themselves
        if stock.symbol not in self.stocks:
            raise ValueError("Account Doesn't have this stock")
        if type(shares) != int:
            raise TypeError("Shares must be integer")
        sellValue = shares * stock.open_val
        cShares, cValue = self.stocks[stock.symbol]
        if shares < 1:
raise ValueError("Shares must be positive integer")
elif shares > cShares:
raise ValueError(f"Not enough shares for account to sell ({cShares})")
else:
self.stocks[stock.symbol][0] -= shares
self.stocks[stock.symbol][1] -= sellValue
self.balance = int(self.balance + sellValue)
stock.shares_remain += shares
return f"{self.firstname} {self.lastname} sold {shares} {stock.symbol} shares successfully"
def get_shares(self):
"""
function to print shares that belongs to account
:return: string (Total shares and values)
"""
if not self.stocks:
return f"{self.firstname} {self.lastname} doesn't have any share"
counter = 0
total_shares = 0
total_value = 0
print(f"{self.firstname} {self.lastname} shares:")
for k, v in self.stocks.items():
total_shares += v[0]
total_value += v[1]
counter += 1
print(f"[{counter}] {k:<6} Shares:{v[0]:<7,} Value = {v[1]:,.0f}")
return f"Total shares:{total_shares:<7,} Total value = {total_value:,.0f}"
@property
def firstname(self):
return self.__firstname
@firstname.setter
def firstname(self, value):
if type(value) != str:
raise TypeError("Firstname must be string")
elif not value.replace(" ", "").isalpha():
raise ValueError("Firstname must be consists of only alphabetic characters")
else:
self.__firstname = value
@property
def lastname(self):
return self.__lastname
@lastname.setter
def lastname(self, value):
        if type(value) != str:
            raise TypeError("Lastname must be string")
        elif not value.replace(" ", "").isalpha():
            raise ValueError("Lastname must consist of only alphabetic characters")
else:
self.__lastname = value
@property
def national_id_number(self):
return self.__national_id_number
@national_id_number.setter
def national_id_number(self, value):
if type(value) != str:
# I choose string because it is not possible to place 0 left side of the id numbers if they were integer
raise TypeError("National id number must be string")
elif len(value) != 10 or not value.isdigit():
raise ValueError("National id number must be consists of 10 numbers")
else:
self.__national_id_number = value
@property
def birthdate(self):
return self.__birthdate
@birthdate.setter
def birthdate(self, value):
if type(value) != str:
raise TypeError("Birthdate must be string")
elif len(value) != 10 or not value.replace("/", "").isdigit() or not value[2] == value[5] == "/":
raise ValueError("Birthdate must be in the form of ##/##/####, where each # is a digit")
else:
self.__birthdate = value
@property
def balance(self):
return self.__balance
@balance.setter
def balance(self, value):
if type(value) != int:
raise TypeError("Balance must be integer")
elif not value >= 0:
raise ValueError("Balance must be greater than or equal to zero")
else:
self.__balance = Decimal(value)
def print_account_list():
"""
Function to print all existing accounts
"""
for account in Account.accountsList:
print(account)
class Stock:
"""
    Stock class maintains current information such as open price, volume, and number of shares for each company
******Arguments******
symbol:string (abbreviation for company name)
open val:float greater than zero (price for each stock that belongs to the company)
volume:integer greater than zero (price for all of stocks that belongs to the company)
date:string
"""
companyNumber = 0
stocksList = []
def __init__(self, symbol, open_val, volume, date):
self.symbol = symbol
self.open_val = open_val
self.volume = volume
self.date = date
self.shares = self.volume // self.open_val
self.shares_remain = self.shares
Stock.companyNumber += 1
self.company_number = Stock.companyNumber
Stock.stocksList.append(self)
def __str__(self):
return f"{self.company_number}) {self.symbol:<6} Open = {self.open_val:<11,.2f} Volume = {self.volume:<13,} " \
f"Total shares:{self.shares:<7,} Sold shares:{self.shares - self.shares_remain:<7,} date:{self.date}"
@property
def symbol(self):
return self.__symbol
@symbol.setter
def symbol(self, value):
if type(value) != str:
raise TypeError("symbol must be string")
self.__symbol = value
@property
def open_val(self):
return self.__open_val
@open_val.setter
def open_val(self, value):
if type(value) != float:
raise TypeError("Open value must be float")
elif not value > 0:
raise ValueError("Open value must be greater than zero")
else:
self.__open_val = Decimal(value)
@property
def volume(self):
return self.__volume
@volume.setter
def volume(self, value):
if type(value) != int:
raise TypeError("Volume must be integer")
elif not value > 0:
raise ValueError("Volume must be greater than zero")
else:
self.__volume = Decimal(value)
@property
def date(self):
return self.__date
@date.setter
def date(self, value):
if type(value) != str:
raise TypeError("Date must be string")
self.__date = value
def print_stock_list():
"""
Function to print all existing stocks
"""
for stock in Stock.stocksList:
print(stock)
account1 = Account("Ali", "Ronaldo", "0045375980", "01/10/2000", 15000)
account2 = Account("majid", "messy", "0025328985", "10/16/2002", 10000)
print_account_list()
amazon = Stock("AMZN", 2181.3798828125, 4676700, "05/13/2022")
facebook = Stock("FB", 192.580001831054, 24523500, "05/13/2022")
tesla = Stock("TSLA", 773.47998046875, 30651800, "05/13/2022")
google = Stock("GOOGLE", 2290.65991210937, 1747900, "05/13/2022")
apple = Stock("AAPL", 144.58999633789, 113787000, "05/13/2022")
print_stock_list()
print(account1)
account1.buy_shares(apple, 10)
account1.buy_shares(apple, 5)
account1.buy_shares(facebook, 11)
account1.sell_shares(facebook, 2)
account1.get_shares()
print(account1)
print(apple)
|
MortezaGhandchi/stock-oop-pandas
|
Stock_Project_AP.py
|
Stock_Project_AP.py
|
py
| 9,814 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7921803967
|
year = 1799
birthday = 'June 6'
year_people = int(input('Do you know the year A.S. Pushkin was born?: '))
while year_people != year:
    year_people = int(input('Try again: '))
if year_people == year:
    birthday_people = input('And his birthday?: ')
    while birthday_people != birthday:
        birthday_people = input('Try again to recall his birthday: ')
print('Correct')
|
Lyubov-Tuz/basic_python
|
borndayforewer.py
|
borndayforewer.py
|
py
| 503 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
10584185740
|
import numpy as np
def gaussseidel_1d(rho, hx, epsilon, maxiter, maxerr):
if rho.ndim != 1:
raise ValueError("rho must be of shape=(n,)")
phi = np.zeros(shape=rho.shape, dtype=rho.dtype)
nx = rho.shape[0]
mr = hx * hx / epsilon
for iteration in range(maxiter):
error = 0.0
for x in range(nx):
phi_x = (
phi[(x - 1) % nx] + \
phi[(x + 1) % nx] + \
rho[x] * mr) / 2.0
error += (phi[x] - phi_x)**2
phi[x] = phi_x
if error < maxerr:
break
return phi
def gaussseidel_2d(rho, hx, hy, epsilon, maxiter, maxerr):
if rho.ndim != 2:
raise ValueError("rho must be of shape=(nx, ny)")
phi = np.zeros(shape=rho.shape, dtype=rho.dtype)
nx, ny = rho.shape
fx, fy = 1.0 / (hx * hx), 1.0 / (hy * hy)
mx = 0.5 * fx / (fx + fy)
my = 0.5 * fy / (fx + fy)
mr = 0.5 / (epsilon * (fx + fy))
for iteration in range(maxiter):
error = 0.0
for x in range(nx):
for y in range(ny):
phi_xy = (
phi[(x - 1) % nx, y] * mx + \
phi[(x + 1) % nx, y] * mx + \
phi[x, (y - 1) % ny] * my + \
phi[x, (y + 1) % ny] * my + \
rho[x, y] * mr)
error += (phi[x, y] - phi_xy)**2
phi[x, y] = phi_xy
if error < maxerr:
break
return phi
def gaussseidel_3d(rho, hx, hy, hz, epsilon, maxiter, maxerr):
if rho.ndim != 3:
raise ValueError("rho must be of shape=(nx, ny, nz)")
phi = np.zeros(shape=rho.shape, dtype=rho.dtype)
nx, ny, nz = rho.shape
fx, fy, fz = 1.0 / (hx * hx), 1.0 / (hy * hy), 1.0 / (hz * hz)
mx = 0.5 * fx / (fx + fy + fz)
my = 0.5 * fy / (fx + fy + fz)
mz = 0.5 * fz / (fx + fy + fz)
mr = 0.5 / (epsilon * (fx + fy + fz))
for iteration in range(maxiter):
error = 0.0
for x in range(nx):
for y in range(ny):
for z in range(nz):
phi_xyz = (
phi[(x - 1) % nx, y, z] * mx + \
phi[(x + 1) % nx, y, z] * mx + \
phi[x, (y - 1) % ny, z] * my + \
phi[x, (y + 1) % ny, z] * my + \
phi[x, y, (z - 1) % nz] * mz + \
phi[x, y, (z + 1) % nz] * mz + \
rho[x, y, z] * mr)
error += (phi[x, y, z] - phi_xyz)**2
phi[x, y, z] = phi_xyz
if error < maxerr:
break
return phi
def gaussseidel(rho, h, epsilon=1.0, maxiter=10000, maxerr=1e-15):
if rho.ndim == 1:
return gaussseidel_1d(rho, *h, epsilon, maxiter, maxerr)
elif rho.ndim == 2:
return gaussseidel_2d(rho, *h, epsilon, maxiter, maxerr)
elif rho.ndim == 3:
return gaussseidel_3d(rho, *h, epsilon, maxiter, maxerr)
else:
raise ValueError(
'gaussseidel expects rho with 1, 2 or 3 dimensions.')
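# Minimal usage sketch (not from the original repo): solve the periodic 1D Poisson
# equation for a single point charge on a 64-cell grid. The total charge is shifted
# to zero first, since the periodic problem only has a solution for a neutral system.
if __name__ == "__main__":
    rho = np.zeros(64)
    rho[32] = 1.0
    rho -= rho.mean()
    phi = gaussseidel(rho, h=(0.1,), epsilon=1.0, maxiter=20000, maxerr=1e-12)
    print(phi.min(), phi.max())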
|
LeonKlein/urban-broccoli
|
urbanbroccoli/gaussseidel.py
|
gaussseidel.py
|
py
| 3,089 |
python
|
en
|
code
| null |
github-code
|
6
|
28412951714
|
#!/usr/bin/env python
import sys, os, shlex
import multiprocessing as mp
import subprocess as sp
from pli.lib.util import log
def find_files(root):
outdir = 'tmpout'
for curdir, dirs, files in os.walk(root):
protein_fname = None
ligand_fname = None
for f in files:
if f.endswith('_protein.pdb'):
sym = f[1:3]
protein_fname = f'{curdir}/{f}'
elif f.endswith('_ligand.mol2'):
ligand_fname = f'{curdir}/{f}'
if protein_fname and ligand_fname:
bname = os.path.basename(protein_fname).split('_')[0]
sym = bname[1:3]
if not os.path.exists(f'{outdir}/{sym}/{bname}_ALP.pkl.gz'):
yield protein_fname, ligand_fname, sym
def worker(args):
protein_iname, ligand_iname, sym = args
cmd = f'python pli/bin/plifinder.py {protein_iname} {ligand_iname} -o tmpout/{sym}'
log(cmd)
proc = sp.Popen(shlex.split(cmd), universal_newlines=True, stdout=sp.PIPE, stderr=sp.STDOUT)
ret = proc.wait()
return ret, protein_iname, ligand_iname, sym
def main():
root = 'v2015'
pool = mp.Pool(mp.cpu_count())
count = 0
for ret, protein_iname, ligand_iname, sym in pool.imap(worker, find_files(root)):
if ret != 0:
log(f'!! Error {protein_iname} {ligand_iname}')
continue
count += 1
log(f'{count} {protein_iname} {ligand_iname} -> {sym}')
if __name__ == '__main__':
main()
|
rhara/plifinder
|
examples/plifinder_v2015.py
|
plifinder_v2015.py
|
py
| 1,504 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25272817830
|
import sys
from itertools import combinations
from collections import deque
def move(stage):
for i in range(M):
for j in range(N-1, 0, -1):
enemy[j][i] = enemy[j-1][i]
if stage == 0:
for i in range(M):
enemy[0][i] = 0
dr = [0, -1, 0]
dc = [-1, 0, 1]
N, M, D = map(int, sys.stdin.readline().split())
enemy = [list(map(int, sys.stdin.readline().split())) for _ in range(N)]
comb = list(combinations(range(N), 3))
visited = [[0] * M for _ in ' '*N]
shot = [[0] * M for _ in ' '*N]
q = deque()
shot_clear = deque()
result = 0
for stage in range(N):
max_kill = 0
for archers in comb:
kill = 0
for archer in archers:
q.append((N, archer))
dist = 0
len_q = len(q)
cnt = 0
while q:
if len_q == cnt:
len_q = len(q)
cnt = 0
dist += 1
if dist == D:
q.clear()
break
cnt += 1
r, c = q.popleft()
for d in range(3):
nr = r + dr[d]
nc = c + dc[d]
if 0 <= nr < N and 0 <= nc < M:
if enemy[nr][nc]:
if not shot[nr][nc]:
kill += 1
shot[nr][nc] = 1
shot_clear.append((nr, nc))
q.clear()
break
q.append((nr, nc))
while shot_clear:
r, c = shot_clear.popleft()
enemy[r][c] = 0
shot[r][c] = 0
if kill > max_kill:
max_kill = kill
result += max_kill
move(stage)
print(result)
|
powerticket/algorithm
|
Baekjoon/17135.py
|
17135.py
|
py
| 1,842 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19386134805
|
import argparse
import json
import os
import sys
import numpy as np
import torch
from plyfile import PlyData, PlyElement
from torch.utils.data import DataLoader
from tqdm import tqdm
sys.path.append(os.path.join(os.getcwd())) # HACK add the root folder
from utils.pc_utils import write_ply_rgb
from utils.box_util import get_3d_box
from data.scannet.model_util_scannet import ScannetDatasetConfig
from lib.config import CONF
from lib.scan2cap_dataset import Scan2CapDataset
from models.pointnet_extractor_module import PointNetExtractor
# constants
SCANNET_ROOT = "../data/scannet/scans/" # TODO point this to your scannet data
SCANNET_MESH = os.path.join(SCANNET_ROOT, "{}/{}_vh_clean_2.ply") # scene_id, scene_id
SCANNET_META = os.path.join(SCANNET_ROOT, "{}/{}.txt") # scene_id, scene_id
MEAN_COLOR_RGB = np.array([109.8, 97.2, 83.8])
DC = ScannetDatasetConfig()
SCANREFER_TRAIN = json.load(open(os.path.join(CONF.PATH.DATA, "ScanRefer_filtered_train.json")))
SCANREFER_VAL = json.load(open(os.path.join(CONF.PATH.DATA, "ScanRefer_filtered_val.json")))
VOCABULARY = json.load(open(os.path.join(CONF.PATH.DATA, "vocabulary.json"), "r"))
global_correct = 0
global_total = 0
def get_dataloader(args, scanrefer, all_scene_list, split, config, augment):
dataset = Scan2CapDataset(
scanrefer=scanrefer,
scanrefer_all_scene=all_scene_list,
vocabulary=VOCABULARY,
split=split,
num_points=args.num_points,
use_height=(not args.no_height),
use_color=args.use_color,
use_normal=args.use_normal,
use_multiview=args.use_multiview,
augment=augment
)
dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False, num_workers=4, drop_last=True)
return dataset, dataloader
def get_model(args):
# load model
input_channels = int(args.use_multiview) * 128 + int(args.use_normal) * 3 + int(args.use_color) * 3 + int(not args.no_height)
model = PointNetExtractor(pretrain_mode=True, feature_channels=input_channels).cuda()
path = os.path.join(CONF.PATH.OUTPUT, args.folder, "model.pth")
model.load_state_dict(torch.load(path), strict=False)
model.eval()
return model
def get_scanrefer(args):
scanrefer = SCANREFER_TRAIN if args.use_train else SCANREFER_VAL
all_scene_list = sorted(list(set([data["scene_id"] for data in scanrefer])))
if args.scene_id:
assert args.scene_id in all_scene_list, "The scene_id is not found"
scene_list = [args.scene_id]
else:
scene_list = sorted(list(set([data["scene_id"] for data in scanrefer])))
scanrefer = [data for data in scanrefer if data["scene_id"] in scene_list]
return scanrefer, scene_list
def write_ply(verts, colors, indices, output_file):
if colors is None:
colors = np.zeros_like(verts)
if indices is None:
indices = []
file = open(output_file, 'w')
file.write('ply \n')
file.write('format ascii 1.0\n')
file.write('element vertex {:d}\n'.format(len(verts)))
file.write('property float x\n')
file.write('property float y\n')
file.write('property float z\n')
file.write('property uchar red\n')
file.write('property uchar green\n')
file.write('property uchar blue\n')
file.write('element face {:d}\n'.format(len(indices)))
file.write('property list uchar uint vertex_indices\n')
file.write('end_header\n')
for vert, color in zip(verts, colors):
file.write("{:f} {:f} {:f} {:d} {:d} {:d}\n".format(vert[0], vert[1], vert[2] , int(color[0]*255), int(color[1]*255), int(color[2]*255)))
for ind in indices:
file.write('3 {:d} {:d} {:d}\n'.format(ind[0], ind[1], ind[2]))
file.close()
def write_bbox(bbox, mode, output_file):
"""
bbox: (cx, cy, cz, lx, ly, lz, r), center and length in three axis, the last is the rotation
output_file: string
"""
def create_cylinder_mesh(radius, p0, p1, stacks=10, slices=10):
import math
def compute_length_vec3(vec3):
return math.sqrt(vec3[0]*vec3[0] + vec3[1]*vec3[1] + vec3[2]*vec3[2])
def rotation(axis, angle):
rot = np.eye(4)
c = np.cos(-angle)
s = np.sin(-angle)
t = 1.0 - c
axis /= compute_length_vec3(axis)
x = axis[0]
y = axis[1]
z = axis[2]
rot[0,0] = 1 + t*(x*x-1)
rot[0,1] = z*s+t*x*y
rot[0,2] = -y*s+t*x*z
rot[1,0] = -z*s+t*x*y
rot[1,1] = 1+t*(y*y-1)
rot[1,2] = x*s+t*y*z
rot[2,0] = y*s+t*x*z
rot[2,1] = -x*s+t*y*z
rot[2,2] = 1+t*(z*z-1)
return rot
verts = []
indices = []
diff = (p1 - p0).astype(np.float32)
height = compute_length_vec3(diff)
for i in range(stacks+1):
for i2 in range(slices):
theta = i2 * 2.0 * math.pi / slices
pos = np.array([radius*math.cos(theta), radius*math.sin(theta), height*i/stacks])
verts.append(pos)
for i in range(stacks):
for i2 in range(slices):
i2p1 = math.fmod(i2 + 1, slices)
indices.append( np.array([(i + 1)*slices + i2, i*slices + i2, i*slices + i2p1], dtype=np.uint32) )
indices.append( np.array([(i + 1)*slices + i2, i*slices + i2p1, (i + 1)*slices + i2p1], dtype=np.uint32) )
transform = np.eye(4)
va = np.array([0, 0, 1], dtype=np.float32)
vb = diff
vb /= compute_length_vec3(vb)
axis = np.cross(vb, va)
angle = np.arccos(np.clip(np.dot(va, vb), -1, 1))
if angle != 0:
if compute_length_vec3(axis) == 0:
dotx = va[0]
if (math.fabs(dotx) != 1.0):
axis = np.array([1,0,0]) - dotx * va
else:
axis = np.array([0,1,0]) - va[1] * va
axis /= compute_length_vec3(axis)
transform = rotation(axis, -angle)
transform[:3,3] += p0
verts = [np.dot(transform, np.array([v[0], v[1], v[2], 1.0])) for v in verts]
verts = [np.array([v[0], v[1], v[2]]) / v[3] for v in verts]
return verts, indices
def get_bbox_edges(bbox_min, bbox_max):
def get_bbox_verts(bbox_min, bbox_max):
verts = [
np.array([bbox_min[0], bbox_min[1], bbox_min[2]]),
np.array([bbox_max[0], bbox_min[1], bbox_min[2]]),
np.array([bbox_max[0], bbox_max[1], bbox_min[2]]),
np.array([bbox_min[0], bbox_max[1], bbox_min[2]]),
np.array([bbox_min[0], bbox_min[1], bbox_max[2]]),
np.array([bbox_max[0], bbox_min[1], bbox_max[2]]),
np.array([bbox_max[0], bbox_max[1], bbox_max[2]]),
np.array([bbox_min[0], bbox_max[1], bbox_max[2]])
]
return verts
box_verts = get_bbox_verts(bbox_min, bbox_max)
edges = [
(box_verts[0], box_verts[1]),
(box_verts[1], box_verts[2]),
(box_verts[2], box_verts[3]),
(box_verts[3], box_verts[0]),
(box_verts[4], box_verts[5]),
(box_verts[5], box_verts[6]),
(box_verts[6], box_verts[7]),
(box_verts[7], box_verts[4]),
(box_verts[0], box_verts[4]),
(box_verts[1], box_verts[5]),
(box_verts[2], box_verts[6]),
(box_verts[3], box_verts[7])
]
return edges
def get_bbox_corners(bbox):
centers, lengths = bbox[:3], bbox[3:6]
xmin, xmax = centers[0] - lengths[0] / 2, centers[0] + lengths[0] / 2
ymin, ymax = centers[1] - lengths[1] / 2, centers[1] + lengths[1] / 2
zmin, zmax = centers[2] - lengths[2] / 2, centers[2] + lengths[2] / 2
corners = []
corners.append(np.array([xmax, ymax, zmax]).reshape(1, 3))
corners.append(np.array([xmax, ymax, zmin]).reshape(1, 3))
corners.append(np.array([xmin, ymax, zmin]).reshape(1, 3))
corners.append(np.array([xmin, ymax, zmax]).reshape(1, 3))
corners.append(np.array([xmax, ymin, zmax]).reshape(1, 3))
corners.append(np.array([xmax, ymin, zmin]).reshape(1, 3))
corners.append(np.array([xmin, ymin, zmin]).reshape(1, 3))
corners.append(np.array([xmin, ymin, zmax]).reshape(1, 3))
corners = np.concatenate(corners, axis=0) # 8 x 3
return corners
radius = 0.03
offset = [0,0,0]
verts = []
indices = []
colors = []
corners = get_bbox_corners(bbox)
box_min = np.min(corners, axis=0)
box_max = np.max(corners, axis=0)
palette = {
0: [0, 255, 0], # gt
1: [0, 0, 255] # pred
}
chosen_color = palette[mode]
edges = get_bbox_edges(box_min, box_max)
for k in range(len(edges)):
cyl_verts, cyl_ind = create_cylinder_mesh(radius, edges[k][0], edges[k][1])
cur_num_verts = len(verts)
cyl_color = [[c / 255 for c in chosen_color] for _ in cyl_verts]
cyl_verts = [x + offset for x in cyl_verts]
cyl_ind = [x + cur_num_verts for x in cyl_ind]
verts.extend(cyl_verts)
indices.extend(cyl_ind)
colors.extend(cyl_color)
write_ply(verts, colors, indices, output_file)
def read_mesh(filename):
""" read XYZ for each vertex.
"""
assert os.path.isfile(filename)
with open(filename, 'rb') as f:
plydata = PlyData.read(f)
num_verts = plydata['vertex'].count
vertices = np.zeros(shape=[num_verts, 6], dtype=np.float32)
vertices[:,0] = plydata['vertex'].data['x']
vertices[:,1] = plydata['vertex'].data['y']
vertices[:,2] = plydata['vertex'].data['z']
vertices[:,3] = plydata['vertex'].data['red']
vertices[:,4] = plydata['vertex'].data['green']
vertices[:,5] = plydata['vertex'].data['blue']
return vertices, plydata['face']
def export_mesh(vertices, faces):
new_vertices = []
for i in range(vertices.shape[0]):
new_vertices.append(
(
vertices[i][0],
vertices[i][1],
vertices[i][2],
vertices[i][3],
vertices[i][4],
vertices[i][5],
)
)
vertices = np.array(
new_vertices,
dtype=[
("x", np.dtype("float32")),
("y", np.dtype("float32")),
("z", np.dtype("float32")),
("red", np.dtype("uint8")),
("green", np.dtype("uint8")),
("blue", np.dtype("uint8"))
]
)
vertices = PlyElement.describe(vertices, "vertex")
return PlyData([vertices, faces])
def align_mesh(scene_id):
vertices, faces = read_mesh(SCANNET_MESH.format(scene_id, scene_id))
for line in open(SCANNET_META.format(scene_id, scene_id)).readlines():
if 'axisAlignment' in line:
axis_align_matrix = np.array([float(x) for x in line.rstrip().strip('axisAlignment = ').split(' ')]).reshape((4, 4))
break
# align
pts = np.ones((vertices.shape[0], 4))
pts[:, :3] = vertices[:, :3]
pts = np.dot(pts, axis_align_matrix.T)
vertices[:, :3] = pts[:, :3]
mesh = export_mesh(vertices, faces)
return mesh
def dump_results(args, scanrefer, data, config):
dump_dir = os.path.join(CONF.PATH.OUTPUT, args.folder, "vis")
os.makedirs(dump_dir, exist_ok=True)
# from inputs
ids = data['scan_idx'].detach().cpu().numpy()
point_clouds = data['point_clouds'].cpu().numpy()
batch_size = point_clouds.shape[0]
pcl_color = data["pcl_color"].detach().cpu().numpy()
if args.use_color:
pcl_color = (pcl_color * 256 + MEAN_COLOR_RGB).astype(np.int64)
# from network outputs
# detection
# ground truth
gt_center = data['ref_center_label'].cpu().numpy() # (B,MAX_NUM_OBJ,3)
gt_size_residual = data['ref_size_residual_label'].cpu().numpy() # B,K2,3
# reference
nyu40_label = data["ref_nyu40_label"].detach().cpu().numpy()
prediction = torch.argmax(data["ref_obj_cls_scores"], dim=1).detach().cpu().numpy() + 1
global global_correct
global global_total
global_correct += np.sum(nyu40_label == prediction)
global_total += batch_size
print("NYU40_LABEL", [DC.nyu40id2label[i] for i in list(nyu40_label)])
print("PREDICTION", [DC.nyu40id2label[i] for i in list(prediction)])
print("ACC", global_correct / global_total)
for i in range(batch_size):
# basic info
idx = ids[i]
scene_id = scanrefer[idx]["scene_id"]
object_id = scanrefer[idx]["object_id"]
object_name = scanrefer[idx]["object_name"]
ann_id = scanrefer[idx]["ann_id"]
# scene_output
scene_dump_dir = os.path.join(dump_dir, scene_id)
if not os.path.exists(scene_dump_dir):
os.mkdir(scene_dump_dir)
# # Dump the original scene point clouds
mesh = align_mesh(scene_id)
mesh.write(os.path.join(scene_dump_dir, 'mesh.ply'))
write_ply_rgb(point_clouds[i], pcl_color[i], os.path.join(scene_dump_dir, 'pc.ply'))
# visualize the gt reference box
# NOTE: for each object there should be only one gt reference box
object_dump_dir = os.path.join(scene_dump_dir, "gt_{}_{}_{}_{}_{}.ply".format(scene_id, object_id, ann_id, DC.nyu40id2label[nyu40_label[i]], DC.nyu40id2label[prediction[i]]))
gt_obb = np.zeros((7,))
gt_obb[0:3] = gt_center[i]
gt_obb[3:6] = gt_size_residual[i]
gt_bbox = get_3d_box(gt_size_residual[i], 0, gt_center[i])
if not os.path.exists(object_dump_dir):
write_bbox(gt_obb, 0, os.path.join(object_dump_dir))
def visualize(args):
# init training dataset
print("preparing data...")
scanrefer, scene_list = get_scanrefer(args)
# dataloader
_, dataloader = get_dataloader(args, scanrefer, scene_list, "val", DC, False)
# model
model = get_model(args)
model.eval()
# evaluate
print("visualizing...")
for data in tqdm(dataloader):
for key in data:
data[key] = data[key].cuda()
# feed
with torch.no_grad():
data = model(data)
# visualize
dump_results(args, scanrefer, data, DC)
print("done!")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--folder", type=str, help="Folder containing the model", required=True)
parser.add_argument("--gpu", type=str, help="gpu", default="0")
parser.add_argument("--scene_id", type=str, help="scene id", default="")
parser.add_argument("--batch_size", type=int, help="batch size", default=2)
parser.add_argument('--num_points', type=int, default=40000, help='Point Number [default: 40000]')
parser.add_argument('--num_proposals', type=int, default=256, help='Proposal number [default: 256]')
parser.add_argument('--num_scenes', type=int, default=-1, help='Number of scenes [default: -1]')
parser.add_argument('--no_height', action='store_true', help='Do NOT use height signal in input.')
parser.add_argument('--no_nms', action='store_true', help='do NOT use non-maximum suppression for post-processing.')
parser.add_argument('--use_train', action='store_true', help='Use the training set.')
parser.add_argument('--use_color', action='store_true', help='Use RGB color in input.')
parser.add_argument('--use_normal', action='store_true', help='Use RGB color in input.')
parser.add_argument('--use_multiview', action='store_true', help='Use multiview images.')
args = parser.parse_args()
# setting
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
visualize(args)
|
nseppi/scan2cap
|
scan2cap/scripts/visualize_pretrain.py
|
visualize_pretrain.py
|
py
| 15,949 |
python
|
en
|
code
| 1 |
github-code
|
6
|
1375960664
|
# -*- coding: utf-8 -*-
import numpy as np
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
from deap import gp
from deap.algorithms import varAnd
from adan.aiem.genetics.evaluators import *
import array
import random as traditional_random
#import pathos
import pathos
import operator
#from adan import functions
from adan.functions import *
from adan.aidc.feature_selection import *
import time
def eaSimple_island(population,toolbox, cxpb, mutpb, ngen,halloffame=None,
verbose=__debug__,allowed_time=np.inf, stats=None,FREQ=None,
percentage_migration=0.1):
"""
    ngen is used both for the total generations and for the within-island generations.
So, the total number of gens will be ngen**2.
FREQ: How often migration takes place. If FREQ=None, then it is set to ngen/3
"""
#FREQ is how often migration takes place
if FREQ is None:
FREQ=int(ngen/3)
    if FREQ < 1:
        FREQ = 1
toolbox.register("algorithm", eaSimple_timed, toolbox=toolbox,
cxpb=cxpb, mutpb=mutpb, ngen=ngen,
verbose=verbose,stats=stats,halloffame=halloffame)
islands = population
#The GA runs each time for ngen, and then it runs for a total number of equal to ngen/FREQ
for i in range(0, ngen):
start = time.time()
results = toolbox.map(toolbox.algorithm, islands)
islands = [pop for pop, logbook in results]
if i % FREQ ==0:
print('******MIGRATION TAKING PLACE******')
tools.migRing(islands, int(percentage_migration*len(islands[0])), tools.selBest)
end = time.time()
if (end-start)>allowed_time:
if verbose:
print('Time-out. Maximum allowed time exceeded.')
break
return islands
def eaSimple_timed(population, toolbox, cxpb, mutpb, ngen, stats=None,
halloffame=None, verbose=__debug__,allowed_time=np.inf):
"""This is a copy of the eaSimple() method from DEAP, but adjusted
to support time-out. In case of timeout, the most recent generation is
returned.
"""
logbook = tools.Logbook()
logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in population if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
#-2 is the 'fail' value (e.g. the fitness function couldn't be computed)
if fit is None:
fit=(-2,)
ind.fitness.values = fit
if halloffame is not None:
halloffame.update(population)
record = stats.compile(population) if stats else {}
logbook.record(gen=0, nevals=len(invalid_ind), **record)
if verbose:
print(logbook.stream)
start = time.time()
# Begin the generational process
for gen in range(1, ngen+1):
# Select the next generation individuals
offspring = toolbox.select(population, len(population))
# Vary the pool of individuals
offspring = varAnd(offspring, toolbox, cxpb, mutpb)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
if fit is None:
fit=(-2,)
ind.fitness.values = fit
# Update the hall of fame with the generated individuals
if halloffame is not None:
halloffame.update(offspring)
# Replace the current population by the offspring
population[:] = offspring
# Append the current generation statistics to the logbook
record = stats.compile(population) if stats else {}
logbook.record(gen=gen, nevals=len(invalid_ind), **record)
if verbose:
print(logbook.stream)
end = time.time()
if (end-start)>allowed_time:
if verbose:
print('Time-out. Maximum allowed time exceeded.')
break
return population, logbook
def calcNewFeatures(result_set,df,features='best'):
"""
returns the best features alongside the variables participating in the complex variables
"""
all_features=[]
complex_features=[]
pset=setPset(df)
toolbox = base.Toolbox()
toolbox.register("compile", gp.compile, pset=pset)
complex_columns=[]
all_columns=[]
simple_columns=[]
if features=='best':
dummy='best_individuals_object'
elif features=='all':
dummy='all_features_individuals_object'
for feat in result_set[dummy]:
complex_features.append(toolbox.compile(feat))
all_features.append(toolbox.compile(feat))
complex_columns.append(str(feat))
all_columns.append(str(feat))
simple_features=[]
for feat in result_set['variables']:
simple_features.append(df[feat])
simple_columns.append(str(feat))
all_features.append(df[feat])
all_columns.append(str(feat))
return pd.DataFrame(np.column_stack(all_features),columns=all_columns),pd.DataFrame(np.column_stack(complex_features),columns=complex_columns),pd.DataFrame(np.column_stack(simple_features),columns=simple_columns)
def setPset(df):
pset = gp.PrimitiveSet("MAIN", 0,prefix="coef")
pset.addPrimitive(add,2)
pset.addPrimitive(sub, 2)
pset.addPrimitive(mul, 2)
pset.addPrimitive(div, 2)
for fun in singlefunctions:
pset.addPrimitive(fun,1)
for col in df.columns.values:
#we must use strings for column names otherwise the functions interpret the
#column names as numbers
pset.addTerminal(df[col].values,name=col)
return pset
def findFeaturesGP(df,target,population=300,ngen=50,cxpb=0.9,features=-1,
max_tree=3,evaluator=evalPearsonCorNumba,
task="regression",n_processes=1,allowed_time=None,target_sampling=0.8):
"""
This function calculates complex features that correlate with the response variable.
Output:
A dictionary with the following fields:
best_features: a list of lists, where every element is a feature selected by the best n features as defined by the cbf method
best_features_plus_cols: a list of lists, where every element is a feature selected by the best n features as defined by the cbf method plus
any original features participating in the creation of the individuals
best_individuals_equations: the equations used to compute the best_features (this is the string version of best_individuals_object)
best_individuals_plus_columns: like the previous, plus the column names of the individual features
best_individuals_object: the programs used to compute the best_features
scores: the score of each individual produced during the genetic programming
scores_cbf: the cbf score of each feature (all features not just the best ones)
variables: the names of the original variables that participate in the creation of the features in the best_features
all_features: a list of lists with all the features produced by the genetic algorithm
all_features_individuals: the programs used to compute all_features
features: if features<1, then the algorithm simply defaults to 1
target_sampling: When the features are evaluated, we can sample a % of the targets, and evaluate
the performace on this subset. This should help with overfitting and finding better solutions.
"""
if features<1:
features=1
if task=='regression' and evaluator==None:
evaluator=evalPearsonCorNumba
elif task=='classification' and evaluator==None:
evaluator=evalANOVANumba
mutpb=1-cxpb
# for col in df.columns:
# df[col]=df[col].astype('float64')
pset=setPset(df)
creator.create("FitnessMax", base.Fitness, weights=(1,))
creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=1, max_=max_tree)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("compile", gp.compile, pset=pset)
#need to do that because multithreading does not support functions with more than one arguments
def evaluate(x):
return evaluator(x,toolbox=toolbox,targets=target,sampling=target_sampling)
#toolbox.register("evaluate", evaluator,toolbox=toolbox, targets=targets)
toolbox.register("evaluate", evaluate)
#toolbox.register("select", tools.selTournament, tournsize=3)
#toolbox.register("select", tools.selNSGA2)
toolbox.register("select", tools.selDoubleTournament,fitness_size=3,parsimony_size=1.4,fitness_first=True)
toolbox.register("mate", gp.cxOnePoint)
toolbox.register("expr_mut", gp.genFull, min_=0, max_=max_tree)
toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)
toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=max_tree))
toolbox.decorate("mutate", gp.staticLimit(key=operator.attrgetter("height"), max_value=max_tree))
if type(population)==type([]):
toolbox.register("deme", tools.initRepeat, list, toolbox.individual)
DEME_SIZES = population
pop = [toolbox.deme(n=i) for i in DEME_SIZES]
hof = tools.HallOfFame(sum(population))
else:
pop = toolbox.population(n=population)
hof = tools.HallOfFame(population)
stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
stats_size = tools.Statistics(len)
mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
mstats.register("avg", np.mean)
mstats.register("std", np.std)
mstats.register("min", np.min)
mstats.register("max", np.max)
if n_processes>1:
pool = pathos.multiprocessing.ProcessingPool(n_processes)
toolbox.register("map", pool.map)
# pop, log = algorithms.eaMuPlusLambda(pop, toolbox, mu,lamb, cxpb,mutpb,ngen=ngen, stats=mstats,
# halloffame=hof, verbose=True)
# if allowed_time is None:
# pop, log = algorithms.eaSimple(pop, toolbox, cxpb,mutpb, ngen=ngen, stats=mstats,
# halloffame=hof, verbose=True)
if type(population)==type([]):
pop = eaSimple_island(pop, toolbox, cxpb,mutpb, ngen=ngen, stats=mstats,
halloffame=hof, verbose=True, allowed_time=allowed_time)
else:
pop, log = eaSimple_timed(pop, toolbox, cxpb,mutpb, ngen=ngen, stats=mstats,
halloffame=hof, verbose=True, allowed_time=allowed_time)
allfeatures=[]
allfeatures_individuals_object=[]
scores=[]
feature_names=[]
best_individuals_object=[]
for i in range(0,len(hof.items)):
# print(hof.items[i])
feature=toolbox.compile(hof.items[i])
if not np.isnan(feature).any():
#need to guard against zero variance features
if np.var(feature)>0.0:
allfeatures.append(feature)
allfeatures_individuals_object.append(hof.items[i])
feature_names.append(str(hof.items[i]))
best_individuals_object.append(hof.items[i])
#for some reason in DEAP the key in the hall-of-fame is the score
# if features>0:
# cbfscores=cbfSelectionNumba(allfeatures,target,task=task)
# bestindices=sorted(range(len(cbfscores)), key=lambda x: cbfscores[x],reverse=True)
# else:
# cbfscores=np.ones(len(allfeatures))
# bestindices=range(len(allfeatures))
cbfscores=cbfSelectionNumba(allfeatures,target,task=task)
bestindices=sorted(range(len(cbfscores)), key=lambda x: cbfscores[x],reverse=True)
bestfeatures=[]
bestindividuals=[]
bestindividuals_plus_cols=[]
scorescbf=[]
best_features_plus_cols=[]
best_individuals_object_final=[]
for i in range(0,int(features)):
index=bestindices[i]
bestfeatures.append(allfeatures[index])
best_features_plus_cols.append(allfeatures[index])
bestindividuals.append(feature_names[index])
bestindividuals_plus_cols.append(feature_names[index])
best_individuals_object_final.append(best_individuals_object[i])
# scores.append(eval(str(hof.keys[index])))
# scorescbf.append(cbfscores[index])
#all features includes the best variables, plus any single variables which might participate in the creation of the complex variables
final_vars=[]
str_individuals=str(bestindividuals)
for col in df.columns:
if str_individuals.find(col)>-1:
final_vars.append(col)
#append the original variable to bestfeatures if it exists in a complex feature
best_features_plus_cols.append(df[col].values)
bestindividuals_plus_cols.append(col)
#combine all features (individual and composite) into one df
best_all_feats_df=pd.DataFrame(np.column_stack(best_features_plus_cols),columns=bestindividuals_plus_cols)
return {'best_features':bestfeatures,'best_features_plus_cols':best_features_plus_cols,
'best_individuals_equations':bestindividuals,'best_individuals_object':best_individuals_object_final,
'scores':scores,'scores_cbf':scorescbf,'variables':final_vars,
'all_features':allfeatures,'all_features_individuals_object':allfeatures_individuals_object,'best_all_feats_df':best_all_feats_df}
def findEquationFeatures(features_to_be_used,task,target,ngen=10,population=10,crossover_prob=0.5,mut_prob=0.1,individual_mut=0.1,tournsize=3):
"""
Performs feature selection over the set of features before doing the
symbolic modelling
individual_mut: If a mutation occurs, then each item might be flipped according to this probability
"""
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
import array
creator.create("Individual", array.array, typecode='b', fitness=creator.FitnessMax)
toolbox = base.Toolbox()
# Attribute generator
toolbox.register("attr_bool", traditional_random.getrandbits,1)
# Structure initializers
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, features_to_be_used.shape[1])
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
#we import here to avoid a cyclical import
from adan.aiem.symbolic_modelling import findSymbolicExpressionL1_regression_helper, findSymbolicExpressionL1_classification_helper
def evalOneMax(individual):
if sum(individual)==0:
return -100,
else:
ind=np.array(individual,bool)
if task=='regression':
models=findSymbolicExpressionL1_regression_helper(features_to_be_used.loc[:,ind].values,target)
elif task=='classification':
models=findSymbolicExpressionL1_classification_helper(features_to_be_used.loc[:,ind].values,target)
performances=[perf[1] for perf in models]
maximum=max(performances)
return maximum,
toolbox.register("evaluate", evalOneMax)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutFlipBit, indpb=individual_mut)
toolbox.register("select", tools.selTournament, tournsize=tournsize)
pop = toolbox.population(n=population)
hof = tools.HallOfFame(1)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", np.mean)
stats.register("std", np.std)
stats.register("min", np.min)
stats.register("max", np.max)
pop, log = algorithms.eaSimple(pop, toolbox, cxpb=crossover_prob,
mutpb=mut_prob, ngen=ngen,
stats=stats, halloffame=hof, verbose=True)
final_choice=hof.items[0]
final_choice=np.array(final_choice,bool)
#return pop, log, hof
return final_choice
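# Minimal usage sketch (illustrative; assumes `df` is an all-numeric pandas DataFrame
# and `target` a 1-d numpy array of the same length, with the deap/pathos/numba deps installed):
#
#   result = findFeaturesGP(df, target, population=100, ngen=20, features=3,
#                           task="regression", allowed_time=120)
#   all_feats_df, complex_df, simple_df = calcNewFeatures(result, df, features='best')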
|
stelios12312312/ADAN
|
adan/aiem/genetics/genetic_programming.py
|
genetic_programming.py
|
py
| 16,827 |
python
|
en
|
code
| 0 |
github-code
|
6
|
28664991218
|
m = int(input("Enter your marks : "))
if(m >= 90 and m <= 100):
print("Exelent")
elif(m >= 80 and m <= 89):
print("A Grade")
elif(m >= 70 and m <= 79):
print("B Grade")
elif(m >= 60 and m <= 69):
print("C Grade")
elif(m >= 50 and m <= 59):
print("D Grade")
elif(m < 50):
print("Fail")
#************************************************
# Another way
m = int(input("Enter your marks : "))
if(m >= 90 and m <= 100):
Grade = "Exelent"
elif(m >= 80 and m <= 89):
Grade = "A"
elif(m >= 70 and m <= 79):
Grade = "B"
elif(m >= 60 and m <= 69):
Grade = "C"
elif(m >= 50 and m <= 59):
Grade = "D"
elif(m < 50):
Grade = "F"
print("Your Grade is : " + Grade) # Concadinating
|
vikaskr-gupta/Python
|
6 Conditional Expression/9B_Problem_06.py
|
9B_Problem_06.py
|
py
| 718 |
python
|
en
|
code
| 1 |
github-code
|
6
|
74959980988
|
"""system URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from clubs import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.home, name='home'),
path('user_dashboard/', views.user_dashboard, name='user_dashboard'),
path('user_profile/', views.user_profile, name='user_profile'),
path('user_profile/edit', views.edit_user_profile, name='edit_user_profile'),
path('user_profile/change_password', views.change_password, name='change_password'),
path('user_profile/<int:user_id>/<int:membership_id>', views.user_profile, name='user_profile'),
path('member_profile/<int:membership_id>', views.member_profile, name='member_profile'),
path('log_in/', views.log_in, name='log_in'),
path('log_out/', views.log_out, name='log_out'),
path('sign_up/', views.sign_up, name='sign_up'),
path('membership_application/', views.membership_application, name='membership_application'),
path('new_club/', views.club_creation, name='new_club'),
path('available_clubs/', views.available_clubs, name='available_clubs'),
path('club/<int:club_id>', views.club_dashboard, name='club_dashboard'),
path('club_memberships/', views.club_memberships, name='club_memberships'),
path('my_applications/', views.my_applications, name='my_applications'),
path('club/<int:club_id>/<int:user_id>/promote', views.promote_member, name='promote_member'),
path('club/<int:club_id>/<int:user_id>/demote', views.demote_member, name='demote_member'),
path('club/<int:club_id>/<int:user_id>/kick', views.kick_member, name='kick_member'),
path('club/<int:club_id>/edit', views.edit_club, name='edit_club'),
path('club/<int:club_id>/leave', views.leave_club, name='leave_club'),
path('tournament/<int:tournament_id>', views.tournament_dashboard, name='tournament_dashboard'),
path('club/<int:club_id>/transfer_ownership/<int:user_id>', views.transfer_ownership, name='transfer_ownership'),
path('membership/<int:membership_id>/approve', views.accept_membership, name='accept_membership'),
path('membership/<int:membership_id>/deny', views.reject_membership, name='reject_membership'),
path('new_tournament/<int:club_id>', views.tournament_creation, name='new_tournament'),
path('tournament/<int:tournament_id>/join', views.join_tournament, name='join_tournament'),
path('tournament/<int:tournament_id>/leave', views.leave_tournament, name='leave_tournament'),
path('tournament/<int:tournament_id>/cancel', views.cancel_tournament, name='cancel_tournament'),
path('tournament/<int:tournament_id>/generate_matches', views.generate_matches, name='generate_matches')
]
|
amir-rahim/ChessClubManagementSystem
|
system/urls.py
|
urls.py
|
py
| 3,313 |
python
|
en
|
code
| 1 |
github-code
|
6
|
7763839215
|
from Monitor import Monitor
import MonitorVarTypes
import requests
monitor_var = {'PreviousClosingPrice': MonitorVarTypes.FLOAT}
monitor_text = "URL FORMAT: https://api.polygon.io/v2/aggs/ticker/{TICKER}/prev?apiKey={APIKEY}; \nGo to https://polygon.io/docs/get_v1_meta_symbols__stocksTicker__news_anchor for more info"
class PolygonStockAPIMonitor(Monitor):
def _mapper(self):
return {'PreviousClosingPrice': self._price_check}
def _price_check(self, func, val):
try:
r = requests.get(self.src)
if r.status_code == 200:
stock_json = r.json()
closing_price = stock_json['results'][0]['c']
return func(closing_price, val), closing_price
except Exception:
raise ValueError
def start(trigger):
monitor = PolygonStockAPIMonitor(trigger)
monitor.run()
|
YuHanjiang/IFTTT-backend
|
Monitors/PolygonStockAPIMonitor.py
|
PolygonStockAPIMonitor.py
|
py
| 881 |
python
|
en
|
code
| 1 |
github-code
|
6
|
41996069005
|
import logging
import platform
import sys
import json
from getpass import getpass
from pathlib import Path
from typing import Union, Dict, Tuple
from shapely.geometry import Polygon
import os
import couchdb
import xmltodict
from lxml import etree
from tqdm import tqdm
from geojson_rewind import rewind
logging.basicConfig(level="INFO")
log = logging.getLogger(__name__)
def getBbox(coordinateList):
"""
input: list of lon/lat coordinate pairs for CAP polygon as [[lat,lon],[lat,log],...]
output: two points defining the bounding box as [lon,lat,lon,lat]
"""
return list(Polygon(coordinateList).bounds)
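# Hedged illustration (made-up coordinates, not part of the original module):
# for input points [[a, b], ...], Polygon(...).bounds gives
# (min_a, min_b, max_a, max_b), so
# getBbox([[10.0, 60.0], [11.0, 60.5], [10.5, 61.0]]) -> [10.0, 60.0, 11.0, 61.0]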
class CapToDB:
def __init__(self):
self._xmlschema = Path("cap_v1.2_schema.xml").read_text()
self._xml_validator = None # set at first usage
def run(self, path: Union[str, Path],
db_warnings: couchdb.client.Database,
db_incidents: couchdb.client.Database):
"""Run conversion and upload to db for all xml files in path
Args:
path: Path to xml files to be stored in DB
db_warnings: CouchDB database to store warnings
db_incidents: CouchDB database to store incident (number / names)
"""
capdir = Path(path)
for fn in tqdm(list(capdir.glob("**/METfare*.xml"))):
dic = self.xml_to_dict(fn)
warning, incident = self.map_dict(dic)
try:
log.debug("\n\nSaving to warnings DB for fn: %s", fn)
log.debug("content %s", warning)
id = warning["_id"]
if id in db_warnings:
print(id, "**** already exists, deleting")
db_warnings.delete(db_warnings[id])
else:
try:
warning['source'] = f'lustre_archive,{fn.name}'
with open(f'{warning["_id"]}.geojson', 'w') as file:
file.write(json.dumps(warning))
file.close()
# fh = open(f'{warning["_id"]}.geojson', 'w')
# fh.write(warning)
# fh.close();
except Exception as e:
print(e)
db_warnings.save(warning)
log.debug("upload attachment")
db_warnings.put_attachment(warning, fn.read_bytes(),
fn.name)
except couchdb.http.ResourceConflict:
log.exception("Could not update for %s. See log.", fn.name)
pass
# store incident number & update name, if available
# if incident is None:
# log.debug("No incident info")
# continue
# saved_entry = db_incidents.get(incident["_id"])
# if saved_entry is None:
# log.debug("Creating incidents database")
# db_incidents.save(incident)
# elif "name" not in saved_entry and "name" in incident:
# log.debug("Updating incidents database")
# saved_entry.update(incident)
# db_incidents.save(saved_entry)
# else:
# log.debug("Entry in db_incident exists already. No changes.")
def xml_to_dict(self, fn: Union[Path, str]) -> Dict:
"""Convert xml to dictionary.
Args:
fn: Input filename
"""
string = Path(fn).read_text()
try:
self.validate(string)
except etree.XMLSyntaxError as e:
log.warning("fn: %s is not a valid xml: %s.", fn, e)
return xmltodict.parse(string)
def validate(self, string: str) -> None:
"""Validates xml string against schema.
Args:
string: String to be validated.
Raises:
lxml.etree.XMLSyntaxError: If string is not a valid according to
the provided schema
"""
if self._xml_validator is None:
log.debug("Attempt to process xml schema")
schema_root = etree.XML(self._xmlschema.encode())
schema = etree.XMLSchema(schema_root)
self._xml_validator = etree.XMLParser(schema=schema)
log.info("Processed xml schema")
etree.fromstring(string.encode(), self._xml_validator)
def map_dict(self, event: Dict) -> Tuple[Dict, Union[None, Dict]]:
"""Maps xml-dict to DB keys
Results:
warning: Information for warnings DB
incident: Information for incidents DB. None if no incident number.
"""
warning = {}
alert = event['alert']
info = self.single_lang_evt_from_cap(alert)
# Variable keys
# format: "partition:name"
# warning["_id"] = f'metfare:{alert["identifier"]}'
warning["_id"] = f'{alert["identifier"]}'
warning["saved_at"] = alert["sent"]
warning["transmitted_at"] = alert["sent"]
warning["onset"] = info["onset"]
warning["expires"] = info["expires"]
warning["phenomenon"] = info["eventCode"]["value"]
# Info may not exist
if "incidents" in alert:
warning["incident"] = alert["incidents"]
# Fixed keys:
warning["archived"] = True
warning["author"] = f"{os.path.basename(__file__)}@met.no"
# warning["author"] = f"{__file__}@{platform.node()}"
warning["transmission_state"] = "transmitted"
warning["source"] = "lustre_archive"
# new keys
warning["status"] = alert["status"]
if "references" in warning:
warning["references"] = alert["references"]
warning["certainty"] = info["certainty"]
warning["severity"] = info["severity"]
warning["msgType"] = alert["msgType"]
warning["altitude"] = info["area"]["altitude"]
warning["ceiling"] = info["area"]["ceiling"]
warning["areaDesc"] = {
"en": info["area"]["areaDesc"],
"nb": info["area"]["areaDesc"],
}
warning["type"] = "FeatureCollection"
orig_polygon = info["area"]["polygon"].split()
polygon = []
for coor in orig_polygon:
lon, lat = coor.split(",")
polygon.append((float(lat), float(lon)))
coordinates = [
polygon,
]
geometry = {
"type": "Polygon",
"coordinates": coordinates,
}
bbox = getBbox(coordinates[0])
feature = {
"geometry": geometry,
"type": "Feature",
"properties": {"customArea": False, "bbox": bbox},
}
feature = rewind(feature)
warning["features"] = [feature,]
# warning["color"]
# warning["ref_by"]
# keys that are not relevant:
# "transmitted_at", "drafted_at", "author"
# incident-info
incident = None
if "incidents" in alert:
incident = {}
incident["_id"] = warning["incident"].zfill(10)
for parameter in info["parameter"]:
if parameter["valueName"] == "incidentName":
incident["name"] = parameter["value"]
return warning, incident
def single_lang_evt_from_cap(self, evt: Dict, lang="no") -> Dict:
"""Gets `events` of one language from mutlilang-CAP file"""
evt_no = evt["info"][0]
if evt_no["language"].lower() != lang:
raise ValueError("CAPs XML file scheme must have changed")
for evt_other_lang in evt["info"][1:]:
if evt_other_lang["language"] == lang:
raise ValueError("CAPs XML file scheme must have changed")
return evt_no
def save_incident(self, event: Dict):
alert = event['alert']
info = self.single_lang_evt_from_cap(alert)
if __name__ == "__main__":
user = input("user:")
password = getpass("password:")
couch = couchdb.Server("http://%s:%[email protected]:5984/" % (user, password))
captodb = CapToDB()
path = "test_data" if len(sys.argv) == 1 else sys.argv[1]
# captodb.run(path, couch["archive_warnings"], couch["archive_incidents"])
# captodb.run(path, couch["junk-warnings"], couch["junk-incidents"])
captodb.run(path, couch["jjw"], couch["jji"])
# captodb.run(path, couch["warnings"], couch["incidents"])
|
metno/weamyl-metcap
|
scripts/lustre_archive_importer.py
|
lustre_archive_importer.py
|
py
| 8,431 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4406242971
|
import random
def max_length_of_sub_array_with_sum0(arr, k):
if arr is None or len(arr) == 0 or k <= 0:
return 0
ans = 0
for i in range(len(arr)):
for j in range(i, len(arr)):
if k == sum(arr[i:j+1]):
ans = max(ans, j-i+1)
return ans
def max_length_of_sub_array_with_sum1(arr, k):
if arr is None or len(arr) == 0 or k <= 0:
return 0
left, right = 0, 0
current = arr[0]
ans = 0
while right < len(arr):
if current == k:
ans = max(ans, right-left+1)
current -= arr[left]
left += 1
elif current < k:
right += 1
if right != len(arr):
current += arr[right]
else:
current -= arr[left]
left += 1
return ans
def test(count, maxval):
arr = [random.randint(1, maxval) for _ in range(count)]
total = sum(arr)
for k in range(total+1):
        ans0 = max_length_of_sub_array_with_sum0(arr, k)
        ans1 = max_length_of_sub_array_with_sum1(arr, k)
assert(ans0 == ans1)
if __name__ == '__main__':
test(10, 10)
test(10, 100)
test(100, 10)
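# Worked example (hypothetical input, not part of the original tests):
# for arr = [1, 2, 1, 3] and k = 4 the longest subarray summing to 4 is
# [1, 2, 1], so both implementations return 3:
# max_length_of_sub_array_with_sum1([1, 2, 1, 3], 4) # -> 3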
|
guzhoudiaoke/data_structure_and_algorithms
|
coding_interview_guide/8_array_and_matrix/10/10.py
|
10.py
|
py
| 1,186 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4915045825
|
def sum_diagonal(data):
data = sum([int(i) for i in data])
return data
n = int(input())
matrix = [input().split(", ") for _ in range(n)]
first_diagonal = [matrix[row_index][col_index] for row_index in range(len(matrix)) for col_index in
range(len(matrix[row_index])) if row_index == col_index]
second_diagonal = [matrix[row_index][col_index] for row_index in range(len(matrix)) for col_index in
range(len(matrix[row_index])) if row_index + col_index == len(matrix) - 1]
print(f"First diagonal: {', '.join(first_diagonal)}. Sum: {sum_diagonal(first_diagonal)}")
print(f"Second diagonal: {', '.join(second_diagonal)}. Sum: {sum_diagonal(second_diagonal)}")
|
M0673N/Python-Advanced
|
04_comprehensions/exercise/05_problem.py
|
05_problem.py
|
py
| 703 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25146242563
|
import numpy as np
import netCDF4 as nd
import matplotlib.pyplot as plt
from calc_irr import *
def get_sample_points(origin, dir , ns=10):
gps = np.zeros((origin.shape[0],dir.shape[0]))
gps[0] = np.tan(dir/180.*np.pi) * origin[2] + origin[0]
gps[1,:] = origin[1]*1
gps[2,:] = 0
ds = np.linspace(0,1,ns)
v = np.zeros((ds.shape[0],origin.shape[0],dir.shape[0]))
v = np.reshape(origin,(1,origin.shape[0],1)) + (gps - np.reshape(origin,(origin.shape[0],1))) * np.reshape(ds,(ds.shape[0],1,1))
return v
def get_boxindex(p,grid):
dxi = np.array([grid[3],grid[4],grid[5]])
bounds = np.array([grid[0],grid[1],grid[2]])
index = np.zeros_like(p)
try:
for i in range(len(index)):
index[i] = p[i]//grid[i+3]
if index[i] < 0:
index[i]+=grid[i]
if index[i] > grid[i]-1:
index[i] -= grid[i]
except:
for i in range(p.shape[0]):
for xi in range(3):
for k in range(p.shape[2]):
index[i,xi,k] = p[i,xi,k]//dxi[xi]
if index[i,xi,k] < 0:
index[i,xi,k] += bounds[xi]
if index[i,xi,k] > bounds[xi]-1:
index[i,xi,k] -= bounds[xi]
return index.astype(int)
def get_e(p,grid,fpath="job_0.183435_36.600028/mc.flx.spc.nc"):
edirs = nd.Dataset(fpath,'r')
Edir = np.zeros(p.shape[1])
Edown = np.zeros(p.shape[1])
Eup = np.zeros(p.shape[1])
for I in range(p.shape[1]):
i,j,k = get_boxindex(p[:,I],grid)
Edir[I] = edirs["Edir"][j,i,k,:]
Edown[I] = edirs["Edown"][j,i,k,:]
Eup[I] = edirs["Eup"][j,i,k,:]
edirs.close()
return Edir,Edown,Eup
def get_rad(p,grid):
Eup = np.zeros(p.shape[1])
Edown = np.zeros_like(Eup)
Eu,Ed = calc_Es(UMUS,PHIS,wumu,wphi,"mc.rad.spc.nc","radiance")
for I in range(p.shape[1]):
i,j,k = get_boxindex(p[:,I],grid)
Eup[I] = Eu["radiance"][j,i,k,:]
Edown[I] = Ed["radiance"][j,i,k,:]
return Eup,Edown
UMUS = np.loadtxt("input_params.txt",dtype=str, max_rows=1)
PHIS = np.loadtxt("input_params.txt",dtype=str,skiprows=1, max_rows=1)
wumu = np.loadtxt("numus.txt", skiprows=1, max_rows=1)
wphi = np.loadtxt("nphis.txt", skiprows=1, max_rows=1)
Nx,Ny,dx,dy = np.loadtxt("input_params.txt", skiprows = 6, max_rows=1)
Zlev = np.loadtxt("input_params.txt", skiprows = 4, max_rows=1)
Nz = 2 #Zlev.shape[0]
dz = 1
sza = np.loadtxt("input_params.txt", skiprows = 2, max_rows=1)
mu = np.cos(sza/180.*np.pi)
albedo = 0.2
grid = np.array([Nx,Ny,Nz,dx,dy,dz])
cloudx = np.array([3,4])
cloudy = np.array([0])
cloudz = np.array([0,1])
camerapos = np.array([1.5,0.01,2])
camerafov = 90.
camerapixels = 90
pixeledges = np.linspace(-camerafov/2.,camerafov/2,camerapixels+1)
pixelangles = (pixeledges[0:-1] + pixeledges[1:])/2.
pixelvalues = np.zeros(pixelangles.shape)
pixelground = get_sample_points(camerapos,pixelangles)[-1]
Edir,Edown,Eup = get_e(pixelground,grid)
#Eu,Ed = get_rad(pixelground,grid)
#
#pixelvalues = Edir * albedo / np.pi + Ed*albedo/np.pi
#palt = Edir *albedo/np.pi + Edown*albedo/np.pi
#
#truth = nd.Dataset("job_panorama/mc.rad.spc.nc" , "r")
Eu,Ed = calc_Es(UMUS,PHIS,wumu,wphi,"mc.rad.spc.nc","radiance")
#fig,ax = plt.subplots(3,1)
#ax[0].plot(np.arange(pixelvalues.shape[0]),pixelvalues,label="with Edown from radiances")
#ax[1].plot(np.arange(palt.shape[0]),palt,label="with Edown from flx file")
#ax[2].plot(np.arange(truth["radiance"][0,:,0,0].shape[0]),truth["radiance"][0,:,0,0], label="from mystic panorama")
#ax[0].legend()
#ax[1].legend()
#ax[2].legend()
#
#plt.tight_layout()
#plt.show()
|
MujkanovicMax/master-thesis
|
radiances/raytr.py
|
raytr.py
|
py
| 3,788 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27103187909
|
"""Fedor's Upper Envelope algorithm.
Based on the original MATLAB code by Fedor Iskhakov:
https://github.com/fediskhakov/dcegm/blob/master/model_retirement.m
"""
from typing import Callable
from typing import Dict
from typing import List
from typing import Tuple
import numpy as np
from dcegm.interpolation import linear_interpolation_with_extrapolation
from scipy.optimize import brenth as root
eps = 2.2204e-16
def upper_envelope(
policy: np.ndarray,
value: np.ndarray,
exog_grid: np.ndarray,
state_choice_vec: np.ndarray,
params: Dict[str, float],
compute_utility: Callable,
) -> Tuple[np.ndarray, np.ndarray]:
"""Runs the Upper Envelope algorithm and drops sub-optimal points.
Calculates the upper envelope over the overlapping segments of the
decision-specific value functions, which in fact are value "correspondences"
in this case, where multiple solutions are detected. The dominated grid
points are then eliminated from the endogenous wealth grid.
Discrete choices introduce kinks and non-concave regions in the value
function that lead to discontinuities in the policy function of the
continuous (consumption) choice. In particular, the value function has a
non-concave region where the decision-specific values of the
alternative discrete choices (e.g. continued work or retirement) cross.
These are referred to as "primary" kinks.
As a result, multiple local optima for consumption emerge and the Euler
equation has multiple solutions.
Moreover, these "primary" kinks propagate back in time and manifest
themselves in an accumulation of "secondary" kinks in the choice-specific
value functions in earlier time periods, which, in turn, also produce an
increasing number of discontinuities in the consumption functions
in earlier periods of the life cycle.
These discontinuities in consumption rules in period t are caused by the
worker's anticipation of landing exactly at the kink points in the
subsequent periods t + 1, t + 2, ..., T under the optimal consumption policy.
Args:
policy (np.ndarray): Array of choice-specific consumption policy
of shape (2, n_grid_wealth).
Position [0, :] of the arrays contain the endogenous grid over wealth M,
and [1, :] stores the corresponding value of the (consumption) policy
function c(M, d), for each time period and each discrete choice.
value (np.ndarray): Array of choice-specific value function
of shape (2, n_grid_wealth).
Position [0, :] of the array contains the endogenous grid over wealth M,
and [1, :] stores the corresponding value of the value function v(M, d),
for each time period and each discrete choice.
exog_grid (np.ndarray): 1d array of exogenous savings grid of shape
(n_grid_wealth,).
        state_choice_vec (np.ndarray): Array defining the agent's current state
            and discrete choice.
        params (dict): Dictionary containing the model's parameters.
        compute_utility (callable): Function to compute the agent's utility.
Returns:
(tuple) Tuple containing
- policy_refined (np.ndarray): Worker's *refined* (consumption) policy
function of the current period, where suboptimal points have been dropped
and the kink points along with the corresponding interpolated values of
the policy function have been added. Shape (2, 1.1 * n_grid_wealth).
- value_refined (np.ndarray): Worker's *refined* value function of the
current period, where suboptimal points have been dropped and the kink
points along with the corresponding interpolated values of the value
function have been added. Shape (2, 1.1 * n_grid_wealth).
"""
n_grid_wealth = len(exog_grid)
min_wealth_grid = np.min(value[0, 1:])
credit_constr = False
if value[0, 1] <= min_wealth_grid:
segments_non_mono = locate_non_concave_regions(value)
else:
# Non-concave region coincides with credit constraint.
# This happens when there is a non-monotonicity in the endogenous wealth grid
# that goes below the first point.
# Solution: Value function to the left of the first point is analytical,
# so we just need to add some points to the left of the first grid point.
credit_constr = True
expected_value_zero_wealth = value[1, 0]
policy, value = _augment_grid(
policy,
value,
state_choice_vec,
expected_value_zero_wealth,
min_wealth_grid,
n_grid_wealth,
params,
compute_utility=compute_utility,
)
segments_non_mono = locate_non_concave_regions(value)
if len(segments_non_mono) > 1:
_value_refined, points_to_add = compute_upper_envelope(segments_non_mono)
index_dominated_points = find_dominated_points(
value, _value_refined, significance=10
)
if credit_constr:
value_refined = np.hstack(
[np.array([[0], [expected_value_zero_wealth]]), _value_refined]
)
else:
value_refined = _value_refined
policy_refined = refine_policy(policy, index_dominated_points, points_to_add)
else:
value_refined = value
policy_refined = policy
# Fill array with nans to fit 10% extra grid points,
# as the true shape is unknown ex ante
policy_refined_with_nans = np.empty((2, int(1.1 * n_grid_wealth)))
value_refined_with_nans = np.empty((2, int(1.1 * n_grid_wealth)))
policy_refined_with_nans[:] = np.nan
value_refined_with_nans[:] = np.nan
policy_refined_with_nans[:, : policy_refined.shape[1]] = policy_refined
value_refined_with_nans[:, : value_refined.shape[1]] = value_refined
return policy_refined_with_nans, value_refined_with_nans
def locate_non_concave_regions(
value: np.ndarray,
) -> List[np.ndarray]:
"""Locates non-concave regions.
Find non-monotonicity in the endogenous wealth grid where a grid point
to the right is smaller than its preceding point. Put differently, the
value function bends "backwards".
Non-concave regions in the value function are reflected by non-monotonous
regions in the underlying endogenous wealth grid.
Multiple solutions to the Euler equation cause the standard EGM loop to
produce a "value correspondence" rather than a value function.
The elimination of suboptimal grid points converts this correspondence back
to a proper function.
Args:
value (np.ndarray): Array storing the choice-specific value function
"correspondences". Shape (2, *n_endog_wealth_grid*), where
*n_endog_wealth_grid* is of variable length depending on the number of
kinks and non-concave regions in the value function.
In the presence of kinks, the value function is a "correspondence"
rather than a function due to non-concavities.
Returns:
        segments_non_mono (List[np.ndarray]): List of non-monotonous segments
            of the endogenous wealth grid together with the corresponding
            values of the value function. Each array has shape
            (2, *len_segment*), where *len_segment* is of variable length.
    """
segments_non_mono = []
is_monotonic = value[0, 1:] > value[0, :-1]
niter = 0
move_right = True
while move_right:
index_non_monotonic = np.where(is_monotonic != is_monotonic[0])[0]
# Check if we are beyond the starting (left-most) point
if len(index_non_monotonic) == 0:
if niter > 0:
segments_non_mono += [value]
move_right = False
break
else:
index_non_monotonic = min(index_non_monotonic) # left-most point
part_one, part_two = _partition_grid(value, index_non_monotonic)
segments_non_mono += [part_one]
value = part_two
# Move point of first non-monotonicity to the right
is_monotonic = is_monotonic[index_non_monotonic:]
niter += 1
return segments_non_mono
def compute_upper_envelope(
segments: List[np.ndarray],
) -> Tuple[np.ndarray, np.ndarray]:
"""Compute upper envelope and refines value function correspondence.
The upper envelope algorithm detects suboptimal points in the value function
correspondence. Consequently, (i) the suboptimal points are removed and the
(ii) kink points along with their corresponding interpolated values are included.
The elimination of suboptimal grid points converts the value
correspondence back to a proper function. Applying both (i) and (ii)
yields the refined endogenous wealth grid and the *refined* value function.
Args:
segments (List[np.ndarray]): List of non-monotonous segments in the
endogenous wealth grid, which results in non-concavities in the
corresponding value function. The list contains n_non_monotonous
np.ndarrays of shape (2, *len_non_monotonous*), where
*len_non_monotonous* is of variable length denoting the length of the
given non-monotonous segment.
Returns:
(tuple) Tuple containing:
- points_upper_env_refined (np.ndarray): Array containing the *refined*
endogenous wealth grid and the corresponding value function.
*refined* means suboptimal points have been dropped and the kink points
along with the corresponding interpolated values of the value function
            have been added.
Shape (2, *n_grid_refined*), where *n_grid_refined* is the length of
the *refined* endogenous grid.
- points_to_add (np.ndarray): Array containing the kink points and
corresponding interpolated values of the value function that have been
added to ``points_upper_env_refined``.
Shape (2, *n_intersect_points*), where *n_intersect_points* is the number of
intersection points between the two uppermost segments
(i.e. ``first_segment`` and ``second_segment``).
"""
endog_wealth_grid = np.unique(
np.concatenate([segments[arr][0] for arr in range(len(segments))])
)
values_interp = np.empty((len(segments), len(endog_wealth_grid)))
for i, segment in enumerate(segments):
values_interp[i, :] = linear_interpolation_with_inserting_missing_values(
x=segment[0],
y=segment[1],
x_new=endog_wealth_grid,
missing_value=-np.inf,
)
# values_interp has in each row the corresponding values of the upper curve
# in the overlapping seg
max_values_interp = np.tile(values_interp.max(axis=0), (3, 1)) # need this below
top_segments = values_interp == max_values_interp[0, :]
grid_points_upper_env = [endog_wealth_grid[0]]
values_upper_env = [values_interp[0, 0]]
intersect_points_upper_env = []
values_intersect_upper_env = []
move_right = True
while move_right:
# Index of top segment, starting at first (left-most) grid point
index_first_segment = np.where(top_segments[:, 0])[0][0]
for i in range(1, len(endog_wealth_grid)):
index_second_segment = np.where(top_segments[:, i] == 1)[0][0]
if index_second_segment != index_first_segment:
first_segment = index_first_segment
second_segment = index_second_segment
first_grid_point = endog_wealth_grid[i - 1]
second_grid_point = endog_wealth_grid[i]
values_first_segment = (
linear_interpolation_with_inserting_missing_values(
x=segments[first_segment][0],
y=segments[first_segment][1],
x_new=np.array([first_grid_point, second_grid_point]),
missing_value=np.nan,
)
)
values_second_segment = (
linear_interpolation_with_inserting_missing_values(
x=segments[second_segment][0],
y=segments[second_segment][1],
x_new=np.array([first_grid_point, second_grid_point]),
missing_value=np.nan,
)
)
if np.all(
np.isfinite(
np.vstack([values_first_segment, values_second_segment])
)
) and np.all(np.abs(values_first_segment - values_second_segment) > 0):
intersect_point = root(
_subtract_values,
first_grid_point,
second_grid_point,
args=(
segments[first_segment],
segments[second_segment],
),
)
value_intersect = (
linear_interpolation_with_inserting_missing_values(
x=segments[first_segment][0],
y=segments[first_segment][1],
x_new=np.array([intersect_point]),
missing_value=np.nan,
)[0]
)
values_all_segments = np.empty((len(segments), 1))
for segment in range(len(segments)):
values_all_segments[
segment
] = linear_interpolation_with_inserting_missing_values(
x=segments[segment][0],
y=segments[segment][1],
x_new=np.array([intersect_point]),
missing_value=-np.inf,
)[
0
]
index_max_value_intersect = np.where(
values_all_segments == values_all_segments.max(axis=0)
)[0][0]
if (index_max_value_intersect == first_segment) | (
index_max_value_intersect == second_segment
):
# There are no other functions above
grid_points_upper_env.append(intersect_point)
values_upper_env.append(value_intersect)
intersect_points_upper_env.append(intersect_point)
values_intersect_upper_env.append(value_intersect)
if second_segment == index_second_segment:
move_right = False
# Add point if it lies currently on the highest segment
if (
any(abs(segments[index_second_segment][0] - endog_wealth_grid[i]) < eps)
is True
):
grid_points_upper_env.append(endog_wealth_grid[i])
values_upper_env.append(max_values_interp[0, i])
index_first_segment = index_second_segment
points_upper_env_refined = np.empty((2, len(grid_points_upper_env)))
points_upper_env_refined[0, :] = grid_points_upper_env
points_upper_env_refined[1, :] = values_upper_env
points_to_add = np.empty((2, len(intersect_points_upper_env)))
points_to_add[0] = intersect_points_upper_env
points_to_add[1] = values_intersect_upper_env
return points_upper_env_refined, points_to_add
def find_dominated_points(
value_correspondence: np.ndarray,
value_refined: np.ndarray,
significance: int = 10,
) -> np.ndarray:
"""Returns indexes of dominated points in the value function correspondence.
Equality is measured up to 10**(-``significance``).
Args:
value_correspondence (np.ndarray): Array storing the choice-specific
value function correspondences. Shape (2, n_endog_wealth_grid), where
n_endog_wealth_grid is of variable length depending on the number of
kinks and non-concave regions in the value function.
In the presence of kinks, the value function is a correspondence
rather than a function due to non-concavities.
value_refined (np.ndarray): Array of refined value function, where
suboptimal points have been dropped and kink points along with the
corresponding interpolated values of the value function have been added.
Shape (2, n_grid_refined), where n_grid_refined is the length of
the refined endogenous grid.
significance (float): Level of significance. Equality is measured up to
10**(-``significance``).
Returns:
index_dominated_points (np.ndarray): Array of shape (n_dominated_points,)
containing the indices of dominated points in the endogenous wealth grid,
where n_dominated_points is of variable length.
"""
sig_pos = 10**significance
sig_neg = 10 ** (-significance)
grid_all = np.round(value_correspondence[0, :] * sig_pos) * sig_neg
grid_refined_sig = np.round(value_refined[0, :] * sig_pos) * sig_neg
value_all = np.round(value_correspondence[1, :] * sig_pos) * sig_neg
value_refined_sig = np.round(value_refined[1, :] * sig_pos) * sig_neg
index_all = np.arange(len(grid_all))
index_dominated_points = np.union1d(
index_all[~np.isin(grid_all, grid_refined_sig)],
index_all[~np.isin(value_all, value_refined_sig)],
)
return index_dominated_points
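# Hedged illustration (made-up arrays, not part of the original module):
# with value_correspondence = np.array([[0.0, 1.0, 1.5, 2.0],
#                                       [0.0, 2.0, 1.0, 3.0]])
# and value_refined = np.array([[0.0, 1.0, 2.0], [0.0, 2.0, 3.0]]),
# the point (1.5, 1.0) appears in neither refined row, so
# find_dominated_points(value_correspondence, value_refined) returns array([2]).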
def refine_policy(
policy: np.ndarray, index_dominated_points: np.ndarray, points_to_add: np.ndarray
) -> np.ndarray:
"""Drop suboptimal points from policy correspondence and add new optimal ones.
Args:
points_to_add (np.ndarray): Array of shape (*n_kink_points*,),
containing the kink points and corresponding interpolated values of
the refined value function, where *n_kink_points* is of variable
length.
index_dominated_points (np.ndarray): Array of shape (*n_dominated_points*,)
containing the indices of dominated points in the endogenous wealth grid,
where *n_dominated_points* is of variable length.
Returns:
(np.ndarray): Array of shape (2, *n_grid_refined*)
containing the *refined* choice-specific policy function, which means that
suboptimal points have been removed from the endogenous wealth grid and
the policy "correspondence". Furthermore, kink points and the
corresponding interpolated values of the policy function have been added.
"""
# Remove suboptimal consumption points
endog_wealth_grid = np.delete(policy[0, :], index_dominated_points)
optimal_consumption = np.delete(policy[1, :], index_dominated_points)
# Add new optimal consumption points
new_points_policy_interp = []
for new_grid_point in range(len(points_to_add[0, :])):
all_points_to_the_left = np.where(
policy[0, :] < points_to_add[0, new_grid_point]
)[0]
all_points_to_the_right = np.where(
policy[0, :] > points_to_add[0, new_grid_point]
)[0]
last_point_to_the_left = max(
all_points_to_the_left[
~np.isin(all_points_to_the_left, index_dominated_points)
]
)
# Find (scalar) point interpolated from the left
interp_from_the_left = linear_interpolation_with_extrapolation(
x=policy[0, :][last_point_to_the_left : last_point_to_the_left + 2],
y=policy[1, :][last_point_to_the_left : last_point_to_the_left + 2],
x_new=points_to_add[0][new_grid_point],
)
first_point_to_the_right = min(
all_points_to_the_right[
~np.isin(all_points_to_the_right, index_dominated_points)
]
)
# Find (scalar) point interpolated from the right
interp_from_the_right = linear_interpolation_with_extrapolation(
x=policy[0, :][first_point_to_the_right - 1 : first_point_to_the_right + 1],
y=policy[1, :][first_point_to_the_right - 1 : first_point_to_the_right + 1],
x_new=points_to_add[0, new_grid_point],
)
new_points_policy_interp += [
np.array(
[
points_to_add[0, new_grid_point],
interp_from_the_left,
interp_from_the_right,
]
)
]
# Insert new points into the endogenous wealth grid and consumption policy
for to_add in range(len(new_points_policy_interp)):
index_insert = np.where(
endog_wealth_grid > new_points_policy_interp[to_add][0]
)[0][0]
# 1) Add new points to policy TWICE to accurately describe discontinuities
endog_wealth_grid = np.insert(
endog_wealth_grid,
index_insert,
new_points_policy_interp[to_add][0],
)
endog_wealth_grid = np.insert(
endog_wealth_grid,
index_insert + 1,
new_points_policy_interp[to_add][0] - 0.001 * 2.2204e-16,
)
# 2a) Add new optimal consumption point, interpolated from the left
optimal_consumption = np.insert(
optimal_consumption,
index_insert,
new_points_policy_interp[to_add][1],
)
# 2b) Add new optimal consumption point, interpolated from the right
optimal_consumption = np.insert(
optimal_consumption,
index_insert + 1,
new_points_policy_interp[to_add][2],
)
policy_refined = np.stack([endog_wealth_grid, optimal_consumption])
    # Make sure first element in endogenous wealth grid and optimal consumption policy
# are both 0.
if policy_refined[0, 0] != 0.0:
policy_refined = np.hstack([np.zeros((2, 1)), policy_refined])
return policy_refined
def _augment_grid(
policy: np.ndarray,
value: np.ndarray,
state_choice_vec: np.ndarray,
expected_value_zero_wealth: np.ndarray,
min_wealth_grid: float,
n_grid_wealth: int,
params,
compute_utility: Callable,
) -> Tuple[np.ndarray, np.ndarray]:
"""Extends the endogenous wealth grid, value, and policy function to the left.
Args:
policy (np.ndarray): Array storing the choice-specific
policy function. Shape (2, *n_endog_wealth_grid*), where
*n_endog_wealth_grid* is of variable length depending on the number of
discontinuities in the policy function.
In the presence of discontinuities, the policy function is a
"correspondence" rather than a function due to multiple local optima.
value (np.ndarray): Array storing the choice-specific
value function. Shape (2, *n_endog_wealth_grid*), where
*n_endog_wealth_grid* is of variable length depending on the number of
kinks and non-concave regions in the value function.
In the presence of kinks, the value function is a "correspondence"
rather than a function due to non-concavities.
expected_value_zero_wealth (float): The agent's expected value given that she
has a wealth of zero.
min_wealth_grid (float): Minimal wealth level in the endogenous wealth grid.
n_grid_wealth (int): Number of grid points in the exogenous wealth grid.
params (dict): Dictionary containing the model's parameters.
        compute_utility (callable): Function to compute the agent's utility.
Returns:
policy_augmented (np.ndarray): Array containing endogenous grid and
policy function with ancillary points added to the left.
Shape (2, *n_grid_augmented*).
value_augmented (np.ndarray): Array containing endogenous grid and
value function with ancillary points added to the left.
Shape (2, *n_grid_augmented*).
"""
grid_points_to_add = np.linspace(min_wealth_grid, value[0, 1], n_grid_wealth // 10)[
:-1
]
utility = compute_utility(
consumption=grid_points_to_add,
params=params,
**state_choice_vec,
)
values_to_add = utility + params["beta"] * expected_value_zero_wealth
value_augmented = np.vstack(
[
np.append(grid_points_to_add, value[0, 1:]),
np.append(values_to_add, value[1, 1:]),
]
)
policy_augmented = np.vstack(
[
np.append(grid_points_to_add, policy[0, 1:]),
np.append(grid_points_to_add, policy[1, 1:]),
]
)
return policy_augmented, value_augmented
def _partition_grid(
value_correspondence: np.ndarray, j: int
) -> Tuple[np.ndarray, np.ndarray]:
"""Splits the grid into two parts, 1,..., j and j, j+1,..., J.
Note that the index ``j``, after which the separation occurs,
is also included in the second partition.
Args:
value_correspondence (np.ndarray): Array storing the choice-specific
value function "correspondences". Shape (2, *n_endog_wealth_grid*), where
*n_endog_wealth_grid* is of variable length depending on the number of
kinks and non-concave regions in the value function.
In the presence of kinks, the value function is a "correspondence"
rather than a function due to non-concavities.
j (int): Index denoting the location where the endogenous wealth grid is
separated.
Returns:
part_one (np.ndarray): Array of shape (2, : ``j`` + 1) containing the first
partition.
part_two (np.ndarray): Array of shape (2, ``j``:) containing the second
partition.
"""
j = min(j, value_correspondence.shape[1])
part_one = np.vstack(
[
value_correspondence[0, : j + 1], # endogenous wealth grid
value_correspondence[1, : j + 1], # value function
]
)
# Include boundary points in both partitions
part_two = np.vstack([value_correspondence[0, j:], value_correspondence[1, j:]])
return part_one, part_two
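# Hedged illustration (made-up array, not part of the original module):
# _partition_grid(np.array([[1., 2., 3., 4.], [10., 20., 30., 40.]]), j=2)
# returns columns 0..2 as part_one and columns 2..3 as part_two, i.e. the
# boundary column (3., 30.) is included in both partitions.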
def _subtract_values(grid_point: float, first_segment, second_segment):
"""Subtracts the interpolated values of the two uppermost segments."""
values_first_segment = linear_interpolation_with_extrapolation(
x=first_segment[0], y=first_segment[1], x_new=grid_point
)
values_second_segment = linear_interpolation_with_extrapolation(
x=second_segment[0], y=second_segment[1], x_new=grid_point
)
diff_values_segments = values_first_segment - values_second_segment
return diff_values_segments
def linear_interpolation_with_inserting_missing_values(x, y, x_new, missing_value):
"""Linear interpolation with inserting missing values.
Args:
x (np.ndarray): 1d array of shape (n,) containing the x-values.
y (np.ndarray): 1d array of shape (n,) containing the y-values
corresponding to the x-values.
x_new (np.ndarray or float): 1d array of shape (m,) or float containing
the new x-values at which to evaluate the interpolation function.
missing_value (np.ndarray or float): Flat array of shape (1,) or float
to set for values of x_new outside of the range of x.
Returns:
np.ndarray or float: 1d array of shape (m,) or float containing the
new y-values corresponding to the new x-values.
In case x_new contains values outside of the range of x, these
values are set equal to missing_value.
"""
interpol_res = linear_interpolation_with_extrapolation(x, y, x_new)
where_to_miss = (x_new < x.min()) | (x_new > x.max())
interpol_res[where_to_miss] = missing_value
return interpol_res
|
OpenSourceEconomics/dcegm
|
tests/utils/upper_envelope_fedor.py
|
upper_envelope_fedor.py
|
py
| 28,814 |
python
|
en
|
code
| 15 |
github-code
|
6
|
14169799786
|
import random
import numpy as np
class Tablica:
def __init__(self, size):
self.size = size
self.plansza = [[-1 for y in range(self.size)] for x in range(self.size)]
self.generate()
self.generate()
self.nextLeft = self.plansza
self.nextRight = self.plansza
self.nextUp = self.plansza
self.nextDown = self.plansza
self.leftPoints = 0
self.rightPoints = 0
self.upPoints = 0
self.downPoints = 0
def generate(self):
        rand = random.randint(0, self.size*self.size-1)
while self.plansza[int(rand / self.size)][rand % self.size] != -1:
rand = random.randint(0, self.size*self.size-1)
self.plansza[int(rand / self.size)][rand % self.size] = 1 if random.randint(0, 9) != 0 else 2
def isNotFull(self):
return any([any([x == -1 for x in el]) for el in self.plansza])
def print(self):
for el in self.plansza:
print([int(pow(2, x)) for x in el])
def move(self, movement):
moved = True
points = 0
if movement == 0 and self.plansza != self.nextLeft:
self.plansza = self.nextLeft
points = self.leftPoints
self.clearPoints()
# print("lewo")
elif movement == 1 and self.plansza != self.nextRight:
# print("Prawo")
self.plansza = self.nextRight
points = self.rightPoints
self.clearPoints()
elif movement == 2 and self.plansza != self.nextUp:
self.plansza = self.nextUp
points = self.upPoints
self.clearPoints()
elif movement == 3 and self.plansza != self.nextDown:
self.plansza = self.nextDown
points = self.downPoints
self.clearPoints()
else:
moved = False
return moved, points
def clearPoints(self):
self.leftPoints = 0
self.rightPoints = 0
self.upPoints = 0
self.downPoints = 0
def hasNext(self):
self.nextLeft = [self.left(x) for x in self.plansza]
self.nextRight = [self.right(x) for x in self.plansza]
self.nextUp = np.array([self.left(x, "up") for x in np.array(self.plansza).T.tolist()]).T.tolist()
self.nextDown = np.array([self.right(x, "down") for x in np.array(self.plansza).T.tolist()]).T.tolist()
if self.plansza != self.nextLeft:
# print("Istnieje ruch w lewo")
return True
if self.plansza != self.nextRight:
# print("Istnieje ruch prawo")
return True
if self.plansza != self.nextUp:
# print("Istnieje ruch w gore")
return True
if self.plansza != self.nextDown:
# print("Istnieje ruch w dół")
return True
return False
def left(self, x, direction="left"):
result = [number for number in x if number != -1]
while len(result) != len(x):
result.append(-1)
# print(result)
for i in range(len(x) - 1):
if result[i] == result[i + 1] and result[i] != -1:
result[i] += 1
if direction == "up":
self.upPoints += pow(2, result[i])
else:
self.leftPoints += pow(2, result[i])
for j in range(i + 1, len(x) - 1):
result[j] = result[j + 1]
result[-1] = -1
return result
def right(self, x, direction="right"):
result = [number for number in x if number != -1]
while len(result) != len(x):
result.insert(0, -1)
for i in range(len(x) - 1)[::-1]:
if result[i] == result[i + 1] and result[i] != -1:
result[i + 1] += 1
if direction == "down":
self.downPoints += pow(2, result[i+1])
else:
self.rightPoints += pow(2, result[i+1])
for j in range(1, i + 1)[::-1]:
result[j] = result[j - 1]
result[0] = -1
return result
@staticmethod
def up(x):
return x
@staticmethod
def down(x):
return x
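# Hedged illustration (cells store exponents, -1 is an empty cell; not part
# of the original file):
# Tablica(4).left([1, 1, -1, 2]) -> [2, 2, -1, -1]
# i.e. the tile row [2, 2, _, 4] becomes [4, 4, _, _]: the two leading 2-tiles
# merge into a 4 (4 points are added to leftPoints) and the rest slides left.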
|
tomasz-skrzypczyk/My2048
|
tablica.py
|
tablica.py
|
py
| 4,370 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71484464508
|
N = int(input())
P = list(map(int, input().split()))
max_point = sum(P)
dp = [False] * (max_point + 1)
dp[0] = True
for p in P:
dp_next = dp[:]
for i in range(max_point):
if dp[i]: dp_next[i+p] = True
dp = dp_next[:]
print(dp.count(True))
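# Worked example (hypothetical input, not part of the original solution):
# N = 3, P = [2, 3, 5] -> the achievable sums are {0, 2, 3, 5, 7, 8, 10},
# so the program prints 7.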
|
knuu/competitive-programming
|
atcoder/dp/tdpc_a.py
|
tdpc_a.py
|
py
| 260 |
python
|
en
|
code
| 1 |
github-code
|
6
|
17607810708
|
from calendar import c
n = int(input("enter the number :"))
r = 1
while r<=n:
c = 1
while c<=r:
print(chr(65+r+c+1))
c = c + 1
print()
r = r+1
|
maheshyadav84481/Problem_solution_with_python_MAHESH_YADAV
|
abcd_reverse.py
|
abcd_reverse.py
|
py
| 162 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26818962506
|
import json
import urllib
import requests
import types
class MovesAPIError(Exception):
"""Raised if the Moves API returns an error."""
pass
class MovesAPINotModifed(Exception):
"""Raised if the document requested is unmodified. Need the use of etag header"""
pass
class MovesClient(object):
"""OAuth client for the Moves API"""
api_url = "https://api.moves-app.com/api/1.1"
app_auth_url = "moves://app/authorize"
web_auth_uri = "https://api.moves-app.com/oauth/v1/authorize"
token_url = "https://api.moves-app.com/oauth/v1/access_token"
tokeninfo_url = "https://api.moves-app.com/oauth/v1/tokeninfo"
refresh_url = "https://api.moves-app.com/oauth/v1/access_token"
def __init__(self, client_id=None, client_secret=None,
access_token=None, use_app=False):
self.client_id = client_id
self.client_secret = client_secret
self.access_token = access_token
self.auth_url = self.app_auth_url if use_app else self.web_auth_uri
self.use_app = use_app
self._last_headers = None
def parse_response(self, response):
"""Parse JSON API responses."""
return json.loads(response.text)
def build_oauth_url(self, redirect_uri=None, scope="activity location"):
params = {
'client_id': self.client_id,
'scope': scope
}
if not self.use_app:
params['response_type'] = 'code'
if redirect_uri:
params['redirect_uri'] = redirect_uri
# Moves hates +s for spaces, so use %20 instead.
encoded = urllib.urlencode(params).replace('+', '%20')
return "%s?%s" % (self.auth_url, encoded)
def get_oauth_token(self, code, **kwargs):
params = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'code': code,
'grant_type': kwargs.get('grant_type', 'authorization_code')
}
if 'redirect_uri' in kwargs:
params['redirect_uri'] = kwargs['redirect_uri']
response = requests.post(self.token_url, params=params)
response = json.loads(response.content)
try:
return response['access_token'], response['refresh_token']
except:
error = "<%(error)s>: %(error_description)s" % response
raise MovesAPIError(error)
def refresh_oauth_token(self, refresh_token, **kwargs):
params = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'refresh_token': refresh_token,
'grant_type': kwargs.get('grant_type', 'refresh_token')
}
response = requests.post(self.refresh_url, params=params)
response = json.loads(response.content)
try:
return response['access_token'], response['refresh_token']
except:
error = "<%(error)s>: %(error_description)s" % response
raise MovesAPIError(error)
def tokeninfo(self):
params = {
'access_token': self.access_token
}
response = requests.get(self.tokeninfo_url, params=params)
response = json.loads(response.content)
try:
return response
except:
error = "<%(error)s>: %(error_description)s" % response
raise MovesAPIError(error)
def api(self, path, method='GET', **kwargs):
params = kwargs['params'] if 'params' in kwargs else {}
data = kwargs['data'] if 'data' in kwargs else {}
if not self.access_token and 'access_token' not in params:
raise MovesAPIError("You must provide a valid access token.")
url = "%s/%s" % (self.api_url, path)
if 'access_token' in params:
access_token = params['access_token']
del(params['access_token'])
else:
access_token = self.access_token
headers = {
"Authorization": 'Bearer ' + access_token
}
if 'etag' in params:
headers['If-None-Match'] = params['etag']
del(params['etag'])
resp = requests.request(method, url,
data=data,
params=params,
headers=headers)
if str(resp.status_code)[0] not in ('2', '3'):
raise MovesAPIError("Error returned via the API with status code (%s):" %
resp.status_code, resp.text)
if resp.status_code == 304:
raise MovesAPINotModifed("Unmodified")
self._last_headers = resp.headers
return resp
def get(self, path, **params):
return self.parse_response(
self.api(path, 'GET', params=params))
def post(self, path, **data):
return self.parse_response(
self.api(path, 'POST', data=data))
def set_first_date(self):
if not self.first_date:
response = self.user_profile()
self.first_date = response['profile']['firstDate']
def __getattr__(self, name):
'''\
Turns method calls such as "moves.foo_bar(...)" into
a call to "moves.api('/foo/bar', 'GET', params={...})"
and then parses the response.
'''
base_path = name.replace('_', '/')
# Define a function that does what we want.
def closure(*path, **params):
'Accesses the /%s API endpoints.'
path = list(path)
path.insert(0, base_path)
return self.parse_response(
self.api('/'.join(path), 'GET', params=params)
)
# Clone a new method with the correct name and doc string.
retval = types.FunctionType(
closure.func_code,
closure.func_globals,
name,
closure.func_defaults,
closure.func_closure)
retval.func_doc = closure.func_doc % base_path
# Cache it to avoid additional calls to __getattr__.
setattr(self, name, retval)
return retval
# Give Access to last attribute
_move_client_status = ['etag', 'x-ratelimit-hourlimit', 'x-ratelimit-hourremaining',
'x-ratelimit-minutelimit', 'x-ratelimit-minuteremaining']
for att in _move_client_status:
att = att.replace('-', '_')
setattr(MovesClient, att, property(lambda self,att=att: self._last_headers.get(att, None)
if self._last_headers else att))
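# Hedged usage sketch (credentials and endpoint names are placeholders, not
# taken from the original module):
# mc = MovesClient(client_id='...', client_secret='...', access_token='...')
# profile = mc.user_profile() # __getattr__ turns this into GET /user/profile
# daily = mc.user_summary_daily('20130315') # GET /user/summary/daily/20130315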
|
lysol/moves
|
moves/_moves.py
|
_moves.py
|
py
| 6,546 |
python
|
en
|
code
| 58 |
github-code
|
6
|
14975435890
|
"""DuerOS entity class."""
from __future__ import annotations
from dueros_smarthome.client import DeviceActionResponse
from dueros_smarthome.const import STATUS_OK, STATUS_NOT_LOGIN
from dueros_smarthome.models import Appliance, Connectivity
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryAuthFailed, IntegrationError
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN, NAME, VERSION, LOGGER
from .coordinator import DuerOSDataUpdateCoordinator, get_unique_id
class DuerOSEntity(CoordinatorEntity):
"""DuerOSEntity class."""
def _set_unique_id(self) -> None:
self._attr_unique_id = get_unique_id(self._appliance)
def __init__(
self, coordinator: DuerOSDataUpdateCoordinator, appliance: Appliance
) -> None:
"""Initialize."""
super().__init__(coordinator)
self._attr_available = False
self._attr_has_entity_name = True
self._update(appliance)
self._set_unique_id()
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, self.unique_id)},
name=NAME,
model=VERSION,
manufacturer=self._appliance.bot_name,
)
def _update(self, appliance: Appliance):
self._appliance = appliance
self._attr_available = (
self._appliance.state_settings.connectivity.value == Connectivity.REACHABLE
)
self._attr_name = self._appliance.friendly_name
@callback
def _handle_coordinator_update(self) -> None:
self._update(self.coordinator.data[self.unique_id])
self.async_write_ha_state()
@property
def available(self) -> bool:
return self._attr_available
@staticmethod
def _check_response(rsp: DeviceActionResponse) -> None:
if STATUS_NOT_LOGIN == rsp.status:
LOGGER.error(rsp.msg)
raise ConfigEntryAuthFailed(rsp.msg)
if STATUS_OK != rsp.status:
LOGGER.error(rsp.msg)
raise IntegrationError(rsp.msg)
|
zsy056/dueros-ha
|
custom_components/dueros/entity.py
|
entity.py
|
py
| 2,131 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73340450429
|
import string
import math
# ----------------------------------------------------
# Parameters: None
# Return: polybius_square (string)
# Description: Returns the following polybius square
# as a sequential string:
# [1] [2] [3] [4] [5] [6] [7] [8]
# [1] ! " # $ % & '
# [2]           (   )   *   +   ,   -   .   /
# [3] 0 1 2 3 4 5 6 7
# [4] 8 9 : ; < = > ?
# [5] @ A B C D E F G
# [6] H I J K L M N O
# [7] P Q R S T U V W
# [8] X Y Z [ \ ] ^ _
# ---------------------------------------------------
def get_polybius_square():
polybius_square = ''
for i in range (32,96):
char = str(chr(int(i)))
polybius_square+= char
return polybius_square
# --------------------------------------------------------------
# Parameters: plaintext(string)
# key (none)
# Return: ciphertext (string)
# Description: Encryption using Polybius Square
# --------------------------------------------------------------
def e_polybius(plaintext, key):
ciphertext = ''
sqr = get_polybius_square()
p = plaintext.split('\n')
for plain in p:
for i in plain:
j = sqr.find(i.upper())
num = str(j+11+2*(j//8))
ciphertext+=str(num)
if '8567' in ciphertext:
print("newline found")
        ciphertext = ciphertext.replace('8567', '\n')
ciphertext+='\n'
return ciphertext
# ---------------------------------
# Problem 5 #
# ---------------------------------
# -------------------------------------------------------
# Parameters: ciphertext(string)
# key (none)
# Return: plaintext (string)
# Description: Decryption using Polybius Square Cipher
# Detects invalid ciphertext --> print error msg and return ''
# Case 1: #of chars (other than \n) is not even
# Case 2: the ciphertext contains non-numerical chars (except \n')
# -------------------------------------------------------
def d_polybius(ciphertext, key):
plaintext = ''
sqr = get_polybius_square()
text = ciphertext.split("\n")
for x in text:
x = x.strip()
if len(x)%2!=0:
print("Invalid ciphertext! Decryption Failed!")
return ''
        if x.isdigit():
for i in range(0, len(x),2):
s = int(x[i:i+2])
if s< 19:
a = 0
elif s%10 > 5:
a = 2*round((s-11)/11)
else:
a = 2*round((s-11)/10)
asky = s-11-a
char = sqr[asky]
plaintext +=char
plaintext += '\n'
elif len(x)==0:
plaintext+='\n'
else:
print("Invalid ciphertext! Decryption Failed!")
return plaintext
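# Hedged round-trip check (not part of the original file): 'H' sits at index
# 40 of the square, so it is encoded as 40 + 11 + 2*(40 // 8) = 61 and 'I' as 62:
# c = e_polybius('HI', None) # -> '6162\n'
# d_polybius(c, None).strip() # -> 'HI'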
|
shuaibr/Encryption-and-Security
|
Polybius Square Cipher.py
|
Polybius Square Cipher.py
|
py
| 3,099 |
python
|
en
|
code
| 0 |
github-code
|
6
|
72627151867
|
import sys
import pandas as pd
import pmd
def get_system_info(csv_file, id):
print('ID:', id)
df = pd.read_csv(csv_file)
row = df['ID'] == int(id)
smiles = df.loc[row, 'SMILES'].item()
smiles_solvent = df.loc[row, 'SMILES_solvent'].item()
ratio = df.loc[row, 'Ratio'].item()
return smiles, smiles_solvent, ratio
if __name__ == '__main__':
# Get system ID from command line argument
try:
system_id = sys.argv[1]
except IndexError:
print('System ID is not provided, please pass in ID as an argument')
exit()
# Get system-dependent parameters from the csv file
try:
system_info_csv = 'solvent_diffusivity.csv'
smiles, solvent, ratio = get_system_info(system_info_csv, system_id)
except FileNotFoundError:
raise FileNotFoundError(
'Having trouble getting info from the csv file')
system = pmd.SolventSystem(smiles=smiles,
solvent_smiles=solvent,
ru_nsolvent_ratio=ratio,
density=0.8,
natoms_total=5000,
natoms_per_chain=150,
builder=pmd.PSP(force_field='gaff2-am1bcc'))
lmp = pmd.Lammps(read_data_from=system)
lmp.add_procedure(pmd.Minimization())
lmp.add_procedure(
pmd.Equilibration(Teq=300, Peq=1, Tmax=600, Pmax=49346.163))
lmp.add_procedure(
pmd.NPT(Tinit=300,
Tfinal=300,
Pinit=1,
Pfinal=1,
duration=10000000,
reset_timestep_before_run=True))
lmp.add_procedure(
pmd.MSDMeasurement(T=300,
group=system.solvent_group,
create_block_every=10000000,
duration=200000000,
dump_image=True,
reset_timestep_before_run=True))
job = pmd.Torque(run_lammps=lmp,
jobname=system_id,
project='GT-rramprasad3-CODA20',
nodes=3,
ppn=24,
walltime='72:00:00')
run = pmd.Pmd(system, lmp, job)
run.create(system_id, save_config=True)
|
ritesh001/Polymer-Molecular-Dynamics
|
scripts/Solvent_diffusivity/mkinput_solvent.py
|
mkinput_solvent.py
|
py
| 2,301 |
python
|
en
|
code
| 0 |
github-code
|
6
|
2785491141
|
# 205. Isomorphic Strings
# Input: s = "egg", t = "add"
# Output: true
# Input: s = "foo", t = "bar"
# Output: False
# Input: s = "paper", t = "title"
# Output: true
# I learn to solve this after watching a solution video. so cool!
def isomorphic(s,t):
dict1, dict2 = {},{}
for i in range(len(s)):
c1, c2 = s[i], t[i]
if (c1 in dict1 and dict1[c1] != c2) or (c2 in dict2 and dict2[c2] != c1):
return False
dict1[c1] = c2
dict2[c2] = c1
return True
# s= "paper"
# t = "title"
# s= "foo"
# t="bar"
s = "bbbaaaba"
t = "aaabbbba"
print(isomorphic(s,t))
|
Helenyixuanwang/algos
|
leet_205_isomorphicString.py
|
leet_205_isomorphicString.py
|
py
| 621 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7605645692
|
#two dimensional
nlist=['kumar','krihna priya','athul','Ravi']
deglist=['developer','tester','junior developer','HR']
salary=[54000,40000,30000,55000]
emp={'name':nlist,'Designation':deglist,'Salary':salary}
print(emp['name'][0])
lg=len(emp)
print("Name,Designation,Salary")
for j in range(lg):
print(emp['name'][j],emp['Designation'][j],emp['Salary'][j])
a=12
|
sathu341/pythonWork
|
Pythonworks/dictionary_emp.py
|
dictionary_emp.py
|
py
| 378 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6419648862
|
import time
import mysql.connector as mysql
try:
datebase_sql = mysql.connect(
host="database-1.cqpxsublkhcn.eu-central-1.rds.amazonaws.com",
port=3306,
user="user1",
passwd="1Passw0rd1",
database="QAP-05",
)
except Exception as err:
print(err)
cursor = datebase_sql.cursor(dictionary=True)
group_name = """(
SELECT s.name, s.surname, g.name as group_name, l.book_title, l.return_date FROM students s LEFT JOIN library l ON s.id = l.student
LEFT JOIN `groups` g ON s.id = g.id
)"""
cursor.execute(group_name)
result_table = cursor.fetchall()
count = 0
for students in result_table:
if students["return_date"] != None:
students["return_date"] = time.strftime(
"%B %d, %Y", time.strptime(students["return_date"], "%Y-%m-%d")
)
if students["return_date"] is None:
students["return_date"] = "неизвестного времени"
if students["book_title"] is None:
students["book_title"] = "не брал книгу"
if students["group_name"] is None:
students["group_name"] = "нет информации"
print(
f"Студент {students['name']} {students['surname']} учится в группе {students['group_name']} и взял в библиотеке следующую книгу: {students['book_title']} до {students['return_date']} года"
)
datebase_sql.commit()
datebase_sql.close()
|
APOSHAml/My-pieces-of-code
|
Homework_26/test_sql.py
|
test_sql.py
|
py
| 1,500 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74531247866
|
""" bst.py
Student: P.K Buddhika Chaturanga
Mail: [email protected]
Reviewed by: Tom Smedsaas
Date reviewed: 2021-110-03
"""
from math import log2
from linked_list import LinkedList
class BST:
class Node:
def __init__(self, key, left=None, right=None):
self.key = key
self.left = left
self.right = right
def __iter__(self): # Discussed in the text on generators
if self.left:
yield from self.left
yield self.key
if self.right:
yield from self.right
def __init__(self, root=None):
self.root = root
def __iter__(self): # Discussed in the text on generators
if self.root:
yield from self.root
def insert(self, key):
self.root = self._insert(self.root, key)
def _insert(self, r, key):
if r is None:
return self.Node(key)
elif key < r.key:
r.left = self._insert(r.left, key)
elif key > r.key:
r.right = self._insert(r.right, key)
else:
pass # Already there
return r
def insertX(self, key):
new_node = self.Node(key)
x = self.root
y = None
while x is not None:
# monitoring root
y = x
if key < x.key:
x = x.left
elif key > x.key:
x = x.right
else:
break
        if y is None:  # tree is empty, so new_node becomes the root
            y = new_node
            self.root = y
        elif key < y.key:  # new_node becomes the left child of the leaf we stopped at
            y.left = new_node
        elif key > y.key:  # new_node becomes the right child of the leaf we stopped at
            y.right = new_node
        # else: the key is already in the tree, so nothing is inserted
        return y
def print(self):
self._print(self.root)
def _print(self, r):
if r:
self._print(r.left)
print(r.key, end=' ')
self._print(r.right)
def contains(self, k):
n = self.root
while n and n.key != k:
if k < n.key:
n = n.left
else:
n = n.right
return n is not None
def _containsX(self, n, k):
if n and n.key == k:
return True
elif n is None:
return False
if k < n.key:
n = n.left
else:
n = n.right
return self._containsX(n, k)
def containsX(self, k):
n = self.root
return self._containsX(n, k)
def size(self):
return self._size(self.root)
def _size(self, r):
if r is None:
return 0
else:
return 1 + self._size(r.left) + self._size(r.right)
# experimental
# def sizeX(self):
# size = 0
# size += (1 for ele in self.root if ele.left or ele.right)
# return size
#
# Methods to be completed
#
def _height(self, r):
if r is None:
return 0
else:
return 1 + max(self._height(r.left), self._height(r.right))
def height(self): # Compulsory
if self.root is None:
return 0
r = self.root
if r.left is None and r.right is None:
return 1
return self._height(r)
def remove(self, key):
self.root = self._remove(self.root, key)
def _remove(self, r, k): # Compulsory
def smalleKeyNode(node):
current = node
while (current.left is not None):
current = current.left
return current
if r is None:
return None
elif k < r.key:
r.left = self._remove(r.left, k)
elif k > r.key:
r.right = self._remove(r.right, k)
else: # This is the key to be removed
if r.left is None: # Easy case
return r.right
elif r.right is None: # Also easy case
return r.left
else: # This is the tricky case.
# Find the smallest key in the right subtree
small_node = smalleKeyNode(r.right)
# Put that key in this node
r.key = small_node.key
# Remove that key from the right subtree
r.right = self._remove(r.right, small_node.key)
return r # Remember this! It applies to some of the cases above
def __str__(self): # Compulsory
if self.root is None:
return '<>'
return '<' + ', '.join(str(ele) for ele in self) + '>'
def to_list(self): # Compulsory
if self.root is None:
return []
lst = [ele for ele in self]
return lst
    # complexity: Theta(n) for the traversal plus n calls to LinkedList.insert,
    # so the total cost depends on how LinkedList.insert is implemented
    def to_LinkedList(self):  # Compulsory
        lst = LinkedList()
        if self.root is None:
            return lst
        for ele in self:
            lst.insert(ele)
        return lst
    def _ipl(self, r, lvl):
        if r is None:
            return 0
        else:
            return lvl + self._ipl(r.left, lvl + 1) + self._ipl(r.right, lvl + 1)
''''
r_cnt = 0
l_cnt = 0
if not (r.right or r.left):
return 0
if r.left is not None:
l_cnt += 1
l_cnt *= lvl
l_cnt += self._ipl(r.left, lvl + 1)
if r.right is not None:
r_cnt += 1
r_cnt *= lvl
r_cnt += self._ipl(r.right, lvl + 1)
return r_cnt + l_cnt
'''
def ipl(self): # Compulsory
return self._ipl(self.root, 1)
def random_tree(n): # Useful
import random
bst = BST()
for x in range(n):
bst.insert(random.random())
return bst
'''
def fib(n):
if n <= 1:
return n, n
else:
return fib(n - 1)[0] + fib(n - 2)[0], n
def multi_fib(fr=1, to=18):
from concurrent import futures as f
l = [i for i in range(fr, to + 1)]
with f.ProcessPoolExecutor() as ex:
results = ex.map(fib, l)
d = dict()
for value, key in results:
d[key] = value
print(d)
return d
'''
def main():
t = BST()
for x in [4, 1, 3, 6, 7, 1, 1, 5, 8]:
t.insertX(x)
t.print()
print()
print('size : ', t.size())
for k in [0, 1, 2, 5, 9]:
print(f"contains({k}): {t.containsX(k)}")
n_nodes = [1, 2, 4, 8, 16, 32, 64, 128]
cum_ipl = 0
for n in n_nodes:
bst = random_tree(n)
ipl = bst.ipl()
est_ipl = round(1.39 * n * log2(n), 2)
cum_ipl += ipl
print("BST size : ", bst.size(), " BST height : ", bst.height(), " BST IPL : ", ipl, \
" BST [IPL/n] Avg : ", round(ipl / n, 2), " EST IPL : ", est_ipl, " IPL Δ: ", \
abs(round(ipl - est_ipl, 2)), " Cumulative IPL : ", cum_ipl)
#multi_fib()
if __name__ == "__main__":
main()
"""
What is the generator good for?
==============================
1. computing size?
2. computing height?
3. contains?
4. insert?
5. remove?
The generator implements the iteration protocol, so it is an efficient way to traverse the current BST in key order.
Hence we can use it for contains and for computing the size; it is not suited to computing the height, insert or remove,
since the in-order sequence alone does not reveal the tree structure.
some cautions :- https://www.python.org/dev/peps/pep-0380/#optimisations
Results for ipl of random trees
===============================
# without the O(n) operation - 1.39*n*log2(n)
Sample one - number of nodes - [1, 2, 4, 8, 16, 32, 64 , 128]
BST size : 1 BST height : 1 BST IPL : 1 BST [IPL/n] Avg : 1.0 EST IPL : 0.0 IPL Δ: 1.0 Cumulative IPL : 1
BST size : 2 BST height : 2 BST IPL : 3 BST [IPL/n] Avg : 1.5 EST IPL : 2.78 IPL Δ: 0.22 Cumulative IPL : 4
BST size : 4 BST height : 3 BST IPL : 8 BST [IPL/n] Avg : 2.0 EST IPL : 11.12 IPL Δ: 3.12 Cumulative IPL : 12
BST size : 8 BST height : 4 BST IPL : 22 BST [IPL/n] Avg : 2.75 EST IPL : 33.36 IPL Δ: 11.36 Cumulative IPL : 34
BST size : 16 BST height : 8 BST IPL : 70 BST [IPL/n] Avg : 4.38 EST IPL : 88.96 IPL Δ: 18.96 Cumulative IPL : 104
BST size : 32 BST height : 8 BST IPL : 155 BST [IPL/n] Avg : 4.84 EST IPL : 222.4 IPL Δ: 67.4 Cumulative IPL : 259
BST size : 64 BST height : 11 BST IPL : 380 BST [IPL/n] Avg : 5.94 EST IPL : 533.76 IPL Δ: 153.76 Cumulative IPL : 639
BST size : 128 BST height : 16 BST IPL : 1132 BST [IPL/n] Avg : 8.84 EST IPL : 1245.44 IPL Δ: 113.44 Cumulative IPL : 1771
Sample Two number of nodes - [1, 2, 5, 7, 11, 13, 29, 47, 123] Prime nodes
BST size : 1 BST height : 1 BST IPL : 1 BST [IPL/n] Avg : 1.0 EST IPL : 0.0 IPL Δ: 1.0 Cumulative IPL : 1
BST size : 2 BST height : 2 BST IPL : 3 BST [IPL/n] Avg : 1.5 EST IPL : 2.78 IPL Δ: 0.22 Cumulative IPL : 4
BST size : 5 BST height : 3 BST IPL : 11 BST [IPL/n] Avg : 2.2 EST IPL : 16.14 IPL Δ: 5.14 Cumulative IPL : 15
BST size : 7 BST height : 5 BST IPL : 20 BST [IPL/n] Avg : 2.86 EST IPL : 27.32 IPL Δ: 7.32 Cumulative IPL : 35
BST size : 11 BST height : 6 BST IPL : 43 BST [IPL/n] Avg : 3.91 EST IPL : 52.89 IPL Δ: 9.89 Cumulative IPL : 78
BST size : 13 BST height : 7 BST IPL : 61 BST [IPL/n] Avg : 4.69 EST IPL : 66.87 IPL Δ: 5.87 Cumulative IPL : 139
BST size : 29 BST height : 8 BST IPL : 137 BST [IPL/n] Avg : 4.72 EST IPL : 195.83 IPL Δ: 58.83 Cumulative IPL : 276
BST size : 47 BST height : 10 BST IPL : 280 BST [IPL/n] Avg : 5.96 EST IPL : 362.88 IPL Δ: 82.88 Cumulative IPL : 556
BST size : 123 BST height : 14 BST IPL : 890 BST [IPL/n] Avg : 7.24 EST IPL : 1186.96 IPL Δ: 296.96 Cumulative IPL : 1446
Machine - (Intel Corei7 Gen 11 Windows 11 x64)
i > As shown experimentally above, when the number of nodes grows exponentially, the deviation between the theoretical
IPL and the calculated IPL increases, even without adding the O(n) operation.
The disagreement becomes noticeable once the number of nodes gets large.
ii > The height is roughly twice the average IPL per node, i.e. h ≈ 2*(ipl/n).
"""
|
bupa8694/programming2
|
Pyhton_Assignments/MA3/MA3/bst.py
|
bst.py
|
py
| 10,192 |
python
|
en
|
code
| 1 |
github-code
|
6
|
5734551584
|
from time import time
start = time()
def josephus(n):
if n == 0: return False
    flag = n % 2
    counter = 0
    index = n - 1
    people = [1] * n
    while True:
        if sum(people) == 1:
            return index % n  # for pattern finding one could also return counter, flag and n
elif people[index%n] == 1:
temp_index = (index%n)+1
while True:
if people[temp_index%n] == 1:
people[temp_index%n] = 0
break
else:
temp_index += 1
index += 1
if index%n == 0:
counter += 1
pattern = []
for a in range(500):
pattern.append((josephus(a), a))
print(pattern)
print(time()-start)
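# For comparison, a hedged sketch of the classical closed-form answer for the k=2
# Josephus problem: with n = 2**m + l (0 <= l < 2**m) the survivor sits at 1-indexed
# position 2*l + 1. The simulation above starts the count at a different seat and
# reports a 0-indexed value, so its sequence may be shifted relative to this one.
def josephus_closed_form(n):
    if n == 0:
        return False
    highest_power_of_two = 1 << (n.bit_length() - 1)
    return 2 * (n - highest_power_of_two) + 1

print([josephus_closed_form(a) for a in range(1, 20)])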
|
wittwang98/Random
|
Josephus Problem.py
|
Josephus Problem.py
|
py
| 719 |
python
|
en
|
code
| 0 |
github-code
|
6
|
719390909
|
""" mcandecode.py
  MCAN module
  Module for managing, analysing and creating
  Maerklin CAN bus messages
Author: Rainer Maier-Lohmann
---------------------------------------------------------------------------
"THE BEER-WARE LICENSE" (Revision 42):
<[email protected]> wrote this file. As long as you retain this notice you
can do whatever you want with this stuff. If we meet some day, and you
think this stuff is worth it, you can buy me a beer in return.
---------------------------------------------------------------------------
(c) 2020
"""
from struct import unpack
from os import linesep
from .mcanmsgarray import McanMsgArray
# -----------------------------------------------------------------------------
# Class for analysing a Maerklin CAN bus data frame
# -----------------------------------------------------------------------------
class McanDecode(McanMsgArray):
__commands = { 0 : 'System command'
, 2 : 'MFX Discovery'
, 4 : 'MFX Bind'
, 6 : 'MFX Verify'
, 8 : 'Loc speed'
, 10 : 'Loc direction'
, 12 : 'Loc function'
, 14 : 'Loc read config'
, 16 : 'Loc write config'
, 22 : 'Equipment switch'
, 34 : 'Track state'
, 48 : 'Member ping'
, 58 : 'Statusdata config'
}
__subcmds = { 0 : 'STOPP'
, 1 : 'GO'
, 2 : 'HALT'
, 3 : 'Loc emergency stop'
, 4 : 'Loc end cycle'
, 9 : 'MFX new notifying counter'
, 10 : 'SYSTEM OVERLOAD'
, 11 : 'System status'
, 48 : 'System ???'
}
__cmdType = { 0: 'command'
, 1: 'response'
}
__directions={ 0 : 'remain'
, 1 : 'forward'
, 2 : 'backward'
, 3 : 'switch'
}
__outFormat = ' {:15} {}'
# 1 4 7 10 13 16 19 22 25 28 31 33 36 : Laenge: 37
# 0 3 6 9 12 15 18 21 24 27 30 32 35
# 00 00 cf 52 06 43 54 5a 86 30 01 00 00
# p cr hh hl dl d0 d1 d2 d3 d4 d5 d6 d7
def __init__(self, message):
super().__init__(unpack('13B', message))
self.__command = self.getByte('cmdAndResp') & 0xfe
self.__response = self.getByte('cmdAndResp') & 0x01
def decode(self):
out = str(self)
out += linesep
out += self.__getFormatedOut('Msg-Type:', self.__cmdType[self.__response])
out += self.__getFormatedOut('Command:', self.__decodeCmd())
out += self.__getFormatedOut('DataLenCount:', self.getByte('dlc'))
if self.__isValidCmd():
decodeFuncs = { 0 : self.__decodeCmd00
, 2 : self.__decodeCmd02
, 4 : self.__decodeCmd04
, 6 : self.__decodeCmd06
, 8 : self.__decodeCmd08
, 10 : self.__decodeCmd10
, 12 : self.__decodeCmd12
, 14 : self.__decodeCmd14
, 16 : self.__decodeCmd16
, 22 : self.__decodeCmd22
, 34 : self.__decodeCmd34
, 48 : self.__decodeCmd48
, 58 : self.__decodeCmd58
}
out += decodeFuncs[self.__command]()
return out
def __isValidCmd(self):
return self.__command in self.__commands.keys()
def __isValidSubCmd(self):
return self.getByte('d4') in self.__subcmds.keys()
def __getFormatedOut(self, msg, val):
return self.__outFormat.format(msg, val) + linesep
def __getFormatedIdOut(self):
deviceId = '{:02x}'.format(self.getByte('d0'))
deviceId += ' {:02x}'.format(self.getByte('d1'))
deviceId += ' {:02x}'.format(self.getByte('d2'))
deviceId += ' {:02x}'.format(self.getByte('d3'))
return self.__getFormatedOut('Device-ID:', deviceId)
    def __decodeCmd00(self):
        out = self.__getFormatedIdOut()
        # compute the subcommand up front so the "unknown subcommand" branch can use it too
        subCmd = self.getByte('d4')
        subCmdB = subCmd.to_bytes(1, 'little')
        if self.__isValidSubCmd():
            subCmdName = '{} ({}) {}'.format(subCmd, subCmdB.hex(), self.__subcmds[subCmd])
            out += self.__getFormatedOut('Subcommand:', subCmdName)
            decodeFuncs = { 0 : self.__decodeCmd00Sub00
                          , 1 : self.__decodeCmd00Sub01
                          , 2 : self.__decodeCmd00Sub02
                          , 3 : self.__decodeCmd00Sub03
                          , 4 : self.__decodeCmd00Sub04
                          , 9 : self.__decodeCmd00Sub09
                          , 10 : self.__decodeCmd00Sub0a
                          , 11 : self.__decodeCmd00Sub0b
                          , 48 : self.__decodeCmd00Sub30
                          }
            out += decodeFuncs[subCmd]()
        else:
            out += self.__getFormatedOut('Subcommand:', '{} ({}) unknown subcommand'.format(subCmd, subCmdB.hex()))
        return out
def __decodeCmd00Sub00(self):
return ''
def __decodeCmd00Sub01(self):
return ''
def __decodeCmd00Sub02(self):
return ''
def __decodeCmd00Sub03(self):
return ''
def __decodeCmd00Sub04(self):
return ''
def __decodeCmd00Sub09(self):
return self.__getFormatedOut('NN-counter:','{} {}'.format(self.getByte('d5')
,self.getByte('d6')))
def __decodeCmd00Sub0a(self):
return self.__getFormatedOut('Chanel:',self.getByte('d5'))
def __decodeCmd00Sub0b(self):
return self.__getFormatedOut('Chanel:',self.getByte('d5'))
def __decodeCmd00Sub30(self):
return self.__getFormatedOut('Value:',self.getByte('d5'))
    def __decodeCmd02(self):
        out = ''
        dlc = self.getByte('dlc')  # the instance has no __dlc attribute; read it via getByte
        if dlc in [1, 5, 6]:
            decodeFuncs = { 1 : self.__decodeCmd02Dlc01
                          , 5 : self.__decodeCmd02Dlc05
                          , 6 : self.__decodeCmd02Dlc06
                          }
            out += decodeFuncs[dlc]()
        else:
            out += self.__getFormatedOut('Subcommand:', 'unknown datalength for this subcommand')
        return out
    def __decodeCmd02Dlc01(self):
        out = ''
        d0 = self.getByte('d0')  # read the data byte via getByte instead of an undefined attribute
        if d0 == 33:
            out += self.__getFormatedOut('Protocol:', 'MM2')
        elif d0 < 33:
            out += self.__getFormatedOut('Protocol:', 'MFX-ProgRail - {:02x}'.format(d0))
        else:
            out += self.__getFormatedOut('Protocol:', 'MFX-MainRail - {:02x}'.format(d0))
        return out
    def __decodeCmd02Dlc05(self):
        out = self.__getFormatedIdOut()
        d4 = self.getByte('d4')  # read the data byte via getByte instead of an undefined attribute
        if d4 == 33:
            out += self.__getFormatedOut('Protocol:', 'MM2')
        elif d4 < 33:
            out += self.__getFormatedOut('Protocol:', 'MFX-ProgRail - {:02x}'.format(d4))
        else:
            out += self.__getFormatedOut('Protocol:', 'MFX-MainRail - {:02x}'.format(d4))
        return out
    def __decodeCmd02Dlc06(self):
        out = self.__decodeCmd02Dlc05()
        out += self.__getFormatedOut('Signal-Quality:', '{:02x}'.format(self.getByte('d5')))
        return out
def __decodeCmd04(self):
out = self.__getFormatedIdOut()
return out
def __decodeCmd06(self):
out = self.__getFormatedIdOut()
return out
def __decodeCmd08(self):
out = self.__getFormatedIdOut()
return out
def __decodeCmd10(self):
out = self.__getFormatedIdOut()
return out
def __decodeCmd12(self):
out = self.__getFormatedIdOut()
return out
def __decodeCmd14(self):
out = self.__getFormatedIdOut()
return out
def __decodeCmd16(self):
out = self.__getFormatedIdOut()
return out
def __decodeCmd22(self):
out = self.__getFormatedIdOut()
out += self.__getFormatedOut('Position:', self.getByte('d4'))
        out += self.__getFormatedOut('Current:', self.getByte('d5'))
        if self.getByte('dlc') == 8:
            out += self.__getFormatedOut('Switching time:', '{} {}'.format(self.getByte('d6'), self.getByte('d7')))
return out
def __decodeCmd34(self):
out = self.__getFormatedIdOut()
out += self.__getFormatedOut('device:', self.getByte('d0')*256 + self.getByte('d1'))
out += self.__getFormatedOut('contact:', self.getByte('d2')*256 + self.getByte('d3'))
out += self.__getFormatedOut('state(recent):', self.getByte('d4'))
out += self.__getFormatedOut('state:', self.getByte('d5'))
return out
def __decodeCmd48(self):
out = self.__getFormatedIdOut()
if self.__response != 1:
pass
else:
out += self.__getFormatedOut('SW-Version:', '{}.{}'.format(self.getByte('d4'), self.getByte('d5')))
out += self.__getFormatedOut('DB-Version:', '{}.{}'.format(self.getByte('d6'), self.getByte('d7')))
return out
    def __decodeCmd58(self):
        dlc = self.getByte('dlc')
        if dlc == 5 or dlc == 6:
            return self.__getFormatedIdOut()
        return ''  # return an empty string so decode() never concatenates None
def __decodeCmd(self):
out = '{0} ({0:02x}) - '.format(self.__command)
if self.__isValidCmd():
out += self.__commands[self.__command]
else:
out += 'unknown command'
return out
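# ---------------------------------------------------------------------------
# Usage sketch (an addition, not part of the original module): decode one frame.
# The example bytes follow the layout comment in __init__; McanMsgArray is assumed
# to accept the unpacked tuple and to provide getByte()/__str__() as used above.
# Because of the relative import at the top, run it through the package, e.g.
# `python -m mcan.mcandecode`.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from struct import pack
    frame = pack('13B', 0x00, 0x00, 0xcf, 0x52, 0x06, 0x43,
                 0x54, 0x5a, 0x86, 0x30, 0x01, 0x00, 0x00)
    print(McanDecode(frame).decode())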
|
rml60/mcan
|
mcan/mcandecode.py
|
mcandecode.py
|
py
| 8,968 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11630342993
|
import os
"""
The os functions that we are given are:
os.path.getsize(path)
os.path.isfile(path)
os.listdir(path)
os.path.join(path, filename)
"""
def disk_usage_tail_recursion(path, size_so_far=0):
"""
Tail recursion implementation of the disk usage function.
"""
size_so_far += os.path.getsize(path)
# 1- base case
if os.path.isfile(path):
print('{0:<7}'.format(size_so_far), path)
return size_so_far
# 2- solve sub-problems step
for child in os.listdir(path):
child_path = os.path.join(path, child)
size_so_far = disk_usage_tail_recursion(child_path, size_so_far)
# 3- combine sub-solutions
# no need
print('{0:<7}'.format(size_so_far), path)
return size_so_far
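# For comparison: an iterative sketch (an addition, not part of the original webinar code)
# that computes the same total with an explicit stack instead of recursion, using only the
# os functions listed above (without the per-path printing).
def disk_usage_iterative(path):
    total = 0
    stack = [path]
    while stack:
        current = stack.pop()
        total += os.path.getsize(current)
        if not os.path.isfile(current):
            for child in os.listdir(current):
                stack.append(os.path.join(current, child))
    return total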
if __name__ == '__main__':
disk_usage_tail_recursion('/Users/junior/geekgap/Repos/geekgap_webinars')
|
geekgap-io/geekgap_webinars
|
geekgap_webinars/notebooks/webinar_2/disk_usage_tail_recursion.py
|
disk_usage_tail_recursion.py
|
py
| 863 |
python
|
en
|
code
| 1 |
github-code
|
6
|
24252533109
|
from phrase import Phrase
import random
class Game:
def __init__(self):
self.missed = 0
self.phrases = self.create_phrases()
self.active_phrase = self.get_random_phrase()
self.guesses = []
def create_phrases(self):
phrases = [Phrase("Most things that never get done never get done because they never get started"), Phrase("Discipline equals freedom"), Phrase("Do what you can with all you have wherever you are"), Phrase("Get after it"), Phrase("Up and Rock and Roll")]
return phrases
def get_random_phrase(self):
return random.choice(self.phrases)
def welcome(self):
print("="*60+"""
*** WELCOME TO NIKOLAI'S MOTIVATIONAL PHRASE HUNTER 2020 ***
"""+"="*60, "\nRULES ===> You've got 5 tries to guess the phrase.\nPlease enter 1 letter at a time.\n")
def get_guess(self):
while True:
user_guess = (input("Please enter a letter: ")).lower()
if not user_guess.isalpha():
print("That's not a valid selection. Please enter a letter.")
elif len(user_guess) != 1:
print("Please enter one letter at a time.")
else:
return user_guess
def start(self):
self.welcome()
self.active_phrase.display(self.guesses)
while not self.missed >= 5:
print(f"*** Number missed: {self.missed} *** \n")
user_guess = self.get_guess()
self.guesses.append(user_guess)
if self.active_phrase.check_guess(user_guess):
print("YAY!\n")
self.active_phrase.display(self.guesses)
if self.active_phrase.check_complete(self.guesses):
print("CONGRATS! You win!\n")
break
if not self.active_phrase.check_guess(user_guess):
self.missed += 1
print("\nBummer :(\n")
if self.missed == 5:
print("You lost. Please play again!\n")
|
Nikolai-O/python-techdegree-project3
|
game.py
|
game.py
|
py
| 2,068 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16432205393
|
# IndicatorStrategy
class IndicatorStrategy(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2020, 1, 1)
self.SetEndDate(2021, 1, 1)
self.SetCash(10000)
self.spy = self.AddEquity("SPY", Resolution.Daily).Symbol
self.sma = CustomSimpleMovingAverage("CustomSMA", 30)
self.RegisterIndicator(self.spy, self.sma, Resolution.Daily)
        #self.sma = self.SMA(self.spy, 30, Resolution.Daily) # 30 period Simple Moving Average
        #closing_prices = self.History(self.spy, 30, Resolution.Daily)['close']
        # this warms up the simple moving average with the days before the start date so it would be ready from the beginning
#for time, price in closing_prices.loc[self.spy].items():
# self.sma.Update(time, price)
def OnData(self, data):
if not self.sma.IsReady:
return
# get historical data for 1 year
hist = self.History(self.spy, timedelta(365), Resolution.Daily)
low = min(hist["low"]) # get the low of that period
high = max(hist["high"]) # get the maximun value of that period
price = self.Securities[self.spy].Price
        # is the SPY price within 5% of the 365-day high and above the simple moving average?
        if price * 1.05 >= high and self.sma.Current.Value < price:
            # if yes, check whether we already have an open long position
            if not self.Portfolio[self.spy].IsLong:
                # if we don't have an open position, invest 100% of the portfolio
                self.SetHoldings(self.spy, 1)
        # if the price is within 5% of the low and the SMA is above the current price
elif price * 0.95 <= low and self.sma.Current.Value > price:
if not self.Portfolio[self.spy].IsShort:
self.SetHoldings(self.spy, -1)
else:
self.Liquidate()
self.Plot("Benchmark", "52w-High", high)
self.Plot("Benchmark", "52w-low", low)
self.Plot("Benchmark", "SMA", self.sma.Current.Value)
|
sotoblanco/QuantConnectTrading
|
IndicatosStrategy.py
|
IndicatosStrategy.py
|
py
| 2,046 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75241220986
|
"""
Author: Tyler Wagner
Date Created: 7-21-23
Edited By: Tyler Van Pelt
Edited On: 7-28-23
"""
import tkinter as tk
def draw_decision_tree(canvas, node, x, y, depth=0):
if node is None:
return
# Draw the current node
if node.attribute is not None and node.threshold is not None:
node_size = 30
canvas.create_oval(x - node_size, y - node_size, x + node_size, y + node_size, fill="blue")
text_x = x - 30
text_y = y - 10
# Convert node.threshold to float if it is numeric
if node.threshold.replace('.', '', 1).isdigit():
node.threshold = float(node.threshold)
canvas.create_text(text_x, text_y, text=f'Attr: {node.attribute}\nThresh: {node.threshold:.2f}', fill="black")
else:
canvas.create_text(text_x, text_y, text=f'Attr: {node.attribute}\nThresh: {node.threshold}', fill="black")
if node.left is not None:
x_left = x - 100 / (2 ** depth)
y_left = y + 60
canvas.create_line(x, y, x_left, y_left, fill="red")
draw_decision_tree(canvas, node.left, x_left, y_left, depth + 1)
if node.right is not None:
x_right = x + 100 / (2 ** depth)
y_right = y + 60
canvas.create_line(x, y, x_right, y_right, fill="blue")
draw_decision_tree(canvas, node.right, x_right, y_right, depth + 1)
def display_results_window(predictions_test, predictions_new_data, root_node):
window = tk.Tk()
window.title("Decision Tree Results")
canvas = tk.Canvas(window, width=800, height=600, bg="white")
canvas.pack()
# Test data predictions
test_result_label = tk.Label(window, text="Predicted class labels for test data:")
test_result_label.pack()
test_results_var = tk.StringVar()
test_results_var.set(predictions_test)
test_results_label = tk.Label(window, textvariable=test_results_var)
test_results_label.pack()
# New data predictions
new_data_result_label = tk.Label(window, text="Predicted class labels for new data:")
new_data_result_label.pack()
new_data_results_var = tk.StringVar()
new_data_results_var.set(predictions_new_data)
new_data_results_label = tk.Label(window, textvariable=new_data_results_var)
new_data_results_label.pack()
# Draw the decision tree
draw_decision_tree(canvas, root_node, 400, 40)
window.mainloop()
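# The decision-tree Node type is defined elsewhere in the assignment. Below is a minimal
# hypothetical stand-in with the attributes this GUI reads (attribute, threshold, left,
# right) plus a small demo window; all names and values here are illustrative assumptions.
class _DemoNode:
    def __init__(self, attribute=None, threshold=None, left=None, right=None):
        self.attribute = attribute
        self.threshold = threshold
        self.left = left
        self.right = right

if __name__ == "__main__":
    demo_root = _DemoNode("petal_len", "2.45",
                          left=_DemoNode("petal_wid", "1.75"),
                          right=_DemoNode("sepal_len", "5.0"))
    display_results_window(["A", "B"], ["B"], demo_root)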
|
Tyler-Wagner/Programming-Assignment-2
|
GUI.py
|
GUI.py
|
py
| 2,389 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6446901297
|
from telemetry.page import page as page_module
from telemetry import story
class StartedPage(page_module.Page):
def __init__(self, url, startup_url, page_set):
super(StartedPage, self).__init__(
url=url, page_set=page_set, startup_url=startup_url)
self.archive_data_file = 'data/startup_pages.json'
def RunNavigateSteps(self, action_runner):
action_runner.Wait(10)
def RunPageInteractions(self, action_runner):
self.RunNavigateSteps(action_runner)
class StartupPagesPageSet(story.StorySet):
""" Pages for testing starting Chrome with a URL.
Note that this file can't be used with record_wpr, since record_wpr requires
a true navigate step, which we do not want for startup testing. Instead use
record_wpr startup_pages_record to record data for this test.
"""
def __init__(self):
super(StartupPagesPageSet, self).__init__(
archive_data_file='data/startup_pages.json',
cloud_storage_bucket=story.PARTNER_BUCKET)
# Typical page.
self.AddStory(StartedPage('about:blank', 'about:blank', self))
# Typical page.
self.AddStory(StartedPage('http://bbc.co.uk', 'http://bbc.co.uk', self))
# Horribly complex page - stress test!
self.AddStory(StartedPage(
'http://kapook.com', 'http://kapook.com', self))
|
danrwhitcomb/Monarch
|
tools/perf/page_sets/startup_pages.py
|
startup_pages.py
|
py
| 1,298 |
python
|
en
|
code
| 5 |
github-code
|
6
|
25687467876
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, in_channel, out_channel, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channel)
self.conv2 = nn.Conv2d(out_channel, out_channel, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_channel)
self.shortcut = nn.Sequential(
nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_channel)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResFeatureNet(nn.Module):
def __init__(self):
super().__init__()
self.f1 = nn.Sequential(
nn.Conv2d(1, 32, kernel_size=7, stride=1, padding=3),
nn.BatchNorm2d(32),
nn.ReLU())
self.res1 = BasicBlock(32, 64, stride=2)
self.res2 = BasicBlock(64, 128, stride=2)
self.res3 = BasicBlock(128, 128, stride=2)
self.res4 = BasicBlock(128, 256, stride=2)
# self.res5 = BasicBlock(512, 1024, stride=2)
self.flatten = nn.Flatten()
def forward(self, x):
o = self.f1(x)
o = self.res1(o)
o = self.res2(o)
o = self.res3(o)
o = self.res4(o)
# o = self.res5(o)
o = self.flatten(o)
return o
class FeatureNet(nn.Module):
def __init__(self):
super(FeatureNet, self).__init__()
self.f = nn.Sequential(
nn.Conv2d(1, 24, kernel_size=7, stride=1, padding=3),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
nn.Conv2d(24, 64, kernel_size=5, stride=1, padding=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
nn.Conv2d(64, 96, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(96, 96, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(96, 64, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
nn.Flatten(), # flatten directly without bottleneck
)
def forward(self, x):
out = self.f(x)
return out
class MetricNet(nn.Module):
def __init__(self, in_dim=4096, hidden_size=512):
super(MetricNet, self).__init__()
self.fc = nn.Sequential(
nn.Linear(in_dim * 2, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, 2)
)
def forward(self, x):
return self.fc(x)
class Projection(nn.Module):
def __init__(self, in_dim=4096, hidden_size=1024):
        super(Projection, self).__init__()  # was super(MetricNet, ...), which would fail at runtime
self.fc = nn.Sequential(
nn.Linear(in_dim, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size)
)
def forward(self, x):
return self.fc(x)
if __name__ == "__main__":
x = torch.randn(1, 1, 64, 64)
# m = FeatureNet()
m = ResFeatureNet()
o = m(x)
print(o.size())
from utils import cal_parameters
print(cal_parameters(m))
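# utils.cal_parameters is imported above but not shown in this file. A minimal sketch of
# what such a helper typically does (counting trainable parameters); the real
# implementation may differ.
def cal_parameters_sketch(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)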
|
p3i0t/task2
|
models.py
|
models.py
|
py
| 3,506 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7170796324
|
# Answer to Apple and Orange
# https://www.hackerrank.com/challenges/apple-and-orange/problem
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the countApplesAndOranges function below.
def countApplesAndOranges(s, t, a, b, apples, oranges):
apple, orange = 0, 0
for i in range(len(apples)):
apples[i] += a
for i in range(len(oranges)):
oranges[i] += b
for i in apples:
if i >= s and i <= t:
apple += 1
for i in oranges:
if i >= s and i <= t:
orange += 1
print(apple,orange,sep='\n')
if __name__ == '__main__':
st = input().split()
s = int(st[0])
t = int(st[1])
ab = input().split()
a = int(ab[0])
b = int(ab[1])
mn = input().split()
m = int(mn[0])
n = int(mn[1])
apples = list(map(int, input().rstrip().split()))
oranges = list(map(int, input().rstrip().split()))
countApplesAndOranges(s, t, a, b, apples, oranges)
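# A more concise variant of the same count (an alternative sketch, not from the original
# solution): shift each drop by its tree position and count landings inside [s, t].
def count_apples_and_oranges(s, t, a, b, apples, oranges):
    print(sum(s <= a + d <= t for d in apples))
    print(sum(s <= b + d <= t for d in oranges))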
|
CompetitiveCode/hackerrank-python
|
Practice/Algorithms/Implementation/Apple and Orange.py
|
Apple and Orange.py
|
py
| 993 |
python
|
en
|
code
| 1 |
github-code
|
6
|
32197951073
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
driver=webdriver.Chrome(executable_path='./driver/chromedriver')
driver.get('https://web.whatsapp.com/')
input("please scan qr code and press any key to continue:")
RM=driver.find_element_by_css_selector('span[title="Assignments & CT"]')
RM.click()
testinput=driver.find_element_by_xpath("/html/body/div/div[1]/div[1]/div[4]/div[1]/footer/div[1]/div[2]/div/div[1]/div/div[2]")
time.sleep(10)
testinput.send_keys("Hello friends")
testinput.send_keys(Keys.RETURN)
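# Note: find_element_by_* and executable_path were removed in Selenium 4. Below is a
# hedged sketch of the same flow with the current API (Service + By locators); the CSS
# selector and XPath are reused from above and may need updating as WhatsApp Web changes.
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By

def send_message_selenium4(chat_title, text):
    driver4 = webdriver.Chrome(service=Service('./driver/chromedriver'))
    driver4.get('https://web.whatsapp.com/')
    input("please scan qr code and press any key to continue:")
    driver4.find_element(By.CSS_SELECTOR, f'span[title="{chat_title}"]').click()
    box = driver4.find_element(By.XPATH, "/html/body/div/div[1]/div[1]/div[4]/div[1]/footer/div[1]/div[2]/div/div[1]/div/div[2]")
    time.sleep(10)
    box.send_keys(text)
    box.send_keys(Keys.RETURN)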
|
AbhayPal005/Whatsaap-Automation-Using-Selenium
|
chrome_driver_windows.py
|
chrome_driver_windows.py
|
py
| 575 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70064084669
|
from django.conf.urls import url
from tests import views, exceptions
urlpatterns = [
url(r'^snippets/$', views.SnippetList.as_view(), name='snippet-list'),
url(r'^snippets2/$', views.SnippetList.as_view(), name='snippet2-list'),
url(r'^snippet/(?P<pk>\d+)/$', views.SnippetDetail.as_view(),
name='snippet-detail'),
url(r'^server_error/$', exceptions.server_error, name='server-error'),
url(r'^not_found/$', exceptions.not_found, name='not-found'),
url(r'^method_not_allowed/$', exceptions.method_not_allowed,
name='not-allowed'),
url(r'^not_authenticated/$', exceptions.not_authenticated,
name='not-authenticated'),
]
|
FutureMind/drf-friendly-errors
|
tests/urls.py
|
urls.py
|
py
| 673 |
python
|
en
|
code
| 129 |
github-code
|
6
|
26693503665
|
import pytorch_lightning as pl
import pandas as pd
import torch
from torch import nn
from torch.utils.data import DataLoader
from sklearn.metrics import cohen_kappa_score
from transformers import AutoTokenizer, RobertaForSequenceClassification
from torch.utils.data import Dataset
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import (LearningRateMonitor,
EarlyStopping,
ModelCheckpoint,
TQDMProgressBar)
from pytorch_lightning import seed_everything
import wandb
import click
def kappa(y, yhat):
y = y.cpu().numpy()
yhat = yhat.cpu().numpy()
return cohen_kappa_score(y, yhat, weights="quadratic")
class SmilesDataset(Dataset):
def __init__(self,
filename,
load_labels=True
):
self.load_labels = load_labels
# Contains columns: Id, smiles, sol_category
self.df = pd.read_csv(filename)
self.smiles = (self.df["smiles"].values.tolist())
if self.load_labels:
self.labels = self.df["sol_category"].values
self.point_id = self.df["Id"].values
# Need to override methods __len__ and __getitem__
def __len__(self):
return self.df.shape[0]
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
smiles = self.smiles[idx]
if "Id" in self.df.columns:
ids = self.point_id[idx]
if self.load_labels:
labels = torch.as_tensor(self.labels[idx])
return smiles, labels, idx, ids
else:
return smiles, idx, ids
else:
if self.load_labels:
labels = torch.as_tensor(self.labels[idx])
return smiles, labels, idx
else:
return smiles, idx
class ChemBERTa(pl.LightningModule):
def __init__(self,
size,
num_classes,
data_dir,
learning_rate=1e-3,
batch_size=300,
dropout=0.3,
weights=True,
file_template="split_{}.csv",
):
super().__init__()
# Define loss function:
if weights:
print("*** training with weighted loss ***")
self.Loss = nn.CrossEntropyLoss(weight=torch.Tensor([0.9711, 0.9599, 0.068]),
reduction='mean')
else:
print("*** training WITHOUT weights ***")
self.Loss = nn.CrossEntropyLoss(reduction='mean')
# Data loading variables
self.num_workers = 4*torch.cuda.device_count() # 8
self.batch_size = batch_size
# Data paths
self.data_dir = data_dir
self.train_file = file_template.format("train")
self.valid_file = file_template.format("valid")
self.test_file = "test.csv"
# Model specific variables
self.learning_rate = learning_rate
# Define PyTorch model
self.pretrained = "DeepChem/ChemBERTa-10M-MTR" #DeepChem/ChemBERTa-77M-MTR
self.tokenizer = (AutoTokenizer.
from_pretrained(
self.pretrained
))
self.model = (RobertaForSequenceClassification
.from_pretrained(
self.pretrained,
num_labels=num_classes
))
def forward(self, x):
# define prediction/inference actions
x = self.tokenizer(list(x),
return_tensors="pt",
padding=True)
x = {key: x[key].to("cuda:0")
for key in x.keys()}
return self.model(**x).logits
def training_step(self, batch, batch_idx):
# define train loop
x, y, idxs, p_ids = batch
logits = self(x)
loss = self.Loss(logits, y)
self.log(f"train_loss", loss, on_epoch=True, on_step=False)
return loss
def validation_step(self, batch, batch_idx):
x, y, idxs, p_ids = batch
logits = self(x)
pred = nn.Softmax(dim=1)(logits)
pred = torch.argmax(pred, dim=1)
kap = kappa(y, pred)
self.log(f"valid_kappa", kap, on_epoch=True, on_step=False, prog_bar=True)
def test_step(self, batch, batch_idx):
x, idxs, p_ids = batch
logits = self(x)
pred = nn.Softmax(dim=1)(logits)
pred = torch.argmax(pred, dim=1).cpu().numpy()
return pd.DataFrame(list(zip(p_ids, pred)))
def test_epoch_end(self, outputs):
# Concat all test results
print(outputs)
all_outs = pd.concat(outputs)
print(all_outs)
all_outs.columns = ["Id", "pred"]
all_outs.to_csv(f"Chemberta_train.csv", index=False)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(),
lr=self.learning_rate)
lr_scheduler = {
"scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer=optimizer,
mode="max",
factor=0.3,
patience=1,
cooldown=0,
verbose=True
),
"monitor": "valid_kappa"
}
return [optimizer], [lr_scheduler]
def setup(self, stage=None):
# Assign train/val datasets for use in dataloaders
if stage == "fit" or stage is None:
self.train_data = SmilesDataset(self.data_dir + self.train_file)
self.val_data = SmilesDataset(self.data_dir + self.valid_file)
# Assign test dataset for use in dataloader(s)
if stage == "test" or stage is None:
self.test_data = SmilesDataset(self.data_dir + self.test_file,
load_labels=False)
def train_dataloader(self):
return DataLoader(self.train_data,
batch_size=self.batch_size,
num_workers=self.num_workers)
def test_dataloader(self):
return DataLoader(self.test_data,
batch_size=2000,
num_workers=self.num_workers)
def val_dataloader(self):
return DataLoader(self.val_data,
batch_size=2000,
num_workers=self.num_workers)
#############################################################
@click.command()
@click.option("--size", type=int, default=300)
@click.option("--num_classes", type=int, default=3)
@click.option("--max_epochs", type=int, default=50)
@click.option("--data_dir", type=str, default="../../data/")
@click.option("--learning_rate", type=float, default=1e-3)
@click.option("--batch_size", type=int, default=30)
@click.option("--weights", is_flag=True)
def main(size,
num_classes,
max_epochs,
data_dir,
learning_rate,
batch_size,
weights=True
):
"""
Train and evaluate model
"""
seed = 0
seed_everything(seed, workers=True)
wandb.init(project="solubility_prediction")
model = ChemBERTa(
size=size,
num_classes=num_classes,
data_dir=data_dir,
learning_rate=learning_rate,
batch_size=batch_size,
        weights=weights  # pass the --weights flag through instead of hard-coding True
)
wandb_logger = WandbLogger()
wandb.watch(model)
checkpoint_callback = ModelCheckpoint(dirpath="models/checkpoint/",
filename="best",
save_last=False,
save_top_k=1,
monitor="valid_kappa",
mode="max")
earlystop_callback = EarlyStopping(monitor="valid_kappa",
mode="max",
patience=3,
min_delta=0.001,
verbose=True)
trainer = pl.Trainer(
accelerator="auto",
devices=1 if torch.cuda.is_available() else None,
max_epochs=max_epochs,
callbacks=[TQDMProgressBar(refresh_rate=5),
LearningRateMonitor(logging_interval="epoch"),
#earlystop_callback,
checkpoint_callback,
],
logger=wandb_logger,
deterministic=True
)
# Train
trainer.fit(model)
# Save model
torch.save(model.state_dict(), 'models/checkpoint/last_weights.pth')
# Test model
trainer.test(ckpt_path="best")
if __name__ == "__main__":
main()
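# The class weights [0.9711, 0.9599, 0.068] above are hard-coded. Below is a hedged sketch
# of one common recipe (inverse class frequency over the training labels, normalised to
# sum to 1); the exact normalisation behind the hard-coded numbers may differ, and the
# file path is supplied by the caller.
def class_weights_from_csv(path, label_col="sol_category"):
    counts = pd.read_csv(path)[label_col].value_counts().sort_index()
    inv = 1.0 / counts
    return (inv / inv.sum()).tolist()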
|
maltefranke/solubility_prediction
|
models/ChemBERTa/chemberta10M.py
|
chemberta10M.py
|
py
| 8,935 |
python
|
en
|
code
| 1 |
github-code
|
6
|
46046555266
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from django.template import Template, Context
from django.utils.html import mark_safe
from hooks.templatehook import hook
from hooks.templatetags.hooks_tags import template_hook_collect
from . import utils_hooks
class HookTagTest(TestCase):
def setUp(self):
self.hook_name = 'myhook'
hook.unregister_all(self.hook_name)
utils_hooks.myhook.unregister_all()
def test_hook_tag(self):
def func(context, *args, **kwargs):
self.assertEqual(args, ("foobar", ))
self.assertEqual(kwargs, {'bar': "bar", })
self.assertEqual(context['foo'], "foo")
return "hello"
hook.register(self.hook_name, func)
out = Template(
"{% load hooks_tags %}"
"{% hook hook_name 'foobar' bar='bar' %}"
).render(Context({"hook_name": self.hook_name, "foo": "foo", }))
self.assertEqual(out, u"hello")
def test_hook_tag_many(self):
"""
Should join multiple responses
"""
def func_a(*args, **kwargs):
return "hello"
def func_b(*args, **kwargs):
return "goodbye"
hook.register(self.hook_name, func_a)
hook.register(self.hook_name, func_b)
out = Template(
"{% load hooks_tags %}"
"{% hook hook_name 'foobar' %}"
).render(Context({"hook_name": self.hook_name, }))
self.assertEqual(out, "hello\ngoodbye")
def test_hook_tag_escaped(self):
"""
Should escape responses (if they are not marked as safe)
"""
def func(*args, **kwargs):
return "<span>hello</span>"
hook.register(self.hook_name, func)
out = Template(
"{% load hooks_tags %}"
"{% hook hook_name 'foobar' %}"
).render(Context({"hook_name": self.hook_name, }))
self.assertEqual(out, "<span>hello</span>")
def test_hook_tag_mark_safe(self):
"""
Should not escape safe strings
"""
def func(*args, **kwargs):
return mark_safe("<span>hello</span>")
hook.register(self.hook_name, func)
out = Template(
"{% load hooks_tags %}"
"{% hook hook_name 'foobar' %}"
).render(Context({"hook_name": self.hook_name, }))
self.assertEqual(out, "<span>hello</span>")
def test_template_hook_collect(self):
def func(context, *args, **kwargs):
self.assertEqual(context, "context")
self.assertEqual(args, ("foo", ))
self.assertEqual(kwargs, {'extra': "bar", })
return "hello"
utils_hooks.myhook.register(func)
res = template_hook_collect(utils_hooks, 'myhook', "context", "foo", extra="bar")
self.assertEqual(res, u"hello")
res = template_hook_collect(utils_hooks, 'badhook')
self.assertEqual(res, u"")
def test_template_hook_collect_escaped(self):
def func(*args, **kwargs):
return "<span>hello</span>"
utils_hooks.myhook.register(func)
res = template_hook_collect(utils_hooks, 'myhook', "context", "foo", extra="bar")
self.assertEqual(res, "<span>hello</span>")
|
nitely/django-hooks
|
hooks/tests/tests_templatetags.py
|
tests_templatetags.py
|
py
| 3,337 |
python
|
en
|
code
| 16 |
github-code
|
6
|
9002769780
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 26 10:21:15 2019
This is the modl with Keras framework
@author: ago
"""
from __future__ import print_function
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
from IPython.display import display, HTML
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Reshape
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import np_utils
from keras.layers import LSTM
# Set some standard parameters upfront
pd.options.display.float_format = '{:.1f}'.format
sns.set() # Default seaborn look and feel
plt.style.use('ggplot')
print('keras version ', keras.__version__)
# Same labels will be reused throughout the program
LABELS = ['Frequency','V RMS','I ph.angle','V ph.angle','I RMS']
# The number of steps within one time segment
TIME_PERIODS = 80
# The steps to take from one segment to the next; if this value is equal to
# TIME_PERIODS, then there is no overlap between the segments
STEP_DISTANCE = 40
def read_data(file_path):
columns = ['Time','value a','value b','label']
df = pd.read_csv(file_path,
header=None,
names=columns)
# will show up as NAN
df.dropna(axis=0, how='any', inplace=True)
return df
def convert_to_float(x):
    try:
        return float(x)  # np.float was removed from NumPy; the built-in float is equivalent here
    except (TypeError, ValueError):
        return np.nan
def show_basic_dataframe_info(dataframe):
# Shape and how many rows and columns
print('Number of columns in the dataframe: %i' % (dataframe.shape[1]))
print('Number of rows in the dataframe: %i\n' % (dataframe.shape[0]))
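# verticalStack is used below but never created in this file. A hedged sketch of a helper
# that could assemble it from several csv files with read_data() above; the file paths are
# supplied by the caller, none are assumed here.
def build_vertical_stack(file_paths):
    return pd.concat([read_data(p) for p in file_paths], ignore_index=True)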
# Load data set containing all the data from csv
# (verticalStack is expected to be a DataFrame prepared beforehand, e.g. with
#  build_vertical_stack() above or an equivalent pd.concat of read_data() outputs)
df = verticalStack
# Describe the data
show_basic_dataframe_info(df)
df.head(20)
# Define column name of the label vector
LABEL = 'label'
# Transform the labels from String to Integer via LabelEncoder
le = preprocessing.LabelEncoder()
# Add a new column to the existing DataFrame with the encoded values
df[LABEL] = le.fit_transform(df['label'].values.ravel())
RANDOM_SEED =50
N_TIME_STEPS = 200
N_FEATURES = 2
classes= 4
step = 1
segments = []
labels = []
for i in range(1, len(df) - N_TIME_STEPS, step):
x1 = df['value a'].values[i: i + N_TIME_STEPS]
x2 = df['value b'].values[i: i + N_TIME_STEPS]
label = stats.mode(df['label'][i: i + N_TIME_STEPS])[0][0]
segments.append([x1,x2])
labels.append(label)
reshaped_segments = np.asarray(segments, dtype= np.float32).reshape(-1, N_TIME_STEPS, N_FEATURES)
labels = np.asarray(pd.get_dummies(labels), dtype = np.float32)
X_train, X_test, y_train, y_test = train_test_split(reshaped_segments, labels, test_size=0.2, random_state=RANDOM_SEED)
print('x_train shape: ', X_train.shape)
print( X_train.shape[0], 'training samples')
print('y_train shape: ', y_train.shape)
model = Sequential()
model.add(LSTM(200, input_shape=(X_train.shape[1], X_train.shape[2])))
model.add(Dropout(0.3))
# model.add(LSTM(70))
# model.add(Dropout(0.3))
model.add(Dense(classes, activation='softmax'))  # softmax output needed for categorical_crossentropy
callbacks_list = [
keras.callbacks.ModelCheckpoint(
filepath='best_model.{epoch:02d}-{val_loss:.2f}.h5',
monitor='val_loss', save_best_only=True),
keras.callbacks.EarlyStopping(monitor='acc', patience=1)
]
model.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
print(model.summary())
# Hyper-parameters
BATCH_SIZE = 1024
EPOCHS =10
# Enable validation to use ModelCheckpoint and EarlyStopping callbacks.
history = model.fit(X_train,
y_train,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
callbacks=callbacks_list,
validation_split=0.2,
verbose=1)
plt.figure(figsize=(6, 4))
plt.plot(history.history['acc'], 'r', label='Accuracy of training data')
plt.plot(history.history['val_acc'], 'b', label='Accuracy of validation data')
plt.plot(history.history['loss'], 'r--', label='Loss of training data')
plt.plot(history.history['val_loss'], 'b--', label='Loss of validation data')
plt.title('Model Accuracy and Loss')
plt.ylabel('Accuracy and Loss')
plt.xlabel('Training Epoch')
plt.ylim(0)
plt.legend()
plt.show()
def show_confusion_matrix(validations, predictions):
matrix = metrics.confusion_matrix(validations, predictions)
plt.figure(figsize=(6, 4))
sns.heatmap(matrix,
cmap='coolwarm',
linecolor='white',
linewidths=1,
xticklabels=LABELS,
yticklabels=LABELS,
annot=True,
fmt='d')
plt.title('Confusion Matrix')
plt.ylabel('True Label')
plt.xlabel('Predicted Label')
plt.show()
y_pred_test = model.predict(X_test)
# Take the class with the highest probability from the test predictions
max_y_pred_test = np.argmax(y_pred_test, axis=1)
max_y_test = np.argmax(y_test, axis=1)
show_confusion_matrix(max_y_test, max_y_pred_test)
print(classification_report(max_y_test, max_y_pred_test))
|
Dirbas/PMU_classifier
|
Keras_PMU.py
|
Keras_PMU.py
|
py
| 5,451 |
python
|
en
|
code
| 2 |
github-code
|
6
|
12814349068
|
import itertools
import matplotlib.pyplot as plt
import numpy as np
def get_mtot(event_jets):
    # total invariant mass: sum the four-momentum components first, then square the sums
    # (summing the squared components, as before, does not give an invariant mass)
    all_px = sum(j.px for j in event_jets)
    all_py = sum(j.py for j in event_jets)
    all_pz = sum(j.pz for j in event_jets)
    all_e = sum(j.e for j in event_jets)
    m2 = all_e ** 2 - all_px ** 2 - all_py ** 2 - all_pz ** 2
    if m2 >= 0:
        return m2 ** 0.5
    raise Exception('Bad MTot: all_e={}, all_px={}, all_py={}, all_pz={}'.format(all_e, all_px, all_py, all_pz))
def get_mjj(event_jets):
"""
The 2 first jets are the leading jets
:param event_jets:
:return: The mjj for the 2 leading jets
"""
e = event_jets[0].e + event_jets[1].e
px = event_jets[0].px + event_jets[1].px
py = event_jets[0].py + event_jets[1].py
pz = event_jets[0].pz + event_jets[1].pz
return (e ** 2 - px ** 2 - py ** 2 - pz ** 2) ** 0.5
def get_mjj_all_pairs(event_jets):
mjj_all_pairs = []
for pair in itertools.product(event_jets, repeat=2):
(jo, jt) = pair
e = jo.e + jt.e
px = jo.px + jt.px
py = jo.py + jt.py
pz = jo.pz + jt.pz
if (e ** 2 - px ** 2 - py ** 2 - pz ** 2) >= 0:
mjj_all_pairs += [(e ** 2 - px ** 2 - py ** 2 - pz ** 2) ** 0.5]
else:
            raise Exception('Bad Mjj: e={}, px={}, py={}, pz={}'.format(e, px, py, pz))
return mjj_all_pairs
def get_lead_pt(event_jets):
return event_jets[0].pt
def get_nj(event_jets):
return len(event_jets)
def get_mht(event_jets, pt_cutoff=30, eta_cutoff=5):
all_px = np.array([jet.px for jet in event_jets if (jet.pt > pt_cutoff and jet.eta < eta_cutoff)])
all_py = np.array([jet.py for jet in event_jets if (jet.pt > pt_cutoff and jet.eta < eta_cutoff)])
return sum(np.square(all_px) + np.square(all_py)) ** 0.5
def get_ht(event_jets, pt_cutoff=30, eta_cutoff=2.5):
all_px = np.array([jet.px for jet in event_jets if (jet.pt > pt_cutoff and jet.eta < eta_cutoff)])
all_py = np.array([jet.py for jet in event_jets if (jet.pt > pt_cutoff and jet.eta < eta_cutoff)])
return sum(np.square(all_px) + np.square(all_py)) ** 0.5
def get_meff(event_jets):
all_px = np.array([jet.px for jet in event_jets])
all_py = np.array([jet.py for jet in event_jets])
return sum(jet.pt for jet in event_jets) + (sum(np.square(all_px) + np.square(all_py)))**0.5
def plot_histogram(data, x_label, y_label, color='b'):
plt.figure()
plt.hist(data, bins=50, facecolor=color, alpha=0.2)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.show()
def plot_scatter(x, y, x_label, y_label):
plt.figure()
plt.scatter(x, y)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.show()
def get_m1(event_jets):
jet1 = event_jets[0]
return (jet1.e ** 2 - jet1.px ** 2 - jet1.py ** 2 - jet1.pz ** 2) ** 0.5
def get_m2(event_jets):
jet2 = event_jets[1]
return (jet2.e ** 2 - jet2.px ** 2 - jet2.py ** 2 - jet2.pz ** 2) ** 0.5
def get_m1_sub_m2(event_jets):
return abs(get_m1(event_jets) - get_m2(event_jets))
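# A minimal usage sketch (an addition for illustration): the functions above only assume
# jet objects exposing e, px, py, pz, pt and eta attributes, so a simple namedtuple works.
from collections import namedtuple

Jet = namedtuple('Jet', ['e', 'px', 'py', 'pz', 'pt', 'eta'])

def _demo():
    jets = [Jet(e=110.0, px=60.0, py=20.0, pz=80.0, pt=63.2, eta=1.1),
            Jet(e=95.0, px=-55.0, py=-10.0, pz=70.0, pt=55.9, eta=1.05)]
    print(get_mjj(jets), get_lead_pt(jets), get_nj(jets))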
|
rotemov/ML4Jets-HUJI
|
jupyter_methods.py
|
jupyter_methods.py
|
py
| 3,097 |
python
|
en
|
code
| 1 |
github-code
|
6
|