seq_id (string, len 7-11) | text (string, len 156-1.7M) | repo_name (string, len 7-125) | sub_path (string, len 4-132) | file_name (string, len 4-77) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k, nullable) | dataset (string, 1 class) | pt (string, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---
2311755172 |
import logging
import pathlib
import requests
import os
from pathlib import Path
from typing import Dict
from PIL.Image import Image
from result import Result, Ok, Err
from definitions import EXT_API_SAVE_QUOTE_URL, EXT_API_OUTPUT_URL
from models import ImRequest
_logger = logging.getLogger(__name__)
class QuoteImUtils:
@staticmethod
def parse_req_dict( some_dict: Dict ) -> ImRequest:
try:
errs, validated = ImRequest.parser(some_dict)
validated.errs = errs
return validated
except Exception as err:
_logger.error( err, exc_info=True )
im_req = ImRequest()
im_req.errs = [err]
return im_req
@staticmethod
def send_to_ext_api( filename: str , filepath: str, default_url: str ) -> Result[str, Exception]:
s_key: str = os.environ['API_SECRET_KEY']
s_value: str = os.environ['API_SECRET_VAL']
url = f'{EXT_API_SAVE_QUOTE_URL}?{s_key}={s_value}'
try:
with open( filepath, 'rb' ) as im_file:
                payload = {}
                files = [( 'quote', (filename, im_file, 'image/png') )]
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
}
response = requests.post( url, headers=headers, data=payload, files=files)
if response and response.status_code == 200:
im_url: str = f'{EXT_API_OUTPUT_URL}/{filename}'
return Ok( im_url )
else:
                    _logger.warning( response.headers )
                    _logger.warning( response.text )
                    _logger.warning( f'"{url}"' )
                    # fall back to the caller-supplied default image URL
                    return Ok( default_url )
except Exception as err:
_logger.error(err, exc_info=True)
return Err(err)
@staticmethod
def save_im_as( img: Image, filename: str , filepath: str ) -> Result[None, Exception]:
keep_alpha: bool = False
if img is None:
return Err( ValueError('Image is None') )
try:
file_extension = pathlib.Path(filename).suffix
keep_alpha = file_extension == '.png' or file_extension == '.PNG'
except Exception:
keep_alpha = False
try:
im_clone = img
if not keep_alpha:
im_clone = im_clone.convert('RGB')
im_clone.save( filepath )
return Ok( None )
except Exception as err:
_logger.error( err, exc_info=True )
return Err( err )
@staticmethod
def save_im_as_b64( filename: str , filepath: str, default_url: str ):
pass
@staticmethod
def send_to_ext_api_b64( filename: str , filepath: str, default_url: str ):
(800,600)
@staticmethod
def img_already_exists_ext_api( filename: str ) -> bool:
url: str = f'{EXT_API_OUTPUT_URL}/{filename}'
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
}
try:
response = requests.get( url, headers=headers )
return response.status_code == 200
except Exception as err:
_logger.error(err)
return False
@staticmethod
def img_already_exists_local( filepath: str ) -> bool:
path = Path(filepath)
return path.is_file()
| lcapuano-app/quote-image-generator | src/imquote/qt_im_utils.py | qt_im_utils.py | py | 3,700 | python | en | code | 0 | github-code | 6 |
32148869393 |
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the rotLeft function below.
def rotLeft(a, d):
    # Left rotation by d: the first d elements move to the end of the list.
    return a[d:] + a[:d]
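# Quick sanity check (added; not part of the HackerRank harness):
# rotLeft([1, 2, 3, 4, 5], 4) -> [5, 1, 2, 3, 4]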
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nd = input().split()
n = int(nd[0])
d = int(nd[1])
a = list(map(int, input().rstrip().split()))
result = rotLeft(a, d)
fptr.write(' '.join(map(str, result)))
fptr.write('\n')
fptr.close()
| mihirp1/HackerRank_Algorithms_Problems | arrays-leftrotation/solution.py | solution.py | py | 609 | python | en | code | 0 | github-code | 6 |
34181662048 |
import json
import time
from collections import defaultdict
current = 2370
maxGame = 2426
import datetime
import matplotlib.pyplot as plt
MERCY_ULT_TIME = 20
from pathlib import Path
ult_timers = {
'doomfist':4,
'genji':6,
'mccree': 6,
'pharah': 3,
'reaper': 3,
'soldier':6,
'sombra':6,
'tracer':3,
'bastion':8,
'hanzo':5,
'junkrat':10,
'mei':5,
'torbjorn': 12,
'widowmaker': 15.5,
'orisa': 5,
'reinhardt': 3,
'roadhog': 6,
'winston': 10,
'zarya': 4,
'ana': 8,
'lucio': 6.25,
'mercy':20,
'moira':8
}
def update_mercy_lifespan(player,seconds,mercy_list):
mercy_list[player][0] += seconds
mercy_list[player][1] += 1
    # a lifespan shorter than the full ult duration means the player died mid-ult
    if seconds < MERCY_ULT_TIME:
        mercy_list[player][2] += 1
def time_converter(start, current_time):
return str(datetime.timedelta(seconds=(current_time-start)))
def calculate_ults_in_teamfight(ult_time_dictionary,first_kill,last_kill):
current_ults = defaultdict(list)
future_ults = defaultdict(list)
for color,ult_list in ult_time_dictionary.items():
current_list = []
new_list = []
for ult in ult_list:
character = ult[0]
time = ult[1]
advantage = ult[2]
# if ult happened more than 12 seconds before, it doesn't count as part of the teamfight.
if first_kill - time > 12:
continue
elif time - last_kill > 0:
advantage = 0
future_ults[color].append((character,time,advantage))
else:
current_ults[color].append((character,time,advantage))
return current_ults, future_ults
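# Illustrative note (added; hypothetical numbers): with first_kill=100 and last_kill=110,
# an ult at t=85 is ignored (more than 12 s before the fight), an ult at t=115 is carried
# over to future_ults with its advantage zeroed, and an ult at t=105 counts as current.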
mercy_ult_win_ratio = [0,0]
kills = 0
ults = 0
#mercy lifespan contains three values. The first number is seconds alive, while the second number is times used, and third is times interrupted.
mercy_lifespan = defaultdict(list)
mercy_killers = defaultdict(int)
ults_used = defaultdict(int)
players_by_playtime = {}
mercy_ult_by_advantage = defaultdict(int)
time_to_charge_ult = defaultdict(dict)
#comp at team fight.
team_fight_comp = {}
while current < maxGame:
fights = 0
fight_happening = False
game_number = 1
round_number = 1
blue_name = ""
red_name = ""
current_character_by_player = defaultdict(int)
while game_number < 6:
fight_happening = False
current_ults = 0
print(game_number, round_number)
players = defaultdict(int)
players_by_id = {'red':defaultdict(int),
'blue':defaultdict(int)}
file_name = "game_data/"
file_name += str(current) + "_" + str(game_number) + "_" + str(round_number) + ".json"
f = Path(file_name)
#game finished
if not f.is_file():
game_number += 1
round_number = 1
continue
with open(file_name,'r') as f:
datastore = json.load(f)
datastore = json.loads(datastore)
blue_playtime = defaultdict(int)
red_playtime = defaultdict(int)
start = 0
end = 0
#initializing players
for key in datastore.keys():
if key != 'events':
if key == 'blue':
blue_name = datastore[key]
if key == 'red':
red_name = datastore[key]
if key == 'bluenames':
for index, player in enumerate(datastore[key]):
if player == "Nus":
player = "nus"
players_by_id['blue'][index+1] = player
if player not in players_by_playtime:
players_by_playtime[player] = defaultdict(int)
if key == 'rednames':
for index, player in enumerate(datastore[key]):
if player == "Nus":
player = "nus"
players_by_id['red'][index+1] = player
if player not in players_by_playtime:
players_by_playtime[player] = defaultdict(int)
current_character_by_player = defaultdict(int)
'''
Keep track of mercies ulting. If mercy ult > 20 seconds or death,
set a negative number to end ult.
'''
mercy_ult_start = defaultdict(int)
mercy_ult_start['red'] = -1000
mercy_ult_start['blue'] = -1000
opposite_color = {"blue":"red", "red":"blue"}
last_ult_time = defaultdict(int)
ults_used_by_color = defaultdict(list)
player_advantage_in_ult = defaultdict(list)
last_kill = 0
first_kill = 0
fight_kills = 0
kills_by_color = {'red': 0, 'blue': 0}
mercy = {}
for event in datastore['events']:
time = event[0]
standard_time = str(datetime.timedelta(seconds=(time-start)))
if(event[1] == 'PAUSE' or event[1] == 'UNPAUSE'):
continue
if fight_happening:
#if fight has terminated
if time - last_kill > 14 or event[1] == 'END':
fight_happening = False
#print("fight end is at " + str(datetime.timedelta(seconds=(last_kill-start))))
current_ults, future_ults = calculate_ults_in_teamfight(player_advantage_in_ult,first_kill, last_kill)
ult_first = None
mercy_color = None
both_mercies = False
for color in current_ults:
for ult in current_ults[color]:
character = ult[0]
ult_time = ult[1]
advantage = ult[2]
if character == 'mercy':
if not ult_first:
ult_first = (ult_time,advantage)
else:
if ult_time < ult_first[0]:
ult_first = (ult_time,advantage)
if mercy_color == None:
mercy_color = color
else:
if mercy_color != color:
both_mercies = True
mercy_ult_time = str(datetime.timedelta(seconds=(ult_time-start)))
if ult_first:
mercy_ult_by_advantage[ult_first[1]] += 1
player_advantage_in_ult = future_ults
winning_color = max(kills_by_color, key=kills_by_color.get)
if max(kills_by_color.values()) != min(kills_by_color.values()):
if (not both_mercies) and mercy_color != None:
print("There is only one mercy")
mercy_ult_win_ratio[1] += 1
if winning_color == mercy_color:
print("One mercy won!")
mercy_ult_win_ratio[0] += 1
kills_by_color = dict.fromkeys(kills_by_color, 0)
#weird glitch involving player switches on Dragons vs Mayhem
if time >= 11687 and time < 11699 and current == 2412:
continue
#Check if mercy lived through ult
for color in mercy_ult_start:
                if mercy_ult_start[color] > 0 and time - mercy_ult_start[color] > MERCY_ULT_TIME:
                    mercy_player = mercy[color]
                    last_ult_time[mercy_player] = mercy_ult_start[color] + MERCY_ULT_TIME
                    update_mercy_lifespan(mercy_player, MERCY_ULT_TIME, mercy_lifespan)
mercy_ult_start[color] = -1000
if event[1] == 'END':
end = time
elif event[1] == 'MATCH':
start = time
else:
color = event[2].lower()
opposing_color = opposite_color[color]
player_id = event[3]
first_character = event[4]
player = players_by_id[color][player_id]
if event[1] == 'SWITCH':
second_character = event[5]
if player in current_character_by_player:
old_time,old_character = current_character_by_player[player]
play_time = time - old_time
players_by_playtime[player][old_character] += play_time
#since player switched, last ult time is now inaccurate.
if player in last_ult_time:
del last_ult_time[player]
if second_character == "mercy":
if player not in mercy_lifespan:
mercy_lifespan[player] = [0,0,0]
mercy[color] = player
current_character_by_player[player] = (time, second_character)
elif event[1] == "ULT_USE":
ults_used_by_color[color] += [first_character]
ults_used[player] += 1
kills_differential = kills_by_color[color] - kills_by_color[opposing_color]
last_ult_time[player] = time
if current_character_by_player[player][1] == "mercy":
#print("{2} Mercy ulted at {0} with {1} advantage".format(standard_time,kills_differential,color))
#print(kills_by_color)
player_advantage_in_ult[color].append((first_character,time,kills_differential))
mercy_ult_by_advantage[kills_differential] += 1
mercy_ult_start[color] = time
elif event[1] == "KILL":
kills_by_color[color] += 1
last_kill = time
#the fight has started.
if not fight_happening:
first_kill = time
fights += 1
fight_happening = True
kills += 1
enemy_id = event[5]
dead_character = event[6]
killed_player = players_by_id[opposing_color][enemy_id]
if dead_character == "mercy":
#mercy died mid-ult
if mercy_ult_start[opposing_color] > 0:
last_ult_time[player] = time
mercy_killers[first_character] += 1
ult_time = time - mercy_ult_start[opposing_color]
update_mercy_lifespan(killed_player,ult_time, mercy_lifespan)
#mark ult as terminated
mercy_ult_start[opposing_color] = -1000
elif event[1] == "REVIVE":
continue
elif event[1] == "ULT_GAIN":
if first_character not in time_to_charge_ult[player]:
time_to_charge_ult[player][first_character] = []
initial_time, dummy = current_character_by_player[player]
if player in last_ult_time:
initial_time = last_ult_time[player]
if first_character == "mercy":
print ("Ult gained for {0} mercy at".format(color),time_converter(start,initial_time), time_converter(start,time))
time_to_charge_ult[player][first_character].append(time - initial_time)
last_ult_time[player] = time
for player in current_character_by_player:
old_time,old_character = current_character_by_player[player]
play_time = end - old_time
players_by_playtime[player][old_character] += play_time
#by playtime
'''
for player in players_by_playtime:
for character in players_by_playtime[player]:
playtime = players_by_playtime[player][character]
print("{0} has been played by {1} for {2} seconds".format(character,player,playtime))
'''
print("fights are {0}".format(fights))
print(str(datetime.timedelta(seconds=(end-start))))
print(mercy_ult_win_ratio)
round_number += 1
current += 1
#calculate average lifespan of mercies
print("Total fights is {0}".format(fights))
print("Total kills is {0}".format(kills))
total_mercy_ults = 0
total_mercy_deaths = 0
mercy_death_graph = {}
#gathering data on average mercy lifespan in valkyrie
for player in mercy_lifespan:
lifetimes, ult_times,deaths = mercy_lifespan[player]
total_mercy_ults += ult_times
total_mercy_deaths += deaths
if ult_times > 0:
mercy_death_graph[player] = deaths/ult_times
avg_ult_time = lifetimes/ult_times
print("{1} lives for an average of {0} seconds and died {2} times out of {3}".format(avg_ult_time,player,deaths,ult_times))
avg_seconds_per_ult = defaultdict(dict)
std_deviation_by_player = defaultdict(dict)
for player,player_ults in time_to_charge_ult.items():
for character in player_ults:
playtime = sum(player_ults[character])
ults = len(player_ults[character])
avg = playtime/ults
avg_seconds_per_ult[character][player] = avg
summation = 0
if player == "nus" and character == "mercy":
print("ults are {0}".format(player_ults[character]))
for ult in player_ults[character]:
summation += pow(ult - avg,2)
std_dev = pow(summation/ults,0.5)
std_deviation_by_player[character][player] = std_dev/pow(avg,0.5)
print("Percentage of mercies that die in ult is {0}".format(total_mercy_deaths/(total_mercy_ults)))
print("Mercy win ratio when only ulting on one side is {0} out of {1}".format(mercy_ult_win_ratio[0]/(mercy_ult_win_ratio[1]),mercy_ult_win_ratio[1]))
analyzed_character = "mercy"
d = avg_seconds_per_ult[analyzed_character]
print(avg_seconds_per_ult[analyzed_character])
x_axis = []
y_axis = []
error = []
for w in sorted(d, key=d.get, reverse=True):
x_axis += [w]
y_axis += [d[w]]
error += [std_deviation_by_player[analyzed_character][w]]
print(x_axis)
print(y_axis)
print(error)
plt.errorbar(list(range(0,len(x_axis))), y_axis,yerr=error,fmt='o')
#plt.bar(range(len(y_axis)), list(y_axis), align='center')
plt.xticks(range(len(x_axis)), list(x_axis))
plt.xticks(rotation=90)
plt.title("Seconds to generate ult as " + analyzed_character)
plt.ylabel("seconds")
plt.tight_layout()
plt.show()
quit()
| Cheraws/AnalyzingOWL | stat_collector.py | stat_collector.py | py | 14,816 | python | en | code | 0 | github-code | 6 |
43972979596 |
import argparse
import re
# CPT tools
from wp_tools import CPTLink
def parse_inputs(text,file,galaxy_mode=False):
"""
Parses the inputs of a text box and new line separated pacc file
"""
accs = []
if text:
        if re.search("__cn__", str(text[0])):
acc = text[0]
split = acc.split("__cn__")
accs.extend(split)
else:
accs.extend(text)
if file:
        a = file.read().splitlines()  # argparse FileType already opened the file
accs.extend(a)
if not accs:
raise Exception("No accessions used, check file and input.")
else:
return accs
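# Example (added; hypothetical accessions): parse_inputs(["WP_000001__cn__WP_000002"], None)
# returns ["WP_000001", "WP_000002"] -- Galaxy encodes newlines as "__cn__".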
def parse_email(email):
"""
Parses user input email and appends CPT Admins to NCBI email
"""
ADMINS = ["[email protected]","[email protected]","[email protected]"]
sep = ';'
try:
if "__at__" in email:
split = email.split("__at__")
email = f"{split[0]}@{split[1]}"
except TypeError:
raise Exception("Please Insert Email Address")
ADMINS.insert(0,email)
emails = sep.join(ADMINS)
return emails
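# Example (added; hypothetical address): parse_email("jdoe__at__example.org") returns
# "jdoe@example.org" joined with the ADMIN addresses, separated by ';'.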
def write_table(list_of_data, file):
"""
writes output table, uses list of data from CPTlink output
"""
with file as f:
f.write(f"WP_accession\tGenome_accession\tTaxID\tOrganism\tWP_count\n")
for acc in list_of_data:
for gacc_data in acc[1]:
f.write(f"{acc[0]}\t{gacc_data[0]}\t{gacc_data[1]}\t{gacc_data[2]}\t{acc[2]}\n")
def write_gaccs(list_of_data, file):
"""
writes output gacc file, uses list of data from CPTlink output
"""
for acc in list_of_data:
print(acc)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Retrieve information from a WP accession"
)
parser.add_argument(
"--wp_text",
nargs="*",
help="WP accessions, separated by __cn__ for Galaxy, or space for offline"
)
parser.add_argument(
"--wp_file",
type=argparse.FileType("r"),
help="New line separated WP accessions file"
)
parser.add_argument(
"--api_key",
help="NCBI API Key"
)
parser.add_argument(
"--email",
type=str,
help="Entrez requires an email to connect to NCBI database. CPT Admin emails will be appended to list."
)
parser.add_argument(
"--wp_amt",
dest='wp_amt',
choices=('first','all'),
default='first',
)
parser.add_argument(
"--out_table",
type=argparse.FileType("w"),
default="_return_table.txt",
help="Output table consisting of accession data"
)
args = parser.parse_args()
# Get accessions from input file and/or text
accs = parse_inputs(args.wp_text,args.wp_file)
# Emails
emails = parse_email(args.email)
# Run functions
package = {
"email" : emails,
"db" : "nuccore",
"dbfrom" : "protein",
"api_key" : args.api_key,
}
wps = []
    # 'all' grabs every linked accession, 'first' only the first hit
    wp_all = args.wp_amt == 'all'
    for acc in accs:
        package["acc"] = acc
pacc, gacc, wp_amt = CPTLink(**package).map_accessions(wp_all)
current_wp = [pacc,gacc,wp_amt]
wps.append(current_wp)
write_table(wps,args.out_table)
| TAMU-CPT/galaxy-tools | tools/wp_analysis/wp_data.py | wp_data.py | py | 3,447 | python | en | code | 5 | github-code | 6 |
30153335935 |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 19 11:52:30 2023
@author: moureaux pierre
"""
import numpy as np
from abc import ABC, abstractmethod
from numpy import linalg as LA
"""The abstract finite difference class"""
class FiniteDifferences(object):
def __init__(self, r0, T, sigma, alpha, beta,rmin, rmax, M, N):
self.r0 = r0
self.T = T
self.sigma = sigma
self.alpha = alpha
self.beta = beta
self.rmin = rmin
self.rmax = rmax
self.M, self.N = int(M), int(N)
self.boundary_conds = np.linspace(rmin, rmax, self.M+1)
self.dr = (rmax - rmin) / float(self.M)
if self.dr > (self.sigma**2)/(LA.norm(self.alpha*(self.beta-self.boundary_conds), ord=np.inf)):
self.dr = (self.sigma**2)/(LA.norm(self.alpha*(self.beta-self.boundary_conds), ord=np.inf))
self.dt = T / float(self.N)
if self.dt > (self.dr**2)/(self.sigma**2):
self.dt = (self.dr**2)/(self.sigma**2)
self.j_values = np.arange(self.N)
self.grid = np.zeros(shape=(self.M+1, self.N+1))
@abstractmethod
def _setup_boundary_conditions_(self):
pass
def _setup_coefficients_(self):
self.a = self.dt/2*((self.sigma/self.dr)**2-self.alpha*(self.beta-self.boundary_conds)/self.dr)
self.b = -(self.sigma**2)*self.dt/(self.dr**2)-self.boundary_conds*self.dt+1
self.c = self.dt/2*((self.sigma/self.dr)**2+self.alpha*(self.beta-self.boundary_conds)/self.dr)
self.a0 = self.dt*(-((self.sigma/self.dr)**2)/3-1/self.dr*self.alpha*(self.beta-self.boundary_conds)-self.boundary_conds)+1
self.b0 = self.dt/self.dr*((self.sigma**2)/(2*self.dr)+self.alpha*(self.beta-self.boundary_conds))
self.c0 = -self.dt*(self.sigma**2)/(6*(self.dr**2))
self.aM = self.dt*(((self.sigma/self.dr)**2)/3+1/self.dr*self.alpha*(self.beta-self.boundary_conds)-self.boundary_conds)+1
self.bM = self.dt/self.dr*(-(self.sigma**2)/(2*self.dr)-self.alpha*(self.beta-self.boundary_conds))
self.cM = self.dt*(self.sigma**2)/(6*(self.dr**2))
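        # Note (added): the coefficients above match an explicit discretization of a
        # Vasicek-type bond PDE, V_t + (sigma^2/2) V_rr + alpha*(beta - r) V_r - r V = 0,
        # with one-sided stencils (a0/b0/c0 and aM/bM/cM) at the rmin/rmax boundaries.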
@abstractmethod
def _traverse_grid_(self):
pass
    def _interpolate_(self):
        # linearly interpolate the t=0 column of the grid at the initial rate r0
        return np.interp(self.r0,
                         self.boundary_conds,
                         self.grid[:, 0])
def price(self):
self._setup_boundary_conditions_()
self._setup_coefficients_()
self._traverse_grid_()
return self._interpolate_()
"""The Bond finite difference class"""
class FDExplicitBond(FiniteDifferences):
def __init__(self, r0, T, sigma, alpha, beta,rmin, rmax, M, N, couponSchedule, coupon):
super().__init__(r0, T, sigma, alpha, beta,rmin, rmax, M, N)
self.couponSchedule = couponSchedule
self.coupon = coupon
def _setup_boundary_conditions_(self):
self.grid[:, -1] = 1 + self.coupon
def _traverse_grid_(self):
for j in reversed(self.j_values):
if j in self.couponSchedule:
for i in range(self.M+1):
if i == 0:
self.grid[i,j] = self.a0[i]*self.grid[i,j+1] \
+self.b0[i]*self.grid[i+1,j+1] \
+self.c0*self.grid[i+3,j+1] + self.coupon
elif i == self.M:
self.grid[i,j] = self.aM[i]*self.grid[i,j+1] \
+self.bM[i]*self.grid[i-1,j+1] \
+self.cM*self.grid[i-3,j+1] + self.coupon
else:
self.grid[i,j] = self.a[i]*self.grid[i-1,j+1] \
+self.b[i]*self.grid[i,j+1] \
+self.c[i]*self.grid[i+1,j+1] + self.coupon
else:
for i in range(self.M+1):
if i == 0:
self.grid[i,j] = self.a0[i]*self.grid[i,j+1] \
+self.b0[i]*self.grid[i+1,j+1] \
+self.c0*self.grid[i+3,j+1]
elif i == self.M:
self.grid[i,j] = self.aM[i]*self.grid[i,j+1] \
+self.bM[i]*self.grid[i-1,j+1] \
+self.cM*self.grid[i-3,j+1]
else:
self.grid[i,j] = self.a[i]*self.grid[i-1,j+1] \
+self.b[i]*self.grid[i,j+1] \
+self.c[i]*self.grid[i+1,j+1]
"""The TRS finite difference class"""
class FDExplicitTRS(FiniteDifferences):
def __init__(self, r0, T, sigma, alpha, beta,rmin, rmax, M, N, Bond, K, sTRS, is_perfReceiver=True):
super().__init__(r0, T, sigma, alpha, beta,rmin, rmax, M, N)
self.Bond = Bond
self.K = K
self.sTRS = sTRS
self.is_perfReceiver = is_perfReceiver
self.Bond.price()
def _setup_boundary_conditions_(self):
index = int(self.Bond.N*self.T/self.Bond.T)
if self.is_perfReceiver:
self.grid[:, -1] = self.Bond.grid[:,index] - self.K - self.sTRS*self.T
else:
self.grid[:, -1] = -(self.Bond.grid[:,index] - self.K - self.sTRS*self.T)
def _traverse_grid_(self):
for j in reversed(self.j_values):
if j in self.Bond.couponSchedule:
for i in range(self.M+1):
if i == 0:
self.grid[i,j] = self.a0[i]*self.grid[i,j+1] \
+self.b0[i]*self.grid[i+1,j+1] \
+self.c0*self.grid[i+3,j+1] + self.Bond.coupon
elif i == self.M:
self.grid[i,j] = self.aM[i]*self.grid[i,j+1] \
+self.bM[i]*self.grid[i-1,j+1] \
+self.cM*self.grid[i-3,j+1] + self.Bond.coupon
else:
self.grid[i,j] = self.a[i]*self.grid[i-1,j+1] \
+self.b[i]*self.grid[i,j+1] \
+self.c[i]*self.grid[i+1,j+1] + self.Bond.coupon
else:
for i in range(self.M+1):
if i == 0:
self.grid[i,j] = self.a0[i]*self.grid[i,j+1] \
+self.b0[i]*self.grid[i+1,j+1] \
+self.c0*self.grid[i+3,j+1]
elif i == self.M:
self.grid[i,j] = self.aM[i]*self.grid[i,j+1] \
+self.bM[i]*self.grid[i-1,j+1] \
+self.cM*self.grid[i-3,j+1]
else:
self.grid[i,j] = self.a[i]*self.grid[i-1,j+1] \
+self.b[i]*self.grid[i,j+1] \
+self.c[i]*self.grid[i+1,j+1]
couponSchedule = (25,50,75)
Bond = FDExplicitBond(0.05,2,0.1,0.3,0.01,0.0,0.2,100,100,couponSchedule,0.025)
TRS = FDExplicitTRS(0.05,1,0.1,0.3,0.01,0.0,0.2,100,80,Bond, 1.0,0.02, True)
print(TRS.price())
| PierreMoureaux/Securities-finance-derivatives---Wilmott-s-program-of-study-Finite-difference | 6 - TRS on Bonds and interest rate dependencies/Python code/TRS on Bond.py | TRS on Bond.py | py | 7,469 | python | en | code | 0 | github-code | 6 |
37016298011 |
"""ASE LAMMPS Calculator Library Version"""
from __future__ import print_function
import os
import ctypes
import operator
import sys
import numpy as np
from numpy.linalg import norm
from lammps import lammps
from ase.calculators.calculator import Calculator
from ase.data import atomic_masses
from ase.atoms import symbols2numbers
import ase.units
import re
# TODO
# 1. should we make a new lammps object each time ?
# 2. upper triangular test does not look good
# 3. lmp object is not closed
# 4. need a routine to get the model back from lammps
# 5. if we send a command to lmps directly then the calculator does
# not know about it and the energy could be wrong.
# 6. do we need a subroutine generator that converts a lammps string
# into a python function that can be called
def is_upper_triangular(mat):
"""test if 3x3 matrix is upper triangular"""
def near0(x):
"""Test if a float is within .00001 of 0"""
return abs(x) < 0.00001
return near0(mat[1, 0]) and near0(mat[2, 0]) and near0(mat[2, 1])
def convert_cell(ase_cell):
"""
Convert a parallel piped (forming right hand basis)
to lower triangular matrix LAMMPS can accept. This
function transposes cell matrix so the bases are column vectors
"""
cell = np.matrix.transpose(ase_cell[:,:])
if not is_upper_triangular(cell) or cell[0,0] < 0.0 or cell[1,1] < 0.0 or cell[2,2] < 0.0:
# rotate bases into triangular matrix
tri_mat = np.zeros((3, 3))
A = cell[:, 0]
B = cell[:, 1]
C = cell[:, 2]
tri_mat[0, 0] = norm(A)
Ahat = A / norm(A)
AxBhat = np.cross(A, B) / norm(np.cross(A, B))
tri_mat[0, 1] = np.dot(B, Ahat)
tri_mat[1, 1] = norm(np.cross(Ahat, B))
tri_mat[0, 2] = np.dot(C, Ahat)
tri_mat[1, 2] = np.dot(C, np.cross(AxBhat, Ahat))
tri_mat[2, 2] = norm(np.dot(C, AxBhat))
# create and save the transformation for coordinates
volume = np.linalg.det(ase_cell)
trans = np.array([np.cross(B, C), np.cross(C, A), np.cross(A, B)])
trans = trans / volume
coord_transform = np.dot(tri_mat , trans)
return tri_mat, coord_transform
else:
return cell, None
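# Note (added): LAMMPS represents the simulation box as a lower-triangular
# "prism" (xhi, yhi, zhi, xy, xz, yz); convert_cell returns that matrix plus the
# rotation used to map positions and velocities into the rotated frame.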
lammps_real = {
"mass" : 0.001 * ase.units.kg / ase.units.mol,
"distance" : ase.units.Angstrom,
"time" : ase.units.fs,
"energy" : ase.units.kcal/ase.units.mol,
"velocity": ase.units.Angstrom / ase.units.fs,
"force": ase.units.kcal/ase.units.mol/ase.units.Angstrom,
"pressure" : 101325 * ase.units.Pascal
}
lammps_metal = {
"mass" : 0.001 * ase.units.kg / ase.units.mol,
"distance" : ase.units.Angstrom,
"time" : 1e-12 * ase.units.second,
"energy" : ase.units.eV,
"velocity": ase.units.Angstrom / (1e-12*ase.units.second),
"force": ase.units.eV/ase.units.Angstrom,
"pressure" : 1e5 * ase.units.Pascal
}
lammps_units={"real":lammps_real,
"metal":lammps_metal}
def unit_convert(quantity, units='metal'):
try:
return lammps_units[units][quantity]
    except KeyError:
        raise NotImplementedError("Unit {} in unit system {} is not implemented.".format(quantity, units))
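# Example (added): unit_convert("energy", "real") is kcal/mol expressed in ASE's
# eV-based unit system (roughly 0.0434 eV), so multiplying a LAMMPS 'real'
# energy by it yields eV.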
class LAMMPSlib(Calculator):
r"""
LAMMPSlib Interface Documentation
**Introduction**
LAMMPSlib is an interface and calculator for LAMMPS_. LAMMPSlib uses
the python interface that comes with LAMMPS to solve an atoms model
for energy, atom forces and cell stress. This calculator creates a
    '.lmp' object, which is a running lammps program, so further commands
    can be sent to and executed by this object until it is explicitly closed. Any
additional variables calculated by lammps can also be extracted. This
is still experimental code.
**Arguments**
================= ==========================================================
Keyword Description
================= ==========================================================
``lmpcmds`` list of strings of LAMMPS commands. You need to supply
enough to define the potential to be used e.g.
["pair_style eam/alloy",
"pair_coeff * * potentials/NiAlH_jea.eam.alloy Ni Al"]
``atom_types`` dictionary of "atomic_symbol":lammps_atom_type pairs,
e.g. {'Cu':1} to bind copper to lammps atom type 1.
Default method assigns lammps atom types in order that they
appear in the atoms model. Mandatory.
``log_file`` string
path to the desired LAMMPS log file
``lammps_header`` string to use for lammps setup. Default is to use
metal units and simple atom simulation.
lammps_header=['units metal',
'atom_style atomic',
'atom_modify map array sort 0 0'])
``keep_alive`` Boolean
whether to keep the lammps routine alive for more commands
================= ==========================================================
**Requirements**
To run this calculator you must have LAMMPS installed and compiled to
enable the python interface. See the LAMMPS manual.
If the following code runs then lammps is installed correctly.
>>> from lammps import lammps
>>> lmp = lammps()
The version of LAMMPS is also important. LAMMPSlib is suitable for
versions after approximately 2011. Prior to this the python interface
is slightly different from that used by LAMMPSlib. It is not difficult
to change to the earlier format.
**LAMMPS and LAMMPSlib**
The LAMMPS calculator is another calculator that uses LAMMPS (the
program) to calculate the energy by generating input files and running
a separate LAMMPS job to perform the analysis. The output data is then
read back into python. LAMMPSlib makes direct use of the LAMMPS (the
program) python interface. As well as directly running any LAMMPS
    command line, it allows the values of any of LAMMPS variables to be
extracted and returned to python.
**Example**
::
from ase import Atom, Atoms
from lammpslib import LAMMPSlib
cmds = ["pair_style eam/alloy",
"pair_coeff * * NiAlH_jea.eam.alloy Al H"]
a = 4.05
al = Atoms([Atom('Al')], cell=(a, a, a), pbc=True)
        h = Atoms([Atom('H')])
        alh = al + h
        lammps = LAMMPSlib(lmpcmds=cmds, log_file='test.log')
        alh.set_calculator(lammps)
        print("Energy ", alh.get_potential_energy())
**Implementation**
LAMMPS provides a set of python functions to allow execution of the
underlying C++ LAMMPS code. The functions used by the LAMMPSlib
interface are::
from lammps import lammps
lmp = lammps(cmd_args) # initiate LAMMPS object with command line args
lmp.scatter_atoms('x',1,3,positions) # atom coords to LAMMPS C array
lmp.command(cmd) # executes a one line cmd string
lmp.extract_variable(...) # extracts a per atom variable
lmp.extract_global(...) # extracts a global variable
lmp.close() # close the lammps object
For a single atom model the following lammps file commands would be run
by invoking the get_potential_energy() method::
units metal
atom_style atomic
atom_modify map array sort 0 0
region cell prism 0 xhi 0 yhi 0 zhi xy xz yz units box
create_box 1 cell
create_atoms 1 single 0 0 0 units box
mass * 1.0
## user lmpcmds get executed here
pair_style eam/alloy
pair_coeff * * lammps/potentials/NiAlH_jea.eam.alloy Al
## end of user lmmpcmds
run 0
**Notes**
.. _LAMMPS: http://lammps.sandia.gov/
* Units: The default lammps_header sets the units to Angstrom and eV
and for compatibility with ASE Stress is in GPa.
* The global energy is currently extracted from LAMMPS using
extract_variable since lammps.lammps currently extract_global only
accepts the following ['dt', 'boxxlo', 'boxxhi', 'boxylo', 'boxyhi',
'boxzlo', 'boxzhi', 'natoms', 'nlocal'].
* If an error occurs while lammps is in control it will crash
Python. Check the output of the log file to find the lammps error.
    * If commands are sent directly to the LAMMPS object, they may change
      the energy value of the model. However, the calculator will not know
      of it and will still return the original energy value.
End LAMMPSlib Interface Documentation
"""
implemented_properties = ['energy', 'forces', 'stress']
#NB
started = False
initialized = False
default_parameters = dict(
atom_types=None,
log_file=None,
lammps_name='',
keep_alive=False,
lammps_header=['units metal',
'atom_style atomic',
'atom_modify map array sort 0 0'],
boundary=True,
create_box=True,
create_atoms=True,
read_molecular_info=False,
comm=None)
def parse_bonds(self, atoms):
atoms.bonds = []
atoms.max_n_bonds = 0
for i in range(len(atoms)):
if atoms.arrays['bonds'][i] != '_':
n_bonds = 0
for bond_list in atoms.arrays['bonds'][i].split(','):
n_bonds += 1
                    m = re.match(r'(\d+)\((\d+)\)', bond_list)
atoms.bonds.append((int(m.group(2)),i+1,int(m.group(1))+1))
atoms.max_n_bonds = max(atoms.max_n_bonds, n_bonds)
def set_bonds(self, atoms):
for (t, i1, i2) in atoms.bonds:
self.lmp.command('create_bonds single/bond {} {} {} '.format(t, i1, i2))
def parse_angles(self, atoms):
atoms.angles = []
atoms.max_n_angles = 0
for i in range(len(atoms)):
if atoms.arrays['angles'][i] != '_':
n_angles = 0
for angle_list in atoms.arrays['angles'][i].split(','):
n_angles += 1
                    m = re.match(r'(\d+)\-(\d+)\((\d+)\)', angle_list)
atoms.angles.append((int(m.group(3)),int(m.group(1))+1,i+1,int(m.group(2))+1))
atoms.max_n_angles = max(atoms.max_n_angles, n_angles)
def set_angles(self, atoms):
for (t, i1, i2, i3) in atoms.angles:
self.lmp.command('create_bonds single/angle {} {} {} {}'.format(t, i1, i2, i3))
def parse_dihedrals(self,atoms):
atoms.dihedrals = []
atoms.max_n_dihedrals = 0
for i in range(len(atoms)):
if atoms.arrays['dihedrals'][i] != '_':
n_dihedrals = 0
for dihedral_list in atoms.arrays['dihedrals'][i].split(','):
n_dihedrals += 1
                    m = re.match(r'(\d+)\-(\d+)\-(\d+)\((\d+)\)', dihedral_list)
atoms.dihedrals.append((int(m.group(4)),i+1,int(m.group(1))+1,int(m.group(2))+1,int(m.group(3))+1))
atoms.max_n_dihedrals = max(atoms.max_n_dihedrals, n_dihedrals)
def set_dihedrals(self, atoms):
for (t, i1, i2, i3, i4) in atoms.dihedrals:
self.lmp.command('create_bonds single/dihedral {} {} {} {} {}'.format(t, i1, i2, i3, i4))
def parse_impropers(self,atoms):
atoms.impropers = []
atoms.max_n_impropers = 0
for i in range(len(atoms)):
if atoms.arrays['impropers'][i] != '_':
n_impropers = 0
for improper_list in atoms.arrays['impropers'][i].split(','):
n_impropers += 1
                    m = re.match(r'(\d+)\-(\d+)\-(\d+)\((\d+)\)', improper_list)
atoms.impropers.append((int(m.group(4)),i+1,int(m.group(1))+1,int(m.group(2))+1,int(m.group(3))+1))
atoms.max_n_impropers = max(atoms.max_n_impropers, n_impropers)
def set_impropers(self, atoms):
for (t, i1, i2, i3, i4) in atoms.impropers:
self.lmp.command('create_improper {} {} {} {} {}'.format(t, i1, i2, i3, i4))
def set_charges(self, atoms):
for i,j in enumerate(atoms.arrays['mmcharge']):
self.lmp.command('set atom {} charge {} '.format(i+1,j))
def set_cell(self, atoms, change=False):
lammps_cell, self.coord_transform = convert_cell(atoms.get_cell())
xhi = lammps_cell[0, 0]
yhi = lammps_cell[1, 1]
zhi = lammps_cell[2, 2]
xy = lammps_cell[0, 1]
xz = lammps_cell[0, 2]
yz = lammps_cell[1, 2]
if change:
cell_cmd = 'change_box all x final 0 {} y final 0 {} z final 0 {} xy final {} xz final {} yz final {}'\
.format(xhi, yhi, zhi, xy, xz, yz)
else:
# just in case we'll want to run with a funny shape box, and here command will only happen once, and before any calculation
if self.parameters.create_box:
self.lmp.command('box tilt large')
cell_cmd = 'region cell prism 0 {} 0 {} 0 {} {} {} {} units box'\
.format(xhi, yhi, zhi, xy, xz, yz)
self.lmp.command(cell_cmd)
def set_lammps_pos(self, atoms):
pos = atoms.get_positions() / unit_convert("distance", self.units)
# If necessary, transform the positions to new coordinate system
if self.coord_transform is not None:
pos = np.dot(self.coord_transform , np.matrix.transpose(pos))
pos = np.matrix.transpose(pos)
# Convert ase position matrix to lammps-style position array
lmp_positions = list(pos.ravel())
# Convert that lammps-style array into a C object
lmp_c_positions =\
(ctypes.c_double * len(lmp_positions))(*lmp_positions)
# self.lmp.put_coosrds(lmp_c_positions)
self.lmp.scatter_atoms('x', 1, 3, lmp_c_positions)
def calculate(self, atoms, properties, system_changes):
self.propagate(atoms, properties, system_changes, 0)
def propagate(self, atoms, properties, system_changes, n_steps, dt=None,
dt_not_real_time=False, velocity_field=None):
""""atoms: Atoms object
Contains positions, unit-cell, ...
properties: list of str
List of what needs to be calculated. Can be any combination
of 'energy', 'forces', 'stress', 'dipole', 'charges', 'magmom'
and 'magmoms'.
system_changes: list of str
List of what has changed since last calculation. Can be
any combination of these five: 'positions', 'numbers', 'cell',
'pbc', 'charges' and 'magmoms'.
"""
if len(system_changes) == 0:
return
self.coord_transform = None
if not self.started:
self.start_lammps()
########################################################################
# NB
if not self.initialized:
self.initialise_lammps(atoms)
else: # Still need to reset cell
# Reset positions so that if they are crazy from last propagation,
# change_box (in set_cell()) won't hang.
# Could do this only after testing for crazy positions?
# Could also use scatter_atoms() to set values (requires MPI comm),
# or extra_atoms() to get pointers to local data structures to zero,
# but then will have to be careful with parallelism
self.lmp.command("set atom * x 0.0 y 0.0 z 0.0")
self.set_cell(atoms, change=True)
if self.parameters.atom_types is None:
raise NameError("atom_types are mandatory.")
do_rebuild = False
do_redo_atom_types = False
try:
do_rebuild = (len(atoms.numbers) != len(self.previous_atoms_numbers)) or ("numbers" in system_changes)
if not do_rebuild:
do_redo_atom_types = (
atoms.numbers != self.previous_atoms_numbers).any()
except Exception:
pass
self.lmp.command('echo none') # don't echo the atom positions
if do_rebuild:
self.rebuild(atoms)
elif do_redo_atom_types:
self.redo_atom_types(atoms)
self.lmp.command('echo log') # switch back log
self.set_lammps_pos(atoms)
if n_steps > 0: # TODO: here are velocities passed onto LAMMPS
if velocity_field is None:
vel = atoms.get_velocities() / unit_convert("velocity",
self.units)
else:
vel = atoms.arrays[velocity_field]
# If necessary, transform the velocities to new coordinate system
if self.coord_transform is not None:
vel = np.dot(self.coord_transform, np.matrix.transpose(vel))
vel = np.matrix.transpose(vel)
# Convert ase velocities matrix to lammps-style velocities array
lmp_velocities = list(vel.ravel())
# Convert that lammps-style array into a C object
lmp_c_velocities =\
(ctypes.c_double * len(lmp_velocities))(*lmp_velocities)
# self.lmp.put_coords(lmp_c_velocities)
self.lmp.scatter_atoms('v', 1, 3, lmp_c_velocities)
# Keep atoms fixed
# # RY: use LAMMPS_init_cmds to set up NVE,
# # e.g. group fixed id <= X; group mobile id > X; fix 1 mobile nve
# keep_atoms_fixed = int(sum([x == 0 for x in lmp_velocities]) / 3)
# if keep_atoms_fixed > 0:
# self.lmp.command("group fixed id <= " + str(keep_atoms_fixed))
# self.lmp.command("group mobile id > " + str(keep_atoms_fixed))
#self.lmp.command("fix freeze fixed setforce 0.0 0.0 0.0")
#if atoms.info["set_wall"]:
# self.lmp.command("fix walls all wall/reflect zlo 0 zhi "
# + str(atoms.cell[2, 2]) + " units box")
# TODO: if we fix forces here, then it should be passed on, just
# pass on keep_atoms_fixed
# TODO: if you have atoms with EXACTLY zero velocities, then freeze
# them
# TODO: keep_atoms_fixed = 0 for potential energy calculations of the
# initial configurations
# Run for 0 time to calculate
if dt is not None:
if dt_not_real_time:
self.lmp.command('timestep %.30f' % dt)
else:
self.lmp.command('timestep %.30f' % ( dt/unit_convert("time", self.units)) )
self.lmp.command('run %d' % n_steps)
if n_steps > 0:
# TODO this must be slower than native copy, but why is it broken?
pos = np.array([x for x in self.lmp.gather_atoms("x",1,3)]).reshape(-1,3)
if self.coord_transform is not None:
pos = np.dot(pos, self.coord_transform)
atoms.set_positions(pos * unit_convert("distance", self.units))
vel = np.array([v for v in self.lmp.gather_atoms("v",1,3)]).reshape(-1,3)
if self.coord_transform is not None:
vel = np.dot(vel, self.coord_transform)
if velocity_field is None:
atoms.set_velocities(vel * unit_convert("velocity", self.units))
if velocity_field is not None:
nreflects = self.lmp.extract_fix('1',0,1,0)
atoms.info['nreflects'] = nreflects
nreversals = self.lmp.extract_fix('1',0,1,1)
atoms.info['nreversals'] = nreversals
# Extract the forces and energy
# if 'energy' in properties:
self.results['energy'] = self.lmp.extract_variable('pe', None, 0) * unit_convert("energy", self.units)
# self.results['energy'] = self.lmp.extract_global('pe', 0)
# if 'stress' in properties:
stress = np.empty(6)
# stress_vars = ['pxx', 'pyy', 'pzz', 'pxy', 'pxz', 'pyz']
stress_vars = ['pxx', 'pyy', 'pzz', 'pyz', 'pxz', 'pxy']
for i, var in enumerate(stress_vars):
stress[i] = self.lmp.extract_variable(var, None, 0)
stress_mat = np.zeros( (3,3) )
stress_mat[0,0] = stress[0]
stress_mat[1,1] = stress[1]
stress_mat[2,2] = stress[2]
stress_mat[1,2] = stress[3]
stress_mat[2,1] = stress[3]
stress_mat[0,2] = stress[4]
stress_mat[2,0] = stress[4]
stress_mat[0,1] = stress[5]
stress_mat[1,0] = stress[5]
if self.coord_transform is not None:
stress_mat = np.dot(self.coord_transform.T, np.dot(stress_mat, self.coord_transform))
stress[0] = stress_mat[0,0]
stress[1] = stress_mat[1,1]
stress[2] = stress_mat[2,2]
stress[3] = stress_mat[1,2]
stress[4] = stress_mat[0,2]
stress[5] = stress_mat[0,1]
self.results['stress'] = stress * (-unit_convert("pressure", self.units))
# if 'forces' in properties:
f = np.zeros((len(atoms), 3)) # TODO: sets forces, doesn't update them
f[:,:] = np.array([x for x in self.lmp.gather_atoms("f",1,3)]).reshape(-1,3)
f *= unit_convert("force", self.units)
if self.coord_transform is not None:
self.results['forces'] = np.dot(f, self.coord_transform)
else:
self.results['forces'] = f.copy()
if not self.parameters.keep_alive:
self.lmp.close()
def lammpsbc(self, pbc, fix):
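        # LAMMPS boundary styles: 'p' = periodic, 'f' = fixed, 's' = shrink-wrapped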
if pbc:
return 'p'
elif fix:
return 'f'
else:
return 's'
def rebuild(self,atoms):
try:
n_diff = len(atoms.numbers) - len(self.previous_atoms_numbers)
except:
n_diff = len(atoms.numbers)
if n_diff > 0:
if any([("reax/c" in cmd) for cmd in self.parameters.lmpcmds]):
self.lmp.command("pair_style lj/cut 2.5")
self.lmp.command("pair_coeff * * 1 1")
for cmd in self.parameters.lmpcmds:
if ("pair_style" in cmd) or ("pair_coeff" in cmd):
self.lmp.command(cmd)
cmd = "create_atoms 1 random {} 1 NULL".format(n_diff)
self.lmp.command(cmd)
elif n_diff < 0:
cmd = "group delatoms id {}:{}".format(len(atoms.numbers)+1,len(self.previous_atoms_numbers))
self.lmp.command(cmd)
cmd = "delete_atoms group delatoms"
self.lmp.command(cmd)
self.redo_atom_types(atoms)
def redo_atom_types(self,atoms):
if self.parameters.atom_types_equal_atomic_numbers:
current_types = { (i+1,Z) for i,Z in enumerate( atoms.get_atomic_numbers() ) }
else:
current_types = { (i+1,self.parameters.atom_types[Z]) for i,Z in enumerate( atoms.get_atomic_numbers() ) }
try:
if self.parameters.atom_types_equal_atomic_numbers:
previous_types = { (i+1,Z)
for i,Z in enumerate( self.previous_atoms_numbers ) }
else:
previous_types = { (i+1,self.parameters.atom_types[Z])
for i,Z in enumerate( self.previous_atoms_numbers ) }
except:
previous_types = set()
for (i,i_type) in current_types - previous_types:
cmd = "set atom {} type {}".format(i,i_type)
self.lmp.command(cmd)
self.previous_atoms_numbers = atoms.numbers.copy()
def restart_lammps(self, atoms):
if self.started:
self.lmp.command("clear")
# hope there's no other state to be reset
self.started=False
self.initialized=False
self.previous_atoms_numbers = []
self.start_lammps()
self.initialise_lammps(atoms)
def start_lammps(self):
# start lammps process
if self.parameters.log_file is None:
cmd_args = ['-echo', 'log', '-log', 'none', '-screen', 'none', '-nocite']
else:
cmd_args = ['-echo', 'log', '-log', self.parameters.log_file,
'-screen', 'none','-nocite']
self.cmd_args = cmd_args
if not hasattr(self, 'lmp'):
self.lmp = lammps(self.parameters.lammps_name, self.cmd_args, comm=self.parameters.comm)
# Use metal units: Angstrom, ps, and eV
for cmd in self.parameters.lammps_header:
self.lmp.command(cmd)
for cmd in self.parameters.lammps_header:
if "units" in cmd:
self.units = cmd.split()[1]
if hasattr(self.parameters, "lammps_header_extra") and self.parameters.lammps_header_extra is not None:
for cmd in self.parameters.lammps_header_extra:
self.lmp.command(cmd)
self.started=True
def initialise_lammps(self, atoms):
# Initialising commands
if self.parameters.boundary:
# if the boundary command is in the supplied commands use that
# otherwise use atoms pbc
pbc = atoms.get_pbc()
for cmd in self.parameters.lmpcmds:
if 'boundary' in cmd:
break
else:
fix = False
# TODO: RBW – quick fix so that boundary parallel to surface
# is not shrink wrapped
# if "set_wall" in atoms.info.keys():
# fix = True
self.lmp.command('boundary ' + ' '.join([self.lammpsbc(bc, fix)
for bc in pbc]))
# Initialize cell
self.set_cell(atoms, change=not self.parameters.create_box)
if self.parameters.atom_types is None:
raise NameError("atom_types are mandatory.")
if isinstance(self.parameters.atom_types,dict):
# atom_types is a dictionary with symbols (or numbers) as keys
self.parameters.atom_types_equal_atomic_numbers = False
symbol_atom_types = self.parameters.atom_types.copy()
self.parameters.atom_types = {}
for sym in symbol_atom_types:
try:
num = int(sym)
except:
num = symbols2numbers(sym)[0]
self.parameters.atom_types[num] = symbol_atom_types[sym]
else: # not a dict, must be the string TYPE_EQUALS_Z
if self.parameters.atom_types == "TYPE_EQUALS_Z":
self.parameters.atom_types_equal_atomic_numbers = True
self.parameters.atom_types = {}
for Z in atoms.get_atomic_numbers():
self.parameters.atom_types[Z] = Z
else:
raise ValueError('atom_types parameter "%s" is string, but not TYPE_EQUALS_Z' % self.parameters.atom_types)
# Collect chemical symbols
symbols = np.asarray(atoms.get_chemical_symbols())
numbers = np.asarray(atoms.get_atomic_numbers())
# Initialize box
if self.parameters.create_box:
# count number of known types
n_types = len(self.parameters.atom_types)
create_box_command = 'create_box {} cell'.format(n_types)
# count numbers of bonds and angles defined by potential
n_dihedral_types = 0
n_improper_types = 0
n_angle_types = 0
n_bond_types = 0
for cmd in self.parameters.lmpcmds:
                m = re.match(r'\s*angle_coeff\s+(\d+)', cmd)
                if m is not None:
                    n_angle_types = max(int(m.group(1)), n_angle_types)
                m = re.match(r'\s*bond_coeff\s+(\d+)', cmd)
                if m is not None:
                    n_bond_types = max(int(m.group(1)), n_bond_types)
                m = re.match(r'\s*dihedral_coeff\s+(\d+)', cmd)
                if m is not None:
                    n_dihedral_types = max(int(m.group(1)), n_dihedral_types)
                m = re.match(r'\s*improper_coeff\s+(\d+)', cmd)
if m is not None:
n_improper_types = max(int(m.group(1)), n_improper_types)
if self.parameters.read_molecular_info:
if 'bonds' in atoms.arrays:
self.parse_bonds(atoms)
create_box_command += ' bond/types {} extra/bond/per/atom {}'.format(n_bond_types,atoms.max_n_bonds)
if 'angles' in atoms.arrays:
self.parse_angles(atoms)
create_box_command += ' angle/types {} extra/angle/per/atom {}'.format(n_angle_types,atoms.max_n_angles)
if 'dihedrals' in atoms.arrays:
self.parse_dihedrals(atoms)
create_box_command += ' dihedral/types {} extra/dihedral/per/atom {}'.format(n_dihedral_types,atoms.max_n_dihedrals)
if 'impropers' in atoms.arrays:
self.parse_impropers(atoms)
create_box_command += ' improper/types {} extra/improper/per/atom {}'.format(n_improper_types,atoms.max_n_impropers)
self.lmp.command(create_box_command)
# Initialize the atoms with their types
# positions do not matter here
if self.parameters.create_atoms:
self.lmp.command('echo none') # don't echo the atom positions
self.rebuild(atoms)
self.lmp.command('echo log') # turn back on
# execute the user commands
for cmd in self.parameters.lmpcmds:
self.lmp.command(cmd)
# Set masses after user commands, to override EAM provided masses, e.g.
masses = atoms.get_masses()
for Z in self.parameters.atom_types:
in_cur_sys=False
for i in range(len(atoms)):
if numbers[i] == Z:
# convert from amu (ASE) to lammps mass unit)
self.lmp.command('mass %d %.30f' % (self.parameters.atom_types[Z], masses[i] /
unit_convert("mass", self.units) ))
in_cur_sys=True
break
if not in_cur_sys:
self.lmp.command('mass %d %.30f' % (self.parameters.atom_types[Z], 1.0))
# Define force & energy variables for extraction
self.lmp.command('variable pxx equal pxx')
self.lmp.command('variable pyy equal pyy')
self.lmp.command('variable pzz equal pzz')
self.lmp.command('variable pxy equal pxy')
self.lmp.command('variable pxz equal pxz')
self.lmp.command('variable pyz equal pyz')
# I am not sure why we need this next line but LAMMPS will
# raise an error if it is not there. Perhaps it is needed to
# ensure the cell stresses are calculated
self.lmp.command('thermo_style custom pe pxx emol ecoul')
self.lmp.command('variable fx atom fx')
self.lmp.command('variable fy atom fy')
self.lmp.command('variable fz atom fz')
# do we need this if we extract from a global ?
self.lmp.command('variable pe equal pe')
self.lmp.command("neigh_modify delay 0 every 1 check yes")
if self.parameters.read_molecular_info:
# read in bonds if there are bonds from the ase-atoms object if the molecular flag is set
if 'bonds' in atoms.arrays:
self.set_bonds(atoms)
# read in angles if there are angles from the ase-atoms object if the molecular flag is set
if 'angles' in atoms.arrays:
self.set_angles(atoms)
# read in dihedrals if there are dihedrals from the ase-atoms object if the molecular flag is set
if 'dihedrals' in atoms.arrays:
self.set_dihedrals(atoms)
# read in impropers if there are impropers from the ase-atoms object if the molecular flag is set
if 'impropers' in atoms.arrays:
self.set_impropers(atoms)
if self.parameters.read_molecular_info and 'mmcharge' in atoms.arrays:
self.set_charges(atoms)
self.initialized = True
def write_lammps_data(filename, atoms, atom_types, comment=None, cutoff=None,
molecule_ids=None, charges=None, units='metal',
bond_types=None, angle_types=None, dihedral_types=None,
improper_types=None):
    if isinstance(filename, str):
fh = open(filename, 'w')
else:
fh = filename
if comment is None:
comment = 'lammpslib autogenerated data file'
fh.write(comment.strip() + '\n\n')
fh.write('{0} atoms\n'.format(len(atoms)))
fh.write('{0} atom types\n'.format(len(atom_types)))
if bond_types:
from matscipy.neighbours import neighbour_list
i_list, j_list = neighbour_list('ij', atoms, cutoff)
print('Bonds:')
bonds = []
for bond_type, (Z1, Z2) in enumerate(bond_types):
bond_mask = (atoms.numbers[i_list] == Z1) & (atoms.numbers[j_list] == Z2)
print((Z1, Z2), bond_mask.sum())
for (I, J) in zip(i_list[bond_mask], j_list[bond_mask]):
#NB: LAMMPS uses 1-based indices for bond types and particle indices
bond = (bond_type+1, I+1, J+1)
bonds.append(bond)
print('')
if len(bonds) > 0:
fh.write('{0} bonds\n'.format(len(bonds)))
fh.write('{0} bond types\n'.format(len(bond_types)))
if angle_types:
print('Angles:')
angle_count = { angle : 0 for angle in angle_types }
angles = []
for I in range(len(atoms)):
for J in j_list[i_list == I]:
for K in j_list[i_list == J]:
if J < K:
continue
Zi, Zj, Zk = atoms.numbers[[I, J, K]]
if (Zj, Zi, Zk) in angle_types:
angle = (angle_types.index((Zj, Zi, Zk))+1, J+1, I+1, K+1)
angle_count[(Zj, Zi, Zk)] += 1
angles.append(angle)
for angle in angle_types:
print(angle, angle_count[angle])
print('')
if len(angles) > 0:
fh.write('{0} angles\n'.format(len(angles)))
fh.write('{0} angle types\n'.format(len(angle_types)))
if dihedral_types:
print('Dihedrals:')
dihedral_count = { dihedral : 0 for dihedral in dihedral_types }
dihedrals = []
for I in range(len(atoms)):
for J in j_list[i_list == I]:
for K in j_list[i_list == J]:
for L in j_list[i_list == K]:
Zi, Zj, Zk, Zl = atoms.numbers[[I, J, K, L]]
if (Zi, Zj, Zk, Zl) in dihedral_types:
dihedral = (dihedral_types.index((Zi, Zj, Zk, Zl))+1,
I+1, J+1, K+1, L+1)
dihedral_count[(Zi, Zj, Zk, Zl)] += 1
dihedrals.append(dihedral)
for dihedral in dihedral_types:
print(dihedral, dihedral_count[dihedral])
print('')
if len(dihedrals) > 0:
fh.write('{0} dihedrals\n'.format(len(dihedrals)))
fh.write('{0} dihedral types\n'.format(len(dihedral_types)))
if improper_types:
print('Impropers:')
improper_count = { improper : 0 for improper in improper_types }
impropers = []
for I in range(len(atoms)):
for J in j_list[i_list == I]:
for K in j_list[i_list == J]:
for L in j_list[i_list == K]:
Zi, Zj, Zk, Zl = atoms.numbers[[I, J, K, L]]
if (Zi, Zj, Zk, Zl) in improper_types:
improper = (improper_types.index((Zi, Zj, Zk, Zl))+1,
I+1, J+1, K+1, L+1)
improper_count[(Zi, Zj, Zk, Zl)] += 1
impropers.append(improper)
for improper in improper_types:
print(improper, improper_count[improper])
print('')
if len(impropers) > 0:
fh.write('{0} impropers\n'.format(len(impropers)))
fh.write('{0} improper types\n'.format(len(improper_types)))
fh.write('\n')
cell, coord_transform = convert_cell(atoms.get_cell())
fh.write('{0:16.8e} {1:16.8e} xlo xhi\n'.format(0.0, cell[0, 0]))
fh.write('{0:16.8e} {1:16.8e} ylo yhi\n'.format(0.0, cell[1, 1]))
fh.write('{0:16.8e} {1:16.8e} zlo zhi\n'.format(0.0, cell[2, 2]))
fh.write('{0:16.8e} {1:16.8e} {2:16.8e} xy xz yz\n'.format(cell[0, 1], cell[0, 2], cell[1, 2]))
fh.write('\nMasses\n\n')
    Z_mass = {}
masses = atoms.get_masses()
symbols = atoms.get_chemical_symbols()
numbers = atoms.get_atomic_numbers()
for Z in atom_types:
for i in range(len(atoms)):
if numbers[i] == Z:
Z_mass[Z] = masses[i] / unit_convert("mass", units)
break
else:
Z_mass[Z] = atomic_masses[Z] / unit_convert("mass", units)
for (Z, typ) in sorted(atom_types.items(), key=operator.itemgetter(1)):
fh.write('{0} {1}\n'.format(typ, Z_mass[Z]))
fh.write('\nAtoms # full\n\n')
if molecule_ids is None:
molecule_ids = np.zeros(len(atoms), dtype=int)
if charges is None:
charges = atoms.get_initial_charges()
for i, (Z, mol, q, pos) in enumerate(zip(numbers, molecule_ids,
charges, atoms.get_positions())):
typ = atom_types[Z]
fh.write('{0} {1} {2} {3:16.8e} {4:16.8e} {5:16.8e} {6:16.8e}\n'
.format(i+1, mol, typ, q, pos[0], pos[1], pos[2]))
if bond_types and len(bonds) > 0:
fh.write('\nBonds\n\n')
for idx, bond in enumerate(bonds):
fh.write('{0} {1} {2} {3}\n'
.format(*[idx+1] + list(bond)))
if angle_types and len(angles) > 0:
fh.write('\nAngles\n\n')
for idx, angle in enumerate(angles):
fh.write('{0} {1} {2} {3} {4}\n'
.format(*[idx+1] + list(angle)))
if dihedral_types and len(dihedrals) > 0:
fh.write('\nDihedrals\n\n')
for idx, dihedral in enumerate(dihedrals):
fh.write('{0} {1} {2} {3} {4} {5}\n'
.format(*[idx+1] + list(dihedral)))
if improper_types and len(impropers) > 0:
fh.write('\nImpropers\n\n')
for idx, improper in enumerate(impropers):
fh.write('{0} {1} {2} {3} {4} {5}\n'
.format(*[idx+1] + list(improper)))
    if isinstance(filename, str):
fh.close()
| libAtoms/pymatnest | lammpslib.py | lammpslib.py | py | 38,515 | python | en | code | 26 | github-code | 6 |
39672826344 |
import protocol
import pytest
class TestBitstream:
@pytest.mark.parametrize(
"s,val",
[
("", []),
("1", [0x10000000]),
("01", [0x01000000]),
("0102", [0x01020000]),
("0102030405", [0x01020304, 0x05000000]),
],
)
def test_from_string(self, s, val):
bs = protocol.Bitstream.from_string(s)
assert bs._data == val
@pytest.mark.parametrize(
"data,num,val",
[
([], 0, 0),
([0xA0000000], 1, 1),
([0xA0000000], 2, 2),
([0x00000001], 32, 1),
([0x00000001, 0x80000000], 33, 3),
],
)
def test_get_bits(self, data, num, val):
bs = protocol.Bitstream(data)
assert bs.get_bits(num) == val
@pytest.mark.parametrize(
"data,num,val",
[
([0x12340000], [2, 2, 4, 8], [0, 1, 2, 0x34]),
],
)
def test_get_bits_multiple(self, data, num, val):
bs = protocol.Bitstream(data)
for i in range(len(num)):
assert bs.get_bits(num[i]) == val[i]
class TestProtocol:
def test_read_packet(self):
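        # "D2FE28" is the Advent of Code 2021 day 16 example packet:
        # header version 6, type ID 4, literal value 2021 (the 15 presumably
        # being the bit count this parser reports for the literal body).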
bs = protocol.Bitstream.from_string("D2FE28")
p = protocol.Parser(bs)
assert p.read_version() == 6
assert p.read_type() == 4
assert p.read_literal() == (2021, 15)
| cmatsuoka/aoc | 2021 - submarine/16 - bitstream protocol/test_protocol.py | test_protocol.py | py | 1,376 | python | en | code | 0 | github-code | 6 |
21998646716 |
from typing import List
from collections import defaultdict
class Solution:
def countPairs(self, deliciousness: List[int]) -> int:
maxsum = max(deliciousness) * 2
pairs = 0
dd = defaultdict(int)
        for i in deliciousness:
            s = 1
            while s <= maxsum:
                # pairs formed with previously seen values summing to the power of two s
                pairs += dd[s - i]
                s <<= 1
            # record the current value itself so later elements can pair with it
            dd[i] += 1
return pairs % 1000000007
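# Example (added; LeetCode 1711 sample): Solution().countPairs([1, 3, 5, 7, 9]) == 4
# (pairs summing to a power of two: 1+3, 1+7, 3+5, 7+9)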
| hangwudy/leetcode | 1700-1799/1711. 大餐计数.py | 1711. 大餐计数.py | py | 456 | python | en | code | 0 | github-code | 6 |
22083768955 |
def DisappearanceOfIntegers(A, Q, M, t, N):
out=[]
en=N//2
on=N-en
for x in range(Q):
time=t[x]%(2*N)
pos=M[x]
if time==0:
ans=pos
elif time==N:
ans=-1
elif time<N:
if time<=on:
if pos<=time:
ans=2*pos
else:
ans=2*time+pos-time
else:
ans=(time-on)*2+2*pos
elif time>N:
if time-N<=on:
ans=2*pos-1
if time-N<pos:
ans=-1
else:
temp=2*(time-N-on)+1
if pos<=temp:
ans=pos
else:
ans=temp+2*(pos-temp)
if ans>0 and ans<=N:
print(A[ans-1])
else:
print(-1)
    return out  # answers are printed above; the returned list stays empty
N = int(input())
A = list(map(int, input().split(" ")))
Q = int(input())
t=[]
M=[]
for i in range(Q):
tmp = list(map(int, input().split(" ")))
t.append(tmp[0])
M.append(tmp[1])
out_ = DisappearanceOfIntegers(A, Q, M, t, N)
| vamshipv/code-repo | NOKIA/4.py | 4.py | py | 1,144 | python | en | code | 0 | github-code | 6 |
2375158470 |
class Employee:
def __init__(self, name, age, salary):
self.name = name
self.age = age
self.salary = salary
def __str__(self):
return f"{self.name}, {self.age}, {self.salary}"
class EmployeesManager:
def __init__ (self):
self.employees = []
def add_new_employee(self, name, age, salary):
emp = Employee(name, age, salary)
self.employees.append(emp)
print("\n" , "************************")
print("A new employee has been added.")
print("\n" , "************************")
def list_all_employees(self):
for emp in self.employees:
print(emp , "\n")
        if not self.employees:
print("There are no employees to display. ")
def delete_employee_by_age_range(self, min_age, max_age):
self.min_age = min_age
self.max_age = max_age
for emp in range (len(self.employees)):
x = self.employees[emp]
if x.age in range(min_age, max_age):
del self.employees[emp]
print("\n", "Employee has been deleted." , "\n")
else:
print("There are no employees with this age range.")
def update_salary_given_a_name(self, name, newsalary):
for emp in self.employees:
if emp.name == name:
emp.salary = newsalary
else:
break
def make_choice(self, choice):
if choice == 1:
name = input("Enter employee name: ")
age = int(input("Enter employee age: "))
salary = int(input("Enter employee salary: "))
self.add_new_employee(name, age, salary)
elif choice == 2:
self.list_all_employees()
print("\n")
elif choice == 3:
            if len(self.employees) == 0:
print("No employees to remove. ")
print("\n")
else:
min_age = int(input("Enter the starting age: "))
max_age = int(input("Enter the ending age: "))
self.delete_employee_by_age_range(min_age, max_age)
elif choice == 4:
while True:
                if len(self.employees) == 0:
print("There are no employees to update salary. \n")
break
else:
name = input("Enter employee name: ")
if not any(emp.name == name for emp in self.employees):
print("Name doesn't match.")
else:
newsalary = int(input("Enter the new salary: "))
self.update_salary_given_a_name(name, newsalary)
print("\n","The selected employee's salary has been updated.\n")
break
elif choice == 5:
print("End of the program.")
exit()
else:
print("Invalid input. Enter a valid input from 1 to 5! ")
class FrontendManager:
def __init__(self):
self.emp_manager = EmployeesManager()
def print_options(self):
print("Welcome to Employees system. Wish you a nice experience 😁" , "\n")
print("Program options:")
print("1) Add new employee")
print("2) List all employees")
print("3) Delete by age range")
print("4) Update salary given a name")
print("5) End the program")
def get_option(self):
while True:
choice = int(input("Enter your choice (from 1 to 5): "))
if 1 <= choice <= 5:
return choice
else:
print("Invalid input. Enter a choice from 1 to 5! ")
def run(self):
while True:
self.print_options()
choice = self.get_option()
self.emp_manager.make_choice(choice)
if __name__ == "__main__":
FrontendManager().run()
|
mariamelwirish/Employee_System
|
Employees System.py
|
Employees System.py
|
py
| 4,002 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42063972724
|
import mysql.connector
from mysql.connector import Error
# MySQL connection settings
config = {
    'user': 'root',
    'password': 'Mafer159159',
    'host': 'localhost',
}
connection = None
try:
    connection = mysql.connector.connect(**config)
    if connection.is_connected():
        cursor = connection.cursor()
        cursor.execute("CREATE DATABASE IF NOT EXISTS reviews2;")
        cursor.execute("USE reviews2;")
        # Create the usuarios table
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS usuarios (
            id INT AUTO_INCREMENT PRIMARY KEY,
            nombre VARCHAR(100) NOT NULL,
            correo VARCHAR(100) UNIQUE NOT NULL,
            contraseña VARCHAR(100) NOT NULL
        );""")
        # Create the posts table
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS posts (
            id INT AUTO_INCREMENT PRIMARY KEY,
            titulo VARCHAR(255) NOT NULL,
            contenido TEXT NOT NULL,
            usuario_id INT,
            FOREIGN KEY (usuario_id) REFERENCES usuarios(id)
        );""")
        print("Database and tables created successfully!")
except Error as e:
    print("ERROR", e)
finally:
    # guard against a failed connect, where `connection` would otherwise be None
    if connection is not None and connection.is_connected():
        cursor.close()
        connection.close()
        print("MySQL connection closed")
|
maferfarfan21/Juegos
|
flask/crud/main.py
|
main.py
|
py
| 1,308 |
python
|
es
|
code
| 0 |
github-code
|
6
|
20844744315
|
import os
import random
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
class Images(Dataset):
def __init__(self, folder, size, is_training, downsample=False, preload=False):
"""
I assume that all images in the
folder have size at least `size`.
Arguments:
folder: a string, the path to a folder with images.
size: an integer.
is_training: a boolean.
downsample: a boolean.
preload: a boolean.
"""
self.names = os.listdir(folder)
self.folder = folder
self.downsample = downsample
self.preload = preload
self.size = size
if is_training:
self.transform = transforms.Compose([
transforms.RandomCrop(size),
transforms.RandomVerticalFlip(),
transforms.RandomHorizontalFlip(),
transforms.ToTensor()
])
else:
self.transform = transforms.Compose([
transforms.CenterCrop(size),
transforms.ToTensor()
])
if preload:
# load all images into the memory
self.images = []
for i in range(len(self)):
image = self.load(i)
self.images.append(image)
def __len__(self):
return len(self.names)
def __getitem__(self, i):
"""
Returns:
a float tensor with shape [3, size, size].
It represents a RGB image with
pixel values in [0, 1] range.
"""
image = self.images[i] if self.preload else self.load(i)
if self.downsample:
r = random.choice([1, 2, 3])
w, h = image.size
w, h = w // r, h // r
if r > 1 and w >= self.size and h >= self.size:
image = image.resize((w, h), Image.LANCZOS)
return self.transform(image)
def load(self, i):
name = self.names[i]
path = os.path.join(self.folder, name)
image = Image.open(path).convert('RGB')
return image
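# A minimal usage sketch (the folder path 'data/train' is an assumption):
# random 96x96 crops with flips for training; each item is a [3, 96, 96]
# float tensor with values in [0, 1].
if __name__ == '__main__':
    dataset = Images('data/train', size=96, is_training=True)
    x = dataset[0]
    print(x.shape)  # torch.Size([3, 96, 96])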
|
TropComplique/SRFeat-pytorch
|
input_pipeline.py
|
input_pipeline.py
|
py
| 2,160 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23308286546
|
import stanza
from headline_retriever import load_articles, collect_articles, save_articles
from textblob import TextBlob
from datetime import date
NLP = stanza.Pipeline(lang='en', processors='tokenize,mwt,pos,lemma,ner')
END_DATE = date(2020, 3, 27) # the chosen last day to retrieve article headlines
# in place modification of a list of article dictionaries
def pre_process(articles):
for article in articles:
headline = article['headline']
sentiment = TextBlob(headline).sentiment
# print("sentiment:", sentiment)
sentiment_polarity = sentiment[0] # range from -1 to 1. -1 being the most negative, 1 being the most positive
sentiment_subjectivity = sentiment[1] # range from 0 to 1. 0 being factual, 1 being an opinion
processed_headline = NLP(headline)
words = []
lemmas = []
pos = []
entities = processed_headline.entities
entity_dicts = []
for entity in entities:
entity_dict = dict()
entity_dict['text'] = entity.text
entity_dict['type'] = entity.type
entity_dict['start_char'] = entity.start_char
entity_dict['end_char'] = entity.end_char
entity_dicts.append(entity_dict)
for sentence in processed_headline.sentences:
for word in sentence.words:
words.append(word.text)
lemmas.append(word.lemma)
pos.append(word.pos)
article['sentiment_polarity'] = sentiment_polarity
article['sentiment_subjectivity'] = sentiment_subjectivity
article['words'] = words
article['lemmas'] = lemmas
article['pos'] = pos
article['entities'] = entity_dicts
def average_sentiments(preprocessed_articles):
if len(preprocessed_articles) < 1:
print("avg polarity:", 0)
print("avg subjectivity:", 0)
return
total_subjectivity = 0
total_polarity = 0
for article in preprocessed_articles:
total_polarity += article['sentiment_polarity']
total_subjectivity += article['sentiment_subjectivity']
print("avg polarity:", total_polarity/len(preprocessed_articles))
print("avg subjectivity:", total_subjectivity/len(preprocessed_articles))
def average_words_per_headline(preprocessed_articles):
total_words = 0
for article in preprocessed_articles:
total_words += len(article['headline'].split())
print("avg words:", total_words/len(preprocessed_articles))
if __name__ == "__main__":
pass
#Example commands contained below:
# attempt to load in the article data if it exists
# fox_articles = load_articles("foxnews_headlines")
# msnbc_articles = load_articles("msnbc_headlines")
# pre_process(fox_articles)
# pre_process(msnbc_articles)
# save the retrieved article data
# save_articles("foxnews_pre_processed", fox_articles)
# save_articles("msnbc_pre_processed", msnbc_articles)
|
NoahBlume/nlp_project
|
pre_processor.py
|
pre_processor.py
|
py
| 2,997 |
python
|
en
|
code
| 0 |
github-code
|
6
|
9803009738
|
from __future__ import division
from pydub.utils import make_chunks
import re
import sys
from google.cloud import speech
import pyaudio
from six.moves import queue
from io import BytesIO
from pydub import AudioSegment
from multiprocessing import Process
# You can choose voices from https://cloud.google.com/text-to-speech/docs/voices
voice_choice = "en-US-Wavenet-F"
output_lang_code = re.search("[a-z]{2,3}-[A-Z]{2}", voice_choice).group()
# Audio recording parameters
RATE = 16000
CHUNK = int(RATE / 10) # 100ms
# You can use PyAudio to find the right index for the device you'd like to use
output_device_index = 11
def synthesize_text(text):
"""Synthesizes speech from the input file of text."""
from google.cloud import texttospeech
client = texttospeech.TextToSpeechClient()
input_text = texttospeech.SynthesisInput(text=text)
# Note: the voice can also be specified by name.
# Names of voices can be retrieved with client.list_voices().
voice = texttospeech.VoiceSelectionParams(
language_code=output_lang_code, name=voice_choice
)
audio_config = texttospeech.AudioConfig(
audio_encoding=texttospeech.AudioEncoding.MP3
)
response = client.synthesize_speech(
request={"input": input_text, "voice": voice, "audio_config": audio_config}
)
# The response's audio_content is binary.
fp = BytesIO()
fp.write(response.audio_content)
fp.seek(0)
song = AudioSegment.from_file(fp, format="mp3")
play_from_device(song, output_device_index)
class MicrophoneStream(object):
"""Opens a recording stream as a generator yielding the audio chunks."""
def __init__(self, rate, chunk):
self._rate = rate
self._chunk = chunk
# Create a thread-safe buffer of audio data
self._buff = queue.Queue()
self.closed = True
def __enter__(self):
self._audio_interface = pyaudio.PyAudio()
self._audio_stream = self._audio_interface.open(
format=pyaudio.paInt16,
# The API currently only supports 1-channel (mono) audio
# https://goo.gl/z757pE
channels=1,
rate=self._rate,
input=True,
frames_per_buffer=self._chunk,
# Run the audio stream asynchronously to fill the buffer object.
# This is necessary so that the input device's buffer doesn't
# overflow while the calling thread makes network requests, etc.
stream_callback=self._fill_buffer,
)
self.closed = False
return self
def __exit__(self, type, value, traceback):
self._audio_stream.stop_stream()
self._audio_stream.close()
self.closed = True
# Signal the generator to terminate so that the client's
# streaming_recognize method will not block the process termination.
self._buff.put(None)
self._audio_interface.terminate()
def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
"""Continuously collect data from the audio stream, into the buffer."""
self._buff.put(in_data)
return None, pyaudio.paContinue
def generator(self):
while not self.closed:
# Use a blocking get() to ensure there's at least one chunk of
# data, and stop iteration if the chunk is None, indicating the
# end of the audio stream.
chunk = self._buff.get()
if chunk is None:
return
data = [chunk]
# Now consume whatever other data's still buffered.
while True:
try:
chunk = self._buff.get(block=False)
if chunk is None:
return
data.append(chunk)
except queue.Empty:
break
yield b"".join(data)
# modified from pydub's _play_with_pyaudio
def play_from_device(seg, device_index):
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(seg.sample_width),
channels=seg.channels,
rate=seg.frame_rate,
output=True,
output_device_index=device_index)
# Just in case there were any exceptions/interrupts, we release the resource
# So as not to raise OSError: Device Unavailable should play() be used again
try:
# break audio into half-second chunks (to allows keyboard interrupts)
for chunk in make_chunks(seg, 500):
stream.write(chunk._data)
finally:
stream.stop_stream()
stream.close()
p.terminate()
def listen_print_loop(responses):
"""Iterates through server responses and prints them.
The responses passed is a generator that will block until a response
is provided by the server.
Each response may contain multiple results, and each result may contain
multiple alternatives; for details, see https://goo.gl/tjCPAU. Here we
print only the transcription for the top alternative of the top result.
In this case, responses are provided for interim results as well. If the
response is an interim one, print a line feed at the end of it, to allow
the next result to overwrite it, until the response is a final one. For the
final one, print a newline to preserve the finalized transcription.
"""
num_chars_printed = 0
for response in responses:
if not response.results:
continue
# The `results` list is consecutive. For streaming, we only care about
# the first result being considered, since once it's `is_final`, it
# moves on to considering the next utterance.
result = response.results[0]
if not result.alternatives:
continue
# Display the transcription of the top alternative.
transcript = result.alternatives[0].transcript
# Display interim results, but with a carriage return at the end of the
# line, so subsequent lines will overwrite them.
#
# If the previous result was longer than this one, we need to print
# some extra spaces to overwrite the previous result
overwrite_chars = " " * (num_chars_printed - len(transcript))
if not result.is_final:
sys.stdout.write(transcript + overwrite_chars + "\r")
sys.stdout.flush()
num_chars_printed = len(transcript)
else:
print(transcript + overwrite_chars)
            p2 = Process(target=synthesize_text, args=(transcript + overwrite_chars,))
p2.start()
num_chars_printed = 0
def main():
# See http://g.co/cloud/speech/docs/languages
# for a list of supported languages.
input_lang_code = "en-US" # a BCP-47 language tag
client = speech.SpeechClient()
config = speech.RecognitionConfig(
encoding=speech.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=RATE,
language_code=input_lang_code,
)
streaming_config = speech.StreamingRecognitionConfig(
config=config, interim_results=True
)
with MicrophoneStream(RATE, CHUNK) as stream:
audio_generator = stream.generator()
requests = (
speech.StreamingRecognizeRequest(audio_content=content)
for content in audio_generator
)
responses = client.streaming_recognize(streaming_config, requests)
# Now, put the transcription responses to use.
listen_print_loop(responses)
if __name__ == "__main__":
main()
|
EHowardHill/speak-easy
|
basic-runtime.py
|
basic-runtime.py
|
py
| 7,631 |
python
|
en
|
code
| 0 |
github-code
|
6
|
31406415884
|
'''
Emily Lee
SoftDev1 pd6
K#25 -- Getting More REST
2018-11-15
'''
import json
import urllib.request
from flask import Flask,render_template
app=Flask(__name__)
@app.route("/")
def Hello_world():
url_stub="http://www.asterank.com/api/skymorph/search?target="
target="J99TS7A"
req=urllib.request.urlopen(url_stub+target)
fin=json.loads(req.read())
key=fin["results"][0]["key"]
url_stub="http://www.asterank.com/api/skymorph/image?key="
return render_template("index.html",
url=url_stub+key)
if __name__=="__main__":
app.debug=True
app.run()
|
ecrystale/leeE
|
25_rest/app.py
|
app.py
|
py
| 628 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17915780851
|
import smtplib
import re
import sys
import getpass
import random
import math
import time
import os
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.header import Header
import socket
socket.setdefaulttimeout(10)
"""
created by sayansree paria
verson<1.0>
"""
userId=[]
password=[]
print("\n\n\n\temail spammer v1.0\n\tdevlovper sayansree paria\n\tinitiating dummy Ids import.... ")
path=os.path.dirname(os.path.realpath(__file__))
try:
file=open(path+'\\Attribute.dat','r')
lines=file.readlines()
for i in lines:
userId.append(i.split('|')[0])
password.append(i.split('|')[1])
del lines
except FileNotFoundError:
print ("please define attributes.dat")
#raise
sys.exit(0)
except:
print("unexpected fatal error encountered while accessing Attributes\nmake sure code has access permition")
#raise
sys.exit(0)
else:
print ("\tdummy IDs successfully imported")
finally:
file.close()
def check(email):
if not re.match(r"[\w\.-_]+@[\w]+\.com",email):
print('\tinvalid email format')
#raise TypeError('userids not in format')
sys.exit(0)
print('\tprechecking ID format...\n')
if(len(userId)==0):
print('\nno IDs detected\nplease redefine Attribute file')
#raise TypeError('userids not in format')
sys.exit(0)
for i in userId:
check(i)
print(i+'\tvalid')
print('\tprecheck succesful')
print('\n\t{num} dummies will be used '.format(num=len(userId)))
print('\tInitiating authentication process\n\tthis may take several minutes')
module=[]
for i in range(len(userId)):
try:
server =smtplib.SMTP('smtp.gmail.com',587)
except :
print('\ncheck your internet connection')
sys.exit(0)
else:
print('connection established\t\t' + userId[i])
try:
server.starttls()
server.login(userId[i],password[i])
module.append(server)
del server
except smtplib.SMTPConnectError:
print('\nconnection error')
server.quit()
except smtplib.SMTPAuthenticationError:
print('\nauthentication failed'+userId[i]+'*'*5+password[i][-3:])
server.quit()
except:
print('\nunexpected error')
server.quit()
raise
else:
print('succesfully authinticated\t\t'+userId[i])
##needs sighting
target=input('enter target username:\t')
print('\t checking email')
check(target)
print(target+'\tvalid')
itr=input('enter no of attacks:\t')
print('\timporting payload')
payload=[]
try:
file=open(path+'\\payload.txt','r')
lines=file.readlines()
for i in lines:
payload.append(i)
del lines
except FileNotFoundError:
print ("please define payload.txt")
sys.exit(0)
except:
print("unexpected fatal error encountered while accessing payload\nmake sure code has access permition")
sys.exit(0)
else:
print ("\tpayload successfully imported")
finally:
file.close()
tim=3.5*int(itr)
print('\tinitiating payload injection\n\t expected time {0}min,{1}sec'.format(tim%60,tim//60))
sublist=payload[0].split('|')
for i in range(int(itr)):
rand=math.floor(random.random()*len(userId))
msg= MIMEMultipart()
msg['From'] = userId[rand]
msg['To']= target
msg['Subject']= sublist[math.floor(random.random()*len(sublist))]
body= payload[ 1+math.floor(random.random()*(len(payload)-1))]
msg.attach(MIMEText(body,'html'))
module[rand].sendmail(msg['From'],msg['To'],msg.as_string())
del msg
print('payload <{0}> using {1} successful '.format(i+1,userId[rand]))
time.sleep(2+random.random()*3)# some improvement required
print('\t terminating server connections')
for i in module:
i.quit()
print('\t payload successful\n\t devloper sayansree paria\n\t build v<1.0>')
|
Sayansree/email_spammer
|
spammer1.0.py
|
spammer1.0.py
|
py
| 3,987 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4964092483
|
import math
def gap(g, m, n):
    b_prime = None
    for i in range(m, n + 1):
        if isPrime(i):
            # only compare once a previous prime has actually been seen
            if b_prime is not None and (i - b_prime) == g:
                return [b_prime, i]
            b_prime = i
    return None
def isPrime(n):
    if n < 2:
        return False
    for i in range(2, int(math.sqrt(n)) + 1):
        if n % i == 0:
            return False
    return True
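# A small worked example: the primes in [100, 110] are 101, 103, 107 and 109,
# so gap(2, 100, 110) returns [101, 103], the first pair with a gap of 2.
if __name__ == "__main__":
    assert gap(2, 100, 110) == [101, 103]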
|
bluecloud1102/codility
|
code/gap_in_primes/gap_in_primes.py
|
gap_in_primes.py
|
py
| 328 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44673837506
|
from ListasRefNodoFinalAbstract import ListaRefNodoFinalAbstract
from Nodo import Nodo
class ListaRefNodoFinal(ListaRefNodoFinalAbstract):
    def __init__(self):
        self.inicio = None
        self.fin = None
        self.cuantos = 0
    #Returns True if the list is empty and False otherwise.
    def esta_vacia(self):
        return self.cuantos == 0
    #Removes every element from the list; returns nothing.
    def vaciar(self):
        self.inicio = None
        self.fin = None
        self.cuantos = 0
    #Returns the number of elements in the list; must be O(1).
    def tamanio(self):
        return self.cuantos
    #Adds an element at the beginning of the list; returns nothing.
    def agregar_inicio(self, elemento):
        nuevo = Nodo(elemento)
        nuevo.siguiente = self.inicio
        self.inicio = nuevo
        if self.fin is None:
            self.fin = nuevo
        self.cuantos += 1
    #Adds an element at the end of the list; returns nothing.
    def agregar_final(self, elemento):
        nuevo = Nodo(elemento)
        if self.esta_vacia():
            self.inicio = nuevo
        else:
            self.fin.siguiente = nuevo
        self.fin = nuevo
        self.cuantos += 1
    #Returns True if the element belongs to the list and False otherwise.
    def contiene(self, elemento):
        posicion = self.inicio
        while posicion is not None:
            if posicion.elemento == elemento:
                return True
            posicion = posicion.siguiente
        return False
    #Returns the first element of the list.
    def primer_elemento(self):
        return None if self.esta_vacia() else self.inicio.elemento
    #Returns the last element of the list.
    def ultimo_elemento(self):
        return None if self.esta_vacia() else self.fin.elemento
    #Removes the first element of the list; returns nothing.
    def eliminar_primero(self):
        if self.esta_vacia():
            print("the list is empty")
        else:
            self.inicio = self.inicio.siguiente
            self.cuantos -= 1
            if self.inicio is None:
                self.fin = None
    #Removes the last element of the list; returns nothing.
    def eliminar_ultimo(self):
        if self.esta_vacia():
            print("the list is empty")
        elif self.inicio is self.fin:
            self.vaciar()
        else:
            pos = self.inicio
            while pos.siguiente is not self.fin:
                pos = pos.siguiente
            pos.siguiente = None
            self.fin = pos
            self.cuantos -= 1
    #Replaces every occurrence of an element with a new one.
    def sustituir(self, actual, nuevo):
        posicion = self.inicio
        while posicion is not None:
            if posicion.elemento == actual:
                posicion.elemento = nuevo
            posicion = posicion.siguiente
    #Prints the elements at the odd positions of the list, from start to end; returns nothing.
    def imprimir(self):
        it = self.iterador()
        print("imprimir...")
        while it.has_next():
            print(it.next())
    #Returns an iterator over the list.
    def iterador(self):
        return _Iterador(self.inicio)
#_Iterador class: advances two nodes per step, so it visits alternate elements.
class _Iterador:
    def __init__(self, inicio):
        self.posicion = inicio
    def has_next(self):
        return self.posicion is not None
    def next(self):
        if self.posicion is None:
            raise NameError("Reached the end of the list")
        elemento = self.posicion.elemento
        self.posicion = self.posicion.siguiente
        if self.posicion is not None:
            self.posicion = self.posicion.siguiente
        return elemento
|
MichelleeD/Tarea-3-DBM
|
ListasRefNodoFinal.py
|
ListasRefNodoFinal.py
|
py
| 4,135 |
python
|
es
|
code
| 0 |
github-code
|
6
|
37756152880
|
i=int(input("Enter the number:"))
a=i
sum=0
while i>0:
sum=sum+(i%10)*(i%10)*(i%10)
i=i//10
if a==sum:
print(a,"is armstrong number")
else:
print(a,"is not armstrong number")
# 0,1,153,370,371,407
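# Worked example: 153 -> 1**3 + 5**3 + 3**3 = 1 + 125 + 27 = 153, so it is an
# Armstrong number; 154 is not, since 1 + 125 + 64 = 190 != 154.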
|
Bhavana-thakare/loop
|
Armstrong numbers.py
|
Armstrong numbers.py
|
py
| 222 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42420664031
|
import requests, json
url = 'https://sandbox.techops.engineering/Demand/v1/Surveys/BySurveyNumber/4592039'
params = ""
headers = {'Content-type': 'application/json', 'Authorization' : 'YOUR_API_KEY_HERE', 'Accept': 'text/plain'}
response = requests.get(url, data=params, headers=headers)
print(response.content.decode())
file = open("survey_get.json","w")
file.write(response.text)
file.close()
|
ajay-ar30/lucidcodesproject
|
week5/survey_get.py
|
survey_get.py
|
py
| 398 |
python
|
en
|
code
| 0 |
github-code
|
6
|
74553900026
|
# Exam 1 Practice
#6/11/2017
#In this program I will write a program that helps calculate
#the shipping charges for a shipping company
#First, Define the main function and greet the user
def main():
print('Hello, this is the Fast Freight Shipping Company')
print('This program will help you calculate the cost of shipping')
#Second, gather the input of the weight of the package
weight = float(input('Please enter your package\'s weight in pounds: '))
    #Third, calculate the rate for the package
    #(the brackets are checked from heaviest to lightest so boundary
    # weights such as exactly 10, 6 or 2 pounds land in the right tier)
    if weight > 10:
        rate = 4.75
    elif weight > 6:
        rate = 4.00
    elif weight > 2:
        rate = 3.00
    else:
        rate = 1.50
#Fourth, calculate the cost of the package
cost = weight * rate
#Lastly, print the results to the user and call the main function
print('Your shipping costs will be: $',cost)
main()
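# Worked example (using the brackets above): a 7.5 lb package falls in the
# 6-10 lb bracket, so the cost is 7.5 * 4.00 = $30.00.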
|
BijanJohn/Python
|
ACC/Exam_1_review_shipping_charges.py
|
Exam_1_review_shipping_charges.py
|
py
| 894 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14468056163
|
'''
A binary tree is given such that each node contains an additional random pointer which could point to any node in the tree or null.
Return a deep copy of the tree.
The tree is represented in the same input/output way as normal binary trees where each node is represented as a pair of [val, random_index] where:
val: an integer representing Node.val
random_index: the index of the node (in the input) where the random pointer points to, or null if it does not point to any node.
You will be given the tree in class Node and you should return the cloned tree in class NodeCopy. NodeCopy class is just a clone of Node class with the same attributes and constructors.
Example 1:
Input: root = [[1,null],null,[4,3],[7,0]]
Output: [[1,null],null,[4,3],[7,0]]
Explanation: The original binary tree is [1,null,4,7].
The random pointer of node one is null, so it is represented as [1, null].
The random pointer of node 4 is node 7, so it is represented as [4, 3] where 3 is the index of node 7 in the array representing the tree.
The random pointer of node 7 is node 1, so it is represented as [7, 0] where 0 is the index of node 1 in the array representing the tree.
Example 2:
Input: root = [[1,4],null,[1,0],null,[1,5],[1,5]]
Output: [[1,4],null,[1,0],null,[1,5],[1,5]]
Explanation: The random pointer of a node can be the node itself.
Example 3:
Input: root = [[1,6],[2,5],[3,4],[4,3],[5,2],[6,1],[7,0]]
Output: [[1,6],[2,5],[3,4],[4,3],[5,2],[6,1],[7,0]]
Constraints:
The number of nodes in the tree is in the range [0, 1000].
1 <= Node.val <= 106
'''
from collections import deque
# Definition for Node.
# class Node:
# def __init__(self, val=0, left=None, right=None, random=None):
# self.val = val
# self.left = left
# self.right = right
# self.random = random
class Solution:
def __init__(self):
self.seen: dict = {None: None}
def bfs(self, root: 'Optional[Node]') -> 'Optional[NodeCopy]':
if not root:
return None
pending = deque()
pending.append(root)
self.seen[root] = NodeCopy(root.val)
while pending:
old_node = pending.popleft()
new_node = self.seen[old_node]
if old_node.left:
if not old_node.left in self.seen:
pending.append(old_node.left)
self.seen[old_node.left] = NodeCopy(old_node.left.val)
new_node.left = self.seen[old_node.left]
if old_node.right:
if not old_node.right in self.seen:
pending.append(old_node.right)
self.seen[old_node.right] = NodeCopy(old_node.right.val)
new_node.right = self.seen[old_node.right]
if old_node.random:
if not old_node.random in self.seen:
pending.append(old_node.random)
self.seen[old_node.random] = NodeCopy(old_node.random.val)
new_node.random = self.seen[old_node.random]
return self.seen[root]
def copyRandomBinaryTree(self, root: 'Optional[Node]') -> 'Optional[NodeCopy]':
new_root = self.bfs(root)
return new_root
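# Usage sketch (comment only, since Node and NodeCopy are supplied by the
# LeetCode harness rather than defined in this file):
# root = Node(1, left=Node(4))
# root.left.random = root                       # random pointer back to root
# copy = Solution().copyRandomBinaryTree(root)
# copy.left.random is copy                      # True: randoms map into the copy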
|
loganyu/leetcode
|
problems/1485_clone_binary_tree_with_random_pointer.py
|
1485_clone_binary_tree_with_random_pointer.py
|
py
| 3,196 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25945686355
|
#!/usr/bin/env python
# coding=utf-8
# author = ruiruige
# email = [email protected]
import web
from jx3wj.common.rest.rest_base import resources
from jx3wj.common.rest.dto.dto import deco_dump_to_str
from jx3wj.common.log import log as logging
from jx3wj.common.db.crud import select
from jx3wj.common.db.do.item import Item
from jx3wj.common.db.do.base_do import Base_do
from jx3wj.common.utils.web_utils import not_found_utils
from jx3wj.common.utils.web_utils import redirect_utils
from jx3wj.common.rest.response_control import assemble_response
from jx3wj.mgmt.items import items_view
LOG = logging.getLogger(__name__)
# These two blocks come first so they can be referenced below
# URLs handled by the REST API
api_urls = (
"/items", "items"
)
# Sub-application for the API
api_app = web.application(api_urls, locals())
# URL entry points
urls = (
    # Order matters: otherwise a request like "/api/items" would be routed to "/" instead of "/api"
"/api", api_app,
"(.*)", not_found_utils.not_found_app,
"/", items_view.app,
"", redirect_utils.add_backslash_app,
)
class reitems(object):
def GET(self):
raise web.seeother('/')
class items(resources):
@assemble_response
@Base_do.deco_sqlalchemy_obj_to_dict
@resources.manage_rest_api()
def GET(self):
rst = select(cls=Item)
return rst
def before_response(self, session=None):
"""Do some preparations before response to the REST request.
inherited from super, This function is run before the doGet doPost etc is run.
:see: super.before_response
:raises: None
"""
cls_name = self.__class__.__name__
        # Class initialization order follows the MRO (Method Resolution Order)
super(items, self).__init__()
LOG.debug("running before_response of class : %s" % cls_name)
# Entry-point application
app = web.application(urls, locals())
|
ruiruige/myifttt
|
myifttt/mgmt/items/items.py
|
items.py
|
py
| 1,893 |
python
|
en
|
code
| 1 |
github-code
|
6
|
5834659983
|
from rick.filter import registry as filter_registry, Filter
import inspect
TYPE_FIELD = 1
TYPE_RECORD = 2
TYPE_RECORDSET = 3
def field(**kwargs):
"""
Spec wrapper for Field
:param type: str field type
:param label: str field label
:param value: optional predefined value
:param required: bool required
:param readonly: bool readonly
:param validators: string|dict validators
:param error: optional custom error message
:param select: optional select value list
:param filter: optional filter
:param attributes: dict optional attributes
:param options: dict extra options
:return: dict
"""
kwargs["cls"] = Field
kwargs["_type"] = TYPE_FIELD
return kwargs
def record(cls, required=False, error=None):
"""
Spec wrapper for a record
:param cls: record class
:param required: if the field is mandatory
:return: dict
"""
return {
"_type": TYPE_RECORD,
"cls": cls,
"validators": "required|dict" if required else "dict",
"error": error,
}
def recordset(cls, required=False, error=None):
"""
Spec wrapper for a list of records
:param cls: record class
:param required: if the field is mandatory
:return: dict
"""
return {
"_type": TYPE_RECORDSET,
"cls": cls,
"validators": "required|list" if required else "list",
"error": error,
}
class Field:
def __init__(
self,
type="",
label="",
value=None,
required=False,
readonly=False,
validators="",
error=None,
select: list = None,
filter=None,
attributes: dict = None,
options: dict = None,
bind: str = None,
):
"""
Field Constructor
:param type: str field type
:param label: str field label
:param value: optional predefined value
:param required: bool required
:param readonly: bool readonly
:param validators: string|dict validators
:param error: optional custom error message
:param select: optional select value list
:param filter: optional filter
:param attributes: dict optional attributes
:param options: dict extra options
:param bind: optional bind name for data elements
"""
if select is None:
select = []
if attributes is None:
attributes = {}
if options is None:
options = {}
# Field attributes
self.type = type
self.label = label
self.value = value
self.required = required
self.readonly = readonly
self.validators = validators
self.error_message = error
self.select = select
self.filter = filter
self.attributes = attributes
self.options = options
self.bind = bind
# pass direct read-only mapping to options
if self.readonly:
self.options["readonly"] = True
# pass direct options read-only to main scope
if "readonly" in self.options.keys():
self.readonly = self.options["readonly"]
# fetch/build filter object if any
if self.filter is not None:
if isinstance(self.filter, str):
# self.filter has a filter name, use it to fetch the object
if not filter_registry.has(self.filter):
raise ValueError("Invalid filter name '{}'".format(self.filter))
self.filter = filter_registry.get(self.filter)
elif inspect.isclass(self.filter):
# build object
self.filter = self.filter()
if not isinstance(self.filter, Filter):
raise ValueError("Field filter must be either a string or a class")
if self.required:
# add required validator
if len(self.validators) == 0:
self.validators = {"required": None}
else:
if isinstance(self.validators, str):
self.validators = "required|" + self.validators
elif isinstance(self.validators, dict):
self.validators["required"] = None
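# A minimal usage sketch: a required field gets the "required" validator
# merged into its validator spec automatically.
if __name__ == "__main__":
    f = Field(type="text", label="Name", required=True)
    print(f.validators)  # {'required': None}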
|
oddbit-project/rick
|
rick/form/field.py
|
field.py
|
py
| 4,282 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5086805014
|
DOCUMENTATION = '''
---
module: cisco_asa_network_objectgroup
author: Patrick Ogenstad (@networklore)
version: 1.0
short_description: Creates deletes or edits network object-groups.
description:
- Configures network object-groups
requirements:
- rasa
options:
category:
description:
- The type of object you are creating. Use slash notation for networks, i.e. 192.168.0.0/24. Use - for ranges, i.e. 192.168.0.1-192.168.0.10.
choices: [ 'ipv4_address', 'ipv6_address', 'ipv4_subnet', 'ipv6_subnet', 'ipv4_range', 'ipv6_range', 'ipv4_fqdn', 'ipv6_fqdn', 'object', 'object_group' ]
required: false
description:
description:
- Description of the object
required: false
entry_state:
description:
- State of the entire object-group
choices: [ 'present', 'absent' ]
required: false
host:
description:
- Typically set to {{ inventory_hostname }}
required: true
members:
description:
- NOT YET IMPLEMENTED Variable containing all the objects within the network object-group
required: false
name:
description:
- Name of the network object
required: true
password:
description:
- Password for the device
required: true
state:
description:
- State of the entire object-group
choices: [ 'present', 'absent' ]
required: true
username:
description:
- Username for device
required: true
validate_certs:
description:
- If no, SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
choices: [ 'no', 'yes']
default: 'yes'
required: false
value:
description:
- The data to enter into the network object
required: false
'''
EXAMPLES = '''
# Create a network object for a web server
- cisco_asa_network_object:
host={{ inventory_hostname }}
username=api_user
password=APIpass123
name=tsrv-web-1
state=present
category=IPv4Address
description='Test web server'
value='10.12.30.10'
validate_certs=no
# Remove test webserver
- cisco_asa_network_object:
host={{ inventory_hostname }}
username=api_user
password=APIpass123
name=tsrv-web-2
state=absent
validate_certs=no
'''
import sys
from ansible.module_utils.basic import *
from collections import defaultdict
try:
from rasa import ASA
has_rasa = True
except:
has_rasa = False
object_kind = {
'ipv4_address': 'IPv4Address',
'ipv6_address': 'IPv6Address',
'ipv4_subnet': 'IPv4Network',
'ipv6_subnet': 'IPv6Network',
'ipv4_range': 'IPv4Range',
'ipv6_range': 'IPv6Range',
'ipv4_fqdn': 'IPv4FQDN',
'ipv6_fqdn': 'IPv6FQDN',
'object': 'objectRef#NetworkObj',
'object_group': 'objectRef#NetworkObjGroup'
}
object_kind_type = {
'ipv4_address': 'value',
'ipv6_address': 'value',
'ipv4_subnet': 'value',
'ipv6_subnet': 'value',
'ipv4_range': 'value',
'ipv6_range': 'value',
'object': 'objectId',
'object_group': 'objectId',
}
def add_object(dev, module, net_object, member_data):
try:
result = dev.add_member_networkobjectgroup(net_object,[member_data])
except:
err = sys.exc_info()[0]
module.fail_json(msg='Unable to connect to device: %s' % err)
if result.status_code != 204:
module.fail_json(msg='Unable to add object - %s' % result.status_code)
return True
def create_object(dev, module, desired_data):
try:
result = dev.create_networkobjectgroup(desired_data)
except:
err = sys.exc_info()[0]
module.fail_json(msg='Unable to connect to device: %s' % err)
if result.status_code == 201:
return_status = True
else:
module.fail_json(msg='Unable to create object - %s' % result.status_code)
return return_status
def delete_object(dev, module, name):
try:
result = dev.delete_networkobjectgroup(name)
except:
err = sys.exc_info()[0]
module.fail_json(msg='Unable to connect to device: %s' % err)
if result.status_code == 204:
return_status = True
else:
module.fail_json(msg='Unable to delete object - %s' % result.status_code)
return return_status
def find_member(current_data, desired_data, module):
member_exists = False
for member in current_data['members']:
if member == desired_data:
member_exists = True
return member_exists
def main():
module = AnsibleModule(
argument_spec=dict(
host=dict(required=True),
username=dict(required=True),
password=dict(required=True),
members=dict(required=False),
name=dict(required=True),
entry_state=dict(required=False, choices=['absent', 'present']),
description=dict(required=False),
state=dict(required=True, choices=['absent', 'present']),
category=dict(required=False, choices=[ 'ipv4_address', 'ipv6_address', 'ipv4_subnet', 'ipv6_subnet', 'ipv4_range', 'ipv6_range', 'ipv4_fqdn', 'ipv6_fqdn', 'object', 'object_group' ]),
validate_certs=dict(required=False, choices=['no', 'yes'], default='yes'),
value=dict(required=False)
),
required_together = (
['category','entry_state','value'],
),
mutually_exclusive=(['category', 'members'],),
supports_check_mode=False)
m_args = module.params
if not has_rasa:
module.fail_json(msg='Missing required rasa module (check docs)')
if m_args['validate_certs'] == 'yes':
validate_certs = True
else:
validate_certs = False
dev = ASA(
device=m_args['host'],
username=m_args['username'],
password=m_args['password'],
verify_cert=validate_certs
)
desired_data = {}
desired_data['name'] = m_args['name']
if m_args['description']:
desired_data['description'] = m_args['description']
member_data = {}
if m_args['entry_state']:
member_data['kind'] = object_kind[m_args['category']]
kind_type = object_kind_type[m_args['category']]
member_data[kind_type] = m_args['value']
if kind_type == 'objectId':
if m_args['category'] == 'object_group':
ref_link = 'https://%s/api/objects/networkobjectgroups/%s' % (m_args['host'], m_args['value'])
else:
ref_link = 'https://%s/api/objects/networkobjects/%s' % (m_args['host'], m_args['value'])
member_data['refLink'] = ref_link
desired_data['members'] = [member_data]
if m_args['members']:
pass
try:
data = dev.get_networkobjectgroup(m_args['name'])
except:
err = sys.exc_info()[0]
module.fail_json(msg='Unable to connect to device: %s' % err)
if data.status_code == 200:
if m_args['state'] == 'absent':
changed_status = delete_object(dev, module, m_args['name'])
elif m_args['state'] == 'present' and m_args['entry_state']:
change_description = False
if m_args['description']:
current_data = data.json()
try:
if m_args['description'] == current_data['description']:
change_description = False
else:
change_description = True
except:
change_description = True
found = find_member(data.json(), member_data, module)
if found and m_args['entry_state'] == 'present':
changed_status = False
elif found and m_args['entry_state'] == 'absent':
changed_status = remove_object(dev, module, m_args['name'], member_data)
elif m_args['entry_state'] == 'present':
changed_status = add_object(dev, module, m_args['name'], member_data)
elif m_args['entry_state'] == 'absent':
changed_status = False
if change_description:
changed_status = modify_description(dev, module, m_args['name'],m_args['description'])
elif m_args['state'] == 'present' and m_args['members']:
module.fail_json(msg='This feature is eagerly awaiting to be developed')
else:
#Remove after members are implemented
module.fail_json(msg='Unknown error check arguments')
elif data.status_code == 401:
module.fail_json(msg='Authentication error')
elif data.status_code == 404:
if m_args['state'] == 'absent':
changed_status = False
elif m_args['state'] == 'present':
changed_status = create_object(dev, module, desired_data)
else:
module.fail_json(msg="Unsupported return code %s" % data.status_code)
return_msg = {}
return_msg['changed'] = changed_status
module.exit_json(**return_msg)
def modify_description(dev, module, net_object, description):
data = {}
data['description'] = description
try:
result = dev.update_networkobjectgroup(net_object, data)
except:
err = sys.exc_info()[0]
module.fail_json(msg='Unable to connect to device: %s' % err)
if result.status_code != 204:
module.fail_json(msg='Unable to change description - %s' % result.status_code)
return True
def remove_object(dev, module, net_object, member_data):
try:
result = dev.remove_member_networkobjectgroup(net_object,[member_data])
except:
err = sys.exc_info()[0]
module.fail_json(msg='Unable to connect to device: %s' % err)
if result.status_code != 204:
module.fail_json(msg='Unable to remove object - %s' % result.status_code)
return True
def update_object(dev, module, desired_data):
try:
result = dev.update_networkobject(desired_data['name'], desired_data)
except:
err = sys.exc_info()[0]
module.fail_json(msg='Unable to connect to device: %s' % err)
if result.status_code == 204:
return_status = { 'changed': True }
else:
module.fail_json(msg='Unable to update object code: - %s' % result.status_code)
return return_status
main()
|
networklore/ansible-cisco-asa
|
library/cisco_asa_network_objectgroup.py
|
cisco_asa_network_objectgroup.py
|
py
| 10,509 |
python
|
en
|
code
| 30 |
github-code
|
6
|
31365864132
|
from propagator import scheduler
from propagator import Propagator, Cell
from propagator.primitives import *
from propagator.content.interval import Interval
from propagator.content.supported import Supported
from propagator.decorators import compound
from propagator.logging import debug, warn, error, info
"""
A propagator network that calculates the approximated height of a
building from measurements made using a barometer, a stopwatch and a
ruler. Each subsequent measurement enhances the accuracy of the answer,
and also propagates back and enhances the other initial measurements.
It uses the `propagator.content.supported.Supported` class as content for the
`Cell` objects. The cells then can track the provenance of its data,
based on the supports given when new content is added or merged into
them.
This example is present (as Scheme code) in section 6 of The Art of the
Propagator, "Dependencies".
"""
def product(x, y, total):
multiplier(x, y, total)
divider(total, x, y)
divider(total, y, x)
def quadratic(x, x_to_2):
squarer(x, x_to_2)
sqrter(x_to_2, x)
def similar_triangles(s_ba, h_ba, s, h):
@compound(neighbors=[s_ba, h_ba, s, h])
def similar_triangles_helper():
ratio = Cell('ratio')
product(s_ba, ratio, h_ba)
product(s, ratio, h)
return similar_triangles_helper
def fall_duration(t, h):
@compound(neighbors=[t])
def fall_duration_helper():
g = Cell('g')
one_half = Cell('one half')
t_to_2 = Cell('t^2')
g_times_t_to_2 = Cell('gt^2')
(constant(Interval(9.789, 9.832)))(g)
(constant(Interval(0.5, 0.5)))(one_half)
quadratic(t, t_to_2)
product(g, t_to_2, g_times_t_to_2)
product(one_half, g_times_t_to_2, h)
return fall_duration_helper
if __name__ == '__main__':
scheduler.initialize()
# We now build a sequence of sample dependency tracking systems of
# increasing complexity. We start with a relatively simple system
# that only tracks and reports the provenance of its data.
#
# How do we want our provenance system to work? We can make cells
# and define networks as usual, but if we add supported values as inputs,
# we get supported values as outputs:
barometer_height = Cell('barometer height')
barometer_shadow = Cell('barometer shadow')
building_height = Cell('building height')
building_shadow = Cell('building shadow')
similar_triangles(barometer_shadow, barometer_height, building_shadow, building_height)
building_shadow.add_content(Supported(Interval(54.9, 55.1), ['shadows']))
barometer_height.add_content(Supported(Interval(0.3, 0.32), ['shadows']))
barometer_shadow.add_content(Supported(Interval(0.36, 0.37), ['shadows']))
scheduler.run()
print(building_height.content)
# Supported(Interval(44.51351351351351, 48.977777777777774), {'shadows'})
# Indeed, our estimate for the height of the building depends on our
# measurements of the barometer and the shadow. We can try
# dropping the barometer off the roof, but if we do a bad job of
# timing its fall, our estimate won’t improve.
fall_time = Cell('fall time')
fall_duration(fall_time, building_height)
fall_time.add_content(Supported(Interval(2.9, 3.3), {'lousy fall time'}))
scheduler.run()
print(building_height.content)
    # Supported(Interval(44.51351351351351, 48.977777777777774), {'shadows'})
# What’s more, the dependency tracker tells us that it was a lousy timing
# job, because the resulting answer doesn’t actually depend on the fall
# timing measurement. If we do it better, then we can get a finer estimate,
# which then will depend on the improved fall timing measurement.
fall_time.add_content(Supported(Interval(2.9, 3.1), {'better fall time'}))
scheduler.run()
print(building_height.content)
# Supported(Interval(44.51351351351351, 47.24276000000001), {'shadows', 'better fall time'})
# If we then give a barometer to the superintendent, we can watch the
# superintendent’s information supercede and obsolesce the results of our
# measurements...
building_height.add_content(Supported(45, {'superintendent'}))
scheduler.run()
print(building_height.content)
# Supported(45, {'superintendent'})
# ...and see which of the measurements themselves we can infer more about
# based on the now known height of the building.
print(barometer_height.content)
#Supported(Interval(0.3, 0.30327868852459017), {'shadows', 'better fall time', 'superintendent'})
print(barometer_shadow.content)
#Supported(Interval(0.366, 0.37), {'shadows', 'better fall time', 'superintendent'})
print(building_shadow.content)
#Supported(Interval(54.9, 55.1), {'shadows'})
# [Translator note] This result is different from the paper. See the
# Translation notes in the README.
print(fall_time.content)
#Supported(Interval(3.025522031629098, 3.0321598338046556), {'superintendent'})
|
duasfl8r/propagator.py
|
examples/dependencies.py
|
dependencies.py
|
py
| 5,062 |
python
|
en
|
code
| 32 |
github-code
|
6
|
39607784723
|
import os
import logging
from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from core import batch_loader
from core.management.commands import configure_logging
configure_logging('process_coordinates_logging.config',
'process_coordinates_%s.log' % os.getpid())
_logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Process word coordinates for a batch by name from a batch list file"
def add_arguments(self, parser):
# Positional arguments
parser.add_argument('batch_list_filename')
def handle(self, batch_list_filename, *args, **options):
if len(args)!=0:
            raise CommandError('Usage is process_coordinates <batch_list_filename>')
loader = batch_loader.BatchLoader()
batch_list = open(batch_list_filename)
_logger.info("batch_list_filename: %s" % batch_list_filename)
for line in batch_list:
batch_name = line.strip()
_logger.info("batch_name: %s" % batch_name)
parts = batch_name.split("_")
if len(parts)==4:
loader.process_coordinates(batch_name, strict=False)
else:
_logger.warning("invalid batch name '%s'" % batch_name)
|
open-oni/open-oni
|
core/management/commands/process_coordinates.py
|
process_coordinates.py
|
py
| 1,291 |
python
|
en
|
code
| 43 |
github-code
|
6
|
27064968458
|
from .common import *
import os
DEBUG = True
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR,"media/")
STATIC_ROOT = os.path.join(BASE_DIR,"sfiles/")
STATICFILES_DIRS = (os.path.join(BASE_DIR,"static/"),)
TIME_ZONE = "Asia/Kolkata"
ALLOWED_HOSTS = ["127.0.0.1","admin.as.com","api.as.com","192.168.0.6","192.168.0.36",]
INTERNAL_IPS = ALLOWED_HOSTS
USE_L10N = True
#DATABASE SETTINGS
DATABASES = {
'default': {
"ENGINE": 'django.db.backends.postgresql',
"NAME": "efs",
"USERNAME": "anton",
"PASSWORD": "bucho",
"ATOMIC_REQUEST":True,
"HOST": "",
"PORT": "",
}
}
FCM_DJANGO_SETTINGS = {
"APP_VERBOSE_NAME": "EFS",
# default: _('FCM Django')
"FCM_SERVER_KEY": "AAAAnY6Y7pE:APA91bE3n7_eU6kB2KYk_DkBuvbN_p4no_4CzjKn1zJhwJVCNh5tIJERAW18euVJJijUeIccajg3CSYWWi_e-Jip3aWyr3nHgHLUN1iEKxpKGPWh8FuDaBv-vVWnXs86NEqwkXBoQEgY",
# true if you want to have only one active device per registered user at a time
# default: False
"ONE_DEVICE_PER_USER": True,
# devices to which notifications cannot be sent,
# are deleted upon receiving error response from FCM
# default: False
"DELETE_INACTIVE_DEVICES": True,
}
#REST FRAMEWORK settings
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.TokenAuthentication',
#'firebase_auth.authentication.FirebaseToken.FokenAuthentication',
),
'DEFAULT_FILTER_BACKENDS': (
#'django_filters.rest_framework.DjangoFilterBackend',
),
"DEFAULT_PAGINATION_CLASS":"utils.pagination.CustomPagination",
"PAGE_SIZE":50,
#"DATE_INPUT_FORMATS": ["%Y-%m-%d"],
}
REST_AUTH_SERIALIZERS = {
#'TOKEN_SERIALIZER':'modules.acc_api.serializer.TokenSerializer',
'USER_DETAILS_SERIALIZER':'account.serializer.RestAuthSerializer',
}
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
        'LOCATION': 'unix:///var/run/redis/redis.sock',
#'LOCATION':"redis://134.209.149.129:6379/1"
},
}
SESSION_ENGINE = 'redis_sessions.session'
SESSION_REDIS_UNIX_DOMAIN_SOCKET_PATH = "/var/run/redis/redis.sock"
AUTH_USER_MODEL = "account.User"
# ROOT_URLCONF = 'src.urls'
# ROOT_HOSTCONF = 'src.hosts'
# DEFAULT_HOST = 'www'
# MIDDLEWARE += [
# #'django_hosts.middleware.HostsRequestMiddleware',
# #'django_hosts.middleware.HostsResponseMiddleware',
# ]
from .urls import *
API_KEY_CUSTOM_HEADER = "HTTP_X_API_KEY"
CORS_ALLOW_ALL_ORIGINS = True
CKEDITOR_UPLOAD_PATH = "uploads/"
CKEDITOR_CONFIGS = {
'default': {
# 'skin': 'moono',
# 'skin': 'office2013',
'toolbar_Basic': [
['Source', '-', 'Bold', 'Italic']
],
'toolbar_YourCustomToolbarConfig': [
{'name': 'document', 'items': [
'Source', '-', 'Save', 'NewPage', 'Preview', 'Print', '-', 'Templates'
]},
{'name': 'clipboard', 'items': [
'Cut', 'Copy', 'Paste', 'PasteText', 'PasteFromWord', '-', 'Undo', 'Redo'
]},
{'name': 'editing', 'items': ['Find', 'Replace', '-', 'SelectAll']},
{'name': 'forms',
'items': [
'Form', 'Checkbox', 'Radio', 'TextField', 'Textarea',
'Select', 'Button', 'ImageButton', 'HiddenField'
]},
'/',
{'name': 'basicstyles',
'items': [
'Bold', 'Italic', 'Underline', 'Strike', 'Subscript',
'Superscript', '-', 'RemoveFormat'
]},
{'name': 'paragraph',
'items': [
'NumberedList', 'BulletedList', '-', 'Outdent', 'Indent',
'-', 'Blockquote', 'CreateDiv', '-', 'JustifyLeft',
'JustifyCenter', 'JustifyRight', 'JustifyBlock', '-',
'BidiLtr', 'BidiRtl', 'Language'
]},
{'name': 'links', 'items': ['Link', 'Unlink', 'Anchor']},
{'name': 'insert',
'items': [
'Image', 'Table', 'HorizontalRule',
'Smiley', 'SpecialChar', 'PageBreak', 'Iframe'
]},
'/',
{'name': 'styles', 'items': ['Styles', 'Format', 'Font', 'FontSize']},
{'name': 'colors', 'items': ['TextColor', 'BGColor']},
{'name': 'tools', 'items': ['Maximize', 'ShowBlocks']},
{'name': 'about', 'items': ['About']},
'/', # put this to force next toolbar on new line
{'name': 'yourcustomtools', 'items': [
# put the name of your editor.ui.addButton here
'Preview',
'Maximize',
]},
],
'toolbar': 'YourCustomToolbarConfig', # put selected toolbar config here
# 'toolbarGroups': [{ 'name': 'document', 'groups': [ 'mode', 'document', 'doctools' ] }],
# 'height': 291,
# 'width': '100%',
# 'filebrowserWindowHeight': 725,
# 'filebrowserWindowWidth': 940,
# 'toolbarCanCollapse': True,
# 'mathJaxLib': '//cdn.mathjax.org/mathjax/2.2-latest/MathJax.js?config=TeX-AMS_HTML',
'tabSpaces': 4,
'extraPlugins': ','.join([
'uploadimage', # the upload image feature
# your extra plugins here
'div',
'autolink',
'autoembed',
'embedsemantic',
'autogrow',
# 'devtools',
'widget',
'lineutils',
'clipboard',
'dialog',
'dialogui',
'elementspath'
]),
}
}
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_HOST = "smtp.gmail.com"
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST_USER = "[email protected]"
EMAIL_HOST_PASSWORD = "esf@#2022"
# EMAIL_HOST_USER = "[email protected]"
# EMAIL_HOST_PASSWORD = "Delhi@#2021"
DEFAULT_AUTO_EMAIL = "[email protected]"
|
digivaarta/efs_backend
|
src/settings/dev.py
|
dev.py
|
py
| 5,762 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37681986889
|
# from IPython.display import clear_output
# Starting game
def play_game():
play = None
while play not in ['Y', 'N']:
play = input('Play Game? (Y / N) ').upper()
if play == 'Y': return True
return False
# Players selecting symbols to play X or O
def select_symbol():
# clear_output()
print(chr(27)+'[2j')
print('\033c')
print('\x1bc')
symbol = None
while symbol not in ['X', 'O']:
symbol = input('Player1, please select X or O: ').upper()
print(f'Player1 plays with {symbol}')
if symbol == 'X': return {1:'X', 2:'O'}
return {1:'O', 2:'X'}
# Printing game board
def display_board(board):
# clear_output()
print(chr(27)+'[2j')
print('\033c')
print('\x1bc')
for i in board:
print(i)
# Game move
def move(board, player):
    available = [i for i in check_dict if check_dict[i] not in ['X', 'O']]
    cell = None
    while cell not in available:
        entry = input(f'Player {player}, please select a cell {available}: ')
        if entry.isdigit():
            cell = int(entry)
for i in range(0,3):
for j in range(0,3):
if board[i][j] == cell: board[i][j] = player_symbol[player]
check_dict[cell] = player_symbol[player]
display_board(board)
if player == 1: player = 2
else: player = 1
return {'board': board, 'player': player}
# Game check (continue or game over)
def game_check(check_dict, player):
if player == 1: player = 2
else: player = 1
combinations = [(1,2,3), (4,5,6), (7,8,9),
(1,4,7), (2,5,8), (3,6,9),
(1,5,9), (3,5,7)]
for i in combinations:
res = ''
for j in i:
res = res + check_dict[j]
if res == 'XXX' or res == 'OOO':
print(f'Player {player} won!')
return False
if len(list(filter(lambda i: i == '', check_dict.values()))) == 0:
print('Nobody won! Play again!')
return False
return True
# GAME FLOW
while play_game():
player = 1
board = [[1,2,3],
[4,5,6],
[7,8,9]]
player_symbol = select_symbol()
check_dict = dict((i, '') for i in range(1,10))
display_board(board)
while game_check(check_dict, player):
res = move(board, player)
board = res['board']
player = res['player']
|
paul-b19/python-tic-tac-toe
|
tic_tac_toe.py
|
tic_tac_toe.py
|
py
| 2,279 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20594488232
|
import torch
import torch.nn as nn
def domain_loss(visual_domain_logits, textual_domain_logits):
criterion = nn.CrossEntropyLoss()
batch_size = visual_domain_logits.shape[0]
visual_domain_labels = torch.zeros(batch_size).long().cuda()
textual_domain_labels = torch.ones(batch_size).long().cuda()
loss = criterion(visual_domain_logits, visual_domain_labels) + criterion(
textual_domain_logits, textual_domain_labels
)
return loss
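# A minimal usage sketch (the batch size is an assumption; a CUDA device is
# required because domain_loss builds its labels with .cuda()):
if __name__ == "__main__":
    visual = torch.randn(8, 2).cuda()    # logits from a modality discriminator
    textual = torch.randn(8, 2).cuda()
    print(domain_loss(visual, textual))  # scalar loss tensor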
|
CCNU-DigitalLibrary/CCNU-DigitalLibrary
|
MCM-HC/lib/models/losses/domain_loss.py
|
domain_loss.py
|
py
| 467 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15892112680
|
from argparse import ArgumentParser
import os
import logging
import yaml
import gc
import torch
from probing.inference import Inference
class NotAnExperimentDir(ValueError):
pass
def find_last_model(experiment_dir):
model_pre = os.path.join(experiment_dir, 'model')
if os.path.exists(model_pre):
return model_pre
saves = filter(lambda f: f.startswith(
'model.epoch_'), os.listdir(experiment_dir))
last_epoch = max(saves, key=lambda f: int(f.split("_")[-1]))
return os.path.join(experiment_dir, last_epoch)
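# Example: with saves ['model.epoch_3', 'model.epoch_12'] and no plain 'model'
# file, the int() sort key picks 'model.epoch_12' (numeric, not lexical, order).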
def find_in_out_file_name(experiment_dir, prefix='test'):
cfg = os.path.join(experiment_dir, 'config.yaml')
if not os.path.exists(cfg):
raise NotAnExperimentDir(f"{cfg} does not exist")
with open(cfg) as f:
train_fn = yaml.load(f, Loader=yaml.FullLoader)['train_file']
inf = train_fn.replace('/train', f'/{prefix}')
outf = os.path.join(experiment_dir, f'{prefix}.out')
accf = os.path.join(experiment_dir, f'{prefix}.word_accuracy')
return inf, outf, accf
def skip_dir(experiment_dir, test_out):
if not os.path.exists(test_out):
return False
model_fn = find_last_model(experiment_dir)
return os.path.getmtime(model_fn) < os.path.getmtime(test_out)
def compute_accuracy(reference, prediction):
acc = 0
samples = 0
with open(reference) as r, open(prediction) as p:
for rline in r:
try:
pline = next(p)
except StopIteration:
logging.error(f"Prediction file {prediction} shorter "
f"than reference {reference}")
return acc / samples
if not rline.strip() and not pline.strip():
continue
rlabel = rline.rstrip("\n").split("\t")[-1]
plabel = pline.rstrip("\n").split("\t")[-1]
acc += (rlabel == plabel)
samples += 1
return acc / samples
def parse_args():
p = ArgumentParser()
p.add_argument("experiment_dirs", nargs="+", type=str,
help="Experiment directory")
p.add_argument("--run-on-dev", action="store_true")
p.add_argument("--run-on-test", action="store_true")
p.add_argument("--max-samples", default=None, type=int)
return p.parse_args()
def main():
args = parse_args()
for experiment_dir in args.experiment_dirs:
if not os.path.isdir(experiment_dir):
logging.info(f"{experiment_dir} not directory, skipping")
continue
if args.run_on_test:
try:
test_in, test_out, test_acc = find_in_out_file_name(experiment_dir, 'test')
if not skip_dir(experiment_dir, test_out):
logging.info(f"Running inference on {experiment_dir}")
inf = Inference(experiment_dir, test_in, max_samples=args.max_samples)
with open(test_out, 'w') as f:
inf.run_and_print(f)
acc = compute_accuracy(test_in, test_out)
logging.info(f"{experiment_dir} test acc: {acc}")
with open(test_acc, 'w') as f:
f.write(f"{acc}\n")
gc.collect()
torch.cuda.empty_cache()
except NotAnExperimentDir:
logging.info(f"{experiment_dir}: no config.yaml, skipping")
if args.run_on_dev:
try:
dev_in, dev_out, dev_acc = find_in_out_file_name(experiment_dir, 'dev')
if not skip_dir(experiment_dir, dev_out):
inf = Inference(experiment_dir, dev_in, max_samples=args.max_samples)
with open(dev_out, 'w') as f:
inf.run_and_print(f)
acc = compute_accuracy(dev_in, dev_out)
logging.info(f"{experiment_dir} dev acc: {acc}")
with open(dev_acc, 'w') as f:
f.write(f"{acc}\n")
gc.collect()
torch.cuda.empty_cache()
except NotAnExperimentDir:
logging.info(f"{experiment_dir}: no config.yaml, skipping")
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
main()
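# Example invocation (added note; paths are illustrative): evaluate every
# experiment directory on both splits, capping the number of samples:
#   python batch_inference.py experiments/* --run-on-dev --run-on-test --max-samples 1000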
|
juditacs/probing
|
src/probing/batch_inference.py
|
batch_inference.py
|
py
| 4,385 |
python
|
en
|
code
| 3 |
github-code
|
6
|
26466276605
|
"""
This file contains Arm interface and its implemented classes.
Users (typically Bandits) interact with Arms through the pull() method, which:
- returns a reward value
- advances state of the arm's parameters (if valid)
"""
import numpy as np
class Arm:
""" A bandit arm
Should keep track of the internal state of the arm
and return the appropriate reward when pulled.
"""
def __init__(self, **kwargs):
raise NotImplementedError
@property
def name(self):
raise NotImplementedError
def pull(self):
""" Pull the arm
Pulls the bandit arm, returns a double representing the
award, and advances internal state (if any).
"""
raise NotImplementedError
class WhiteNoiseArm(Arm):
def __init__(self, name, rng):
self.__name = name
self._rng = rng
@property
def name(self):
return self.__name
def pull(self):
return self._rng()
class BernoulliArm(WhiteNoiseArm):
"""Generates iid observations from a Bernoulli white noise"""
def __init__(self, prob):
WhiteNoiseArm.__init__(self,
name='bernoulli_arm',
rng=lambda: np.random.binomial(n=1, p=prob))
self.prob = prob
class GaussianArm(WhiteNoiseArm):
"""Generates iid observations from a Gaussian white noise"""
def __init__(self, mu, sigma):
WhiteNoiseArm.__init__(self,
name='gaussian_arm',
rng=lambda: np.random.normal(loc=mu, scale=sigma))
self.mu = mu
self.sigma = sigma
class LinearInterpolationArm(Arm):
""" Linear interpolation arm
"""
def __init__(self, means, periods, iteration, noise_func=None, **kwargs):
self.__name = "lin_interp_arm"
self.num_periods = len(means)
self.means = means
self.iteration = iteration
self.periods = periods
if noise_func is None:
self.noise_func = lambda mean: np.random.normal(loc=mean)
else:
self.noise_func = noise_func
if np.size(periods) != self.num_periods:
raise ValueError("periods not correct size")
return
@property
def name(self):
return self.__name
def pull(self):
iter_to_end_period = self.iteration % np.sum(self.periods)
end_period = 0
while (iter_to_end_period >= 0):
iter_to_end_period -= self.periods[end_period]
end_period += 1
start_period = end_period - 1
end_period = end_period % self.num_periods
start_frac = np.abs(iter_to_end_period) / self.periods[start_period]
arm_mean = (
start_frac * self.means[start_period] +
(1.0 - start_frac) * self.means[end_period]
)
reward = self.noise_func(arm_mean)
self.iteration += 1
return reward
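# Usage sketch (added example, not part of the original module): estimate the mean
# reward of a BernoulliArm by pulling it repeatedly; the pull count is arbitrary.
if __name__ == "__main__":
    arm = BernoulliArm(prob=0.3)
    rewards = [arm.pull() for _ in range(10000)]
    print(arm.name, np.mean(rewards))  # should land close to 0.3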
|
solstat/bandits
|
arm.py
|
arm.py
|
py
| 2,980 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3989607811
|
from typing import Any, Dict, List, Self, Union
from attrs import define as _attrs_define
from attrs import field as _attrs_field
from ..constants.trading import (
ConditionalCloseOrderType,
OrderType,
SelfTradePreventionStrategy,
TimeInForce,
Trigger,
TypeOrder,
)
from ..security import get_nonce
from ..types import UNSET, Unset
@_attrs_define
class AddStandardOrderRequestBody:
"""
Attributes:
nonce (int): Nonce used in construction of `API-Sign` header. Default `../security.get_nonce`
ordertype (Ordertype): Order type
type (TypeOrder): Order direction (buy/sell)
volume (str): Order quantity in terms of the base asset
> Note: Volume can be specified as `0` for closing margin orders to automatically fill the requisite quantity.
pair (str): Asset pair `id` or `altname`
userref (Union[Unset, int]): User reference id
`userref` is an optional user-specified integer id that can be associated with any number of orders. Many
clients choose a `userref` corresponding to a unique integer id generated by their systems (e.g. a timestamp).
However, because we don't enforce uniqueness on our side, it can also be used to easily group orders by pair,
side, strategy, etc. This allows clients to more readily cancel or query information about orders in a
particular group, with fewer API calls by using `userref` instead of our `txid`, where supported.
displayvol (Union[Unset, str]): Used to create an iceberg order, this is the visible order quantity in terms of
the base asset. The rest of the order will be hidden, although the full `volume` can be filled at any time by
any order of that size or larger that matches in the order book. `displayvol` can only be used with the `limit`
order type, must be greater than `0`, and less than `volume`.
price (Union[Unset, str]): Price:
* Limit price for `limit` orders
* Trigger price for `stop-loss`, `stop-loss-limit`, `take-profit` and `take-profit-limit` orders
price2 (Union[Unset, str]): Secondary Price:
* Limit price for `stop-loss-limit` and `take-profit-limit` orders
> Note: Either `price` or `price2` can be preceded by `+`, `-`, or `#` to specify the order price as an offset
relative to the last traded price. `+` adds the amount to, and `-` subtracts the amount from the last traded
price. `#` will either add or subtract the amount to the last traded price, depending on the direction and order
type used. Relative prices can be suffixed with a `%` to signify the relative amount as a percentage.
trigger (Union[Unset, Trigger]): Price signal used to trigger `stop-loss`, `stop-
loss-limit`, `take-profit` and `take-profit-limit` orders
> Note: This `trigger` type will as well be used for associated conditional close orders.
Default: Trigger.LAST.
leverage (Union[Unset, str]): Amount of leverage desired (default: none)
reduce_only (Union[Unset, bool]): If `true`, order will only reduce a currently open position, not increase it
or open a new position.
stptype (Union[Unset, SelfTradePreventionStrategy]): Self trade prevention behavior definition:
* cancel-newest - if self trade is triggered, arriving order will be canceled
* cancel-oldest - if self trade is triggered, resting order will be canceled
* cancel-both - if self trade is triggered, both arriving and resting orders will be canceled
Default: SelfTradePreventionStrategy.CANCEL_NEWEST.
oflags (Union[Unset, str]): Comma delimited list of order flags
* `post` post-only order (available when ordertype = limit)
* `fcib` prefer fee in base currency (default if selling)
* `fciq` prefer fee in quote currency (default if buying, mutually exclusive with `fcib`)
* `nompp` disable [market price protection](https://support.kraken.com/hc/en-us/articles/201648183-Market-
Price-Protection) for market orders
* `viqc` order volume expressed in quote currency. This is supported only for market orders.
timeinforce (Union[Unset, TimeInForce]): Time-in-force of the order to specify how
long it should remain in the order book before being cancelled. GTC (Good-'til-cancelled) is default if the
parameter is omitted. IOC (immediate-or-cancel) will immediately execute the amount possible and cancel any
remaining balance rather than resting in the book. GTD (good-'til-date), if specified, must coincide with a
desired `expiretm`.
Default: TimeInForce.GTC.
starttm (Union[Unset, str]): Scheduled start time, can be specified as an absolute timestamp or as a number of
seconds in the future:
* `0` now (default)
* `<n>` = unix timestamp of start time
* `+<n>` = schedule start time `<n>` seconds from now
* Note that URL encoding of the `+` character changes it to a space, so please use `%2b` followed by the
number of seconds instead of `+`
expiretm (Union[Unset, str]): Expiration time, also can be specified as an absolute timestamp or as a number of
seconds in the future:
* `0` no expiration (default)
* `<n>` = unix timestamp of expiration time
* `+<n>` = expire `<n>` seconds from now, minimum 5 seconds
* Note that URL encoding of the `+` character changes it to a space, so please use `%2b` followed by the
number of seconds instead of `+`
closeordertype (Union[Unset, ConditionalCloseOrderType]): Conditional close order type
> Note: [Conditional close orders](https://support.kraken.com/hc/en-us/articles/360038640052-Conditional-Close)
are triggered by execution of the primary order in the same quantity and opposite direction, but once triggered
are __independent orders__ that may reduce or increase net position
closeprice (Union[Unset, str]): Conditional close order `price`
closeprice2 (Union[Unset, str]): Conditional close order `price2`
deadline (Union[Unset, str]): RFC3339 timestamp (e.g. 2021-04-01T00:18:45Z) after which the matching engine
should reject the new order request, in presence of latency or order queueing: min now() + 2 seconds, max now()
+ 60 seconds.
validate (Union[Unset, bool]): Validate inputs only. Do not submit order.
"""
ordertype: OrderType
type: TypeOrder
volume: str
pair: str
nonce: int = get_nonce()
userref: Union[Unset, int] = UNSET
displayvol: Union[Unset, str] = UNSET
price: Union[Unset, str] = UNSET
price2: Union[Unset, str] = UNSET
trigger: Union[Unset, Trigger] = Trigger.LAST
leverage: Union[Unset, str] = UNSET
reduce_only: Union[Unset, bool] = False
stptype: Union[
Unset, SelfTradePreventionStrategy
] = SelfTradePreventionStrategy.CANCEL_NEWEST
oflags: Union[Unset, str] = UNSET
timeinforce: Union[Unset, TimeInForce] = TimeInForce.GTC
starttm: Union[Unset, str] = UNSET
expiretm: Union[Unset, str] = UNSET
closeordertype: Union[Unset, ConditionalCloseOrderType] = UNSET
closeprice: Union[Unset, str] = UNSET
closeprice2: Union[Unset, str] = UNSET
deadline: Union[Unset, str] = UNSET
validate: Union[Unset, bool] = False
additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
nonce = self.nonce
ordertype = self.ordertype.value
type = self.type.value
volume = self.volume
pair = self.pair
userref = self.userref
displayvol = self.displayvol
price = self.price
price2 = self.price2
trigger: Union[Unset, str] = UNSET
if not isinstance(self.trigger, Unset):
trigger = self.trigger.value
leverage = self.leverage
reduce_only = self.reduce_only
stptype: Union[Unset, str] = UNSET
if not isinstance(self.stptype, Unset):
stptype = self.stptype.value
oflags = self.oflags
timeinforce: Union[Unset, str] = UNSET
if not isinstance(self.timeinforce, Unset):
timeinforce = self.timeinforce.value
starttm = self.starttm
expiretm = self.expiretm
closeordertype: Union[Unset, str] = UNSET
if not isinstance(self.closeordertype, Unset):
closeordertype = self.closeordertype.value
closeprice = self.closeprice
closeprice2 = self.closeprice2
deadline = self.deadline
validate = self.validate
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update(
{
"nonce": nonce,
"ordertype": ordertype,
"type": type,
"volume": volume,
"pair": pair,
}
)
if userref is not UNSET:
field_dict["userref"] = userref
if displayvol is not UNSET:
field_dict["displayvol"] = displayvol
if price is not UNSET:
field_dict["price"] = price
if price2 is not UNSET:
field_dict["price2"] = price2
if trigger is not UNSET:
field_dict["trigger"] = trigger
if leverage is not UNSET:
field_dict["leverage"] = leverage
if reduce_only is not UNSET:
field_dict["reduce_only"] = reduce_only
if stptype is not UNSET:
field_dict["stptype"] = stptype
if oflags is not UNSET:
field_dict["oflags"] = oflags
if timeinforce is not UNSET:
field_dict["timeinforce"] = timeinforce
if starttm is not UNSET:
field_dict["starttm"] = starttm
if expiretm is not UNSET:
field_dict["expiretm"] = expiretm
if closeordertype is not UNSET:
field_dict["close[ordertype]"] = closeordertype
if closeprice is not UNSET:
field_dict["close[price]"] = closeprice
if closeprice2 is not UNSET:
field_dict["close[price2]"] = closeprice2
if deadline is not UNSET:
field_dict["deadline"] = deadline
if validate is not UNSET:
field_dict["validate"] = validate
return field_dict
@classmethod
def from_dict(cls: Self, src_dict: Dict[str, Any]) -> Self:
d = src_dict.copy()
nonce = d.pop("nonce", get_nonce())
ordertype = OrderType(d.pop("ordertype"))
type = TypeOrder(d.pop("type"))
volume = d.pop("volume")
pair = d.pop("pair")
userref = d.pop("userref", UNSET)
displayvol = d.pop("displayvol", UNSET)
price = d.pop("price", UNSET)
price2 = d.pop("price2", UNSET)
_trigger = d.pop("trigger", UNSET)
trigger: Union[Unset, Trigger]
trigger = UNSET if isinstance(_trigger, Unset) else Trigger(_trigger)
leverage = d.pop("leverage", UNSET)
reduce_only = d.pop("reduce_only", UNSET)
_stptype = d.pop("stptype", UNSET)
stptype: Union[Unset, SelfTradePreventionStrategy]
if isinstance(_stptype, Unset):
stptype = UNSET
else:
stptype = SelfTradePreventionStrategy(_stptype)
oflags = d.pop("oflags", UNSET)
_timeinforce = d.pop("timeinforce", UNSET)
timeinforce: Union[Unset, TimeInForce]
if isinstance(_timeinforce, Unset):
timeinforce = UNSET
else:
timeinforce = TimeInForce(_timeinforce)
starttm = d.pop("starttm", UNSET)
expiretm = d.pop("expiretm", UNSET)
_closeordertype = d.pop("close[ordertype]", UNSET)
closeordertype: Union[Unset, ConditionalCloseOrderType]
if isinstance(_closeordertype, Unset):
closeordertype = UNSET
else:
closeordertype = ConditionalCloseOrderType(_closeordertype)
closeprice = d.pop("close[price]", UNSET)
closeprice2 = d.pop("close[price2]", UNSET)
deadline = d.pop("deadline", UNSET)
validate = d.pop("validate", UNSET)
add_standard_order_request_body = cls(
nonce=nonce,
ordertype=ordertype,
type=type,
volume=volume,
pair=pair,
userref=userref,
displayvol=displayvol,
price=price,
price2=price2,
trigger=trigger,
leverage=leverage,
reduce_only=reduce_only,
stptype=stptype,
oflags=oflags,
timeinforce=timeinforce,
starttm=starttm,
expiretm=expiretm,
closeordertype=closeordertype,
closeprice=closeprice,
closeprice2=closeprice2,
deadline=deadline,
validate=validate,
)
add_standard_order_request_body.additional_properties = d
return add_standard_order_request_body
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
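# Usage sketch (added example, not part of the generated module). from_dict takes
# the raw Kraken field names; "limit"/"buy" are valid ordertype/type values, and
# the pair and price below are illustrative.
if __name__ == "__main__":
    body = AddStandardOrderRequestBody.from_dict(
        {"ordertype": "limit", "type": "buy", "volume": "1.0",
         "pair": "XBTUSD", "price": "30000.0"}
    )
    print(body.to_dict())  # flat dict ready to be form-encoded for an AddOrder request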
|
tlg7c5/kraken-connector
|
kraken_connector/schemas/add_standard_order_request_body.py
|
add_standard_order_request_body.py
|
py
| 13,908 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19382433412
|
"""Support for Atome devices connected to a Linky Energy Meter."""
import asyncio
from .const import DATA_COORDINATOR, DOMAIN
PLATFORMS = ["sensor"]
DATA_LISTENER = "listener"
async def async_setup(hass, config):
"""Set up the KeyAtome component."""
# hass.data[DOMAIN] = {DATA_COORDINATOR: {}, DATA_LISTENER: {}}
return True
async def async_setup_entry(hass, config_entry):
"""Set up KeyAtome as config entry."""
hass.data.setdefault(DOMAIN, {DATA_COORDINATOR: {}, DATA_LISTENER: {}})
    # just to initialize (if data has to be forwarded to the platform)
coordinator = None
# To manage options
hass.data[DOMAIN][DATA_LISTENER][
config_entry.entry_id
] = config_entry.add_update_listener(async_reload_entry)
# Useless
hass.data[DOMAIN][DATA_COORDINATOR][config_entry.entry_id] = coordinator
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, platform)
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a KeyAtome config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(config_entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
# remove config flow coordinator
hass.data[DOMAIN][DATA_COORDINATOR].pop(config_entry.entry_id)
remove_listener = hass.data[DOMAIN][DATA_LISTENER].pop(config_entry.entry_id)
remove_listener()
return unload_ok
async def async_reload_entry(hass, config_entry):
"""Handle an options update."""
await hass.config_entries.async_reload(config_entry.entry_id)
|
jugla/keyatome
|
custom_components/keyatome/__init__.py
|
__init__.py
|
py
| 1,759 |
python
|
en
|
code
| 22 |
github-code
|
6
|
36341347314
|
# There is a single meeting room, and N meetings want to use it; build a usage schedule for the room.
# Each meeting I has a start time and an end time; find the maximum number of meetings that can use the room without overlap.
# A meeting cannot be interrupted once started, and the next meeting may begin at the exact moment the previous one ends.
# A meeting's start time may equal its end time, in which case it starts and ends immediately.
# Solved with a greedy algorithm
import sys
N = int(sys.stdin.readline())
result = []
count = 1
for _ in range(N):
time = list(map(int,sys.stdin.readline().split()))
result.append((time[0],time[1]))
result.sort(key=lambda x:(x[1],x[0]))
end_time = result[0][1]
for i in range(1,N):
if result[i][0] >= end_time:
count += 1
end_time = result[i][1]
print(count)
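# Worked example (added note): on the classic 11-meeting activity-selection
# instance below, sorting by end time and greedily taking each compatible meeting
# selects e.g. (1,4), (5,7), (8,11), (12,14), so the program prints 4.
#   11
#   1 4 / 3 5 / 0 6 / 5 7 / 3 8 / 5 9 / 6 10 / 8 11 / 8 12 / 2 13 / 12 14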
|
jujinyoung/CodingTest
|
bakjjun_codingTest/1931.py
|
1931.py
|
py
| 1,022 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
11032880428
|
# Introduction to Python (seminars). Lesson 3. Data, functions and modules in Python
# 4. Write a program that converts a decimal number to binary.
# Example: 45 -> 101101, 3 -> 11, 2 -> 10
import random
decNumber = 2 #random.randint(0, 2**100)
binNumber = str(bin(decNumber)).replace("0b", "")
print('\n', decNumber, '->', binNumber, '\n')
|
VeraNic/Python-practic
|
Prac3_4DecToBin.py
|
Prac3_4DecToBin.py
|
py
| 485 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
24639710236
|
import pandas as pd
from matplotlib import pyplot as plt
# plt.rcParams["figure.figsize"] = [12, 6]
plt.rcParams.update({'font.size': 11})
plt.rcParams["font.family"] = "Times New Roman"
############################ Model 1 ####################3
resnet50 = pd.read_csv(r'Dataset/resnet50.csv')
resnet50VAccu = resnet50['val_accuracy'].values.tolist()
vgg16 = pd.read_csv(r'Dataset/vgg16.csv')
vgg16VAccu = vgg16['val_accuracy'].values.tolist()
################### Comparision of 3 model ###################
axes = plt.axes()
plt.plot(range(1,len(resnet50VAccu)+1),resnet50VAccu,color='green',linewidth=2)
plt.plot(range(1,len(resnet50VAccu)+1),vgg16VAccu,color='red',linewidth=2)
plt.grid()
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
# plt.ylabel('Loss')
plt.legend(['Resnet50', 'vgg16'])
plt.savefig('2model comparision.png')
plt.show()
|
Mehedi-Bin-Hafiz/Rotten-fruit-detection-by-deep-learning
|
Graph/lineGraph.py
|
lineGraph.py
|
py
| 842 |
python
|
en
|
code
| 1 |
github-code
|
6
|
9152769558
|
from django.urls import path
from . import views
urlpatterns = [
path('',views.Home.as_view(),name='Home'),
path(r'stock/<str:Name>',views.Show_Details.as_view()),
path('ajax/get-info',views.get_info),
path('ajax/get-nifty',views.get_nifty),
path('ajax/get-topstocks',views.get_topstocks),
path('stock/ajax/Get-SelectedStock/',views.Get_SelectedStock),
]
|
Pggeeks/Live-StockScrenner-Django
|
stockapp/urls.py
|
urls.py
|
py
| 379 |
python
|
en
|
code
| 1 |
github-code
|
6
|
21904300234
|
import os
from pathlib import Path
import click
import numpy as np
import tensorflow as tf
from waymo_open_dataset.dataset_pb2 import Frame
from waymo_open_dataset.utils import frame_utils, transform_utils, range_image_utils
from waymo_open_dataset import dataset_pb2
from utils import save_frame, save_points
from visualization.visu_image import plot_points_on_image, save_camera_image
from visualization.visu_point_cloud import show_point_cloud
from multiprocessing import Pool
def convert_range_image_to_point_cloud(frame, range_images, camera_projections, range_image_top_pose, ri_index=(0, 1)):
"""
Modified from the codes of Waymo Open Dataset.
Convert range images to point cloud.
Args:
frame: open dataset frame
range_images: A dict of {laser_name, [range_image_first_return, range_image_second_return]}.
camera_projections: A dict of {laser_name,
[camera_projection_from_first_return, camera_projection_from_second_return]}.
range_image_top_pose: range image pixel pose for top lidar.
ri_index: 0 for the first return, 1 for the second return.
Returns:
points: {[N, 3]} list of 3d lidar points of length 5 (number of lidars).
cp_points: {[N, 6]} list of camera projections of length 5 (number of lidars).
"""
calibrations = sorted(frame.context.laser_calibrations, key=lambda c: c.name)
points = []
cp_points = []
points_NLZ = []
points_intensity = []
points_elongation = []
frame_pose = tf.convert_to_tensor(np.reshape(np.array(frame.pose.transform), [4, 4]))
# [H, W, 6]
range_image_top_pose_tensor = tf.reshape(
tf.convert_to_tensor(range_image_top_pose.data), range_image_top_pose.shape.dims
)
# [H, W, 3, 3]
range_image_top_pose_tensor_rotation = transform_utils.get_rotation_matrix(
range_image_top_pose_tensor[..., 0], range_image_top_pose_tensor[..., 1],
range_image_top_pose_tensor[..., 2])
range_image_top_pose_tensor_translation = range_image_top_pose_tensor[..., 3:]
range_image_top_pose_tensor = transform_utils.get_transform(
range_image_top_pose_tensor_rotation,
range_image_top_pose_tensor_translation)
for c in calibrations:
points_single, cp_points_single, points_NLZ_single, points_intensity_single, points_elongation_single \
= [], [], [], [], []
for cur_ri_index in ri_index:
range_image = range_images[c.name][cur_ri_index]
if len(c.beam_inclinations) == 0: # pylint: disable=g-explicit-length-test
beam_inclinations = range_image_utils.compute_inclination(
tf.constant([c.beam_inclination_min, c.beam_inclination_max]),
height=range_image.shape.dims[0])
else:
beam_inclinations = tf.constant(c.beam_inclinations)
beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4])
range_image_tensor = tf.reshape(
tf.convert_to_tensor(range_image.data), range_image.shape.dims)
pixel_pose_local = None
frame_pose_local = None
if c.name == dataset_pb2.LaserName.TOP:
pixel_pose_local = range_image_top_pose_tensor
pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0)
frame_pose_local = tf.expand_dims(frame_pose, axis=0)
range_image_mask = range_image_tensor[..., 0] > 0
range_image_NLZ = range_image_tensor[..., 3]
range_image_intensity = range_image_tensor[..., 1]
range_image_elongation = range_image_tensor[..., 2]
range_image_cartesian = range_image_utils.extract_point_cloud_from_range_image(
tf.expand_dims(range_image_tensor[..., 0], axis=0),
tf.expand_dims(extrinsic, axis=0),
tf.expand_dims(tf.convert_to_tensor(beam_inclinations), axis=0),
pixel_pose=pixel_pose_local,
frame_pose=frame_pose_local)
range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0)
points_tensor = tf.gather_nd(range_image_cartesian,
tf.where(range_image_mask))
points_NLZ_tensor = tf.gather_nd(range_image_NLZ, tf.compat.v1.where(range_image_mask))
points_intensity_tensor = tf.gather_nd(range_image_intensity, tf.compat.v1.where(range_image_mask))
points_elongation_tensor = tf.gather_nd(range_image_elongation, tf.compat.v1.where(range_image_mask))
cp = camera_projections[c.name][0]
cp_tensor = tf.reshape(tf.convert_to_tensor(cp.data), cp.shape.dims)
cp_points_tensor = tf.gather_nd(cp_tensor, tf.where(range_image_mask))
points_single.append(points_tensor.numpy())
cp_points_single.append(cp_points_tensor.numpy())
points_NLZ_single.append(points_NLZ_tensor.numpy())
points_intensity_single.append(points_intensity_tensor.numpy())
points_elongation_single.append(points_elongation_tensor.numpy())
points.append(np.concatenate(points_single, axis=0))
cp_points.append(np.concatenate(cp_points_single, axis=0))
points_NLZ.append(np.concatenate(points_NLZ_single, axis=0))
points_intensity.append(np.concatenate(points_intensity_single, axis=0))
points_elongation.append(np.concatenate(points_elongation_single, axis=0))
return points, cp_points, points_NLZ, points_intensity, points_elongation
def save_camera_images(idx: int, frame: Frame, output_dir: Path) -> None:
for image in frame.images:
save_camera_image(idx, image, frame.camera_labels, output_dir)
def save_data(frame: Frame, idx: int, points: np.ndarray,
output_dir: Path) -> None:
save_frame(frame, idx, output_dir)
save_points(idx, points, output_dir)
def visualize_camera_projection(idx: int, frame: Frame, output_dir: Path,
pcd_return) -> None:
points, points_cp = pcd_return
points_all = np.concatenate(points, axis=0)
points_cp_all = np.concatenate(points_cp, axis=0)
images = sorted(frame.images, key=lambda i: i.name) # type: ignore
# distance between lidar points and vehicle frame origin
points_tensor = tf.norm(points_all, axis=-1, keepdims=True)
points_cp_tensor = tf.constant(points_cp_all, dtype=tf.int32)
mask = tf.equal(points_cp_tensor[..., 0], images[0].name)
points_cp_tensor = tf.cast(tf.gather_nd(
points_cp_tensor, tf.where(mask)), tf.float32)
points_tensor = tf.gather_nd(points_tensor, tf.where(mask))
projected_points_from_raw_data = tf.concat(
[points_cp_tensor[..., 1:3], points_tensor], -1).numpy()
plot_points_on_image(
idx, projected_points_from_raw_data, images[0], output_dir)
def pcd_from_range_image(frame: Frame):
def _range_image_to_pcd(ri_index: int = 0):
# points, points_cp = frame_utils.convert_range_image_to_point_cloud(
# frame, range_images, camera_projections, range_image_top_pose,
# ri_index=ri_index)
points, points_cp = convert_range_image_to_point_cloud(
frame, range_images, camera_projections, range_image_top_pose,
ri_index=ri_index)
return points, points_cp
parsed_frame = frame_utils.parse_range_image_and_camera_projection(frame)
range_images, camera_projections, _, range_image_top_pose = parsed_frame
frame.lasers.sort(key=lambda laser: laser.name)
return _range_image_to_pcd(), _range_image_to_pcd(1)
# def visualize_pcd_return(frame: Frame, pcd_return,
# visu: bool) -> None:
# points, points_cp = pcd_return
# points_all = np.concatenate(points, axis=0)
# # print(f'points_all shape: {points_all.shape}')
# # camera projection corresponding to each point
# points_cp_all = np.concatenate(points_cp, axis=0)
# # print(f'points_cp_all shape: {points_cp_all.shape}')
# if visu:
# show_point_cloud(points_all, frame.laser_labels)
def process_data(idx: int, frame: Frame, output_dir: Path, save: bool,
visu: bool) -> None:
print(f'Start to process frame {idx:03}')
# pylint: disable=no-member (E1101)
# frame = Frame()
# frame.ParseFromString(bytearray(data.numpy()))
range_images, camera_projections, range_image_top_pose = frame_utils.parse_range_image_and_camera_projection(frame)
    points, cp_points, points_in_NLZ_flag, points_intensity, points_elongation = convert_range_image_to_point_cloud(
frame, range_images, camera_projections, range_image_top_pose, ri_index=(0, )
)
points_all = np.concatenate(points, axis=0)
points_in_NLZ_flag = np.concatenate(points_in_NLZ_flag, axis=0).reshape(-1, 1)
points_intensity = np.concatenate(points_intensity, axis=0).reshape(-1, 1)
    points_elongation = np.concatenate(points_elongation, axis=0).reshape(-1, 1)
    points_all = points_all[points_in_NLZ_flag.reshape(-1) == -1]  # keep only points outside no-label zones
# pcd_return_1, pcd_return_2 = pcd_from_range_image(frame)
# visualize_pcd_return(frame, pcd_return_1, visu)
# visualize_pcd_return(frame, pcd_return_2, visu)
# concatenate 1st and 2nd return
# points, _ = concatenate_pcd_returns(pcd_return_1, pcd_return_2)
if visu:
save_camera_images(idx, frame, output_dir)
show_point_cloud(points_all, frame.laser_labels, idx, output_dir)
visualize_camera_projection(idx, frame, output_dir, (points, cp_points))
if save:
save_data(frame, idx, points, output_dir)
def process_segment(segment_path: str, output_dir: Path, save: bool,
visu: bool, parallelism: int=1) -> None:
data_set = tf.data.TFRecordDataset(segment_path, compression_type='')
frame_list = []
for idx, data in enumerate(data_set):
print(f'Loading frame: {idx}')
frame = Frame()
frame.ParseFromString(bytearray(data.numpy()))
frame_list.append(frame)
# multiprocessing?
if parallelism > 0:
arg_list = []
for idx, frame in enumerate(frame_list):
arg_list.append((idx, frame, output_dir, save, visu))
with Pool(parallelism) as pool:
pool.starmap(process_data, arg_list)
else:
for idx, frame in enumerate(frame_list):
process_data(idx, frame, output_dir, save, visu)
@click.command(help='Point Cloud Visualization Demo')
@click.option('--save/--no-save', 'save', default=False,
help='save frames and concatenated point clouds to disk')
@click.option('--visu/--no-visu', 'visu', default=False,
help='visualize point clouds and save images')
@click.argument('segment_path', type=click.Path(exists=True))
@click.argument('output_dir', type=click.Path(exists=True))
def main(save: bool, visu: bool, segment_path: str, output_dir: str) -> None:
if os.path.basename(segment_path).split('.')[-1] != 'tfrecord':
raise ValueError(f'segment file has to be of '
f'{tf.data.TFRecordDataset.__name__} type')
process_segment(segment_path, Path(output_dir), save, visu, 0)
if __name__ == '__main__':
# pylint: disable=no-value-for-parameter
main()
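# Example invocation (added note; the segment path is illustrative): save frames
# and concatenated point clouds while also writing visualization images:
#   python main.py --save --visu segment-xxxx_with_camera_labels.tfrecord ./output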
|
friskit-china/waymo-open-dataset-visualizer
|
main.py
|
main.py
|
py
| 11,384 |
python
|
en
|
code
| 1 |
github-code
|
6
|
23608824174
|
# Calculates the distance to the nearest massive galaxy
import pickle

import numpy as np
import pandas as pd


def distance_to_nearest_host(data):
distances = []
hostrvirs = []
for i in range(len(data)):
s = data['sim'].tolist()[i]
if s=='h148' or s=='h229' or s=='h242' or s=='h329': # if sat simulation, find distance to halo 1
h1dist = data['h1dist'].tolist()[i]*0.6776942783267969
distances.append(h1dist)
h1rvir = data['Rvir'][(data.sim==s) & (data.haloid==1)].tolist()[0]*0.6776942783267969
hostrvirs.append(h1rvir)
else: # if field simulation, find distance to nearest massive DM halo (currently > 0.5e12 Msol)
if s=='cptmarvel':
path = '/home/akinshol/Data/Sims/cptmarvel.cosmo25cmb.4096g5HbwK1BH/cptmarvel.cosmo25cmb.4096g5HbwK1BH.004096.dir/cptmarvel.cosmo25cmb.4096g5HbwK1BH.004096'
if s=='elektra':
path = '/home/akinshol/Data/Sims/elektra.cosmo25cmb.4096g5HbwK1BH/elektra.cosmo25cmb.4096g5HbwK1BH.004096.dir/elektra.cosmo25cmb.4096g5HbwK1BH.004096'
if s=='rogue':
path = '/home/akinshol/Data/Sims/rogue.cosmo25cmb.4096g5HbwK1BH/rogue.cosmo25cmb.4096g5HbwK1BH.004096.dir/rogue.cosmo25cmb.4096g5HbwK1BH.004096'
if s=='storm':
path = '/home/akinshol/Data/Sims/storm.cosmo25cmb.4096g5HbwK1BH/storm.cosmo25cmb.4096g5HbwK1BH.004096/storm.cosmo25cmb.4096g5HbwK1BH.004096'
coords = []
with open(path+'.coords','rb') as f:
while True:
try:
coords.append(pickle.load(f,encoding='latin1'))
except EOFError:
break
coords = pd.DataFrame(coords)
threshold = 5*10**(11) # this threshold can be adjusted,
# i tried to pick something similar to the virial masses of the host in the JL simulations
coords = coords[coords.mass > threshold]
halocoords = np.array([data['Xc'].tolist()[i],data['Yc'].tolist()[i],data['Zc'].tolist()[i]])
x = np.array(coords['Xc'])
y = np.array(coords['Yc'])
z = np.array(coords['Zc'])
Rvir = np.array(coords['Rv'])
c = np.array([x,y,z])
c = np.transpose(c)
dist = np.sqrt(np.sum((halocoords-c)**2, axis=1))*0.6776942783267969
distances.append(np.min(dist))
hostrvirs.append(Rvir[np.argmin(dist)]*0.6776942783267969)
return np.array(distances),np.array(hostrvirs)
|
CharlotteRuth/python_analysis
|
distance_to_nearest_host.py
|
distance_to_nearest_host.py
|
py
| 2,633 |
python
|
en
|
code
| 0 |
github-code
|
6
|
89150854
|
# --usage: print usage
def usage():
print('Usage: python3 marvin-data/marvin.py --building|--setup|--testing')
exit(1)
# --building: build the project
def building():
import json
import subprocess
with open('project-data/definition.json', 'r') as json_file:
with open('marvin-data/build_logs.txt', 'a') as logs_file:
data = json.load(json_file)
for command in data['build-commands']:
print("===> BUILD: Running command build '" + command + "'.")
print(command)
logs_file.write(command + '\n')
try:
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, error = process.communicate(timeout=120)
exit_code = process.wait()
except subprocess.TimeoutExpired:
print('===> BUILD: Command timed out.')
logs_file.write('Timed out.\n')
exit(1)
except Exception as e:
print('===> BUILD: Command failed with exception: ' + str(e))
logs_file.write('Failed with exception: ' + str(e) + '\n')
exit(1)
print(output.decode('utf-8') + error.decode('utf-8'))
logs_file.write(output.decode('utf-8') + error.decode('utf-8'))
if exit_code != 0:
print('===> BUILD: Command failed with exit code ' + str(exit_code) + '.')
exit(1)
print("===> BUILD: Done.")
exit(0)
# --setup: setup the project
def setup():
import json
import subprocess
with open('project-data/definition.json', 'r') as json_file:
data = json.load(json_file)
if 'setup-commands' not in data or len(data['setup-commands']) == 0:
print("===> SETUP: No setup commands.")
exit(0)
for command in data['setup-commands']:
print("===> SETUP: Running command setup \"" + command + "\".")
print(command)
try:
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, error = process.communicate(timeout=120)
exit_code = process.wait()
except subprocess.TimeoutExpired:
print('===> SETUP: Command timed out.')
exit(1)
except Exception as e:
print('===> SETUP: Command failed with exception: ' + str(e))
exit(1)
print(output.decode('utf-8') + error.decode('utf-8'))
if exit_code != 0:
print('===> SETUP: Command failed with exit code ' + str(exit_code) + '.')
exit(1)
print("===> SETUP: Done.")
# --testing: run the tests
def testing():
import json
import subprocess
results = dict()
with open('project-data/definition.json') as json_file:
data = json.load(json_file)
for skill in data['skills']:
results[skill["name"]] = dict()
print("===> TESTING: Starting tests for skill '" + skill["name"] + "'.")
for test in skill["tests"]:
print("===> TESTING: Starting test '" + test["name"] + "'.")
print(test["command"])
results[skill["name"]][test["name"]] = dict()
try:
process = subprocess.Popen(test["command"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, error = process.communicate(timeout=60)
exit_code = process.wait()
except subprocess.TimeoutExpired:
print('===> TESTING: Command timed out.')
results[skill["name"]][test["name"]]['status'] = 'FAILED'
results[skill["name"]][test["name"]]['message'] = 'Timed out after 60 seconds.'
continue
except Exception as e:
print('===> TESTING: Command failed with exception: ' + str(e))
results[skill["name"]][test["name"]]['status'] = 'FAILED'
results[skill["name"]][test["name"]]['message'] = 'Failed with exception: ' + str(e)
continue
print(output.decode('utf-8'), error.decode('utf-8'))
results[skill["name"]][test["name"]]['status'] = 'PASSED' if output.decode('utf-8') == test["expected"] else 'FAILED'
if (output.decode('utf-8') != test["expected"]):
results[skill["name"]][test["name"]]['message'] = 'Expected:\n' + test["expected"] + '\nBut got:\n' + (output.decode('utf-8') + error.decode('utf-8'))
else:
results[skill["name"]][test["name"]]['message'] = test["expected"]
print("===> TESTING: Test '" + test["name"] + "' status: " + results[skill["name"]][test["name"]]['status'] + ".")
print("===> TESTING: Ending tests for skill '" + skill["name"] + "'.")
print("===> TESTING: Done.")
with open('marvin-data/results.json', 'w') as outfile:
json.dump(results, outfile)
print("===> TESTING: Results saved.")
exit(0)
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
usage()
elif sys.argv[1] == '--building':
building()
elif sys.argv[1] == '--setup':
setup()
elif sys.argv[1] == '--testing':
testing()
else:
usage()
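# Sketch of the project-data/definition.json layout this script reads (inferred
# from the keys accessed above; all names and commands are illustrative):
#   {
#     "build-commands": ["make"],
#     "setup-commands": ["cp marvin-data/subject.txt ."],
#     "skills": [
#       {"name": "basics",
#        "tests": [{"name": "hello", "command": "./hello",
#                   "expected": "Hello, world!\n"}]}
#     ]
#   }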
|
Lqvrent/SharedMarvin
|
Marvin/marvin.py
|
marvin.py
|
py
| 5,567 |
python
|
en
|
code
| 16 |
github-code
|
6
|
7167175651
|
T = int(input())
dp = [[0, 0] for _ in range(41)]  # dp[n] = [count of fib(0) calls, count of fib(1) calls]
dp[0] = [1, 0]
dp[1] = [0, 1]
N_List = []
for _ in range(T):
N_List.append(int(input()))
maxNum = max(N_List)
for i in range(2, maxNum+1):
dp[i] = [dp[i-2][0] + dp[i-1][0], dp[i-2][1] + dp[i-1][1]] # 바텀 업
for num in N_List:
print(dp[num][0],dp[num][1])
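# Sanity check (added example, not part of the submission): dp[n] matches the
# number of times a naive recursive fibonacci(n) would print 0 and 1, e.g.
# naive_counts(3) == (1, 2), so N = 3 prints "1 2".
def naive_counts(n):
    if n == 0:
        return (1, 0)
    if n == 1:
        return (0, 1)
    a = naive_counts(n - 1)
    b = naive_counts(n - 2)
    return (a[0] + b[0], a[1] + b[1])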
|
donchanee/Algorithm-PS
|
백준/1003.py
|
1003.py
|
py
| 315 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20501710133
|
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, ReduceLROnPlateau
from tensorflow.keras import layers, models, regularizers
from tensorflow import lite
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import os
import time
import cv2
import datetime
# NEURAL NETWORK CONFIGURATION
def get_model(input_size, classes=7):
model = models.Sequential()
model.add(layers.Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', input_shape =input_size))
model.add(layers.Conv2D(64, kernel_size=(3, 3), activation='relu', padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D(2, 2))
model.add(layers.Dropout(0.25))
model.add(layers.Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same', kernel_regularizer=regularizers.l2(0.01)))
model.add(layers.Conv2D(256, kernel_size=(3, 3), activation='relu', kernel_regularizer=regularizers.l2(0.01)))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D(pool_size=(2, 2)))
model.add(layers.Dropout(0.25))
model.add(layers.Flatten())
model.add(layers.Dense(1024, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(classes, activation='softmax'))
    # compile the model
model.compile(optimizer=Adam(learning_rate=0.0001),
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
# LOADING AND TRANSFORMING IMAGES
def dataset_load(im_paths, width, height, verbose):
    data = []
    labels = []
    for (i, im_path) in enumerate(im_paths):
        # load the image into the image variable
        image = cv2.imread(im_path)
        # determine the image class from the path string
        # path format: ../dataset/{class}/{image}.jpg
        label = im_path.split(os.path.sep)[-2]
        # resize the image to the requested size (the image must be square)
        image = cv2.resize(image, (width, height), interpolation=cv2.INTER_AREA)
        # convert the image to a numpy array
        image_array = img_to_array(image, data_format=None)
        # append the image array to the data list
        data.append(image_array)
        # append the matching label for the image just added to data
        labels.append(label)
        # report progress every `verbose` processed images
        if verbose > 0 and i > 0 and (i + 1) % verbose == 0:
            print("[INFO] Processed {}/{}".format(i + 1, len(im_paths)))
    # return data and labels as numpy arrays
    return (np.array(data), np.array(labels))
# 1. DATA PREPARATION
# name of the dataset directory inside the datasets folder
dataset_name = "faces"
# paths for the dataset, the training plot and the saved keras model
dataset_path = os.path.join("datasets", dataset_name)
name_labels = ['interested', 'uninterested']
num_classes = len(name_labels)
plot_name = "{}_output/{}_plot.png".format(dataset_name, dataset_name)
weights_name = "{}_output/{}_weights.h5".format(dataset_name, dataset_name)
tflite_name = "{}_output/{}_weights.tflite".format(dataset_name, dataset_name)
# load the dataset from disk, converting the images to arrays
# and scaling pixel values from the [0, 255] range to [0, 1]
start_time = time.time()
image_paths = list(paths.list_images(dataset_path))
print("[INFO] Loading images ...")
(data, labels) = dataset_load(image_paths, width=48, height=48, verbose=500)
data = data.astype("float") / 255.0
# split the data into training and test sets (75% and 25%)
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.25, random_state=42)
print("[INFO] Feature matrix shape: {}".format(data.shape))
print("[INFO] Feature matrix size: {:.1f}MB".format(data.nbytes / (1024 * 1000.0)))
# convert the labels from integers to vectors
trainY = LabelBinarizer().fit_transform(trainY)
testY = LabelBinarizer().fit_transform(testY)
# with only two classes LabelBinarizer returns a single column, while the model
# ends in a 2-unit softmax with categorical_crossentropy; expand to one-hot
if trainY.shape[1] == 1:
    trainY = np.hstack([1 - trainY, trainY])
    testY = np.hstack([1 - testY, testY])
print("[INFO] Data preparation time: {} sec".format(round(time.time() - start_time, 2)))
# 2. BUILDING AND COMPILING THE NEURAL NETWORK MODEL
print("[INFO] Compiling model...")
# the images are loaded as 3-channel BGR, so the input shape must be (48, 48, 3)
model = get_model((48, 48, 3), num_classes)
# 3. SETTING UP ADDITIONAL TRAINING PARAMETERS
# Define the callbacks used while training the neural network
log_dir = "checkpoint/logs/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
checkpoint = ModelCheckpoint(filepath=weights_name,
                             save_best_only=True,
                             verbose=1,
                             mode='min',
                             monitor='val_loss')
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              factor=0.2,
                              patience=6,
                              verbose=1,
                              min_delta=0.0001)
tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)
csv_logger = CSVLogger('training.log')
callbacks = [checkpoint, reduce_lr, csv_logger]
# data augmentation: grow the training sample by modifying existing images
aug = ImageDataGenerator(rotation_range=20, zoom_range=0.15,
                         width_shift_range=0.2, height_shift_range=0.2,
                         shear_range=0.15, horizontal_flip=True, fill_mode="nearest")
# 4. TRAINING THE NEURAL NETWORK
num_epochs = 30
print("[INFO] Training the neural network...")
start_time = time.time()
hist = model.fit(aug.flow(trainX, trainY, batch_size=32),
                 validation_data=(testX, testY),
                 epochs=num_epochs,
                 callbacks=callbacks,
                 verbose=0)
print("[INFO] Training time: {} sec".format(round(time.time() - start_time, 2)))
# 5. EVALUATING THE NEURAL NETWORK MODEL
print("[INFO] Evaluating the neural network...")
predictions = model.predict(testX, batch_size=32)
print(classification_report(testY.argmax(axis=1),
predictions.argmax(axis=1),
target_names=name_labels))
# build and save the training loss and accuracy plot
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, num_epochs), hist.history["loss"], label="train_loss")
plt.plot(np.arange(0, num_epochs), hist.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, num_epochs), hist.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, num_epochs), hist.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.savefig(plot_name)
# 6. SAVING THE NEURAL NETWORK MODEL
print("[INFO] Saving the quantized TFLite model...")
# convert the keras model into a quantized tflite model
converter = lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [lite.Optimize.DEFAULT]
tflite_model = converter.convert()
# save the tflite model.
with open(tflite_name, 'wb') as f:
f.write(tflite_model)
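# Inference sketch (added example, not in the original script): load the freshly
# written TFLite file and run a single test image through it. Assumes the
# dynamic-range-quantized model keeps float32 inputs/outputs, the default for
# this converter configuration.
interpreter = lite.Interpreter(model_path=tflite_name)
interpreter.allocate_tensors()
inp = interpreter.get_input_details()[0]
out = interpreter.get_output_details()[0]
interpreter.set_tensor(inp['index'], testX[:1].astype(np.float32))
interpreter.invoke()
print("[INFO] TFLite sample prediction:", interpreter.get_tensor(out['index']))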
|
aleksioprime/facerecognition
|
training_cnn.py
|
training_cnn.py
|
py
| 8,710 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
25442839211
|
from src.server.server import Server
import logging
logger = logging.getLogger('fmu_logger')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s_%(name)s_%(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
class Web(object):
def __init__(self):
logger.debug('Web::__init__')
try:
# f = open(os.devnull, 'w')
# sys.stdout = sys.stderr = f
self.server = Server()
except Exception as e:
logger.debug(e)
def main():
web = Web()
if __name__ == "__main__":
main()
while True:
pass
|
jwayneroth/mpd-touch
|
web/web.py
|
web.py
|
py
| 607 |
python
|
en
|
code
| 5 |
github-code
|
6
|
8708427284
|
#!/usr/bin/python
import struct
import sys
import binascii
Signature = '\x89PNG\r\n\x1a\n'
#fichier_source, fichier_cible, fichier_dest, cle_chiffrement, algo = sys.argv[1:6]
fichier_source = 'date.txt'
fichier_cible = 'ressource.PNG'
fichier_dest = 'cyber.PNG'
cle_chiffrement = 'test12345'
algo = 'aes'
if algo.lower() == "3des":
from Crypto.Cipher import DES3
TB = 8
algo = DES3
else:
    from Crypto.Cipher import AES
TB = 16
algo = AES
padding = lambda s: s + (TB - len(s) % TB) * chr(TB - len(s) % TB)  # PKCS#7-style padding to the block size
key = cle_chiffrement
with open(fichier_source, "rb") as f:
s = padding(f.read())
with open(fichier_cible, "rb") as f:
c = padding(f.read())
p = s[:TB]  # first block of the plaintext
ecb_dec = algo.new(key, algo.MODE_ECB)
assert TB >= 2
taille = len(s) - TB
chunktype = 'aaaa'
cypher = Signature + struct.pack(">I", taille) + chunktype
cypher = ecb_dec.decrypt(cypher)
IV = "".join([chr(ord(cypher[i]) ^ ord(p[i])) for i in range(TB)])
cbc_enc = algo.new(key, algo.MODE_CBC, IV)
resultat = cbc_enc.encrypt(s)
# write the crc at the end of the chunk
resultat = resultat + struct.pack(">I", binascii.crc32(resultat[12:]) % 0x100000000)
# append the data from c, skipping the signature
resultat = resultat + c[8:]
# we now have the result, the key and the IV
cbc_dec = algo.new(key, algo.MODE_CBC, IV)
with open(fichier_dest, "wb") as f:
f.write(cbc_dec.decrypt(padding(resultat)))
# script generation
|
buzagi/projet-pong
|
1.py
|
1.py
|
py
| 1,547 |
python
|
fr
|
code
| 2 |
github-code
|
6
|
7055484203
|
from time import sleep
from onvif import ONVIFCamera
exec(open("./fix_zeep.py").read())
class Camera(object):
def __init__(self, ip, login, password, port = 80):
        # Connect
        self.mycam = ONVIFCamera(ip, port, login, password)
        # Create the service used for motion control
        self.ptz = self.mycam.create_ptz_service()
        # Get the profile that contains the required tokens
        # (they are needed in later requests)
        media = self.mycam.create_media_service()
        self.media_profile = media.GetProfiles()[0]
self._initContinuousMove()
    def _initContinuousMove(self):
        # To get the movement limits along the X and Y axes,
        # request the PTZ service configuration options
        request = self.ptz.create_type('GetConfigurationOptions')
        request.ConfigurationToken = self.media_profile.PTZConfiguration.token
        self.ptz_configuration_options = self.ptz.GetConfigurationOptions(request)
        self.XMAX = self.ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].XRange.Max
        self.XMIN = self.ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].XRange.Min
        self.YMAX = self.ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].YRange.Max
        self.YMIN = self.ptz_configuration_options.Spaces.ContinuousPanTiltVelocitySpace[0].YRange.Min
        # To control the camera, create a ContinuousMove request
        self.request = self.ptz.create_type('ContinuousMove')
        self.request.ProfileToken = self.media_profile.token
        # Since the Velocity attribute of the created request is None,
        # replace it with an object of the same structure
        self.request.Velocity = self.ptz.GetStatus({'ProfileToken': self.media_profile.token}).Position
        self.request.Velocity.Zoom.x = 0.0
        self.ptz.Stop({'ProfileToken': self.media_profile.token})
def stop(self):
self.ptz.Stop({'ProfileToken': self.request.ProfileToken})
def _perform_move(self, timeout):
self.ptz.Stop({'ProfileToken': self.request.ProfileToken})
# Start continuous move
self.ptz.ContinuousMove(self.request)
# Wait a certain time
# sleep(timeout)
# Stop continuous move
# self.ptz.Stop({'ProfileToken': self.request.ProfileToken})
def move_up(self, timeout=0):
print('Moving UP')
self.request.Velocity.PanTilt.x = 0
self.request.Velocity.PanTilt.y = self.YMAX
self._perform_move(timeout)
def move_down(self, timeout=0):
print('Moving DOWN')
self.request.Velocity.PanTilt.x = 0
self.request.Velocity.PanTilt.y = self.YMIN
self._perform_move(timeout)
def move_right(self, timeout=0):
print('Moving RIGHT')
self.request.Velocity.PanTilt.x = self.XMAX
self.request.Velocity.PanTilt.y = 0
self._perform_move(timeout)
def move_left(self, timeout=0):
print ('Moving LEFT')
self.request.Velocity.PanTilt.x = self.XMIN
self.request.Velocity.PanTilt.y = 0
self._perform_move(timeout)
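# Usage sketch (added example, not part of the original module): the address and
# credentials are placeholders for a real ONVIF camera on the local network.
if __name__ == "__main__":
    cam = Camera('192.168.1.64', 'admin', 'password')
    cam.move_left()
    sleep(1)  # let the camera pan for one second
    cam.stop()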
|
Maden23/CameraAudioKeyboard
|
ptzcamera.py
|
ptzcamera.py
|
py
| 3,524 |
python
|
ru
|
code
| 1 |
github-code
|
6
|
26776110880
|
"""Exceptions interface.
Exceptions allow for ignoring detected issues. This is commonly done to suppress false
positives or to ignore issues that a group has no intention of addressing.
The two types of exceptions are a list of filenames or regular expressions. If using
filename matching for the exception it is required that the reported issue contain the
absolute path to the file containing the issue to be ignored. The path for the issue is
set in the tool plugin that generates the issues.
"""
import fnmatch
import logging
import os
import re
from typing import Any, Dict, List, Match, Optional, Pattern
import yaml
from statick_tool.issue import Issue
from statick_tool.package import Package
class Exceptions:
"""Interface for applying exceptions."""
def __init__(self, filename: Optional[str]) -> None:
"""Initialize exceptions interface."""
if not filename:
raise ValueError(f"{filename} is not a valid file")
with open(filename, encoding="utf8") as fname:
try:
self.exceptions: Dict[Any, Any] = yaml.safe_load(fname)
except (yaml.YAMLError, yaml.scanner.ScannerError) as ex:
raise ValueError(f"{filename} is not a valid YAML file: {ex}") from ex
def get_ignore_packages(self) -> List[str]:
"""Get list of packages to skip when scanning a workspace."""
ignore: List[str] = []
if (
"ignore_packages" in self.exceptions
and self.exceptions["ignore_packages"] is not None
):
ignore = self.exceptions["ignore_packages"]
return ignore
def get_exceptions(self, package: Package) -> Dict[Any, Any]:
"""Get specific exceptions for given package."""
exceptions: Dict[Any, Any] = {"file": [], "message_regex": []}
if "global" in self.exceptions and "exceptions" in self.exceptions["global"]:
global_exceptions = self.exceptions["global"]["exceptions"]
if "file" in global_exceptions and global_exceptions["file"]:
exceptions["file"] += global_exceptions["file"]
if (
"message_regex" in global_exceptions
and global_exceptions["message_regex"]
):
exceptions["message_regex"] += global_exceptions["message_regex"]
# pylint: disable=too-many-boolean-expressions
if (
self.exceptions
and "packages" in self.exceptions
and self.exceptions["packages"]
and package.name in self.exceptions["packages"]
and self.exceptions["packages"][package.name]
and "exceptions" in self.exceptions["packages"][package.name]
):
package_exceptions = self.exceptions["packages"][package.name]["exceptions"]
if "file" in package_exceptions:
exceptions["file"] += package_exceptions["file"]
if "message_regex" in package_exceptions:
exceptions["message_regex"] += package_exceptions["message_regex"]
# pylint: enable=too-many-boolean-expressions
return exceptions
def filter_file_exceptions_early(
self, package: Package, file_list: List[str]
) -> List[str]:
"""Filter files based on file pattern exceptions list.
Only filters files which have tools=all, intended for use after the discovery
plugins have been run (so that Statick doesn't run the tool plugins against
files which will be ignored anyway).
"""
exceptions: Dict[Any, Any] = self.get_exceptions(package)
to_remove = []
for filename in file_list:
removed = False
for exception in exceptions["file"]:
if exception["tools"] == "all":
for pattern in exception["globs"]:
# Hack to avoid exceptions for everything on Travis CI.
fname = filename
prefix = "/home/travis/build/"
if pattern == "*/build/*" and fname.startswith(prefix):
fname = fname[len(prefix) :]
if fnmatch.fnmatch(fname, pattern):
to_remove.append(filename)
removed = True
break
if removed:
break
file_list = [filename for filename in file_list if filename not in to_remove]
return file_list
def filter_file_exceptions(
self, package: Package, exceptions: List[Any], issues: Dict[str, List[Issue]]
) -> Dict[str, List[Issue]]:
"""Filter issues based on file pattern exceptions list."""
for tool, tool_issues in list( # pylint: disable=too-many-nested-blocks
issues.items()
):
warning_printed = False
to_remove: List[Issue] = []
for issue in tool_issues:
if not os.path.isabs(issue.filename):
if not warning_printed:
self.print_exception_warning(tool)
warning_printed = True
continue
rel_path: str = os.path.relpath(issue.filename, package.path)
for exception in exceptions:
if exception["tools"] == "all" or tool in exception["tools"]:
for pattern in exception["globs"]:
# Hack to avoid exceptions for everything on Travis CI.
fname: str = issue.filename
prefix: str = "/home/travis/build/"
if pattern == "*/build/*" and fname.startswith(prefix):
fname = fname[len(prefix) :]
if fnmatch.fnmatch(fname, pattern) or fnmatch.fnmatch(
rel_path, pattern
):
to_remove.append(issue)
issues[tool] = [issue for issue in tool_issues if issue not in to_remove]
return issues
@classmethod
def filter_regex_exceptions(
cls, exceptions: List[Any], issues: Dict[str, List[Issue]]
) -> Dict[str, List[Issue]]:
"""Filter issues based on message regex exceptions list."""
for exception in exceptions: # pylint: disable=too-many-nested-blocks
exception_re = exception["regex"]
exception_tools = exception["tools"]
exception_globs = []
if "globs" in exception:
exception_globs = exception["globs"]
try:
compiled_re: Pattern[str] = re.compile(exception_re)
except re.error:
logging.warning(
"Invalid regular expression in exception: %s", exception_re
)
continue
for tool, tool_issues in list(issues.items()):
to_remove = []
if exception_tools == "all" or tool in exception_tools:
for issue in tool_issues:
if exception_globs:
for pattern in exception_globs:
if fnmatch.fnmatch(issue.filename, pattern):
match: Optional[Match[str]] = compiled_re.match(
issue.message
)
if match:
to_remove.append(issue)
else:
match_re: Optional[Match[str]] = compiled_re.match(
issue.message
)
if match_re:
to_remove.append(issue)
issues[tool] = [
issue for issue in tool_issues if issue not in to_remove
]
return issues
def filter_nolint(self, issues: Dict[str, List[Issue]]) -> Dict[str, List[Issue]]:
"""Filter out lines that have an explicit NOLINT on them.
Sometimes the tools themselves don't properly filter these out if there is a
complex macro or something.
"""
for tool, tool_issues in list(issues.items()):
warning_printed: bool = False
to_remove: List[Issue] = []
for issue in tool_issues:
if not os.path.isabs(issue.filename):
if not warning_printed:
self.print_exception_warning(tool)
warning_printed = True
continue
try:
with open(issue.filename, encoding="utf-8") as fid:
try:
lines = fid.readlines()
except UnicodeDecodeError as exc:
logging.warning(
"Could not read %s: %s", issue.filename, exc
)
continue
except FileNotFoundError as exc:
logging.warning("Could not read %s: %s", issue.filename, exc)
continue
if len(lines) <= 0:
continue
line_number = int(issue.line_number) - 1
if line_number < len(lines) and "NOLINT" in lines[line_number]:
to_remove.append(issue)
issues[tool] = [issue for issue in tool_issues if issue not in to_remove]
return issues
def filter_issues(
self, package: Package, issues: Dict[str, List[Issue]]
) -> Dict[str, List[Issue]]:
"""Filter issues based on exceptions list."""
exceptions = self.get_exceptions(package)
if exceptions["file"]:
issues = self.filter_file_exceptions(package, exceptions["file"], issues)
if exceptions["message_regex"]:
issues = self.filter_regex_exceptions(exceptions["message_regex"], issues)
issues = self.filter_nolint(issues)
return issues
@classmethod
def print_exception_warning(cls, tool: str) -> None:
"""Print warning about exception not being applied for an issue.
Warning will only be printed once per tool.
"""
logging.warning(
"[WARNING] File exceptions not available for %s tool "
"plugin due to lack of absolute paths for issues.",
tool,
)
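# Sketch of the YAML layout this class consumes (inferred from the keys accessed
# above; package names and patterns are illustrative):
#
#   ignore_packages:
#     - legacy_pkg
#   global:
#     exceptions:
#       file:
#         - tools: all
#           globs: ["*/third_party/*"]
#       message_regex:
#         - tools: [cppcheck]
#           regex: "unusedFunction"
#   packages:
#     my_pkg:
#       exceptions:
#         file:
#           - tools: [pylint]
#             globs: ["*/generated/*"]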
|
sscpac/statick
|
statick_tool/exceptions.py
|
exceptions.py
|
py
| 10,625 |
python
|
en
|
code
| 66 |
github-code
|
6
|
71226766908
|
import unittest
from typing import Optional
from unittest import TestCase
from parameterized import parameterized
from robotlib.mathutils import Clipper, LinearExtrapolator
class TestClipper(TestCase):
@parameterized.expand([
# [min_value, max_value, x, expected_y]
[-5, 10, 0, 0],
[-5, 10, -4, -4],
[-5, 10, -5, -5],
[-5, 10, -6, -5],
[None, 10, -6, -6],
[-5, 10, 9, 9],
[-5, 10, 10, 10],
[-5, 10, 11, 10],
[-5, None, 11, 11],
])
def test_clip(
self,
min_value: Optional[float],
max_value: Optional[float],
x: float,
expected_y: float
) -> None:
clipper = Clipper(min_value, max_value)
actual_y = clipper.clip(x)
self.assertAlmostEqual(expected_y, actual_y)
def test_call(self) -> None:
clipper = Clipper(-2, 5)
x = 10
clip_result = clipper.clip(x)
call_result = clipper(x)
self.assertEqual(clip_result, call_result)
def test_get_min_value(self) -> None:
clipper = Clipper(3, 10)
actual = clipper.min_value
self.assertEqual(3, actual)
def test_set_min_value__good_value(self) -> None:
clipper = Clipper(None, 10)
clipper.min_value = 5
actual = clipper.min_value
self.assertEqual(5, actual)
def test_set_min_value__bad_value__raise_ValueError(self) -> None:
clipper = Clipper(5, 10)
with self.assertRaises(ValueError) as assert_context:
clipper.min_value = 20
self.assertEqual(
'Min value (20) cannot be greater than max value (10).',
str(assert_context.exception)
)
def test_get_max_value(self) -> None:
clipper = Clipper(3, 10)
actual = clipper.max_value
self.assertEqual(10, actual)
def test_set_max_value__good_value(self) -> None:
clipper = Clipper(5, None)
clipper.max_value = 10
actual = clipper.max_value
self.assertEqual(10, actual)
def test_set_max_value__bad_value__raise_ValueError(self) -> None:
clipper = Clipper(5, 10)
with self.assertRaises(ValueError) as assert_context:
clipper.max_value = 0
self.assertEqual(
'Max value (0) cannot be greater than min value (5).',
str(assert_context.exception)
)
class TestLinearExtrapolator(TestCase):
@parameterized.expand([
# [x, expected_y]
[0, 0],
# - Positive x
[1.0, -2],
[5.0, -10],
[9.5, -19],
[10.0, -20],
[10.5, -20],
# - Negative x
[-1, 2],
[-4.5, 9],
[-5.0, 10],
[-5.5, 10]
])
def test_extrapolate(
self,
x: float,
expected_y: float
) -> None:
extrapolator = LinearExtrapolator(
x0=1,
y0=-2,
x1=5,
y1=-10,
min_output=-20,
max_output=10
)
actual_y = extrapolator.extrapolate(x)
self.assertAlmostEqual(expected_y, actual_y)
if __name__ == '__main__':
unittest.main()
|
austin-bowen/robotlib
|
test/python/robotlib_tests/test_mathutils.py
|
test_mathutils.py
|
py
| 3,217 |
python
|
en
|
code
| 0 |
github-code
|
6
|
33526660433
|
# Create a program where the user types any expression that uses parentheses.
# The application must check whether the parentheses in the given expression
# are opened and closed in the correct order.
balance = 0
valid = True
expressão = input("Digite a expressão:")
for c in expressão:
    if c == "(":
        balance += 1
    elif c == ")":
        balance -= 1
        if balance < 0:  # a ')' appeared before its matching '('
            valid = False
            break
if valid and balance == 0:
    print("Sua expressão está válida!")
else:
    print("Sua expressão está errada!")
|
cauavsb/python
|
mundo-3-py/ex12.py
|
ex12.py
|
py
| 470 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
74221859388
|
# Thanks to Ethan (@sherbondy) for the awesome idea of using CSG!
# Much slower than the other version, but it uses like 1/3 of the geometry
# Refactored version. slightly slower but more readable.
import bpy
import mathutils
bpy.ops.object.select_all(action='DESELECT')
pos = bpy.context.scene.cursor_location
bpy.ops.mesh.primitive_cube_add(view_align=False, enter_editmode=False)
iterator = bpy.context.active_object
iterator.name = 'Iterator'
bpy.ops.mesh.primitive_cube_add(view_align=False, enter_editmode=False)
menger = bpy.context.active_object
menger.name = 'MengerSponge'
def apply_modifier():
bpy.ops.object.modifier_add(type='BOOLEAN')
bpy.context.object.modifiers["Boolean"].operation = 'DIFFERENCE'
bpy.context.object.modifiers["Boolean"].object = bpy.data.objects["Iterator"]
bpy.ops.object.modifier_apply(apply_as='DATA', modifier="Boolean")
max_depth = 3
def cycle(array):
new_array = []
for i in range(len(array)):
new_array.append(array[(i+1)%len(array)])
return new_array
for depth in range (max_depth):
for i in range(3**depth):
for j in range(3**depth):
scale = [1.01, 1/3**(depth+1), 1/3**(depth+1)]
location = [0, -1+1/3**depth+2*i/3**depth, -1+1/3**depth+2*j/3**depth]
for k in range(3):
iterator.scale = scale
iterator.location = pos + mathutils.Vector(location)
apply_modifier()
scale = cycle(scale)
location = cycle(location)
bpy.ops.object.select_all(action='DESELECT')
iterator.select = True
bpy.ops.object.delete()
menger.select = True
|
elfakyn/Blender-iterated-fractals
|
menger_csg2.py
|
menger_csg2.py
|
py
| 1,646 |
python
|
en
|
code
| 0 |
github-code
|
6
|
811951116
|
# Top View of Binary Tree
# Given a pointer to the root of a binary tree, print the top view of the binary tree.
# The tree as seen from the top the nodes, is called the top view of the tree.
# For example :
# 1
# \
# 2
# \
# 5
# / \
# 3 6
# \
# 4
# Top View : 1 2 5 6
from collections import deque
def topView(root):
    # BFS over (node, horizontal distance) pairs; the first node seen at each
    # horizontal distance is the one visible from the top.
    q = deque([(root, 0)])
    output = {}
while q:
node, horizontalDistance = q.popleft()
if node:
if horizontalDistance not in output:
output[horizontalDistance] = node.info
if node.left:
q.append((node.left, horizontalDistance-1))
if node.right:
q.append((node.right, horizontalDistance+1))
for i in sorted(output):
print(output[i], end = " ")
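
# --- Minimal usage sketch (added for illustration). The HackerRank harness
# normally supplies the Node class; the stand-in below only assumes the
# attributes topView() actually touches: info, left, right. ---
class _Node:
    def __init__(self, info):
        self.info, self.left, self.right = info, None, None

if __name__ == '__main__':
    # Build the sample tree from the problem statement above.
    n = {k: _Node(k) for k in (1, 2, 3, 4, 5, 6)}
    n[1].right = n[2]; n[2].right = n[5]
    n[5].left = n[3]; n[5].right = n[6]; n[3].right = n[4]
    topView(n[1])  # prints: 1 2 5 6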
|
Saima-Chaity/Leetcode
|
Tree/TopViewOfBinaryTree.py
|
TopViewOfBinaryTree.py
|
py
| 882 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8397560124
|
import copy
import numpy as np
from scipy.spatial.distance import cdist
class Server:
def __init__(self, parameters):
super().__init__()
self.k = parameters['kernel_size']
self.alpha = 1.0
self.d_out = parameters['d_out']
# centers, spreads, w and b can be broadcast to clients
self.centers, self.std = None, None
self.w = np.random.randn(self.k, self.d_out)
self.b = np.random.randn(1, self.d_out)
@staticmethod
def _sort_centers(centers):
"""
        Sort the centers according to their distance from the zero vector.
        Note that this function does not consider the direction of the centers; this should be changed.
:param centers:
:return: sorted centers & index
"""
tmp_centers = copy.deepcopy(centers)
distance = np.sum(tmp_centers ** 2, axis=1)
sorted_index = np.argsort(distance)
tmp_centers = tmp_centers[sorted_index, :]
return tmp_centers, sorted_index
@staticmethod
def _dist(Mat1, Mat2):
"""
        Reimplementation of MATLAB's Euclidean distance function `dist`.
        :param Mat1: matrix 1, M x N
        :param Mat2: matrix 2, N x R
        :return: Mat3, M x R
"""
Mat2 = Mat2.T
return cdist(Mat1, Mat2)
def average(self, selected_clients):
stack_c, stack_w, stack_b, stack_s = [], [], [], []
num_data = 0
for i, client in enumerate(selected_clients):
tmp_c, tmp_w, tmp_b, tmp_s = client.compute_update()
nk = client.data_size()
num_data += nk
if i == 0:
stack_c, stack_w, stack_b, stack_s = nk * tmp_c, nk * tmp_w, \
nk * tmp_b, nk * tmp_s
else:
# stack_c = np.vstack((stack_c, tmp_c))
stack_c += nk * tmp_c
stack_w += nk * tmp_w
stack_b += nk * tmp_b
stack_s += nk * tmp_s
# k_means_c = KMeans(n_clusters=self.k).fit(stack_c)
# self.centers = k_means_c.cluster_centers_
self.centers = stack_c / num_data
self.centers, tmp_index = self._sort_centers(self.centers)
# self.w, self.b, self.std = stack_w / num_data, stack_b / num_data, stack_s / num_data
self.w, self.b, self.std = stack_w[tmp_index] / num_data, stack_b / num_data, stack_s[tmp_index] / num_data
def predict(self, test_x):
N = test_x.shape[0]
TestDistance = self._dist(self.centers, test_x.T)
TestSpreadMat = np.tile(self.std.reshape(-1, 1), (1, N))
TestHiddenOut = np.exp(-(TestDistance / TestSpreadMat) ** 2).T
Test_y = np.dot(TestHiddenOut, self.w) + self.b
return Test_y
def broadcast(self):
tmp_c, tmp_w, tmp_b, tmp_s = copy.deepcopy(self.centers), copy.deepcopy(self.w), \
copy.deepcopy(self.b), copy.deepcopy(self.std)
return {'centers': tmp_c, 'w': tmp_w, 'b': tmp_b, 'std': tmp_s}
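
# --- Hedged usage sketch (added for illustration; not part of the original
# module). It exercises only the RBF prediction path with synthetic centers
# and spreads; the federated averaging path additionally needs client objects
# exposing compute_update() and data_size(), which are not modeled here. ---
if __name__ == '__main__':
    server = Server({'kernel_size': 4, 'd_out': 1})
    server.centers = np.random.randn(4, 2)          # normally set by average()
    server.std = np.abs(np.random.randn(4)) + 0.1   # normally set by average()
    test_x = np.random.randn(8, 2)
    print(server.predict(test_x).shape)             # -> (8, 1)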
|
VeritasXu/FDD-EA
|
libs/Server.py
|
Server.py
|
py
| 3,052 |
python
|
en
|
code
| 7 |
github-code
|
6
|
33350944365
|
#%%
import os
import gc
import sys
import pickle
from time import time
import datatable as dt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
import torch.nn as nn
from torch import optim
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from torch.autograd import Variable
import torch.optim as optim
from torch.optim import Optimizer
from torch.optim.lr_scheduler import (CosineAnnealingWarmRestarts, CyclicLR, OneCycleLR,
ReduceLROnPlateau)
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
sns.set()
DEFAULT_FIG_WIDTH = 20
sns.set_context("paper", font_scale=1.2)
# WORKSPACE_FOLDER=/home/scao/Documents/kaggle-riiid-test
# PYTHONPATH=${WORKSPACE_FOLDER}:${WORKSPACE_FOLDER}/sakt:${WORKSPACE_FOLDER}/transformer
HOME = os.path.abspath(os.path.join('.', os.pardir))
print(HOME, '\n\n')
# HOME = "/home/scao/Documents/kaggle-riiid-test/"
MODEL_DIR = os.path.join(HOME, 'model')
DATA_DIR = os.path.join(HOME, 'data')
sys.path.append(HOME)
from utils import *
get_system()
from sakt import *
from iter_env import *
# from transformer_optimizers import *
# %%
'''
TO-DO:
feature encoding:
1 how to handle the 'previous answers correct' feature when it is not uniformly predicted
2 question tags
'''
DEBUG = True
TRAIN = False
PREPROCESS = False
TEST_SIZE = 0.05
NUM_SKILLS = 13523 # number of problems
MAX_SEQ = 180
ACCEPTED_USER_CONTENT_SIZE = 4
EMBED_SIZE = 128
NUM_HEADS = 8
BATCH_SIZE = 64
VAL_BATCH_SIZE = 2048
DEBUG_TEST_SIZE = 2500
DROPOUT = 0.1
SEED = 1127
get_seed(SEED)
'''
Columns placeholder and preprocessing params
'''
CONTENT_TYPE_ID = "content_type_id"
CONTENT_ID = "content_id"
TARGET = "answered_correctly"
USER_ID = "user_id"
PRIOR_QUESTION_TIME = 'prior_question_elapsed_time'
PRIOR_QUESTION_EXPLAIN = 'prior_question_had_explanation'
TASK_CONTAINER_ID = "task_container_id"
TIMESTAMP = "timestamp"
ROW_ID = 'row_id'
FILLNA_VAL = 14_000 # for prior question elapsed time, rounded average in train
TIME_SCALING = 1000 # scaling down the prior question elapsed time
TRAIN_COLS = [TIMESTAMP, USER_ID, CONTENT_ID, CONTENT_TYPE_ID, TARGET]
TRAIN_COLS_NEW = [TIMESTAMP, USER_ID, CONTENT_ID, CONTENT_TYPE_ID,
TARGET, PRIOR_QUESTION_TIME, PRIOR_QUESTION_EXPLAIN]
TRAIN_DTYPES = {TIMESTAMP: 'int64',
USER_ID: 'int32',
CONTENT_ID: 'int16',
CONTENT_TYPE_ID: 'bool',
TARGET:'int8',
PRIOR_QUESTION_TIME: np.float32,
PRIOR_QUESTION_EXPLAIN: 'boolean'}
if DEBUG:
NROWS_TEST = 25_000
NROWS_TRAIN = 5_000_000
NROWS_VAL = 500_000
else:
NROWS_TEST = 250_000
NROWS_TRAIN = 50_000_000
NROWS_VAL = 2_000_000
# %%
if PREPROCESS:
with timer("Loading train from parquet"):
train_df = pd.read_parquet(os.path.join(DATA_DIR, 'cv2_train.parquet'),
columns=list(TRAIN_DTYPES.keys())).astype(TRAIN_DTYPES)
valid_df = pd.read_parquet(os.path.join(DATA_DIR, 'cv2_valid.parquet'),
columns=list(TRAIN_DTYPES.keys())).astype(TRAIN_DTYPES)
if DEBUG:
train_df = train_df[:NROWS_TRAIN]
valid_df = valid_df[:NROWS_VAL]
with timer("Processing train"):
train_group = preprocess(train_df)
valid_group = preprocess(valid_df, train_flag=2)
else:
with open(os.path.join(DATA_DIR, 'sakt_group_cv2.pickle'), 'rb') as f:
group = pickle.load(f)
train_group, valid_group = train_test_split(group, test_size = TEST_SIZE, random_state=SEED)
print(f"valid users: {len(valid_group.keys())}")
print(f"train users: {len(train_group.keys())}")
# %%
class SAKTDataset(Dataset):
def __init__(self, group, n_skill, max_seq=MAX_SEQ):
super(SAKTDataset, self).__init__()
self.samples, self.n_skill, self.max_seq = {}, n_skill, max_seq
self.user_ids = []
for i, user_id in enumerate(group.index):
content_id, answered_correctly = group[user_id]
if len(content_id) >= ACCEPTED_USER_CONTENT_SIZE:
if len(content_id) > self.max_seq:
total_questions = len(content_id)
last_pos = total_questions // self.max_seq
for seq in range(last_pos):
index = f"{user_id}_{seq}"
self.user_ids.append(index)
start = seq * self.max_seq
end = (seq + 1) * self.max_seq
self.samples[index] = (content_id[start:end],
answered_correctly[start:end])
if len(content_id[end:]) >= ACCEPTED_USER_CONTENT_SIZE:
index = f"{user_id}_{last_pos + 1}"
self.user_ids.append(index)
self.samples[index] = (content_id[end:],
answered_correctly[end:])
else:
index = f'{user_id}'
self.user_ids.append(index)
self.samples[index] = (content_id, answered_correctly)
def __len__(self):
return len(self.user_ids)
def __getitem__(self, index):
user_id = self.user_ids[index]
content_id, answered_correctly = self.samples[user_id]
seq_len = len(content_id)
content_id_seq = np.zeros(self.max_seq, dtype=int)
answered_correctly_seq = np.zeros(self.max_seq, dtype=int)
if seq_len >= self.max_seq:
content_id_seq[:] = content_id[-self.max_seq:]
answered_correctly_seq[:] = answered_correctly[-self.max_seq:]
else:
content_id_seq[-seq_len:] = content_id
answered_correctly_seq[-seq_len:] = answered_correctly
target_id = content_id_seq[1:] # question till the current one
label = answered_correctly_seq[1:]
x = content_id_seq[:-1].copy() # question till the previous one
# encoded answers till the previous one
x += (answered_correctly_seq[:-1] == 1) * self.n_skill
return x, target_id, label
# %%
train_dataset = SAKTDataset(train_group, n_skill=NUM_SKILLS, max_seq=MAX_SEQ)
train_dataloader = DataLoader(train_dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True)
val_dataset = SAKTDataset(valid_group, n_skill=NUM_SKILLS, max_seq=MAX_SEQ)
val_dataloader = DataLoader(val_dataset,
batch_size=VAL_BATCH_SIZE,
shuffle=False)
print(f"Length of the train loader is {len(train_dataloader)}")
#%%
sample_batch = next(iter(train_dataloader))
sample_batch[0].shape, sample_batch[1].shape, sample_batch[2].shape
# %%
'''
Debugging
'''
content_id, answered_correctly = train_group[train_group.keys()[0]]
seq_len = len(content_id)
content_id_seq = np.zeros(MAX_SEQ, dtype=int)
answered_correctly_seq = np.zeros(MAX_SEQ, dtype=int)
if seq_len >= MAX_SEQ:
content_id_seq[:] = content_id[-MAX_SEQ:]
answered_correctly_seq[:] = answered_correctly[-MAX_SEQ:]
else:
content_id_seq[-seq_len:] = content_id
answered_correctly_seq[-seq_len:] = answered_correctly
# question till the current one, should be the same with sample_batch[1][0]
target_id = content_id_seq[1:]
# whether answered correctly, same with sample_batch[2][0]
label = answered_correctly_seq[1:] #
x = content_id_seq[:-1].copy() # question till the previous one
# encoded answers till the previous question
# if a user answered correctly it is added 13523
x += (answered_correctly_seq[:-1] == 1) * NUM_SKILLS
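# %% Sanity check of the skill encoding above (illustrative addition):
# a correctly answered question id q is encoded as q + NUM_SKILLS, an incorrect
# one stays at q, which is why the embedding table has 2 * NUM_SKILLS + 1 rows.
_q = np.array([10, 20, 30])
_correct = np.array([1, 0, 1])
assert ((_q + (_correct == 1) * NUM_SKILLS) == np.array([13533, 20, 13553])).all()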
# %% Merging questions
# questions_df = pd.read_csv(os.path.join(DATA_DIR, 'questions.csv'))
# questions_df['part'] = questions_df['part'].astype(np.int32)
# questions_df['bundle_id'] = questions_df['bundle_id'].astype(np.int32)
# train_debug = pd.merge(train_df, questions_df[['question_id', 'part']],
# left_on = 'content_id', right_on = 'question_id', how = 'left')
# %% model
class FFN(nn.Module):
def __init__(self, state_size = MAX_SEQ,
forward_expansion = 1,
bn_size=MAX_SEQ - 1,
dropout=0.2):
super(FFN, self).__init__()
self.state_size = state_size
self.lr1 = nn.Linear(state_size, forward_expansion * state_size)
self.relu = nn.ReLU()
self.bn = nn.BatchNorm1d(bn_size)
self.lr2 = nn.Linear(forward_expansion * state_size, state_size)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.relu(self.lr1(x))
x = self.bn(x)
x = self.lr2(x)
return self.dropout(x)
def future_mask(seq_length):
future_mask = (np.triu(np.ones([seq_length, seq_length]), k = 1)).astype('bool')
return torch.from_numpy(future_mask)
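# Illustrative addition: future_mask(3) blocks exactly the strictly upper
# triangle, i.e. position (i, j) is True (masked) whenever j > i.
assert future_mask(3).tolist() == [[False, True, True],
                                   [False, False, True],
                                   [False, False, False]]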
class TransformerBlock(nn.Module):
def __init__(self, embed_dim,
heads = 8,
dropout = DROPOUT,
forward_expansion = 1):
super(TransformerBlock, self).__init__()
self.multi_att = nn.MultiheadAttention(embed_dim=embed_dim,
num_heads=heads, dropout=dropout)
self.dropout = nn.Dropout(dropout)
self.layer_normal = nn.LayerNorm(embed_dim)
self.ffn = FFN(embed_dim,
forward_expansion = forward_expansion,
dropout=dropout)
self.layer_normal_2 = nn.LayerNorm(embed_dim)
def forward(self, value, key, query, att_mask):
att_output, att_weight = self.multi_att(value, key, query, attn_mask=att_mask)
att_output = self.dropout(self.layer_normal(att_output + value))
att_output = att_output.permute(1, 0, 2)
# att_output: [s_len, bs, embed] => [bs, s_len, embed]
x = self.ffn(att_output)
x = self.dropout(self.layer_normal_2(x + att_output))
return x.squeeze(-1), att_weight
class Encoder(nn.Module):
def __init__(self, n_skill, max_seq=100,
embed_dim=128,
dropout = DROPOUT,
forward_expansion = 1,
num_layers=1,
heads = 8):
super(Encoder, self).__init__()
self.n_skill, self.embed_dim = n_skill, embed_dim
self.embedding = nn.Embedding(2 * n_skill + 1, embed_dim)
self.pos_embedding = nn.Embedding(max_seq - 1, embed_dim)
self.e_embedding = nn.Embedding(n_skill+1, embed_dim)
self.layers = nn.ModuleList([TransformerBlock(embed_dim, heads=heads,
forward_expansion = forward_expansion) for _ in range(num_layers)])
self.dropout = nn.Dropout(dropout)
def forward(self, x, question_ids):
device = x.device
x = self.embedding(x)
pos_id = torch.arange(x.size(1)).unsqueeze(0).to(device)
pos_x = self.pos_embedding(pos_id)
x = self.dropout(x + pos_x)
x = x.permute(1, 0, 2) # x: [bs, s_len, embed] => [s_len, bs, embed]
e = self.e_embedding(question_ids)
e = e.permute(1, 0, 2)
for layer in self.layers:
att_mask = future_mask(e.size(0)).to(device)
x, att_weight = layer(e, x, x, att_mask=att_mask)
            x = x.permute(1, 0, 2)  # back to [s_len, bs, embed] for the next layer
        x = x.permute(1, 0, 2)  # final output: [bs, s_len, embed]
return x, att_weight
class SAKTModel(nn.Module):
def __init__(self,
n_skill,
max_seq=MAX_SEQ,
embed_dim=EMBED_SIZE,
dropout = DROPOUT,
forward_expansion = 1,
enc_layers=1,
heads = NUM_HEADS):
super(SAKTModel, self).__init__()
self.encoder = Encoder(n_skill,
max_seq,
embed_dim,
dropout,
forward_expansion,
num_layers=enc_layers,
heads=heads)
self.pred = nn.Linear(embed_dim, 1)
def forward(self, x, question_ids):
x, att_weight = self.encoder(x, question_ids)
x = self.pred(x)
return x.squeeze(-1), att_weight
class TestDataset(Dataset):
def __init__(self, samples, test_df, n_skill, max_seq=100):
super(TestDataset, self).__init__()
self.samples = samples
self.user_ids = [x for x in test_df["user_id"].unique()]
self.test_df = test_df
self.n_skill, self.max_seq = n_skill, max_seq
def __len__(self):
return self.test_df.shape[0]
def __getitem__(self, index):
test_info = self.test_df.iloc[index]
user_id = test_info['user_id']
target_id = test_info['content_id']
content_id_seq = np.zeros(self.max_seq, dtype=int)
answered_correctly_seq = np.zeros(self.max_seq, dtype=int)
if user_id in self.samples.index:
content_id, answered_correctly = self.samples[user_id]
seq_len = len(content_id)
if seq_len >= self.max_seq:
content_id_seq = content_id[-self.max_seq:]
answered_correctly_seq = answered_correctly[-self.max_seq:]
else:
content_id_seq[-seq_len:] = content_id
answered_correctly_seq[-seq_len:] = answered_correctly
x = content_id_seq[1:].copy()
x += (answered_correctly_seq[1:] == 1) * self.n_skill
questions = np.append(content_id_seq[2:], [target_id])
return x, questions
# %% Loading models
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'\nUsing device: {device}')
model_file = os.path.join(MODEL_DIR, 'sakt_seq_180_auc_0.7689.pth')
model = SAKTModel(n_skill=NUM_SKILLS,
max_seq=MAX_SEQ,
embed_dim=EMBED_SIZE,
forward_expansion=1,
enc_layers=1,
heads=NUM_HEADS,
dropout=DROPOUT)
n_params = get_num_params(model)
print(f"Current model has {n_params} parameters.")
model = model.to(device)
model.load_state_dict(torch.load(model_file, map_location=device))
#%% Loading mock test set
with timer("Loading private simulated test set"):
    all_test_df = pd.read_parquet(os.path.join(DATA_DIR, 'cv2_valid.parquet'))
all_test_df = all_test_df[:DEBUG_TEST_SIZE]
all_test_df['answer_correctly_true'] = all_test_df[TARGET]
# %% mock test
predicted = []
def set_predict(df):
predicted.append(df)
# reload all user group for cv2
with timer('loading cv2'):
with open(os.path.join(DATA_DIR, 'sakt_group_cv2.pickle'), 'rb') as f:
group = pickle.load(f)
#%%
def iter_env_run(all_test_df, n_iter=1):
'''
Running mock test for n_iter iterations using tito's iter_env simulator and cv2_train user group.
'''
iter_test = Iter_Valid(all_test_df, max_user=1000)
prev_test_df = None
prev_group = None
batch_user_ids = []
# reload all user group for cv2
with open(os.path.join(DATA_DIR, 'sakt_group_cv2.pickle'), 'rb') as f:
group = pickle.load(f)
for _ in range(n_iter):
test_df, sample_prediction_df = next(iter_test)
if prev_test_df is not None:
prev_test_df['answered_correctly'] = eval(test_df['prior_group_answers_correct'].iloc[0])
prev_test_df = prev_test_df[prev_test_df.content_type_id == False]
prev_group = prev_test_df[['user_id', 'content_id', 'answered_correctly']]\
.groupby('user_id').apply(lambda r: (
r['content_id'].values,
r['answered_correctly'].values))
for prev_user_id in prev_group.index:
prev_group_content = prev_group[prev_user_id][0]
prev_group_answered_correctly = prev_group[prev_user_id][1]
if prev_user_id in group.index:
group[prev_user_id] = (np.append(group[prev_user_id][0], prev_group_content),
np.append(group[prev_user_id][1], prev_group_answered_correctly))
else:
group[prev_user_id] = (prev_group_content, prev_group_answered_correctly)
if len(group[prev_user_id][0]) > MAX_SEQ:
new_group_content = group[prev_user_id][0][-MAX_SEQ:]
new_group_answered_correctly = group[prev_user_id][1][-MAX_SEQ:]
group[prev_user_id] = (new_group_content, new_group_answered_correctly)
prev_test_df = test_df.copy()
test_df = test_df[test_df.content_type_id == False]
batch_user_ids.append(test_df.user_id.unique())
test_dataset = TestDataset(group, test_df, NUM_SKILLS, max_seq=MAX_SEQ)
test_dataloader = DataLoader(test_dataset, batch_size=len(test_df), shuffle=False)
item = next(iter(test_dataloader))
x = item[0].to(device).long()
target_id = item[1].to(device).long()
with torch.no_grad():
output, _ = model(x, target_id)
output = torch.sigmoid(output)
preds = output[:, -1]
test_df['answered_correctly'] = preds.cpu().numpy()
set_predict(test_df.loc[test_df['content_type_id'] == 0,
['row_id', 'answered_correctly']])
return test_df, output, item, group, prev_group, batch_user_ids
# %%
# user_common = set(batch_user_ids[0])
# for k in range(1, len(batch_user_ids)):
# user_common = user_common.intersection(set(batch_user_ids[k]))
# %%
'''
Current set up, cv2_valid first 25k rows
first 4 batches common user_id: 143316232, 1089397940, 1140583044 (placeholder user?)
'''
print(group[1089397940])
#%% iter number 1
test_df, output, item, group_updated, _, _ = iter_env_run(all_test_df, n_iter=1)
u_idx_loc = test_df.index.get_loc(test_df[test_df.user_id==1089397940].index[0])
print(f"local index of user 1089397940: {u_idx_loc}", '\n')
print(test_df.iloc[u_idx_loc], '\n')
print(item[1][u_idx_loc, -12:]) # user 1089397940 first batch in example_test (question sequence)
print(item[0][u_idx_loc, -12:]) # user 1089397940 first batch in example_test: skill sequence = prev_content_id * (correct or not) + 13523
print(output[u_idx_loc, -12:].cpu().numpy(),'\n') # user 1089397940 probability prediction
print(group_updated[1089397940][0][:12]) # in the first iteration the length is only 11
print(group_updated[1089397940][1][:12])
#%% iter number 2
test_df, output, item, group_updated, _, _ = iter_env_run(all_test_df, n_iter=2)
u_idx_loc = test_df.index.get_loc(test_df[test_df.user_id==1089397940].index[0])
print(f"local index of user 1089397940: {u_idx_loc}", '\n')
print(test_df.iloc[u_idx_loc], '\n')
print(item[1][u_idx_loc, -12:]) # user 1089397940 2nd batch in example_test (question sequence)
print(item[0][u_idx_loc, -12:]) # user 1089397940 2nd batch in example_test: skill sequence = prev_content_id * (correct or not) + 13523
print(output[u_idx_loc, -12:].cpu().numpy(),'\n') # user 1089397940 probability prediction
print(group_updated[1089397940][0][:12]) # in the 2nd iteration the length is only 11
print(group_updated[1089397940][1][:12])
# %%
test_df, output, item, group_updated, _, _ = iter_env_run(all_test_df, n_iter=3)
u_idx_loc = test_df.index.get_loc(test_df[test_df.user_id==1089397940].index[0])
print(f"local index of user 1089397940: {u_idx_loc}", '\n')
print(test_df.iloc[u_idx_loc], '\n')
print(item[1][u_idx_loc, -12:]) # user 1089397940 3rd batch in example_test (question sequence)
print(item[0][u_idx_loc, -12:]) # user 1089397940 3rd batch in example_test: skill sequence = prev_content_id * (correct or not) + 13523
print(output[u_idx_loc, -12:].cpu().numpy(),'\n') # user 1089397940 probability prediction
print(group_updated[1089397940][0][:12]) # in the 3rd iteration the length is only 11
print(group_updated[1089397940][1][:12])
# %%
|
scaomath/kaggle-riiid-test
|
sakt/debug_sakt_2.py
|
debug_sakt_2.py
|
py
| 20,260 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7874634889
|
import numpy as np
import json
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
class LatentConverter():
def __init__(self, places_path):
place_list = []
place2idx = {}
with open(places_path, encoding='utf-8') as f:
for p_idx, place in enumerate(json.load(f)):
place = tuple(place)
place2idx[place] = p_idx
place_list.append(place)
self.places_path = places_path
self.place_len = len(place_list)
self.place_list = place_list
self.place2idx = place2idx
def get_normalized(self, reviewer_path):
with open(reviewer_path, encoding='utf-8') as f:
review_list = json.load(f)
stars_dict = {}
for review in review_list:
place = (review['place'], review['address'])
if place in self.place2idx:
stars_dict[place] = review['stars']
if len(stars_dict)==0:
print('Reviewer "' + reviewer_path + '":\n\tNone of the reviews overlaps with "' + self.places_path + '"')
mean, std = 0, 0
else:
stars_list = list(stars_dict.values())
mean, std = np.mean(stars_list), np.std(stars_list)
normalized = np.zeros(self.place_len)
for p_idx, place in enumerate(self.place_list):
if std==0:
normalized[p_idx] = 0
else:
normalized[p_idx] = (stars_dict[place]-mean)/std if place in stars_dict else 0
return normalized
def gen_proj(self, guides_normalized, latent_dim=20):
u, s, vh = np.linalg.svd(guides_normalized)
print('gen_proj: The first', latent_dim, 'latent dimensions are taken.')
print('Singular values:\n', s)
# guides_smoothed = np.matmul(np.matmul(u[:,:20], np.diag(s[:20])), vh[:20,:])
# print('Smoothed:\n', guides_smoothed)
# # for debug use
# guides_normalized = guides_normalized.transpose()
# guides_smoothed = guides_smoothed.transpose()
return u[:,:latent_dim].transpose()
def get_latent(self, proj, reviewer_path):
return np.matmul(proj, self.get_normalized(reviewer_path))
@staticmethod
def visualize(guides_latent, dims=(0,1,2)):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X = guides_latent[dims[0],:]
Y = guides_latent[dims[1],:]
Z = guides_latent[dims[2],:]
ax.scatter(X, Y, Z)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
class ReviewReader():
def __init__(self, guides_path, reviews_path):
self.reviews_path = reviews_path
# guide_list[<number>] = <guide ID>
guide_list = []
with open(guides_path, encoding='utf-8') as f:
for i in f:
guide_list.append(i[:-1])
self.guide_len = len(guide_list)
# guide_file_dict[<guide ID>] = <file name>
guide_file_dict = {}
for i in os.listdir(reviews_path):
guide_file_dict[i.split()[2].split('=')[1].split('.')[0]] = i
        # file_names[<number>] = <file name>
        file_names = []
        for num in range(len(guide_list)):
            file_names.append(guide_file_dict[guide_list[num]])
        self.file_name = file_names
def getPath(self, guideNum):
return os.path.join(self.reviews_path, self.file_name[guideNum])
def getReviews(self, guideNum):
with open(self.getPath(guideNum), encoding='utf-8') as f:
review_list = json.load(f)
return review_list
if __name__ == '__main__':
# initialize with a list of places
lc = LatentConverter('places.json')
# get reviews
rr = ReviewReader('guides.txt', '../data/reviews_guide')
# generate all normalized vectors
guides_normalized = np.zeros((lc.place_len, rr.guide_len))
for g_idx in range(rr.guide_len):
guides_normalized[:,g_idx] = lc.get_normalized(rr.getPath(g_idx))
# generate projection matrix
proj = lc.gen_proj(guides_normalized, latent_dim=20)
# project guides
guides_latent = np.matmul(proj, guides_normalized)
# save for future use
np.save('guides_normalized.npy', guides_normalized)
np.save('proj.npy', proj)
np.save('guides_latent.npy', guides_latent)
def example_get_latent():
# initialize with a list of places
lc = LatentConverter('places.json')
# load projection matrix
proj = np.load('proj.npy')
# get latent vector
return lc.get_latent(proj, '../data/reviews_guide/reviews_guide length=117 guideID=107297262039687837164.json')
def example_visualize():
# visualize the 0th, 1st, and 2nd latent dimension
LatentConverter.visualize(np.load('guides_latent.npy'), dims=(0,1,2))
|
e841018/DinnerSelector
|
preprocessing/LSI.py
|
LSI.py
|
py
| 4,229 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37078019949
|
end_points = ['.', '?', '!'] # sentence delimiters (period, question mark, exclamation mark)

def cnt_end_points(str): # counts the number of sentences in a string
    cnt = 0
    for _ in str:
        if _ in end_points:
            cnt += 1
    return cnt

def is_name(name): # returns a boolean: True if the word has no digits, starts with an uppercase letter and the rest is lowercase (i.e. looks like a name)
    length = len(name)
    if length == 1:
        if name[0].isupper():
            return True
        else:
            return False
    elif length >= 2:
        if name.isalpha() and name[0].isupper() and name[1::].islower():
            return True
        else:
            return False
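
# Quick sanity checks for is_name (illustrative additions, not part of the
# original solution):
assert is_name("Alice") and is_name("A")
assert not is_name("alice") and not is_name("A1") and not is_name("AliCe")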
T = int(input())
for i in range(1, T+1):
    N = int(input())
    text = ''
    while(cnt_end_points(text) != N): # keep reading input until the string contains N sentence delimiters
        input_words = input()
        text += (input_words + ' ')
    sentences = []
    sentence = ''
    for j in text: # walk the string, splitting it into sentences
        if j in end_points:
            sentences.append(sentence)
            sentence = ''
        else:
            sentence += j
    cnt_name_li = [] # empty list cnt_name_li to hold the number of names per sentence
    for k in sentences: # iterate over the sentences
        cnt_name = 0 # count the names in each sentence
        words = k.split() # list words of the sentence split on whitespace
        for l in words: # iterate over the words
            if is_name(l): # if the word is a name, count it
                cnt_name += 1
        cnt_name_li.append(cnt_name) # store the sentence's name count in the list
print(f'#{i}', *cnt_name_li)
|
JeonggonCho/algorithm
|
SWEA/D3/7675. 통역사 성경이/통역사 성경이.py
|
통역사 성경이.py
|
py
| 1,756 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
1501952186
|
from numpy import *
import matplotlib.pyplot as plt
import functions
data = loadtxt("kplr006603043-2011145075126_slc.tab")
data_new = []
for i in data:
if i[0] > 1691.5 and i[0] < 1693.1:
data_new.append(i)
data = array(data_new)
mag = data[:,3]
# convert magnitudes to relative flux with the Pogson relation,
# flux = 10**(-delta_mag/2.5), referenced to the median magnitude
flux = 10**((mag-median(mag))/-2.5)
o = open("lc2.dat","w")
output = [data[:,0],flux,data[:,4]]
output = transpose(output)
functions.write_table(output,o)
o.close()
plt.scatter(data[:,0],flux)
plt.show()
|
chelseah/LCFIT
|
primitive_lcfit_scripts/data/formatlc.py
|
formatlc.py
|
py
| 467 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35612174500
|
def countPrimes(n):
    """Return the number of primes below n together with the list of those primes."""
    prime_list = []
    for i in range(2, n):
        is_prime = True
        for j in range(2, int(i ** 0.5) + 1):  # trial division up to sqrt(i) is enough
            if i % j == 0:
                is_prime = False
                break
        if is_prime:
            prime_list.append(i)
    return len(prime_list), prime_list

print(countPrimes(499979))
|
Anusuya-Balakrishnan/Python-Basic-Programs
|
leetCode/31.5.2022/prime.py
|
prime.py
|
py
| 423 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36649330794
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 08 08:15:31 2020
@author: alexanderfalk
"""
from copy import deepcopy
from itertools import permutations
# from solverNN import NearestNeightbour
import time
import solution
import sys
class TabuSearchTwoRoutes:
def __init__(self, instance, solution, time_limit=60):
self.solution = solution
self.instance = instance
self.time_limit = time_limit
self.tabu_list_solutions = []
self.tabu_list_solutions_max_size = 10
self.tabu_list_edges = []
self.tabu_list_edges_max_size = 25
self.threshold = 5
self.threshold_counter = 0
self.stopping_criteria = 5 # Number of times the threshold has been "breached"
def construct(self, start_time):
return self.algorithm(start_time)
def algorithm(self, start_time):
"""
        Tabu Search is an algorithm which takes memory into consideration and tries to avoid getting stuck in a local optimum.
        It is termed a metaheuristic algorithm, meaning it has a stopping criterion (such as time or epochs).
        The algorithm works as follows:
        1. Create a solution by using a constructive algorithm (such as Nearest Neighbour) to establish a starting point
        2. Store the solution as the best solution
        3. Start making changes in the solution by swapping, deleting, adding or other modifications.
        4. When a modification has been executed, the edge connecting two nodes is added to a Tabu List. The list contains edges which cannot be touched for n epochs. The length of the Tabu List is arbitrary.
        5. Execute steps 2-4 repeatedly until the stopping criterion is met.
        If no solution improves within the threshold, a bad move will be made. The number of bad moves has a limit, which is the stopping criterion for the algorithm.
"""
local_best = self.solution
stopping_counter = 0
candidate = deepcopy(local_best)
# Stopping criteria: threshold
while stopping_counter < self.stopping_criteria:
if time.time() - start_time > self.time_limit:
return local_best
# Improving a Candidate Solution
self.tabu_list_edges = []
# Enumerate the candidate solution for swapping
for i in range(len(candidate.routes)-1):
for j in range(1, len(candidate.routes)):
if i != j:
candidate.routes[i], candidate.routes[j] = self.improvement(candidate.routes[i], candidate.routes[j])
if len(self.tabu_list_edges) > self.tabu_list_edges_max_size:
self.tabu_list_edges.pop(0) # Remove entry which have been a tabu for n iterations
if candidate.cost() < local_best.cost() and candidate not in self.tabu_list_solutions:
local_best = candidate
candidate = deepcopy(local_best)
else:
stopping_counter += 1
self.tabu_list_solutions.append(candidate)
if len(self.tabu_list_solutions) > self.tabu_list_solutions_max_size:
self.tabu_list_solutions.pop(0)
return local_best
def distance(self, i, j):
return self.instance.pre_distance(i, j)
def improvement(self, route1, route2):
for i in range(1, len(route1) - 1):
A, B, C = route1[i-1], route1[i], route1[i+1]
for j in range(1, len(route2) - 1):
D, E, F = route2[j-1], route2[j], route2[j+1]
if self.distance(A, B) + self.distance(B, C) > self.distance(A, E) + self.distance(E, C) and self.distance(D, E) + self.distance(E, F) > self.distance(D, B) + self.distance(B, F):
if (C, E) not in self.tabu_list_edges and (E, C) not in self.tabu_list_edges and (B, F) not in self.tabu_list_edges and (F, B) not in self.tabu_list_edges:
route1[i] = E
route2[j] = B
if sum([self.instance.nodes[point]['rq'] for point in route1]) <= self.instance.capacity and sum([self.instance.nodes[point]['rq'] for point in route2]) <= self.instance.capacity:
self.tabu_list_edges.append((E, C))
self.tabu_list_edges.append((C, E))
self.tabu_list_edges.append((B, F))
self.tabu_list_edges.append((F, B))
self.threshold_counter = 0
return route1, route2
else:
route1[i] = B
route2[j] = E
else:
self.threshold_counter += 1
if self.threshold_counter == self.threshold / 2:
route1[i] = E
route2[j] = B
if sum([self.instance.nodes[point]['rq'] for point in route1]) <= self.instance.capacity and sum([self.instance.nodes[point]['rq'] for point in route2]) <= self.instance.capacity:
break
else:
route1[i] = B
route2[j] = E
else:
self.threshold_counter += 1
if self.threshold_counter == self.threshold:
route1[i] = E
route2[j] = B
if sum([self.instance.nodes[point]['rq'] for point in route1]) <= self.instance.capacity and sum([self.instance.nodes[point]['rq'] for point in route2]) <= self.instance.capacity:
self.threshold_counter = 0
return route1, route2
else:
route1[i] = B
route2[j] = E
return route1, route2
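
# --- Hedged usage sketch (added for illustration). The repo's real instance
# and solution classes are not shown here; the stand-ins below stub only the
# members this solver actually uses (pre_distance, nodes, capacity, routes,
# cost), so treat this as a sketch rather than the project's API. ---
if __name__ == '__main__':
    class _Instance:
        nodes = {i: {'rq': 1} for i in range(6)}
        capacity = 10
        def pre_distance(self, i, j):
            return abs(i - j)

    class _Solution:
        def __init__(self, routes):
            self.routes = routes
        def cost(self):
            inst = _Instance()
            return sum(inst.pre_distance(r[k], r[k + 1])
                       for r in self.routes for k in range(len(r) - 1))

    start = _Solution([[0, 3, 1, 0], [0, 2, 4, 0]])
    solver = TabuSearchTwoRoutes(_Instance(), start, time_limit=2)
    print(solver.construct(time.time()).cost())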
|
AlexanderFalk/2020_Project01_CS_HA
|
src/solverTabu2.py
|
solverTabu2.py
|
py
| 6,084 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18932883260
|
# === Task 25 ===
# Write a program that prints the primes from the interval <1, N>. To check whether a number is a prime, create a function with one parameter.
N = 20
def je_prvocislo(x):
    for i in range(2, x):  # <2, x)
        if x % i == 0:
            return False
    return True  # it is a prime only if it is not divisible by anything other than 1 and x
for i in range(2, N+1):  # 1 is not a prime
    if je_prvocislo(i):
        print(i)
|
Plasmoxy/MaturitaInformatika2019
|
ulohyPL/u25.py
|
u25.py
|
py
| 427 |
python
|
sk
|
code
| 2 |
github-code
|
6
|
41509625545
|
import services.Color as Color
import time
def gradient_mode(config):
color_from = (0, 255, 30)
color_to = (0, 80, 20)
while True:
array = []
color_generator = Color.animate_color(color_from, color_to, 18)
for y in range(config["matrix_height"]):
current_color = next(color_generator)
array.append([])
for x in range(config["matrix_width"]):
array[y].append(current_color)
time.sleep(0.1)
yield array
|
memchenko/x-max-tree
|
modes/gradientMode.py
|
gradientMode.py
|
py
| 508 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15826756401
|
import pandas, main
from db import db
from config import Style
def add_item_to_list():
while True:
conn = db.create_connection()
cursor = conn.cursor()
main.speak("What is the name of the Item?")
ITEM = main.listen().capitalize()
main.speak("What is the category of the item?")
CATEGORY = main.listen().capitalize()
cursor.execute(f"INSERT INTO shopping_list (Item, Category) VALUES ('{ITEM}', '{CATEGORY}');")
conn.commit()
print(f"Item added to list.")
main.speak("Item added to shopping list.")
main.speak("Would you like to add another item?")
run_again = main.listen().lower()
while run_again not in ("yes", "no"):
main.speak("Please say yes or no.")
main.speak("Would you like to add another item?")
run_again = main.listen().lower()
if run_again == "yes":
add_item_to_list()
elif run_again == "no":
break
def update_item_in_list():
while True:
conn = db.create_connection()
cursor = conn.cursor()
print(pandas.read_sql_query("SELECT * from shopping_list", conn))
shop = cursor.execute("SELECT * from shopping_list;")
for item in shop:
main.speak(f"Item ID: {item[0]} Item: {item[1]}")
main.speak("What is the I.D. of the item?")
update_item = main.listen()
cursor.execute(f"SELECT * FROM shopping_list WHERE ItemID = {update_item};")
        result = cursor.fetchone()  # single row: (ItemID, Item, Category)
main.speak("Would you like to update the Item or the Category?")
x = main.listen().upper()
while x not in ('ITEM', 'CATEGORY'):
main.speak("Please state if you would like to update the Item or the Category.")
print("Please select 'ITEM' or 'CATEGORY'")
x = main.listen().upper()
if x == "ITEM":
main.speak("What is the new name for the item?")
ITEM = main.listen().capitalize()
CATEGORY = result[2]
elif x == "CATEGORY":
ITEM = result[1]
main.speak(f"What is the new category for the item {ITEM}")
CATEGORY = main.listen().capitalize()
cursor.execute(f"UPDATE shopping_list SET Item = '{ITEM}', Category = '{CATEGORY}' WHERE ItemID = {update_item};")
conn.commit()
print(f"Item updated.")
main.speak("Item updated.")
break
def delete_item_from_list():
while True:
conn = db.create_connection()
cursor = conn.cursor()
print(pandas.read_sql_query("SELECT * from shopping_list", conn))
shop = cursor.execute("SELECT * from shopping_list;")
for item in shop:
main.speak(f"Item ID: {item[0]} Item: {item[1]}")
main.speak("What is the I.D. of the item?")
update_item = main.listen()
cursor.execute(f"DELETE FROM shopping_list WHERE ItemID = {update_item};")
conn.commit()
print("Item deleted")
main.speak("Item deleted.")
break
def get_shopping_list():
while True:
conn = db.create_connection()
cursor = conn.cursor()
sql_query = cursor.execute("SELECT * from shopping_list;")
results = sql_query.fetchall()
if len(results) == 0:
print("No items in list")
main.speak("There are no items in the shopping list.")
break
else:
for item in results:
print(f"{Style.YELLOW}ITEM:{Style.RESET} {item[1]} {Style.YELLOW}CATEGORY:{Style.RESET} {item[2]}")
main.speak(f"Item: {item[1]} Category: {item[2]}")
break
|
PhantomCaboose/Python-Virtual_Assistant
|
features/shopping_list.py
|
shopping_list.py
|
py
| 3,773 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8167059453
|
import sys
import os
import numpy as np
from numpy.linalg import svd
from numpy.linalg import eig
from skimage import io
from skimage import transform
face_folder = sys.argv[1]
if face_folder[len(face_folder)-1] != '/':
face_folder += "/"
target = sys.argv[2]
image_data = []
for file in os.listdir(face_folder):
filepath = os.path.join(face_folder , file)
img = io.imread(filepath)
img = np.array(img)
img = img.flatten()
image_data.append(img)
image_data = np.array(image_data)
image_data_mean = np.mean(image_data, axis=0)
x = image_data - image_data_mean  # center the faces before PCA
# columns of U are the eigenfaces (principal components of the face set)
U, s, V = np.linalg.svd(x.T, full_matrices=False)
target = face_folder + target
ori_img = io.imread(target)
ori_img = np.array(ori_img)
ori_img = np.reshape(ori_img, (1, 1080000))  # flatten one 600x600x3 image
ori_img = ori_img - image_data_mean
weights = np.dot(ori_img, U[:, :4])  # project onto the first 4 eigenfaces
recon = image_data_mean + np.dot(weights, U[:, :4].T)  # reconstruct from those weights
recon = np.reshape(recon, (600, 600, 3))
recon -= np.min(recon)  # rescale to [0, 255] before saving as uint8
recon /= np.max(recon)
recon = (recon*255).astype(np.uint8)
io.imsave('reconstruction.png', recon)
|
muachilin/Machine-Learning
|
hw4/pca.py
|
pca.py
|
py
| 1,052 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5995774734
|
#!/usr/bin/python
from pylab import *
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from pyhdf.SD import SD,SDC
import healpy as hp
import os
def modisfile(month,day=None,year=2016,datadir="/home/kawahara/exomap/sot/data/aux/"):
import pandas as pd
if day is None:
dat=pd.read_csv(os.path.join(datadir,"modisM.txt"),delimiter=",")
mask=dat["YEAR"]==2016
mask2=dat["MONTH"]==month
ext=(dat[mask&mask2])
hdffile=(ext["FILE"])
out=str(hdffile.tolist()[0])
day=None
else:
dat=pd.read_csv(os.path.join(datadir,"modisE.txt"),delimiter=",")
try:
mask=dat["YEAR"]==2016
mask2=dat["MONTH"]==month
mask3=dat["DAY"]==day
ext=(dat[mask&mask2&mask3])
hdffile=(ext["FILE"])
out=str(hdffile.tolist()[0])
except:
mask=dat["YEAR"]==2016
mask2=dat["MONTH"]==month
ext=(dat[mask&mask2])
i=np.argmin(np.abs(ext["DAY"]-day))
hdffile=(ext["FILE"])
out=str(hdffile.tolist()[i])
day=ext["DAY"].tolist()[i]
print("Nearest Day is used day=",day)
return out,month,day
def read_cloud(hdffile,N=1):
print(hdffile)
f = SD(hdffile,SDC.READ)
v=f.select("Cloud_Fraction_Mean_Mean")
vr=v.attributes()["valid_range"]
fv=v.attributes()["_FillValue"]
ao=v.attributes()["add_offset"]
sf=v.attributes()["scale_factor"]
a=np.array(v[::N,::N],dtype=float)
a[a==fv] = None
a=(a-ao)*sf
return a
def to_healpix(a,nside=16):
Nphi,Ntheta=np.shape(a)
npix=hp.nside2npix(nside)
hmap=np.zeros(npix)
for ipix in range(0,npix):
theta,phi=hp.pix2ang(nside,ipix)
itheta=int(theta/np.pi*180)
iphi=int(phi/np.pi*180)
hmap[ipix]=float(a[itheta,iphi])
return hmap
if __name__ == "__main__":
import os
import plotdymap
import matplotlib
fontsize=16
matplotlib.rcParams.update({'font.size':fontsize})
thetaE,phiE=plotdymap.bound_earth("/home/kawahara/exomap/sot/data/earth_boundary.npz")
year=2016
month=5
day=11
hdffile,month,day=modisfile(month,day,year=year)
hdfdir="/home/kawahara/exomap/data/modis/MYD08"
hdffile=os.path.join(hdfdir,hdffile)
a=read_cloud(hdffile,N=1)
hmap=to_healpix(a,nside=16)
hp.mollview(hmap, title=str(year)+"-"+str(month)+"-"+str(day),flip="geo",cmap=plt.cm.pink,min=0.5,max=1.0)
hp.projplot(thetaE, phiE,".",c="#66CC99")
hp.projtext(-60,-25,"A",coord="G",lonlat=True,color="cyan",fontsize=26) #amazon
# hp.projtext(-130,30,"B",coord="G",lonlat=True,color="cyan",fontsize=26) #north america
plt.savefig("cf"+str(year)+"_"+str(month)+"_"+str(day)+".pdf")
plt.show()
|
HajimeKawahara/sot
|
src/sot/dymap/analyzeMYD.py
|
analyzeMYD.py
|
py
| 2,825 |
python
|
en
|
code
| 4 |
github-code
|
6
|
9205961554
|
from .MS_Celeb_1M import MSCeleb
from .glintasia import GlintAsia
__data_factory = {
# image classification models
'Ms_celeb': MSCeleb,
'GlintAsia': GlintAsia,
'default': MSCeleb,
}
def get_names():
return list(__data_factory.keys())
def init_database(name, *args, **kwargs):
if name not in get_names():
raise KeyError('Unknown model: {}'.format(name))
return __data_factory[name](*args, **kwargs)
|
heroinlin/face_recognition
|
datasets/__init__.py
|
__init__.py
|
py
| 439 |
python
|
en
|
code
| 7 |
github-code
|
6
|
4495959966
|
from Funcionarios import Funcionarios
from Fornecedores import Fornecedores
func = Funcionarios()
forn = Fornecedores()
class CateProd:
def __init__(self):
pass
def DadosCategorias(self):
arq = open('Categorias.txt','r')
lin_cate = arq.readlines()
lis_cate = []
for x in lin_cate:
dados = x.split('{/')
lis_cate.append(dados)
arq.close()
return lis_cate
def DadosProdutos(self):
criar = open('Produtos.txt','a')
criar.close()
arq = open('Produtos.txt','r')
lin_prod = arq.readlines()
lis_prod = []
for x in lin_prod:
dados = x.split('{/')
lis_prod.append(dados)
arq.close()
if len(lis_prod) == 0:
lis_prod = 'vazio'
return lis_prod
def ArqCat_VazioouNao(self):
criar = open('Categorias.txt','a')
criar.close()
arq = open('Categorias.txt','r')
s = arq.readlines()
lis = []
for x in s:
dados = x.split('{/')
lis.append(dados)
if len(lis) == 0:
return 0
else:
return 1
arq.close()
def ArqPro_VazioouNao(self):
criar = open('Produtos.txt','a')
criar.close()
arq = open('Produtos.txt','r')
s = arq.readlines()
lis = []
for x in s:
dados = x.split('{/')
lis.append(dados)
if len(lis) == 0:
return 0
else:
return 1
arq.close()
def Cat_CadastradoOuNao(self,UserCat):
arq = open('Categorias.txt','r')
s = arq.readlines()
lis = []
for x in s:
dados = x.split('{/')
lis.append(dados)
limite = 0
for p in range(len(lis)):
if str(UserCat) != lis[p][0]:
limite += 1
if limite == len(lis):
resposta = "not"
return resposta
else:
resposta ="yes"
return resposta
arq.close()
def DescCat_CadasOuNao(self,UserDescCat):
arq = open('Categorias.txt','r')
s = arq.readlines()
lis = []
for x in s:
dados = x.split('{/')
lis.append(dados)
limite = 0
for p in range(len(lis)):
if UserDescCat != lis[p][1]:
limite += 1
if limite == len(lis):
resposta = "not"
return resposta
else:
resposta ="yes"
return resposta
arq.close()
    # DELETE IN CASE OF ERROR - STARTS HERE
def RelaCompra(self,CNPJ,MAT,COD,QUA,PRE):
arq = open('Relatorio Compras.txt','a')
arq.write(str(CNPJ) + '{/' + str(MAT) + '{/' + str(COD) + '{/' + str(QUA) + '{/' + str(PRE) + '\n')
        arq.close()
def CNPJCadastrado(self,CNPJ,lis):
limite = 0
for p in range(len(lis)):
if CNPJ != lis[p][0]:
limite += 1
if limite == len(lis):
return 0
if CNPJ == lis[p][0]:
return 1
def MatriculaCadastrada(self,MAT,lista):
limite = 0
for p in range(len(lista)):
if str(MAT) != lista[p][0]:
limite += 1
if limite == len(lista):
return 0
if str(MAT) == lista[p][0]:
return 1
    # ENDS HERE
def DescProd_CadasOuNao(self,UserDescProd):
arq = open('Produtos.txt','r')
s = arq.readlines()
lis = []
for x in s:
dados = x.split('{/')
lis.append(dados)
limite = 0
for p in range(len(lis)):
if UserDescProd != lis[p][1]:
limite += 1
if limite == len(lis):
resposta = "not"
return resposta
else:
resposta ="yes"
return resposta
arq.close()
def Prod_CadastradoOuNao(self,UserProd):
arq = open('Produtos.txt','r')
s = arq.readlines()
lis = []
for x in s:
dados = x.split('{/')
lis.append(dados)
limite = 0
for p in range(len(lis)):
if str(UserProd) != lis[p][0]:
limite += 1
if limite == len(lis):
return "not"
else:
return "yes"
arq.close()
def AdicionarCategorias(self):
codigo = int(input("Informe o codigo da categoria: "))
ListaVazia = CateProd.ArqCat_VazioouNao(self)
if ListaVazia == 0 or ListaVazia == 1:
arq = open('Categorias.txt','a')
decisao = CateProd.Cat_CadastradoOuNao(self,codigo)
if decisao == 'yes':
print("Codigo cadastrado, favor escolher uma nova opcao no Menu\n")
CateProd.MenuCategorias(self)
elif decisao == 'not':
descricao = input("Descricao da Categoria: ")
dec = CateProd.DescCat_CadasOuNao(self,str(descricao))
if dec == 'yes':
print("Descriao cadastrada, favor escolher uma nova opcao no Menu\n")
CateProd.MenuCategorias(self)
else:
arq.write(str(codigo) + '{/' + descricao + '\n')
arq.close()
def AdicionarProdutos(self):
CNPJ = input("MENU DE COMPRAS\nCNPJ do fornecedor: ")
if len(str(CNPJ)) < 13:
CNPJ = (str(0)) * (13 - (len(str(CNPJ)))) + str(CNPJ)
val = CateProd.CNPJCadastrado(self, CNPJ, forn.DadosFornecedores())
if val == 0:
print("CNPJ nao cadastrado, informe um CPNJ valido\n")
CateProd.MenuProdutos(self)
else:
mat = int(input("Matricula do vendedor: "))
val2 = CateProd.MatriculaCadastrada(self, mat, func.DadosFuncionarios())
if val2 == 0:
print("Matricula nao cadastrada\n ")
CateProd.MenuProdutos(self)
else:
                # IN CASE OF ERROR, DELETE FROM ABOVE DOWN TO HERE
cp = int(input("Informe o codigo do produto: "))
ListaVazia = CateProd.ArqPro_VazioouNao(self)
if ListaVazia == 0 or ListaVazia == 1:
arq = open('Produtos.txt','a')
decisao = CateProd.Prod_CadastradoOuNao(self,cp)
if decisao == 'yes':
print("Codigo cadastrado, favor escolher uma nova opcao no Menu\n")
CateProd.MenuProdutos(self)
else:
dp = input("Descricao do produto: ")
dec = CateProd.DescProd_CadasOuNao(self,str(dp))
if dec == 'yes':
print("Descriao cadastrada, favor escolher uma nova opcao no Menu\n")
CateProd.MenuCategorias(self)
cap = input("Codigo da Categoria: ")
fp = input("Foto do produto: ")
emaxp = int(input("Estoque maximo do produto: "))
eminp = int(input("Estoque minimo do produto: "))
while eminp >= emaxp:
eminp = int(input("O valor do estoque minimo precisa ser menor que estoque maximo\nEstoque Minimo: "))
calcomp = float(input("Valor de compra: "))
valvenp = float(input("Valor de venda: "))
while valvenp <= calcomp:
valvenp = float(input("O valor de venda do produto precisa ser maior que o valor de compra\nValor de venda do produto: "))
arq.write(str(cp) + '{/' + dp + '{/' + cap + '{/' + fp + '{/' + str(emaxp) + '{/' + str(eminp) + '{/' + str(valvenp) + '{/' + str(calcomp) + '\n')
prect = emaxp * calcomp
CateProd.RelaCompra(self, CNPJ, mat, cp, emaxp, prect)
arq.close()
def CodCate_Editar_Remover(self,cod_cate_user,lis_cate):
limite = 0
for x in range(len(lis_cate)):
if cod_cate_user != lis_cate[x][0]:
limite += 1
if limite == len(lis_cate):
Pos_Cod_Cate = 'Categoria nao cadastrada'
return Pos_Cod_Cate
else:
Pos_Cod_Cate = x
return Pos_Cod_Cate
def CodProd_Editar_Remover(self,cod_prod_user,lis_prod):
limite = 0
for x in range(len(lis_prod)):
if cod_prod_user != lis_prod[x][0]:
limite += 1
if limite == len(lis_prod):
Pos_Cod_Prod = "Produto nao cadastrado"
return Pos_Cod_Prod
else:
Pos_Cod_Prod = x
return Pos_Cod_Prod
def Consultar_Categorias(self,cod,lis_cate):
limite = 0
for p in range(len(lis_cate)):
if cod != lis_cate[p][0]:
limite += 1
if limite == len(lis_cate):
return("Categoria nao cadastrada")
if cod == lis_cate[p][0]:
d = p
return("\nCodigo: %s\nCategoria: %s" % (lis_cate[d][0],lis_cate[d][1]))
def Consultar_Produtos(self,cod,lis_prod):
limite = 0
for p in range(len(lis_prod)):
if cod != lis_prod[p][0]:
limite += 1
if limite == len(lis_prod):
return("Produto nao cadastrada")
if cod == lis_prod[p][0]:
d = p
return("\nCodigo: %s\nCategoria: %s\nFoto: %s\nDescricao: %s\nEstoque Maximo: %s\nEstoque Minimo: %s\nValor base venda: %s\nValor base compra: %s" % (lis_prod[d][0],lis_prod[d][1],lis_prod[d][2],lis_prod[d][3],lis_prod[d][4],lis_prod[d][5],lis_prod[d][6],lis_prod[d][7]))
def Cat_EditarRemover(self,decisao,Pos_Cod_Cate,lis_cate):
criar = open('Categorias.txt','a')
        criar.close()
if decisao.lower() == "remover":
if Pos_Cod_Cate == 'Categoria nao cadastrada':
print(Pos_Cod_Cate)
else:
del lis_cate[Pos_Cod_Cate]
elif decisao.lower() == 'editar':
if Pos_Cod_Cate == 'Categoria nao cadastrada':
print(Pos_Cod_Cate)
else:
newdesc = input("Descricao: ")
lis_cate[Pos_Cod_Cate][1] = newdesc +'\n'
return lis_cate
def Prod_EditarRemover(self,decisao,Pos_Cod_Prod,lis_prod):
criar = open('Produtos.txt','a')
        criar.close()
if decisao.lower() == "remover":
if Pos_Cod_Prod == "Produto nao cadastrado":
print(Pos_Cod_Prod)
else:
del lis_prod[Pos_Cod_Prod]
elif decisao.lower() == 'editar':
if Pos_Cod_Prod == "Produto nao cadastrado":
print(Pos_Cod_Prod)
else:
newcate = input("Codigo da Categoria: ")
newfoto = input("Foto: ")
newdesc = input("Descricao: ")
newestmax = int(input("Estoque Maximo: "))
newestmin = int(input("Estoque Minimo: "))
while newestmin >= newestmax:
newestmin = int(input("O valor do estoque minimo precisa ser menor que estoque maximo\nEstoque Minimo: "))
newvc = float(input("Valor compra: "))
newvv = float(input("Valor venda: "))
while newvv <= newvc:
newvv = float(input("O valor de venda do produto precisa ser maior que o valor de compra\nValor de venda do produto: "))
lis_prod[Pos_Cod_Prod][1] = newcate
lis_prod[Pos_Cod_Prod][2] = newfoto
lis_prod[Pos_Cod_Prod][3] = newdesc
lis_prod[Pos_Cod_Prod][4] = str(newestmax)
lis_prod[Pos_Cod_Prod][5] = str(newestmin)
lis_prod[Pos_Cod_Prod][6] = str(newvv)
lis_prod[Pos_Cod_Prod][7] = str(newvc)
return lis_prod
def Cat_SalvarDadosEditados(self,lis_cate):
arq = open('Categorias.txt','w')
lista = lis_cate
for x in lista:
p = '{/'.join(x)
arq.write(p)
arq.close()
def Prod_SalvarDadosEditados(self,lis_prod):
arq = open('Produtos.txt','w')
lista = lis_prod
for x in lista:
p = '{/'.join(x)
arq.write(p)
arq.close()
def ChamadaMenu(self):
print("1 - Trabalhar com Categorias\n2 - Trabalhar com Produtos\n")
user = int(input("Escolha sua opcao: "))
if user == 1:
CateProd.MenuCategorias(self)
elif user == 2:
CateProd.MenuProdutos(self)
else:
print("Opcao invalida")
CateProd.ChamadaMenu(self)
def MenuCategorias(self):
print("\nMENU DE CATEGORIAS\n\n1 - Adicionar\n2 - Consultar\n3 - Editar\n4 - Remover\n5 - Voltar\n")
user = int(input("Escolha sua opcao: "))
if user == 1:
CateProd.AdicionarCategorias(self)
CateProd.MenuCategorias(self)
elif user == 2:
codcat = input("Informe o codigo da categoria que deseja consultar: ")
resultado = CateProd.ArqCat_VazioouNao(self)
if resultado == 0:
print("Nao existem dados de categorias, adicione um categoria no menu de categorias")
CateProd.MenuCategorias(self)
else:
print(CateProd.Consultar_Categorias(self,codcat, CateProd.DadosCategorias(self)))
CateProd.MenuCategorias(self)
elif user == 3:
CODCATeditar = input("Informe o codigo da categoria que deseja editar: ")
resultado = CateProd.ArqCat_VazioouNao(self)
if resultado == 0:
print("Nao existem dados de categorias, adicione um categoria no menu de categorias")
                CateProd.MenuCategorias(self)
else:
decisao = CateProd.Cat_EditarRemover(self,'editar',CateProd.CodCate_Editar_Remover(self,CODCATeditar,CateProd.DadosCategorias(self)),CateProd.DadosCategorias(self))
CateProd.Cat_SalvarDadosEditados(self,decisao)
CateProd.MenuCategorias(self)
elif user == 4:
user = input("Informe o codigo da categoria que deseja remover: ")
resultado = CateProd.ArqCat_VazioouNao(self)
if resultado == 0:
print("Nao existem dados de categorias, adicione um categoria no menu de categorias")
CateProd.MenuCategorias(self)
else:
decisao2 = CateProd.Cat_EditarRemover(self,'remover',CateProd.CodCate_Editar_Remover(self,user,CateProd.DadosCategorias(self)),CateProd.DadosCategorias(self))
CateProd.Cat_SalvarDadosEditados(self, decisao2)
CateProd.MenuCategorias(self)
elif user == 5:
CateProd.ChamadaMenu(self)
else:
print("\nOPCAO INVALIDA\n")
CateProd.MenuCategorias(self)
def MenuProdutos(self):
print("\nMENU DE PRODUTOS\n\n1 - Adicionar\n2 - Consultar\n3 - Editar\n4 - Remover\n5 - Voltar\n")
user = int(input("Escolha sua opcao: "))
if user == 1:
CateProd.AdicionarProdutos(self)
CateProd.MenuProdutos(self)
elif user == 2:
codprod = input("Informe o codigo da produto que deseja consultar: ")
resultado = CateProd.ArqPro_VazioouNao(self)
if resultado == 0:
print("Nao existem dados de nenhum produto, adicione um produto no menu de produtos")
CateProd.MenuProdutos(self)
else:
print(CateProd.Consultar_Produtos(self,codprod, CateProd.DadosProdutos(self)))
CateProd.MenuProdutos(self)
elif user == 3:
CODPROD = input("Informe o codigo do produto que deseja editar: ")
resultado = CateProd.ArqPro_VazioouNao(self)
if resultado == 0:
print("Nao existem dados de nenhum produto, adicione um produto no menu de produtos")
CateProd.MenuProdutos(self)
else:
decisao = CateProd.Prod_EditarRemover(self,'editar',CateProd.CodProd_Editar_Remover(self,CODPROD,CateProd.DadosProdutos(self)),CateProd.DadosProdutos(self))
CateProd.Prod_SalvarDadosEditados(self,decisao)
CateProd.MenuProdutos(self)
elif user == 4:
user = input("Informe o codigo do produto que deseja editar: ")
resultado = CateProd.ArqPro_VazioouNao(self)
if resultado == 0:
print("Nao existem dados de nenhum produto, adicione um produto no menu de produtos")
CateProd.MenuProdutos(self)
else:
decisao2 = CateProd.Prod_EditarRemover(self,'remover',CateProd.CodProd_Editar_Remover(self,user,CateProd.DadosProdutos(self)),CateProd.DadosProdutos(self))
CateProd.Prod_SalvarDadosEditados(self,decisao2)
CateProd.MenuProdutos(self)
elif user == 5:
CateProd.ChamadaMenu(self)
else:
print("\nOPCAO INVALIDA\n")
CateProd.MenuProdutos(self)
|
Ander20n/Codigos-Faculdade
|
Projeto IP/CategoriasProdutos.py
|
CategoriasProdutos.py
|
py
| 17,921 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
15852362512
|
from . import client
class Mintage:
def __init__(self, URI):
self.URI = URI
def getMintageData(
self,
selfAddr : str,
prevHash : str,
tokenName : str,
tokenSymbol : str,
totalSupply : str,
decimals : int,
pledgeAmount : int,
**kwargs) -> str:
"""
Return mintage data by mintage parameters
Parameters
----------
selfAddr : str
prevHash : str
tokenName : str
tokenSymbol : str
totalSupply : str
decimals : int
pledgeAmount : int
e.g.
----
"selfAddr": "qlc_1t1uynkmrs597z4ns6ymppwt65baksgdjy1dnw483ubzm97oayyo38ertg44",
"prevHash": "758f79b656340c329cb5b11302865c5ff0b0c99fd8a268d6b8760170e33e8cd1",
"tokenName": "QM",
"tokenSymbol": "QM",
"totalSupply": "1000000000",
"decimals": 8,
"pledgeAmount": 1000000
"""
params = {
"selfAddr" : selfAddr,
"prevHash" : prevHash,
"tokenName" : tokenName,
"tokenSymbol" : tokenSymbol,
"totalSupply" : totalSupply,
"decimals" : decimals,
"pledgeAmount" : pledgeAmount,
}
        for k, v in kwargs.items():
            # allow keyword arguments to override the positional parameters
            params[k] = v
return client.Client(self.URI).post("mintage_getMintageBlock", [params])
def getMintageBlock(
self,
selfAddr : str,
prevHash : str,
tokenName : str,
tokenSymbol : str,
totalSupply : str,
decimals : int,
pledgeAmount : int,
**kwargs) -> dict:
"""
Return contract send block by mintage parameters
Parameters
----------
selfAddr : str
prevHash : str
tokenName : str
tokenSymbol : str
totalSupply : str
decimals : int
pledgeAmount : int
e.g.
----
"selfAddr": "qlc_1t1uynkmrs597z4ns6ymppwt65baksgdjy1dnw483ubzm97oayyo38ertg44",
"prevHash": "758f79b656340c329cb5b11302865c5ff0b0c99fd8a268d6b8760170e33e8cd1",
"tokenName": "QM",
"tokenSymbol": "QM",
"totalSupply": "1000000000",
"decimals": 8,
"pledgeAmount": 1000000
"""
params = {
"selfAddr" : selfAddr,
"prevHash" : prevHash,
"tokenName" : tokenName,
"tokenSymbol" : tokenSymbol,
"totalSupply" : totalSupply,
"decimals" : decimals,
"pledgeAmount" : pledgeAmount,
}
        for k, v in kwargs.items():
            # allow keyword arguments to override the positional parameters
            params[k] = v
return client.Client(self.URI).post("mintage_getMintageBlock", [params])
def getRewardBlock(
self,
Type : str,
token : str,
address : str,
balance : str,
previous : str,
link : str,
message : str,
data : str,
network : str,
storage : str,
oracle : str,
timestamp : str,
extra : str,
representative : str,
work : str,
signature : str,
**kwargs):
"""
Return contract reward block by contract send block
Parameters
----------
type : str
token : str
address : str
balance : str
previous : str
link : str
message : str
data : str
network : str
storage : str
oracle : str
timestamp : str
extra : str
representative : str
work : str
signature : str
e.g.
----
{
"type": "ContractSend",
"token": "45dd217cd9ff89f7b64ceda4886cc68dde9dfa47a8a422d165e2ce6f9a834fad",
"address": "qlc_1t1uynkmrs597z4ns6ymppwt65baksgdjy1dnw483ubzm97oayyo38ertg44",
"balance": "59999000000000000",
"previous": "758f79b656340c329cb5b11302865c5ff0b0c99fd8a268d6b8760170e33e8cd1",
"link": "de32f02da71ef2fccd06634bfe29d3a7514a1880873478382704e3edeeaff982",
"message": "0000000000000000000000000000000000000000000000000000000000000000",
"data": "RtDOi36yRn34tcH5dS2ThaV+eOeDzObdqEz883OcFX49k3CRAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA7msoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAlFNAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJRTQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
"vote": "0",
"network": "0",
"storage": "0",
"oracle": "0",
"timestamp": 1552522398,
"extra": "0000000000000000000000000000000000000000000000000000000000000000",
"representative": "qlc_1t1uynkmrs597z4ns6ymppwt65baksgdjy1dnw483ubzm97oayyo38ertg44",
"work": "0000000000000000",
"signature": "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
}
"""
params = {
"type" : Type,
"token" : token,
"address" : address,
"balance" : balance,
"previous" : previous,
"link" : link,
"message" : message,
"data" : data,
"network" : network,
"storage" : storage,
"oracle" : oracle,
"timestamp" : timestamp,
"extra" : extra,
"representative" : representative,
"work" : work,
"signature" : signature
}
        for k, v in kwargs.items():
            # allow keyword arguments (e.g. vote, povHeight) to supplement or
            # override the positional parameters above
            params[k] = v
return client.Client(self.URI).post("mintage_getRewardBlock", [params])
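
# Hedged usage sketch (illustrative only; the node endpoint is an assumed
# local QLC RPC address, and the address/hash values are the placeholders
# from the docstrings above, not live chain data):
#
# mintage = Mintage("http://127.0.0.1:9735")
# block = mintage.getMintageBlock(
#     selfAddr="qlc_1t1uynkmrs597z4ns6ymppwt65baksgdjy1dnw483ubzm97oayyo38ertg44",
#     prevHash="758f79b656340c329cb5b11302865c5ff0b0c99fd8a268d6b8760170e33e8cd1",
#     tokenName="QM", tokenSymbol="QM", totalSupply="1000000000",
#     decimals=8, pledgeAmount=1000000)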
|
realForbis/qlc-python-SDK
|
pyqlc/mintage.py
|
mintage.py
|
py
| 7,086 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19499771211
|
# -*- coding: utf-8 -*-
from typing import List
import math
class Solution:
def nearestValidPoint(self, x: int, y: int, points: List[List[int]]) -> int:
smallest_distance = math.inf
index = -1
for ix, c in enumerate(points):
cx = c[0]
cy = c[1]
if x == cx or y == cy:
di = abs(x - cx) + abs(y - cy)
if di < smallest_distance:
smallest_distance = di
index = ix
return index
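
# A quick self-check for the solution above (illustrative only; LeetCode
# supplies its own harness). Expected output: 2, then -1.
if __name__ == "__main__":
    s = Solution()
    print(s.nearestValidPoint(3, 4, [[1, 2], [3, 1], [2, 4], [2, 3], [4, 4]]))  # -> 2
    print(s.nearestValidPoint(3, 4, [[2, 3]]))  # -> -1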
# vim: autoindent tabstop=4 shiftwidth=4 expandtab softtabstop=4
|
michaeldye/mdye-python-samples
|
src/mdye_leetcode/solution_1779.py
|
solution_1779.py
|
py
| 591 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30399091692
|
from socket import *
serverName = 'localhost'
serverPort = 12000
clientSocket = socket(AF_INET, SOCK_DGRAM)
message = input('Enter your message for the extraterrestrial civilization: ')
clientSocket.sendto(message.encode(), (serverName, serverPort))
modifiedMessage, serverAddress = clientSocket.recvfrom(2048)
print(modifiedMessage.decode())
clientSocket.close()
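
# A minimal companion server sketch (an assumption, not part of the original
# exercise file): echoes each message back in upper case on the same port.
#
# from socket import *
# serverSocket = socket(AF_INET, SOCK_DGRAM)
# serverSocket.bind(('', 12000))
# while True:
#     message, clientAddress = serverSocket.recvfrom(2048)
#     serverSocket.sendto(message.decode().upper().encode(), clientAddress)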
|
AllanCFE/Redes
|
clientSocket.py
|
clientSocket.py
|
py
| 364 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29915222521
|
# -*- coding: utf-8 -*-
"""User views."""
from flask import Blueprint, render_template, request, flash, redirect, url_for
from flask_login import login_required, current_user
from food_journal.user.models import User
from food_journal.user.forms import EditProfileForm
blueprint = Blueprint("user", __name__, url_prefix="/users", static_folder="../static")
@blueprint.route("/")
@login_required
def members():
"""List members."""
return render_template("users/members.html")
@blueprint.route("/<username>")
@login_required
def profile(username):
"""Return user's profile page"""
print("here")
user = User.query.filter_by(username=username).first_or_404()
return render_template("users/profile.html", user=user)
@blueprint.route('/edit_profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
form = EditProfileForm()
if form.validate_on_submit():
current_user.update(about_me=form.about_me.data)
flash("Your changes have been saved.")
return redirect(url_for('user.edit_profile'))
elif request.method == 'GET':
form.about_me.data = current_user.about_me
return render_template('users/edit_profile.html', title='Edit Profile', form=form)
@blueprint.route('/follow/<username>')
@login_required
def follow(username):
user = User.query.filter_by(username=username).first()
if user is None:
        flash('User {} not found.'.format(username))
        return redirect(url_for('public.index'))
    if user == current_user:
        flash('You cannot follow yourself!')
        return redirect(url_for('user.profile', username=username))
current_user.follow(user)
current_user.save()
flash('You are now following {}!'.format(username))
return redirect(url_for('user.profile', username=username))
@blueprint.route('/unfollow/<username>')
@login_required
def unfollow(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('User {} not found.'.format(username))
return redirect(url_for('public.index'))
if user == current_user:
flash('You cannot unfollow yourself!')
return redirect(url_for('user.profile', username=username))
current_user.unfollow(user)
current_user.save()
flash('You are not following {}.'.format(username))
return redirect(url_for('user.profile', username=username))
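
# Hedged wiring sketch (illustrative; the project's real app factory lives
# elsewhere): the blueprint above is typically registered on the Flask app.
#
# from flask import Flask
# app = Flask(__name__)
# app.register_blueprint(blueprint)  # routes are then served under /users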
|
ariesunique/food-journal
|
food_journal/user/views.py
|
views.py
|
py
| 2,403 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8401186293
|
from cards import Cards
from Deck import Deck
class Game:
def __init__(self,deck, numhands):
self.numhands = numhands
self.deck = deck
self.hands = []
self.gethand(self.deck,0,5)
def gethand (self,deck,start,hands):
allhands = []
print ("number of hands is " +str(self.numhands) )
i=0
while (i<self.numhands):
print (str(i) + " is the value of i and start is "+str(start))
nexthand = deck.shownumcards(start,start+5)
print (nexthand)
allhands = allhands+nexthand
i+=1
start+=5
#print(deck.showcards)
#print ('Your hand is '+ self.hands[0])
    def deal (self,numhands):
        for i in range(numhands):
            print (self.deck)
|
tennisha/fourcardpoker
|
Game.py
|
Game.py
|
py
| 817 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19682806766
|
#!/usr/bin/env python
#Splits a cif file containing multiple structures into separate cif files for each structure
# call as: python cif_splitter.py <cif_file>
import sys
to_split=sys.argv[1]
with open(to_split, 'r') as ifile:
for line in ifile:
if '_database_code_depnum_ccdc_archive' in line:
print(line.split()[1] +' ' + line.split()[2])
with open("strucuture_ccdc_{0}".format(line.split()[2].replace("'", ".cif")), 'w') as ofile:
ofile.write('data_ \n')
stop=False
while stop==False:
try:
cif_line=next(ifile)
except StopIteration:
print("Reached EOF, stopping..")
break
if '########' not in cif_line:
ofile.write(cif_line)
else:
stop=True
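
# Example invocation (assuming a multi-structure CIF exported from the CCDC
# sits in the working directory):
#
#   $ python cif_splitter.py structures.cif
#
# Each '_database_code_depnum_ccdc_archive' entry then yields one
# 'structure_ccdc_<number>.cif' file.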
|
EduardoSchiavo/utilities
|
cif_splitter.py
|
cif_splitter.py
|
py
| 928 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26336185630
|
from annotated_text import annotated_text
import Scripts.Utilities as Utils
def __show_single_announcement(st, announcement):
st.markdown(f"""
<div style="background-color: rgba(250, 202, 43, 0.2); color: "rgb(148, 124, 45)"; padding: 10px; border-radius: 10px; border: 1px solid rgba(250, 202, 43, 0.2)">
<h1 style="text-align: center; font-weight: 600; color: rgb(148, 124, 45)"> {announcement["title"]} </h1>
<h3 style="text-align: center; font-size: 15px; font-weight: 400; color: rgba(49, 51, 63, 0.6);"> Posted on : {announcement["date"]} </h3>
<br>
<h4 style="text-align: center; font-family: 'Source Code Pro', monospace"> {announcement["content"]} </h4>
</div>
""", unsafe_allow_html=True)
Utils.add_space(st)
def show_single_announcement(st, announcement):
Utils.website_heading(
st,
content=announcement["title"],
symbol=".",
font_size=40,
color="orange",
text_align="left"
)
st.caption("Posted on : " + announcement["date"])
st.text(announcement["content"])
Utils.add_space(st)
def show_announcements(st, announcements):
for announcement in announcements.keys():
show_single_announcement(st, announcements[announcement])
def get_announcements_data():
return Utils.get_firebase_data("announcements")
def show_page_heading(st):
Utils.website_heading(
st,
content="Announcements",
symbol="📢",
font_size=60,
color="orange",
text_align="center"
)
Utils.add_space(st)
def show_announcements_page(st):
announcements = get_announcements_data()
show_page_heading(st)
show_announcements(st, announcements)
|
PeaPals/docnets
|
Pages/announcements_page.py
|
announcements_page.py
|
py
| 1,745 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22903751015
|
"""
module with classes Player and Enemy
"""
import random
from module2.classes.exceptions import GameOver, EnemyDown, RestartGame
from module2.classes.settings import DEFAULT_LIVES_COUNT, ALLOWED_COMMANDS
class Enemy(object):
"""
class Enemy
"""
def __init__(self):
self.level = 1
self.lives = 1
@staticmethod
def select_attack():
"""
random choice of attack
"""
return random.randint(1, 3)
def decrease_lives(self, player):
"""
life reduction function, depends on Player-obj, created in game function
"""
self.lives -= 1
if self.lives == 0:
raise EnemyDown
class Player(object):
"""
class Player
"""
def __init__(self, name):
self.name = name
self.lives = DEFAULT_LIVES_COUNT
self.score = 0
self.allowed_attacks = ['1', '2', '3']
self.level = 1
self.allowed_commands = ALLOWED_COMMANDS
def command(self, command):
"""
function that defines actions depending on the entered command
"""
if command == self.allowed_commands[0]:
raise RestartGame
if command == self.allowed_commands[1]:
print('Allowed Commands:\nSTART - to restart Game\n'
'HELP - to watch allowed commands\n'
'SCORE - find out the score\n'
'EXIT - Quit Game')
elif command == self.allowed_commands[2]:
print(
'Your score: %s' % self.score
)
elif command == self.allowed_commands[3]:
raise KeyboardInterrupt
@staticmethod
def fight(attack, defense):
"""
function in which the result of the battle is calculated
"""
if attack == defense:
score = 0
elif (attack == 1 and defense == 3) or\
(attack == 2 and defense == 1) or\
(attack == 3 and defense == 2):
score = 1
else:
score = -1
return score
def decrease_lives(self):
"""
life reduction function, depends on Enemy-obj, created in game function
"""
self.lives -= 1
if self.lives == 0:
raise GameOver("Game Over!\nYour final score: %s" % self.score, self)
def attack(self, enemy_obj):
"""
accepts user input to attack and Enemy-obj's to defence 'select_attack' function result,
counts score and lives
"""
attack = 0
while attack not in self.allowed_attacks:
attack = input('Select Attack to Use: 1 - "ROGUE", 2 - "WARRIOR", '
'3 - "WIZARD" :\n').lower()
self.command(attack)
attack = int(attack)
defence = enemy_obj.select_attack()
score = self.fight(attack, defence)
if score == 0:
print(
"It's a draw!"
)
elif score == 1:
print(
"You attacked successfully!"
)
self.score += score
enemy_obj.decrease_lives(self)
elif score == -1:
print(
"You missed!"
)
def defence(self, enemy_obj):
"""
the same as 'attack' function, but here Enemy-obj attacks and Player-obj defenses
"""
attack = enemy_obj.select_attack()
defence = 0
while defence not in self.allowed_attacks:
defence = input('Select Defence to Use: 1 - "ROGUE",'
' 2 - "WARRIOR", 3 - "WIZARD"\n').lower()
self.command(defence)
defence = int(defence)
score = self.fight(attack, defence)
if score == 0:
print(
"It's a draw!"
)
elif score == 1:
print(
"He hit you"
)
self.decrease_lives()
elif score == -1:
print(
"You dodged attack!"
)
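
# A minimal game-loop sketch built on the classes above (illustrative; the
# +5 score bonus on EnemyDown is an assumption, and the project's real loop
# lives in its main module):
#
# player = Player(input("Name: "))
# enemy = Enemy()
# while True:
#     try:
#         player.attack(enemy)
#         player.defence(enemy)
#     except EnemyDown:
#         player.score += 5  # assumed bonus for defeating an enemy
#         enemy = Enemy()
#     except GameOver as err:
#         print(err)
#         break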
|
MarinaZh16/Module-2-OOP
|
classes/models.py
|
models.py
|
py
| 4,050 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26213405914
|
from typing import Generator, Any
import numpy as np
import pandas as pd
from sklearn.model_selection import GroupKFold
from sklearn.preprocessing import LabelEncoder, OrdinalEncoder
from hw2.datasets.base import Dataset
class TrainDataset(Dataset):
def reduce_by_members(self, size: int, inplace: bool = False) -> "TrainDataset":
if not inplace:
dataset = TrainDataset(self._df).reduce_by_members(size, inplace=True)
return dataset
self._df = self._df.groupby("msno").head(size).reset_index(drop=True)
return self
def remove_by_mask(self, mask, inplace: bool = False) -> "TrainDataset":
if not inplace:
dataset = TrainDataset(self._df).remove_by_mask(mask, inplace=True)
return dataset
self._df = self._df[~mask]
return self
def sort_by(self, column: str, inplace: bool = False) -> "TrainDataset":
if not inplace:
dataset = TrainDataset(self._df).sort_by(column, inplace=True)
return dataset
self._df = self._df.sort_values(by="msno")
return self
def group_split(self, n_splits: int) -> Generator:
# Not used anymore. Split by query.
group_kfold = GroupKFold(n_splits=n_splits)
# df_sorted = self._df.sort_values(by="msno")
data = self._df.drop("target", axis=1)
groups = data.msno.cat.codes.to_numpy()
for train_index, test_index in group_kfold.split(data, groups=groups):
train_dataset = TrainDataset(self._df.iloc[train_index])
test_dataset = TrainDataset(self._df.iloc[test_index])
yield train_dataset, test_dataset
def split(self, n_splits: int, random_state: int) -> Generator:
np.random.seed(random_state)
splits = np.array_split(np.random.permutation(len(self._df)), n_splits)
for i in range(n_splits):
train_index = np.hstack([splits[j] for j in range(n_splits) if j != i])
test_index = splits[i]
train_dataset = self._df.iloc[sorted(train_index)].reset_index(drop=True)
test_dataset = self._df.iloc[sorted(test_index)].reset_index(drop=True)
# Remove leaks. Too long :(
cols = ["msno", "song_id"]
mask_2d = np.isin(test_dataset[cols], train_dataset[cols])
test_dataset = test_dataset[~np.all(mask_2d, axis=1)]
yield TrainDataset(train_dataset), TrainDataset(test_dataset)
def add_features(self, name: str, values: Any):
self._df[name] = values
def drop_features(self, name: str):
self._df = self._df.drop(columns=name)
@property
def queries(self) -> np.ndarray:
return self._df.msno.cat.codes.to_numpy()
@property
def labels(self) -> np.ndarray:
return self._df.target.to_numpy()
@staticmethod
def from_path(path: str) -> "TrainDataset":
df = pd.read_csv(path, dtype={
"msno": "category",
"song_id": "category",
"source_system_tab": "category",
"source_screen_name": "category",
"source_type": "category",
"target": np.int})
return TrainDataset(df)
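
# Hedged usage sketch (the file name and split counts are placeholders):
#
# dataset = TrainDataset.from_path("train.csv")
# for fold, (train_ds, test_ds) in enumerate(dataset.split(n_splits=5, random_state=42)):
#     print(fold, len(train_ds.labels), len(test_ds.labels))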
|
Sushentsev/recommendation-systems
|
hw2/datasets/train.py
|
train.py
|
py
| 3,215 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11983564091
|
# -*- coding: utf-8 -*-
import sys
from ccm import *
from PyQt5.QtWidgets import QMainWindow, QApplication
class CCMWindow(QMainWindow):
def __init__(self):
super().__init__()
self.ui = Ui_CCMTask()
self.ui.setupUi(self)
self.ui.retranslateUi(self)
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
mainWindow = CCMWindow()
mainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)
mainWindow.setFixedSize(mainWindow.width(), mainWindow.height())
mainWindow.show()
sys.exit(app.exec_())
|
yyFFans/DemoPractises
|
CCMtask/startui.py
|
startui.py
|
py
| 581 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27108312423
|
import math
import plotly
import dash_bootstrap_components as dbc
from dash import html, dcc
import dash
from django_plotly_dash import DjangoDash
from geopy.geocoders import ArcGIS
import plotly.graph_objects as go
import plotly.express as px
import multiprocessing
import re
import pandas as pd
class Mapa:
def __init__(self, df, col):
self.col = col
self.df = df
def criar_mapa(self):
app = DjangoDash("mapa",
add_bootstrap_links=True)
app.layout = html.Div(
dcc.Graph(id='mapa', figure=self.gerar_grafico()))
return app
def encontrar_coordenadas(self, x):
nom = ArcGIS()
coordenada = nom.geocode(x)
if coordenada:
return coordenada.latitude, coordenada.longitude
def requisicao(self, df):
df[['Latitude', 'Longitude']] = df[self.col].apply(lambda x: pd.Series(self.encontrar_coordenadas(x)))
return df
def gerar_grafico(self):
df = self.requisicao(self.df)
fig = go.Figure(go.Scattermapbox(
lat=df['Latitude'],
lon=df['Longitude'],
mode='markers',
marker=go.scattermapbox.Marker(
size=15,
color='rgb(0, 100, 58)',
opacity=0.7
),
text=df,
))
        # Configure the map layout
fig.update_layout(
mapbox_style='open-street-map',
mapbox_center_lon=0,
margin={'r': 0, 't': 0, 'l': 0, 'b': 0}
)
        # Get the minimum and maximum latitude and longitude values
lat_min, lat_max = df['Latitude'].min(), df['Latitude'].max()
lon_min, lon_max = df['Longitude'].min(), df['Longitude'].max()
        # Compute the center of the map
center_lat = (lat_min + lat_max) / 2
center_lon = (lon_min + lon_max) / 2
        # Compute the extent of the coordinates
lat_extent = lat_max - lat_min
lon_extent = lon_max - lon_min
        # Set the zoom level
zoom_lat = math.log10(360 / lat_extent) / math.log10(2)
zoom_lon = math.log10(360 / lon_extent) / math.log10(2)
zoom = min(zoom_lat, zoom_lon)
        # Configure the map layout zoomed to the marked coordinates
fig.update_layout(
mapbox={
'center': {'lon': center_lon, 'lat': center_lat},
'zoom': zoom
}
)
return fig
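
# Hedged usage sketch (the column name and addresses are placeholders; each
# row triggers one ArcGIS geocoding request):
#
# df = pd.DataFrame({"Endereco": ["Av. Paulista, Sao Paulo",
#                                 "Praca da Se, Sao Paulo"]})
# app = Mapa(df, "Endereco").criar_mapa()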
|
victoralmeida428/master-edition
|
apps/geoloc/dash.py
|
dash.py
|
py
| 2,470 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7822007724
|
import os
def extract_from_qmake(path, variable):
sources_list = []
def next_linelist(it):
line = next(it)
line = line.replace('\n', '').strip()
return [tok for tok in line.split() if tok != '']
with open(path, 'r') as qfile:
line_iter = iter(qfile)
try:
while True:
linelist = next_linelist(line_iter)
if len(linelist) > 1 and ''.join(linelist[0:2]) == variable + '+=':
continue_line = linelist[-1] == '\\'
sources_list += linelist[2:-1] if continue_line else linelist[2:]
while continue_line:
linelist = next_linelist(line_iter)
continue_line = linelist[-1] == '\\'
sources_list += linelist[:-1] if continue_line else linelist
except StopIteration:
pass
return sources_list
def insert_ctl_sources(file_handle, ctl_dir):
extract_sources_from_qmake = lambda x: extract_from_qmake(x, 'SOURCES')
qt_cpp_files = \
extract_sources_from_qmake(ctl_dir + '/modules/submodules/ctl_core.pri') + \
extract_sources_from_qmake(ctl_dir + '/modules/submodules/ocl_config.pri') + \
extract_sources_from_qmake(ctl_dir + '/modules/submodules/ocl_routines.pri') + \
extract_sources_from_qmake(ctl_dir + '/modules/submodules/gui_widgets.pri') + \
extract_sources_from_qmake(ctl_dir + '/modules/submodules/gui_widgets_3d.pri') + \
extract_sources_from_qmake(ctl_dir + '/modules/submodules/gui_widgets_charts.pri') + \
extract_sources_from_qmake(ctl_dir + '/modules/submodules/gui_widgets_ocl.pri')
cmake_cpp_files = [f.replace('$$PWD/../src', '${CTL_DIR}') for f in qt_cpp_files]
file_handle.writelines([' '*4 + f + '\n' for f in cmake_cpp_files])
def insert_ctl_forms(file_handle, ctl_dir):
extract_forms_from_qmake = lambda x: extract_from_qmake(x, 'FORMS')
qt_cpp_files = \
extract_forms_from_qmake(ctl_dir + '/modules/submodules/ctl_core.pri') + \
extract_forms_from_qmake(ctl_dir + '/modules/submodules/ocl_config.pri') + \
extract_forms_from_qmake(ctl_dir + '/modules/submodules/ocl_routines.pri') + \
extract_forms_from_qmake(ctl_dir + '/modules/submodules/gui_widgets.pri') + \
extract_forms_from_qmake(ctl_dir + '/modules/submodules/gui_widgets_3d.pri') + \
extract_forms_from_qmake(ctl_dir + '/modules/submodules/gui_widgets_charts.pri') + \
extract_forms_from_qmake(ctl_dir + '/modules/submodules/gui_widgets_ocl.pri')
cmake_cpp_files = [f.replace('$$PWD/../src', '${CTL_DIR}') for f in qt_cpp_files]
file_handle.writelines([' '*4 + f + '\n' for f in cmake_cpp_files])
def insert_ctl_src_dir(file_handle, ctl_dir):
file_handle.write(' '*4 + '/'.join([ctl_dir, 'modules', 'src']) + '\n')
def insert_pyctl_sources(file_handle):
cpp_files = [' '*4 + '/'.join([r.replace('\\', '/'), f]) + '\n'
for r, _, fl in os.walk('pysrc')
for f in fl if not f.endswith('.h')]
file_handle.writelines(cpp_files)
def writeline(l):
def w(h):
h.write(l + '\n')
return w
def create_cmakelists(ctl_dir):
ctl_dir = ctl_dir[:-1] if ctl_dir.endswith('/') else ctl_dir
with open('CMakeListsTemplate.txt', 'r') as template_handle, \
open('CMakeLists.txt', 'w') as generated_handle:
for template_line in [l.replace('\n', '') for l in template_handle]:
{
'$$insert_ctl_dir$$': lambda x: insert_ctl_src_dir(x, ctl_dir),
'$$insert_ctl_sources$$': lambda x: insert_ctl_sources(x, ctl_dir),
'$$insert_ctl_forms$$': lambda x: insert_ctl_forms(x, ctl_dir),
'$$insert_pyctl_sources$$': insert_pyctl_sources,
}.get(template_line, writeline(template_line))(generated_handle)
if __name__ == '__main__':
create_cmakelists('ctl_src')
|
phernst/pyctl
|
create_cmakelists.py
|
create_cmakelists.py
|
py
| 3,970 |
python
|
en
|
code
| 6 |
github-code
|
6
|
209689754
|
def list_op():
opcodes=[]
with open('input_day5.txt') as f:
for line in f:
for op in line.strip().split(','):
opcodes.append(int(op))
return opcodes
def sol(inp):
movs={1:4, 2:4, 3:2, 4:2, 7:4, 8:4, 5:3, 6:3}
opcodes=list_op()
pointer= 0
#print(opcodes)
while True:
#print(pointer)
opcode=f'{opcodes[pointer]:0>5}'
#print(opcode)
modei= [digit=='1' for digit in opcode[::-1][2:]]
opcode=int(opcode[-2:])
if opcode==99:
break
x,y,z=opcodes[pointer+1:pointer+4]
if opcode==3:
            opcodes[x]=inp
else:
parameter1=(int(modei[0])*x) or (int(not modei[0])* opcodes[x])
if opcode==4:
                print(f'Output: {parameter1}')
else:
parameter2=(int(modei[1])*y) or (int(not modei[1])* opcodes[y])
if opcode==1:
opcodes[z]=parameter1+parameter2
elif opcode==2:
opcodes[z]=parameter1*parameter2
elif opcode==5:
if parameter1!=0:
pointer = parameter2-movs[opcode]
elif opcode==6:
if parameter1==0:
pointer =parameter2-movs[opcode]
elif opcode==7:
opcodes[z]=int(parameter1<parameter2)
elif opcode==8:
opcodes[z]=int(parameter1==parameter2)
else:
                    print(f'Unknown opcode: {opcode}')
pointer +=movs[opcode]
#part 1
print("Part 1")
sol(1)
#part 2
print("Part 2")
sol(5)
|
heyheycel/advent-of-code
|
2019/day5.py
|
day5.py
|
py
| 1,336 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7177357229
|
import json
import os
from eth_hash.auto import (
keccak,
)
from eth_utils import (
encode_hex,
)
from eth.tools.fixtures.fillers import (
fill_test,
)
from eth.tools.fixtures.fillers.formatters import (
filler_formatter,
)
from eth.tools.fixtures.helpers import (
get_test_name,
)
PARENT_DIR = os.path.dirname(os.path.abspath(__file__))
OUTPUT_DIR = os.path.join(PARENT_DIR, "json")
FILLER_PARENT_DIR = os.path.join(OUTPUT_DIR, "fillers")
TEST_PARENT_DIR = os.path.join(OUTPUT_DIR, "tests")
DIR_STRUCTURE = {}
if __name__ == "__main__":
for (filler_dir, test_dir), test_groups in DIR_STRUCTURE.items():
for test_group, tests in test_groups.items():
for filler, filler_kwargs in tests:
test_name = get_test_name(filler)
filename = test_name + ".json"
filler_src_path = os.path.join(filler_dir, test_group, filename)
filler_path = os.path.join(FILLER_PARENT_DIR, filler_src_path)
test_path = os.path.join(
TEST_PARENT_DIR, test_dir, test_group, filename
)
for path in [filler_path, test_path]:
os.makedirs(os.path.dirname(path), exist_ok=True)
formatted_filler = filler_formatter(filler)
filler_string = json.dumps(formatted_filler, indent=4, sort_keys=True)
with open(filler_path, "w") as filler_file:
filler_file.write(filler_string)
filler_hash = keccak(filler_string.encode("ascii"))
info = {
"source": filler_src_path,
"sourceHash": encode_hex(filler_hash),
}
test = fill_test(filler, info=info, **filler_kwargs or {})
with open(test_path, "w") as test_file:
json.dump(test, test_file, indent=4, sort_keys=True)
|
ethereum/py-evm
|
tests/fillers/build_json.py
|
build_json.py
|
py
| 1,932 |
python
|
en
|
code
| 2,109 |
github-code
|
6
|
10253684387
|
from __future__ import division
import time
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import cv2
from util import *
from darknet import Darknet
import pandas as pd
import colorsys
import random
import pickle as pkl
import argparse
def arg_parse():
"""
Parse arguments to the detect module
"""
parser = argparse.ArgumentParser(description='YOLO v3 Video Detection')
parser.add_argument("--dataset", dest="dataset",
help="Dataset on which the network has been trained", default="pascal")
parser.add_argument("--confidence", dest="confidence",
help="Object Confidence to filter predictions", default=0.5, type=float)
parser.add_argument("--nms_thresh", dest="nms_thresh",
help="NMS Threshhold", default=0.4, type=float)
parser.add_argument("--cfg", dest="cfgfile",
help="Config file", default="cfg/yolov3.cfg", type=str)
parser.add_argument("--weights", dest="weightsfile",
help="weightsfile", default="yolov3.weights", type=str)
parser.add_argument("--reso", dest="reso",
help="Input resolution of the network. Increase to increase accuracy. Decrease to increase learning speed",
default=416, type=int)
return parser.parse_args()
def main():
args = arg_parse()
confidence = args.confidence
nms_thresh = args.nms_thresh
start = 0
CUDA = torch.cuda.is_available()
classes = load_classes("data/coco.names")
num_classes = len(classes)
# Set up the neural network
print("Loading network.....")
model = Darknet(args.cfgfile)
model.load_weights(args.weightsfile)
print("Network successfully loaded")
model.net_info["height"] = args.reso
inp_dim = int(model.net_info["height"])
assert inp_dim % 32 == 0
assert inp_dim > 32
# if there's a GPU available, put the model on GPU
if CUDA:
model.cuda()
# set the model in evaluation mode
model.eval()
def write(x, img, color):
c1 = tuple(x[1:3].int())
c2 = tuple(x[3:5].int())
cls = int(x[-1])
label = "{0}".format(classes[cls])
cv2.rectangle(img, c1, c2, color, 4)
t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 1, 1)[0]
c2 = c1[0] + t_size[0] + 3, c1[1] + t_size[1] + 4
cv2.rectangle(img, c1, c2, color, -1)
cv2.putText(img, label, (c1[0], c1[1] + t_size[1] + 4), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (150, 150, 150), thickness=1)
    # detection phase
cap = cv2.VideoCapture(0)
assert cap.isOpened(), "Cannot capture source"
frames = 0
start = time.time()
hsv_tuples = [(x / num_classes, 1., 1.) for x in range(num_classes)]
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(map(lambda x: (int(x[0] * 200), int(x[1] * 200), int(x[2] * 200)), colors))
np.random.seed(10000)
np.random.shuffle(colors)
np.random.seed(None) # reset seed to default.
while cap.isOpened():
ret, frame = cap.read()
if ret:
frame = cv2.resize(frame, dsize=(1280, 960))
img = prep_image(frame, inp_dim)
print(f"IMG_SHAPE: {img.shape}")
im_dim = frame.shape[1], frame.shape[0]
im_dim = torch.FloatTensor(im_dim).repeat(1, 2)
if CUDA:
im_dim = im_dim.cuda()
img = img.cuda()
            with torch.no_grad():
                outputs = model(img, CUDA)  # torch.no_grad() supersedes the deprecated volatile=True flag
outputs = write_results(outputs, confidence, num_classes, nms_conf=nms_thresh)
            if outputs is not None:
im_dim = im_dim.repeat(outputs.size(0), 1)
scaling_factor = torch.min(inp_dim/im_dim, 1)[0].view(-1, 1)
outputs[:, [1,3]] -= (inp_dim - scaling_factor * im_dim[:, 0].view(-1,1)) / 2
outputs[:, [2,4]] -= (inp_dim - scaling_factor * im_dim[:, 1].view(-1,1)) / 2
outputs[:, 1:5] /= scaling_factor
for i in range(outputs.shape[0]):
outputs[i, [1,3]] = torch.clamp(outputs[i, [1,3]], 0.0, im_dim[i, 0])
outputs[i, [2,4]] = torch.clamp(outputs[i, [2,4]], 0.0, im_dim[i, 1])
for output in outputs:
color = colors[int(output[-1])]
write(output, frame, color)
cv2.imshow("frame", frame)
key = cv2.waitKey(1)
if key & 0xFF == ord('q'):
break
frames += 1
print(time.time() - start)
print("FPS of the video is {:5.2f}".format( frames / (time.time() - start) ))
else:
break
if __name__ == '__main__':
main()
|
pokotsun/pytorch-yolov3-scratch
|
detect_video.py
|
detect_video.py
|
py
| 4,780 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13842647368
|
# only links that are new should be crawled for additional links
# looks for all links that begin with /wiki/ (don't restrict to article links)
# collects the title, the 1st paragraph of content and the link to edit the page if available
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
pages = set()
def getLinks(pageUrl):
global pages
html = urlopen("http://en.wikipedia.org"+pageUrl)
    bsObj = BeautifulSoup(html, "html.parser")  # explicit parser avoids a bs4 warning
try:
print(bsObj.h1.get_text())
print(bsObj.find(id="mw-content-text").findAll("p")[0])
print(bsObj.find(id="ca-edit").find("span").find("a").attrs['href'])
except AttributeError:
print("this page is missing something")
for link in bsObj.findAll("a", href=re.compile("^(/wiki/)")):
if 'href' in link.attrs:
if link.attrs['href'] not in pages:
# encountered a new page
newPage = link.attrs['href']
print("--------------------\n"+newPage)
pages.add(newPage)
getLinks(newPage)
getLinks("")
|
ViolaZhou7/2016-09-python
|
crawlNewLinks.py
|
crawlNewLinks.py
|
py
| 1,111 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24347880200
|
import pytest
import zipfile
from io import BytesIO
from PIL import Image
from pathlib import Path
from zesje.raw_scans import create_copy, process_page
from zesje.scans import _process_scan, exam_metadata
from zesje.database import db, Exam, Student, Submission, Scan, Problem, ProblemWidget, ExamLayout, Copy, Page
@pytest.fixture
def app_with_data(app):
exam = Exam(name="", layout=ExamLayout.unstructured)
problem = Problem(exam=exam, name="Problem")
widget = ProblemWidget(problem=problem, x=0, y=0, width=0, height=0)
students = [Student(id=i + 1000000, first_name="", last_name="") for i in range(2)]
db.session.add(exam)
db.session.add(problem)
db.session.add(widget)
for student in students:
db.session.add(student)
db.session.commit()
yield app, exam, students
def test_create_copy(app_with_data):
app, exam, students = app_with_data
submission = Submission(exam=exam, student=students[0])
copy = create_copy(submission)
assert copy.id == copy.number
@pytest.fixture
def image_file():
with BytesIO() as image_bytes:
image = Image.new("RGB", (10, 10))
image.save(image_bytes, format="PNG")
yield image_bytes
@pytest.fixture
def zip_file(image_file):
with BytesIO() as zip_bytes:
with zipfile.ZipFile(zip_bytes, "w") as z:
z.writestr("1000000-1.png", image_file.getvalue())
z.writestr("1000001-1.png", image_file.getvalue())
zip_bytes.seek(0)
yield zip_bytes
def test_zip_process(app_with_data, zip_file):
app, exam, students = app_with_data
scan = Scan(exam=exam, name="test.zip", status="processing")
db.session.add(scan)
db.session.commit()
with open(str(scan.path), "wb") as file:
file.write(zip_file.getvalue())
_process_scan(scan.id, exam.layout)
for student in students:
sub = Submission.query.filter(Submission.student == student, Submission.exam == exam).one()
assert sub.validated
assert len(sub.copies) == 1
copy = sub.copies[0]
assert len(copy.pages) == 1
page = copy.pages[0]
assert page.number == 0
def test_reupload_page(app_with_data, zip_file):
app, exam, students = app_with_data
student = students[0]
file_name = "old.txt"
sub = Submission(exam=exam, student_id=student.id, validated=True)
copy = Copy(submission=sub, number=1)
page = Page(copy=copy, number=0, path=file_name)
db.session.add_all([sub, copy, page])
db.session.commit()
old_path = Path(app.config["DATA_DIRECTORY"]) / file_name
old_path.write_text("old image data")
image = Image.new("RGB", (10, 10))
page_info = (student.id, page.number, copy.number)
file_info = [f"{student.id}-{page.number+1}-{page.copy.number}.jpg"]
exam_config = exam_metadata(exam)
output_directory = app.config["DATA_DIRECTORY"]
process_page(image, page_info, file_info, exam_config, output_directory)
# Only a single page entry
assert Page.query.filter(Page.copy == copy, Page.number == page.number).one()
# Path was updated and only new image still exists
assert page.path != file_name
assert not old_path.exists()
assert Path(page.abs_path).exists()
|
zesje/zesje
|
tests/test_raw_scans.py
|
test_raw_scans.py
|
py
| 3,269 |
python
|
en
|
code
| 9 |
github-code
|
6
|
36294164710
|
import argparse
import os
import torch
from net.models import LeNet
from net.quantization import apply_weight_sharing
import util
parser = argparse.ArgumentParser(description='This program quantizes weight by using weight sharing')
parser.add_argument('model', type=str, help='path to saved pruned model')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--output', default='saves/model_after_weight_sharing.ptmodel', type=str,
help='path to model output')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
# Define the model
model = torch.load(args.model)
print('accuracy before weight sharing')
util.test(model, use_cuda)
# Weight sharing
apply_weight_sharing(model)
print('accuracy after weight sharing')
util.test(model, use_cuda)
# Save the new model
os.makedirs('saves', exist_ok=True)
torch.save(model, args.output)
|
mightydeveloper/Deep-Compression-PyTorch
|
weight_share.py
|
weight_share.py
|
py
| 977 |
python
|
en
|
code
| 383 |
github-code
|
6
|
75316088506
|
import re
import wx
from wx import GridSizer
from wx.lib.agw.supertooltip import SuperToolTip
from boaui.units import area, charge, inertia, length, mass, pressure, volume, tnt, density, torque
from .label import SmartLabel
from . import LayoutDimensions, SmartToolTip
from ..units import KEY_IMPERIAL, KEY_METRIC
from ..units.acceleration import AccelerationUnit
from ..units.angle import AngleUnit
from ..units.area_density import AreaDensityUnit
from ..units.area import AreaUnit
from ..units.charge import ChargeUnit
from ..units.density import DensityUnit
from ..units.force import ForceUnit
from ..units.inertia import InertiaUnit
from ..units.length import LengthUnit
from ..units.linear_density import LinearDensityUnit
from ..units.linear_pressure import LinearPressureUnit
from ..units.mass import MassUnit
from ..units.pressure import PressureUnit
from ..units.time import TimeUnit
from ..units.tnt import TntUnit
from ..units.torque import TorqueUnit
from ..units.velocity import VelocityUnit
from ..units.volume import VolumeUnit
__author__ = 'Joeny'
class SmartTextBox(wx.TextCtrl):
"""
Create a smarter text box that could capture keys and process them
to see if the format is correct.
The validation method goes through three process:
1. OnChar(): Capture ony the key character that are necessary.
2. wx.EVT_TEXT: Validate that the input is actually a number.
3. Validate(): Check against the tolerance level.
"""
def __init__(self, parent, key_up=None, message=None, enabled_message='',
disabled_messages=None, disabled_index=None, value=None, enable=None,
helptext=None, required=False,
normal=(255, 255, 255), format_error=(228, 115, 115), range_error=(244, 67, 54),
data=None, *args, **kwargs):
"""
Constructor
:param parent: parent ui
:param key_up: bind key up handler
:param message: add in tooltip message
:param enabled_message: message once the box is enabled
:param disabled_messages: list of array messages
:param disabled_index: index of the which messages to display
:param value: initial value for smart box
:param enable: enable box
:param helptext: add in context help button
:param required: requirement
:param normal: rgb
:param format_error: rgb
:param range_error: rgb
:param data: used to hold any unique data that can be assessed later
:param args:
:param kwargs:
"""
wx.TextCtrl.__init__(self, parent, *args, **kwargs)
if value is not None:
self.Value = str(value)
self.keys = kwargs.get('keys', {})
self.parent = parent
self.data = data
if key_up:
self.Bind(wx.EVT_KEY_UP, key_up, self)
self.tooltip = None
if message:
self.tooltip = wx.ToolTip(message)
self.SetToolTip(self.tooltip)
self.enabled_message = enabled_message
self.disabled_messages = disabled_messages
if disabled_index is None and self.disabled_messages:
self.disabled_index = 0
else:
self.disabled_index = disabled_index
if helptext:
self.SetHelpText(helptext)
self.required = required
self.color_normal = normal
self.color_format_error = format_error
self.color_range_error = range_error
if enable is not None:
self.Enable(enable)
def clear(self):
self.Clear()
@property
def min(self):
"""
Return the minimum value.
:return: minimum value
"""
return self.keys.get('min')
@min.setter
def min(self, value):
"""
Set the minimum value.
:param value:
"""
self.keys['min'] = value
@property
def max(self):
"""
Return the maximum value.
:return: return max value
"""
return self.keys.get('max')
@max.setter
def max(self, value):
"""
Set the maximum value.
:param: value
"""
self.keys['max'] = value
def set_value(self, value, fmt=None):
# type: (object, object) -> object
"""
Set the textbox value
:param value: text
:return:
"""
if value is not None:
if fmt:
                self.Value = fmt % value
else:
self.Value = str(value)
else:
self.Value = ""
def get_value(self, key=None):
"""
Get the value
:param key:
:return:
"""
val = self.GetValue()
if key is not None:
# When key is strike we capture.
digit = chr(key)
pos = self.GetInsertionPoint()
if pos == len(val):
val += digit
else:
val = val[:pos] + digit + val[pos:]
return val
def Enable(self, *args, **kwargs):
"""
On enable, clean data if needed.
:param args:
:param kwargs:
"""
wx.TextCtrl.Enable(self, *args, **kwargs)
if self.disabled_messages:
if self.Value in self.disabled_messages:
self.Value = self.enabled_message
def Disable(self, *args, **kwargs):
"""
On disable, add message if needed.
:param args:
:param kwargs:
"""
wx.TextCtrl.Disable(self, *args, **kwargs)
if self.disabled_messages:
self.set_disable_message()
def set_normal_color(self):
"""
Set normal color.
"""
self.SetBackgroundColour(self.color_normal)
self.Refresh()
def set_format_error_color(self):
"""
Set format error color.
"""
self.SetBackgroundColour(self.color_format_error)
self.Refresh()
def set_range_error_color(self):
"""
Set range error color.
"""
self.SetBackgroundColour(self.color_range_error)
self.Refresh()
def set_disable_message(self):
"""
Set disable message.
:return:
"""
self.Value = self.disabled_messages[self.disabled_index]
def check_requirement(self):
"""
Check if the textbox has value
:return:
"""
if self.required:
if self.Enabled:
                if not self.get_value():  # empty input counts as missing
# Set error box.
if hasattr(self, 'set_range_error_color'):
self.set_format_error_color()
return False
if hasattr(self, 'set_normal_color'):
self.set_normal_color()
# If not required, than return true.
return True
class SmartComboBox(wx.ComboBox):
"""
Smart ComboBox is used for units conversion.
"""
def __init__(self, parent, data=None, style=wx.CB_READONLY, value='', message=None, unit=None, unit_system=None,
enabled_message='', disabled_messages=None, disabled_index=None, enable=None,
helptext=None, required=False, *args, **kwargs):
"""
Constructor
:param parent: parent panel or frame
:param data: list of values
:param style: combobox style
:param value: display value
:param message: tooltip message
:param unit: Unit object
:param unit_system: 'imperial' or 'metric'
:param enabled_message: enable message
:param disabled_messages: disable message
:param disabled_index:
:param enable: enable combobox
:param helptext: add in context help
:param required:
:param args:
:param kwargs:
:return:
"""
wx.ComboBox.__init__(self, parent, style=style, *args, **kwargs)
self.convert = None
self.unit_system = unit_system
self.unit = unit
if data:
self.AppendItems(data)
if value:
self.Value = value
self.tooltip = None
if message:
self.tooltip = wx.ToolTip(message)
self.SetToolTip(self.tooltip)
self.previous_index = 0
self.enabled_message = enabled_message
self.disabled_messages = disabled_messages
if disabled_index is None and self.disabled_messages:
self.disabled_index = 0
else:
self.disabled_index = disabled_index
if unit:
# If unit is passed in, activate it.
self.activate()
self.current_dropbox_selection = None
self.Bind(wx.EVT_COMBOBOX_DROPDOWN, self.on_dropdown_open, self)
if helptext:
self.SetHelpText(helptext)
self.required = required
if enable is not None:
self.Enable(enable)
def bind_dropdown(self, handle):
"""
Bind dropdown event to handle.
"""
self.Bind(wx.EVT_COMBOBOX, handle)
def Enable(self, *args, **kwargs):
"""
On enable, clean data if needed.
:param args:
:param kwargs:
"""
wx.ComboBox.Enable(self, *args, **kwargs)
if self.disabled_messages:
if self.Value in self.disabled_messages:
for index, label in enumerate(self.Strings):
if label in self.disabled_messages:
self.Delete(index)
self.SetSelection(self.previous_index)
def Disable(self, *args, **kwargs):
"""
On disable, add message if needed.
:param args:
:param kwargs:
"""
wx.ComboBox.Disable(self, *args, **kwargs)
if self.disabled_messages:
self.previous_index = self.GetCurrentSelection()
self.Append(self.disabled_messages[self.disabled_index])
self.SetSelection(self.GetCount() - 1)
def on_dropdown_open(self, event=None):
"""
Event handler to store the current selection
:param:
"""
self.current_dropbox_selection = self.GetCurrentSelection()
def is_selection_change(self):
"""
Check if the dropbox selection different from the previous selection before the dropbox is open.
:return: boolean
"""
if self.current_dropbox_selection is self.GetSelection():
return False
else:
return True
def append(self, label, obj):
"""
Append data into combobox.
:param label: title
:param obj: object data
:return:
"""
self.Append(label, obj)
def set_selection_by_data(self, value):
"""
Set the selection given the data input.
:param value:
:return:
"""
for index, text in enumerate(self.Strings):
if self.HasClientData():
if self.GetClientData(index) == value:
self.SetSelection(index)
# Leave loop
return
def get_data(self):
"""
Get the data.
:return:
"""
if self.GetSelection() == -1:
return None
else:
return self.GetClientData(self.GetSelection())
def set_value(self, value):
# type: (object) -> object
"""
Set the value
:param value: string
:return:
"""
self.Value = str(value)
def get_value(self):
"""
Get the combobox value
:return:
"""
return self.Value
def activate(self):
"""
Activate Units.
:return:
"""
self.Clear()
self.AppendItems(self.unit.get_list())
self.SetSelection(self.unit.get_default_selection())
self.convert = self.unit.get_conversion_factor
def activate_acceleration(self, *args, **kwargs):
"""
Activate acceleration unit.
:param args:
:param kwargs:
:return:
"""
self.unit = AccelerationUnit(*args, **kwargs)
if kwargs.get('unit_list'):
unit_list = kwargs.get('unit_list')
self.unit.metric_list = unit_list['metric']
self.unit.imperial_list = unit_list['imperial']
self.unit.unit_system = self.unit_system
self.activate()
def activate_angle(self, *args, **kwargs):
"""
Activate angle unit
:param args:
:param kwargs:
:return:
"""
self.unit = AngleUnit(*args, **kwargs)
if kwargs.get('unit_list'):
unit_list = kwargs.get('unit_list')
self.unit.metric_list = unit_list['metric']
self.unit.imperial_list = unit_list['imperial']
self.unit.unit_system = self.unit_system
self.activate()
def activate_area_density(self, *args, **kwargs):
"""
Activate area density unit.
:param args:
:param kwargs:
:return:
"""
self.unit = AreaDensityUnit(*args, **kwargs)
if kwargs.get('unit_list'):
unit_list = kwargs.get('unit_list')
self.unit.metric_list = unit_list['metric']
self.unit.imperial_list = unit_list['imperial']
self.unit.unit_system = self.unit_system
self.activate()
def activate_area(self, *args, **kwargs):
"""
Activate area unit.
:param kwargs:
"""
self.unit = AreaUnit(*args, **kwargs)
if kwargs.get('unit_list'):
unit_list = kwargs.get('unit_list')
self.unit.metric_list = unit_list['metric']
self.unit.imperial_list = unit_list['imperial']
self.unit.unit_system = self.unit_system
self.activate()
def activate_charge(self, *args, **kwargs):
"""
Activate charge weight.
:param kwargs:
"""
self.unit = ChargeUnit(*args, **kwargs)
if kwargs.get('unit_list'):
unit_list = kwargs.get('unit_list')
self.unit.metric_list = unit_list['metric']
self.unit.imperial_list = unit_list['imperial']
self.unit.unit_system = self.unit_system
self.activate()
def activate_density(self, *args, **kwargs):
"""
Activate density unit.
:param args:
:param kwargs:
"""
self.unit = DensityUnit(*args, **kwargs)
if kwargs.get('unit_list'):
unit_list = kwargs.get('unit_list')
self.unit.metric_list = unit_list['metric']
self.unit.imperial_list = unit_list['imperial']
self.unit.unit_system = self.unit_system
self.activate()
def activate_force(self, *args, **kwargs):
"""
Active force unit.
:param args:
:param kwargs:
:return:
"""
self.unit = ForceUnit(*args, **kwargs)
if kwargs.get('unit_list'):
unit_list = kwargs.get('unit_list')
self.unit.metric_list = unit_list['metric']
self.unit.imperial_list = unit_list['imperial']
self.unit.unit_system = self.unit_system
self.activate()
def activate_inertia(self, *args, **kwargs):
"""
Activate Inertia unit.
:param args:
:param kwargs:
"""
self.unit = InertiaUnit(*args, **kwargs)
if kwargs.get('unit_list'):
unit_list = kwargs.get('unit_list')
self.unit.metric_list = unit_list['metric']
self.unit.imperial_list = unit_list['imperial']
self.unit.unit_system = self.unit_system
self.activate()
def activate_length(self, *args, **kwargs):
"""
Activate length unit.
:param args:
:param kwargs:
"""
self.unit = LengthUnit(*args, **kwargs)
if kwargs.get('unit_list'):
unit_list = kwargs.get('unit_list')
self.unit.metric_list = unit_list['metric']
self.unit.imperial_list = unit_list['imperial']
self.unit.unit_system = self.unit_system
self.activate()
def activate_linear_density(self, *args, **kwargs):
"""
Activate linear density unit.
:param args:
:param kwargs:
:return:
"""
self.unit = LinearDensityUnit(*args, **kwargs)
if kwargs.get('unit_list'):
unit_list = kwargs.get('unit_list')
self.unit.metric_list = unit_list['metric']
self.unit.imperial_list = unit_list['imperial']
self.unit.unit_system = self.unit_system
self.activate()
def activate_linear_pressure(self, *args, **kwargs):
"""
Activate linear pressure unit.
:param args:
:param kwargs:
:return:
"""
self.unit = LinearPressureUnit(*args, **kwargs)
if kwargs.get('unit_list'):
unit_list = kwargs.get('unit_list')
self.unit.metric_list = unit_list['metric']
self.unit.imperial_list = unit_list['imperial']
self.unit.unit_system = self.unit_system
self.activate()
def activate_mass(self, *args, **kwargs):
"""
Activate mass units.
:param kwargs:
"""
self.unit = MassUnit(*args, **kwargs)
if kwargs.get('unit_list'):
unit_list = kwargs.get('unit_list')
self.unit.metric_list = unit_list['metric']
self.unit.imperial_list = unit_list['imperial']
self.unit.unit_system = self.unit_system
self.activate()
def activate_pressure(self, *args, **kwargs):
"""
Activate pressure unit.
:param kwargs:
"""
self.unit = PressureUnit(*args, **kwargs)
if kwargs.get('unit_list'):
unit_list = kwargs.get('unit_list')
self.unit.metric_list = unit_list['metric']
self.unit.imperial_list = unit_list['imperial']
self.unit.unit_system = self.unit_system
self.activate()
def activate_time(self, *args, **kwargs):
"""
Activate time unit.
:param args:
:param kwargs:
:return:
"""
self.unit = TimeUnit(*args, **kwargs)
if kwargs.get('unit_list'):
unit_list = kwargs.get('unit_list')
self.unit.metric_list = unit_list['metric']
self.unit.imperial_list = unit_list['imperial']
self.unit.unit_system = self.unit_system
self.activate()
def activate_tnt(self, *args, **kwargs):
"""
Activate tnt unit.
:param args:
:param kwargs:
"""
self.unit = TntUnit(*args, **kwargs)
if kwargs.get('unit_list'):
unit_list = kwargs.get('unit_list')
self.unit.metric_list = unit_list['metric']
self.unit.imperial_list = unit_list['imperial']
self.unit.unit_system = self.unit_system
self.activate()
def activate_torque(self, *args, **kwargs):
"""
Activate Torque unit.
:param args:
:param kwargs:
"""
self.unit = TorqueUnit(*args, **kwargs)
if kwargs.get('unit_list'):
unit_list = kwargs.get('unit_list')
self.unit.metric_list = unit_list['metric']
self.unit.imperial_list = unit_list['imperial']
self.unit.unit_system = self.unit_system
self.activate()
def activate_velocity(self, *args, **kwargs):
"""
Activate Velocity unit.
:param args:
:param kwargs:
:return:
"""
self.unit = VelocityUnit(*args, **kwargs)
if kwargs.get('unit_list'):
unit_list = kwargs.get('unit_list')
self.unit.metric_list = unit_list['metric']
self.unit.imperial_list = unit_list['imperial']
self.unit.unit_system = self.unit_system
self.activate()
def activate_volume(self, *args, **kwargs):
"""
Activate volume unit.
:param args:
:param kwargs:
"""
self.unit = VolumeUnit(*args, **kwargs)
if kwargs.get('unit_list'):
unit_list = kwargs.get('unit_list')
self.unit.metric_list = unit_list['metric']
self.unit.imperial_list = unit_list['imperial']
self.unit.unit_system = self.unit_system
self.activate()
def get_factor(self, origin, destination):
"""
Get the factor.
:param origin: origin unit
:param destination: destination unit
"""
return self.convert(origin, destination)
def check_requirement(self):
"""
Check if the textbox has value
:return:
"""
if self.required:
if self.Enabled:
                if not self.get_value():  # empty selection counts as missing
return False
else:
return True
else:
# If textbox is not active, than it's not required.
return True
else:
# If not required, than return true.
return True
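

# Hedged usage sketch for SmartComboBox (illustrative; a running wx.App and a
# parent panel are assumed, and "psi"/"kPa" must exist in the configured
# PressureUnit lists):
#
# combo = SmartComboBox(panel, unit_system=KEY_IMPERIAL)
# combo.activate_pressure()
# factor = combo.get_factor("psi", "kPa")  # conversion factor psi -> kPa
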
class SmartInputLayout(wx.BoxSizer):
"""
Create the horizontal layout of smart textbox.
/---------------- OVERALL WIDTH ----------------------/
| |
| |
******************************************************* ----/
* * * * * * * * |
******************************************************* |
* * * * * * * *
* * (1) * * (2) * * (3) * * OVERALL HEIGHT
* * * * * * * *
******************************************************* |
* * * * * * * * |
******************************************************* ----/
"""
MAKE_VERTICAL_STRETCHABLE = 1
def __init__(self, parent, max=None, min=None, layout=None, label=None, *args, **kwargs):
"""
Constructor.
:param parent:
:param width:
:param max: maximum value for the textbox
:param min: minimum value for the textbox
:param layout:
:param label: pass in wx.Label or SmartLabel
:param args:
:param kwargs:
"""
wx.BoxSizer.__init__(self, wx.VERTICAL)
self.components = []
self.parent = parent
self.hsizer = None
self._next_id = 0
self.INDEX_LABEL = None
self.INDEX_TEXTBOX = None
self.INDEX_POSTBOX = None
self.INDEX_COMBOBOX = None
self.INDEX_BUTTON = None
if layout:
self.layout = layout
else:
self.layout = LayoutDimensions()
self.layout.calculate()
# Add in the label.
if label:
self.label = label
elif kwargs.get('name'):
self.label = SmartLabel(self.parent, label=kwargs.get('name'))
else:
self.label = None
self.min = min
self.max = max
# Set minimum size.
size = self.GetSize()
size.Height = self.layout.overall_height
self.SetMinSize(size)
def rename(self, name=None):
"""
Rename
:param name:
:return:
"""
self.label = wx.StaticText(self.parent, label=name)
@property
def next_id(self):
"""
:return:
"""
nid = self._next_id
self._next_id += 1
return nid
@property
def label(self):
"""
:return:
"""
if self.INDEX_LABEL is None:
return None
return self.components[self.INDEX_LABEL]
@label.setter
def label(self, value):
"""
:param value:
:return:
"""
if value is None:
return
self.INDEX_LABEL = self.next_id
self.components.append(value)
@property
def textbox(self):
"""
:return:
"""
if self.INDEX_TEXTBOX is None:
return None
return self.components[self.INDEX_TEXTBOX]
@textbox.setter
def textbox(self, value):
"""
:param value:
:return:
"""
if value is None:
return
self.INDEX_TEXTBOX = self.next_id
self.components.append(value)
@property
def postbox(self):
"""
:return:
"""
if self.INDEX_POSTBOX is None:
return None
return self.components[self.INDEX_POSTBOX]
@postbox.setter
def postbox(self, value):
"""
:param value:
:return:
"""
if value is None:
return
self.INDEX_POSTBOX = self.next_id
self.components.append(value)
@property
def combobox(self):
"""
:return:
"""
if self.INDEX_COMBOBOX is None:
return None
return self.components[self.INDEX_COMBOBOX]
@combobox.setter
def combobox(self, value):
"""
:param value:
:return:
"""
if value is None:
return
self.INDEX_COMBOBOX = self.next_id
self.components.append(value)
def do_layout(self):
"""
Do Layout.
:return:
"""
# Start with the vertical margin.
self.AddSpacer(self.layout.top)
# Move from left to right.
self.hsizer = wx.BoxSizer(wx.HORIZONTAL)
# self.hsizer.SetMinSize(wx.Size(self.layout.overall_width, self.layout.height))
self.hsizer.AddSpacer(self.layout.left)
for id in range(0, len(self.components)):
"""
wx.BoxSizer.Add(window, proportion=0, flag=0, border=0, userData=None)
Append a child to the sizer
:param window: a window, a spacer or another sizer to be added to the sizer. Its initial size
(either set explicitly by the user or calculated internally) is interpreted as the minimal and
in many cases also the initial size.
:param proportion: (int) the parameter is used in wx.BoxSizer to indicate if a child of a sizer can
change its size in the main orientation of the wx.BoxSizer - where 0 stands for non changeable
and a value of more than zero is interpreted relative to the value of other children of the
same wx.BosSizer. For example, you might have a horizontal wx.BoxSizer with three children,
two of which are supposed to change their size with the sizer. Then the two stretchable
windows would get a value of 1 each to make item grow and shrink equally with the sizer's
horizontal dimension.
:param flag: (int): combinations of flags affecting sizer's behavior
:param border: (int): determines the border width, if the flag parameter is set to include any
border flag
:param userData: (object) allows an extra object to be attached to the sizer item, for use in
derived classes when sizing information
"""
self.components[id].SetMinSize(self.layout.get_size(id))
self.hsizer.AddSpacer(self.layout.interior)
self.hsizer.Add(self.components[id],
self.layout.stretch_factor[id],
wx.ALL | wx.EXPAND,
self.layout.border_width[id])
# Add blank space if no component exists.
for id_blank in range(id+1, len(self.layout.widths)):
self.hsizer.AddSpacer(self.layout.interior)
blank_label = wx.StaticText(self.parent, label="")
blank_label.SetMinSize(self.layout.get_size(id_blank))
self.hsizer.Add(blank_label,
self.layout.stretch_factor[id_blank],
wx.ALL | wx.EXPAND,
self.layout.border_width[id_blank])
self.hsizer.AddSpacer(self.layout.right)
self.Add(self.hsizer, 1, wx.EXPAND | wx.ALL, 0)
self.AddSpacer(self.layout.bottom)
def add(self, item, proportion=0, flag=0, border=0, userData=None):
"""
Appends a child item to the sizer.
:param item: The item can be one of three kind of objects:
* window: A wx.Window to be managed by the sizer. Its minimal size (either set explicitly by the user or
calculated internally when constructed with wx.DefaultSize) is interpreted as the minimal size to use
when laying out item in the sizer. This is particularly useful in connection with
wx.Window.SetSizeHints.
* sizer: The (child-)sizer to be added to the sizer. This allows placing a child sizer in a sizer and thus
to create hierarchies of sizers (for example a vertical box as the top sizer and several horizontal
boxes on the level beneath).
* size: A wx.Size or a 2-element sequence of integers that represents the width and height of a spacer to
be added to the sizer. Adding spacers to sizers gives more flexibility in the design of dialogs;
imagine for example a horizontal box with two buttons at the bottom of a dialog: you might want to
insert a space between the two buttons and make that space stretchable using the proportion value and
the result will be that the left button will be aligned with the left side of the dialog and the right
button with the right side - the space in between will shrink and grow with the dialog.
:param proportion: Although the meaning of this parameter is undefined in wx.Sizer, it is used in wx.BoxSizer
to indicate if a child of a sizer can change its size in the main orientation of the wx.BoxSizer - where 0
stands for not changeable and a value of more than zero is interpreted relative (a proportion of the total)
to the value of other children of the same wx.BoxSizer. For example, you might have a horizontal
wx.BoxSizer with three children, two of which are supposed to change their size with the sizer. Then the
two stretchable windows should each be given proportion value of 1 to make them grow and shrink equally
with the sizer's horizontal dimension. But if one of them had a proportion value of 2 then it would get a
double share of the space available after the fixed size items are positioned.
(type int)
:param flag: This parameter can be used to set a number of flags which can be combined using the binary OR
operator |. Two main behaviours are defined using these flags. One is the border around a window: the
border parameter determines the border width whereas the flags given here determine which side(s) of the
item that the border will be added. The other flags determine how the sizer item behaves when the space
allotted to the sizer changes, and is somewhat dependent on the specific kind of sizer used.
* wx.TOP
* wx.BOTTOM
* wx.LEFT
* wx.RIGHT
* wx.ALL
* wx.EXPAND
* wx.SHAPED
* wx.FIXED_MINSIZE
* wx.ALIGN_CENTER
* wx.ALIGN_LEFT
* wx.ALIGN_RIGHT
* wx.ALIGN_TOP
* wx.ALIGN_BOTTOM
* wx.ALIGN_CENTER_VERTICAL
* wx.ALIGN_CENTER_HORIZONTAL
(type int)
:param border: Determines the border width, if the flag parameter is set to include any border flag.
(type int)
:param userData: Allows an extra object to be attached to the sizer item, for use in derived classes when
sizing information is more complex than the proportion and flag will allow for.
(type=PyObject)
"""
self.Add(item, proportion, flag, border, userData)
def add_stretch_spacer(self, prop=1):
"""
Add a stretchable spacer.
:param prop:
:return:
"""
self.AddStretchSpacer(prop=prop)
def add_spacer(self, size):
"""
Add a spacer that is (size, size) pixels.
:param size:
:return:
"""
self.AddSpacer(size)
def fit(self, window):
"""
Tell the sizer to resize the window to match the sizer's minimal size. This is commonly done in the constructor
of the window itself in order to set its initial size to match the needs of the children as determined by the
sizer. Returns the new size.
For a top level window this is the total window size, not the client size.
:param window:
:return:
"""
self.Fit(window)
def enable(self):
"""
Must inherit enable input layout.
"""
pass
def disable(self):
"""
Must inherit disable input layout.
"""
pass
def validate(self):
"""
Must inherit validate().
"""
pass
def check_requirement(self):
"""
Check requirement
:return:
"""
requirement_satisfy = True
for item in self.components:
if hasattr(item, 'check_requirement'):
if item.check_requirement() is False:
requirement_satisfy = False
return requirement_satisfy
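# A minimal, commented-out sketch of the wx.BoxSizer proportion semantics
# documented in the add()/do_layout() docstrings above; it is not exercised by
# this module and the widget labels are illustrative only:
# import wx
# app = wx.App()
# frame = wx.Frame(None, title="proportion demo")
# sizer = wx.BoxSizer(wx.HORIZONTAL)
# sizer.Add(wx.Button(frame, label="1 share"), proportion=1, flag=wx.EXPAND)
# sizer.Add(wx.Button(frame, label="2 shares"), proportion=2, flag=wx.EXPAND)
# frame.SetSizer(sizer)
# frame.Show()
# app.MainLoop()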
class SmartButton(wx.Button):
"""
Smarter Button Class
"""
def __init__(self, parent, label='', evt_button=None, message=None, enable=None, helptext=None, *args, **kwargs):
"""
Constructor.
:param parent:
:param label:
:param evt_button:
:param message:
:param enable:
:param helptext:
:param args:
:param kwargs:
:return:
"""
wx.Button.__init__(self, parent, label=label, *args, **kwargs)
if evt_button:
self.Bind(wx.EVT_BUTTON, evt_button)
self.tooltip = None
if message:
self.tooltip = wx.ToolTip(message)
self.SetToolTip(self.tooltip)
if helptext:
self.SetHelpText(helptext)
if enable is not None:
self.Enable(enable)
class SmartCheckBox(wx.CheckBox):
"""
**Smarter CheckBox**
"""
def __init__(self, parent, id=-1, label='', evt_click=None, message=None, enable=None, helptext=None,
*args, **kwargs):
"""
Constructor.
:param parent:
:param id:
:param label:
:param evt_click:
:param message:
:param helptext:
:param args:
:param kwargs:
:return:
"""
wx.CheckBox.__init__(self, parent, id=id, label=label, *args, **kwargs)
self.tooltip = None
if message:
self.tooltip = wx.ToolTip(message)
self.SetToolTip(self.tooltip)
if evt_click:
self.Bind(wx.EVT_CHECKBOX, evt_click)
if helptext:
self.SetHelpText(helptext)
if enable is not None:
self.Enable(enable)
def bind_click(self, handle):
"""
Bind check box click.
:param handle:
:return:
"""
self.Bind(wx.EVT_CHECKBOX, handle)
def get_value(self):
"""
Return the true/false
:return:
"""
return self.Value
def set_value(self, value):
# type: (object) -> object
self.SetValue(value)
|
JoenyBui/boa-gui
|
boaui/textbox/smart.py
|
smart.py
|
py
| 36,235 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19399912919
|
# Number of 1 Bits
# https://leetcode-cn.com/leetbook/read/top-interview-questions-easy/xn1m0i/
class Solution:
def hammingWeight(self, n: int) -> int:
count = 0
        while n != 0:  # 'is' identity checks against int literals are unreliable; use !=
count += n & 1
n = n >> 1
return count
r = Solution().hammingWeight(0b1111)
print(r)
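# An alternative sketch (not part of the original solution): Kernighan's trick
# clears the lowest set bit on each pass, so the loop runs once per 1-bit
# instead of once per bit position.
def hamming_weight_kernighan(n: int) -> int:
    count = 0
    while n:
        n &= n - 1  # drop the lowest set bit
        count += 1
    return count
assert hamming_weight_kernighan(0b1111) == 4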
|
Yigang0622/LeetCode
|
hammingWeight.py
|
hammingWeight.py
|
py
| 317 |
python
|
en
|
code
| 1 |
github-code
|
6
|
70756756988
|
from cavefinder.support.cstruct import *
# ---------------------
# | Dos Header |
# ---------------------
# | Pe Signature |
# ---------------------
# | COFF Header |
# *********************
# | Optional Header |
# *********************
# | Section Table |
# ---------------------
# | Mappable sections |
# ---------------------
#
MZ_MAGIC = 0x4D5A
MZ_CIGAM = 0x5A4D
PE_MAGIC = 0x50450000
PE_CIGAM = 0x00004550
class DosHeader(object, metaclass=CStruct):
e_magic = USHORT # 00: MZ Header signature
e_cblp = USHORT # 02: Bytes on last page of file
e_cp = USHORT # 04: Pages in file
e_crlc = USHORT # 06: Relocations
e_cparhdr = USHORT # 08: Size of header in paragraphs
e_minalloc = USHORT # 0a: Minimum extra paragraphs needed
e_maxalloc = USHORT # 0c: Maximum extra paragraphs needed
e_ss = USHORT # 0e: Initial (relative) SS value
e_sp = USHORT # 10: Initial SP value
e_csum = USHORT # 12: Checksum
e_ip = USHORT # 14: Initial IP value
e_cs = USHORT # 16: Initial (relative) CS value
e_lfarlc = USHORT # 18: File address of relocation table
e_ovno = USHORT # 1a: Overlay number
e_res = "8s" # 1c: Reserved words
e_oemid = USHORT # 24: OEM identifier (for e_oeminfo)
    e_oeminfo = USHORT  # 26: OEM information; e_oemid specific
e_res2 = "20s" # 28: Reserved words
e_lfanew = UINT # 3c: Offset to extended header
def __init__(self, stream: io.RawIOBase):
self.e_magic = unpack_type(USHORT, stream.read(2))
stream.seek(stream.tell() - sizeof(USHORT))
        if self.e_magic in (MZ_MAGIC, MZ_CIGAM):  # the original 'or MZ_MAGIC' was always truthy
self.unpack_from_io(stream)
else:
raise TypeError("Not a valid PE (Invalid DosHeader)")
def __str__(self):
return '\n'.join(["Dos Header:",
"Magic: {e_magic:#x}",
"Byte on last page: {e_cblp:#x}",
"Page in file: {e_cp:#x}",
"Relocations: {e_crlc:#x}",
"Size of header: {e_cparhdr:#x}",
"Min alloc: {e_minalloc:#x}",
"Max alloc: {e_maxalloc:#x}",
"ss: {e_ss:#x}",
"sp: {e_sp:#x}",
"Checksum: {e_csum:#x}",
"ip: {e_ip:#x}",
"cs: {e_cs:#x}",
"File address reloc table: {e_lfarlc:#x}",
"Overlay number: {e_ovno:#x}",
"OEM id: {e_oemid:#x}",
"OEM info: {e_oeminfo:#x}",
"Offset to extended header: {e_lfanew:#x}"]).format(**self.__dict__)
@property
def endianness(self):
return 'big' if self.e_magic == MZ_MAGIC else 'little'
COFF_MACHINE_UNKNOWN = 0 # unknown
COFF_MACHINE_I386 = 0x014c # Intel 386.
COFF_MACHINE_R3000 = 0x0162 # MIPS little-endian, 0x160 big-endian
COFF_MACHINE_R4000 = 0x0166 # MIPS little-endian
COFF_MACHINE_R10000 = 0x0168 # MIPS little-endian
COFF_MACHINE_WCEMIPSV2 = 0x0169 # MIPS little-endian WCE v2
COFF_MACHINE_ALPHA = 0x0184 # Alpha_AXP
COFF_MACHINE_POWERPC = 0x01F0 # IBM PowerPC Little-Endian
COFF_MACHINE_SH3 = 0x01a2 # SH3 little-endian
COFF_MACHINE_SH3E = 0x01a4 # SH3E little-endian
COFF_MACHINE_SH4 = 0x01a6 # SH4 little-endian
COFF_MACHINE_ARM = 0x01c0 # ARM Little-Endian
COFF_MACHINE_THUMB = 0x01c2
COFF_MACHINE_AMD64 = 0x8664
COFF_MACHINE_IA64 = 0x0200 # Intel 64
COFF_MACHINE_MIPS16 = 0x0266 # MIPS
COFF_MACHINE_MIPSFPU = 0x0366 # MIPS
COFF_MACHINE_MIPSFPU16 = 0x0466 # MIPS
COFF_MACHINE_ALPHA64 = 0x0284 # ALPHA64
class COFFHeader(object, metaclass=CStruct):
machine = USHORT
nsections = USHORT
timestamp = UINT
ptr_to_symtable = UINT
nsym = UINT
size_opheader = USHORT
characteristics = USHORT
def __init__(self, stream: io.RawIOBase, endianness):
self.unpack_from_io(stream, endianness)
def __str__(self):
return "\n".join(["Machine: {machine:#x} (%s)",
"Number of sections: {nsections}",
"Timestamp: {timestamp}",
"Ptr to symbol table: {ptr_to_symtable:#x}",
"Number of symbol: {nsym}",
"Size of optional header: {size_opheader}",
"Characteristics: {characteristics:#x}"]).format(**self.__dict__) % self.machine_str
@property
def machine_str(self):
val = {
COFF_MACHINE_UNKNOWN: "unknown",
COFF_MACHINE_I386: "Intel 386",
COFF_MACHINE_R3000: "MIPS little-endian",
COFF_MACHINE_R4000: "MIPS little-endian",
COFF_MACHINE_R10000: "MIPS little-endian",
COFF_MACHINE_WCEMIPSV2: "MIPS little-endian WCE v2",
COFF_MACHINE_ALPHA: "Alpha_AXP",
COFF_MACHINE_POWERPC: "PowerPC little-endian",
COFF_MACHINE_SH3: "SH3 little-endian",
COFF_MACHINE_SH3E: "SH3E little-endian",
COFF_MACHINE_SH4: "SH4 little-endian",
COFF_MACHINE_ARM: "ARM little-endian",
COFF_MACHINE_THUMB: "THUMB",
COFF_MACHINE_AMD64: "AMD 64",
COFF_MACHINE_IA64: "Intel 64",
COFF_MACHINE_MIPS16: "MIPS",
COFF_MACHINE_MIPSFPU: "MIPS",
COFF_MACHINE_MIPSFPU16: "MIPS",
COFF_MACHINE_ALPHA64: "ALPHA64"
}
return "Unknown: %02x" % self.machine if self.machine not in val else val[self.machine]
IMAGE_NT_OPTIONAL_HDR32_MAGIC = 0x10b
IMAGE_NT_OPTIONAL_HDR64_MAGIC = 0x20b
IMAGE_ROM_OPTIONAL_HDR_MAGIC = 0x107
IMAGE_SUBSYSTEM_UNKNOWN = 0x0
IMAGE_SUBSYSTEM_NATIVE = 0x1
IMAGE_SUBSYSTEM_WINDOWS_GUI = 0x2
IMAGE_SUBSYSTEM_WINDOWS_CUI = 0x3
IMAGE_SUBSYSTEM_OS2_CUI = 0x5
IMAGE_SUBSYSTEM_POSIX_CUI = 0x7
IMAGE_SUBSYSTEM_NATIVE_WINDOWS = 0x8
IMAGE_SUBSYSTEM_WINDOWS_CE_GUI = 0x9
class OptionalHeader(object, metaclass=CStruct):
magic = USHORT
major_linker_version = UCHAR
minor_linker_version = UCHAR
size_of_code = UINT
size_initialized_data = UINT
size_uninitialized_data = UINT
address_entry_point = UINT
base_code = UINT
image_base = ULONGLONG
section_alignment = UINT
file_alignment = UINT
major_osversion = USHORT
minor_osversion = USHORT
major_image_version = USHORT
minor_image_version = USHORT
major_subsystem_version = USHORT
minor_subsystem_version = USHORT
win32_version = UINT
size_image = UINT
size_headers = UINT
checksum = UINT
subsystem = USHORT
dll_characteristics = USHORT
size_stack_reserve = ULONGLONG
size_stack_commit = ULONGLONG
size_heap_reserve = ULONGLONG
size_heap_commit = ULONGLONG
loader_flags = UINT
number_rva_and_sizes = UINT
def __init__(self, stream: io.RawIOBase, endianness):
self.unpack_from_io(stream, endianness)
    @property
    def subsystem_str(self):
val = {
IMAGE_SUBSYSTEM_UNKNOWN: "Unknown",
IMAGE_SUBSYSTEM_NATIVE: "Native",
IMAGE_SUBSYSTEM_WINDOWS_GUI: "Windows GUI",
IMAGE_SUBSYSTEM_WINDOWS_CUI: "Console",
IMAGE_SUBSYSTEM_OS2_CUI: "OS/2 Console",
IMAGE_SUBSYSTEM_POSIX_CUI: "Posix Console",
            IMAGE_SUBSYSTEM_NATIVE_WINDOWS: "native Win9x driver",
IMAGE_SUBSYSTEM_WINDOWS_CE_GUI: "Windows CE",
}
return "Unknown: %02x" % self.subsystem if self.subsystem not in val else val[self.subsystem]
@property
def wordsz(self):
if self.magic == IMAGE_NT_OPTIONAL_HDR32_MAGIC:
return 32
elif self.magic == IMAGE_NT_OPTIONAL_HDR64_MAGIC:
return 64
return 0
class PeSectionHeader(object, metaclass=CStruct):
name = "8s"
physaddr_or_virtsize = UINT
virtual_addr = UINT
size_rawdata = UINT
ptr_rawdata = UINT
ptr_relocations = UINT
ptr_linenumbers = UINT
nrelocations = USHORT
nlinenumbers = USHORT
characteristics = UINT
def __init__(self, stream: io.RawIOBase, endianness):
self.unpack_from_io(stream, endianness)
def __str__(self):
return "\n".join(["Section Header",
"Name: {name}",
"Virtual address: {virtual_addr:#x}",
"Size of raw data: {size_rawdata:#x}",
"Pointer to raw data: {ptr_rawdata:#x}",
"Pointer to relocations: {ptr_relocations:#x}",
"Pointer to line numbers: {ptr_linenumbers:#x}",
"Number of relocations: {nrelocations:#x}",
"Number of line numbers: {nlinenumbers:#x}",
"Characteristics: {characteristics:#x}"]).format(**self.__dict__)
class PEHeader(object):
def __init__(self, stream: io.RawIOBase, offset=0):
if offset > 0:
stream.seek(offset)
self.signature = unpack_type(UINT, stream.read(4))
if self.signature != PE_MAGIC and self.signature != PE_CIGAM:
raise TypeError("Not a valid PE (Invalid NT signature)")
self.file_header = COFFHeader(stream, self.endianness)
# Parse optional header
self.optional_header = None
jmp_op = stream.tell()
if self.file_header.size_opheader > 0:
self.optional_header = OptionalHeader(stream, self.endianness)
stream.seek(jmp_op + self.file_header.size_opheader)
def __str__(self):
return str(self.file_header)
@property
def endianness(self):
return "big" if self.signature == PE_MAGIC else "little"
class Pe(object):
def __init__(self, stream: io.RawIOBase):
self.dos_header = DosHeader(stream)
self.pe_header = PEHeader(stream, self.dos_header.e_lfanew)
# Parse sections
self.sections = []
for _ in range(self.pe_header.file_header.nsections):
self.sections.append(PeSectionHeader(stream, self.pe_header.endianness))
def __str__(self):
return "\n".join(["Pe Header",
"Magic: 0x%02x",
"%s"]) % (self.pe_header.signature, str(self.pe_header))
@staticmethod
def verify(file: io.RawIOBase):
s_pos = file.tell()
magic = unpack_type(USHORT, file.read(2)) # MS_WORD
file.seek(s_pos)
return magic == MZ_MAGIC or magic == MZ_CIGAM
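# A commented-out usage sketch (not part of the original module); the file path
# below is hypothetical:
# with open("example.exe", "rb") as f:
#     if Pe.verify(f):
#         pe = Pe(f)
#         print(pe)
#         for section in pe.sections:
#             print(section)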
|
jacopodl/CaveFinder
|
cavefinder/support/mspe.py
|
mspe.py
|
py
| 10,962 |
python
|
en
|
code
| 15 |
github-code
|
6
|
30201145239
|
from uuid import uuid1
import click
from awsscripter.stack.helpers import catch_exceptions, confirmation
from awsscripter.stack.helpers import simplify_change_set_description
from awsscripter.stack.helpers import write, get_stack_or_env
from awsscripter.stack.stack_status import StackStatus, StackChangeSetStatus
@click.command(name="update")
@click.argument("path")
@click.option(
"-c", "--change-set", is_flag=True,
help="Create a change set before updating."
)
@click.option(
"-v", "--verbose", is_flag=True, help="Display verbose output."
)
@click.option(
"-y", "--yes", is_flag=True, help="Assume yes to all questions."
)
@click.pass_context
@catch_exceptions
def update_command(ctx, path, change_set, verbose, yes):
"""
Update a stack.
    Updates the stack for a given config PATH, or performs the update via
    a change set when the --change-set flag is set.
"""
stack, _ = get_stack_or_env(ctx, path)
if change_set:
change_set_name = "-".join(["change-set", uuid1().hex])
stack.create_change_set(change_set_name)
try:
# Wait for change set to be created
status = stack.wait_for_cs_completion(change_set_name)
# Exit if change set fails to create
if status != StackChangeSetStatus.READY:
exit(1)
# Describe changes
description = stack.describe_change_set(change_set_name)
if not verbose:
description = simplify_change_set_description(description)
write(description, ctx.obj["output_format"])
# Execute change set if happy with changes
if yes or click.confirm("Proceed with stack update?"):
stack.execute_change_set(change_set_name)
finally:
# Clean up by deleting change set
stack.delete_change_set(change_set_name)
else:
confirmation("update", yes, stack=path)
response = stack.update()
if response != StackStatus.COMPLETE:
exit(1)
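# Illustrative invocations (assumptions: the console entry point is named
# "awsscripter" and this command is registered under a "stack" group, as the
# module path suggests; the PATH argument is hypothetical):
# awsscripter stack update dev/vpc.yaml          # plain update, with confirmation
# awsscripter stack update dev/vpc.yaml -c -v -y # via change set, verbose, no prompt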
|
xformation/awsscripter
|
awsscripter/cli/stack/update.py
|
update.py
|
py
| 2,044 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3231216071
|
#
# License: See LICENSE.md file
# GitHub: https://github.com/Baekalfen/PyBoy
#
__pdoc__ = {
"GameWrapperPokemonGen1.cartridge_title": False,
"GameWrapperPokemonGen1.post_tick": False,
}
import logging
from pyboy.utils import WindowEvent
from .base_plugin import PyBoyGameWrapper
logger = logging.getLogger(__name__)
try:
from cython import compiled
cythonmode = compiled
except ImportError:
cythonmode = False
class GameWrapperPokemonGen1(PyBoyGameWrapper):
"""
This class wraps Pokemon Red/Blue, and provides basic access for AIs.
If you call `print` on an instance of this object, it will show an overview of everything this object provides.
"""
cartridge_title = None
def __init__(self, *args, **kwargs):
self.shape = (20, 18)
super().__init__(*args, game_area_section=(0, 0) + self.shape, game_area_wrap_around=True, **kwargs)
self.sprite_offset = 0x1000
def enabled(self):
return self.pyboy_argv.get("game_wrapper") and ((self.pyboy.cartridge_title() == "POKEMON RED") or
(self.pyboy.cartridge_title() == "POKEMON BLUE"))
def post_tick(self):
self._tile_cache_invalid = True
self._sprite_cache_invalid = True
scanline_parameters = self.pyboy.botsupport_manager().screen().tilemap_position_list()
WX = scanline_parameters[0][2]
WY = scanline_parameters[0][3]
self.use_background(WY != 0)
def __repr__(self):
adjust = 4
# yapf: disable
return (
f"Pokemon Gen 1:\n" +
"Sprites on screen:\n" +
"\n".join([str(s) for s in self._sprites_on_screen()]) +
"\n" +
"Tiles on screen:\n" +
" "*5 + "".join([f"{i: <4}" for i in range(10)]) + "\n" +
"_"*(adjust*20+4) +
"\n" +
"\n".join(
[
f"{i: <3}| " + "".join([str(tile).ljust(adjust) for tile in line])
for i, line in enumerate(self.game_area())
]
)
)
# yapf: enable
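# A commented-out usage sketch (not part of the plugin); the ROM path is
# hypothetical and the calls follow the PyBoy 1.x conventions this wrapper
# targets:
# from pyboy import PyBoy
# pyboy = PyBoy("pokemon_red.gb", game_wrapper=True)
# wrapper = pyboy.game_wrapper()  # a GameWrapperPokemonGen1 instance
# pyboy.tick()
# print(wrapper)  # prints the sprite/tile overview from __repr__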
|
muddi900/PyBoy
|
pyboy/plugins/game_wrapper_pokemon_gen1.py
|
game_wrapper_pokemon_gen1.py
|
py
| 2,149 |
python
|
en
|
code
| null |
github-code
|
6
|
18536492857
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 11 15:05:36 2019
@author: Ashley
"""
# Manuscript Malezieux, Kees, Mulle submitted to Cell Reports
# Figure S3 - Complex spikes
# Description: changes in complex spikes with theta and LIA, plotted separately
# %% import modules
import os
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from matplotlib.colors import rgb2hex
from itertools import compress
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import Divider, Size
from mpl_toolkits.axes_grid1.mpl_axes import Axes
# %% definitions
# bootstrap: one-factor ANOVA-like (for any number of groups):
# is between-group variance bigger than within-group?
def calculate_F(groups_list):
num_g = len(groups_list)
box = np.concatenate(groups_list)
GM = np.nanmedian(box)
gm = np.zeros(num_g)
gs = np.zeros(num_g)
denom = np.zeros(num_g)
for i in np.arange(num_g):
gm[i] = np.nanmedian(groups_list[i])
gs[i] = groups_list[i].size
denom[i] = np.nansum(np.abs(groups_list[i]-np.nanmedian(groups_list[i])))
F = (np.sum(gs*np.abs(GM-gm)))/(np.sum(denom))
return F
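# worked example (illustrative): for groups [1, 2, 3] and [7, 8, 9], the grand
# median is 5 and the group medians are 2 and 8, each group contributing a
# within-group absolute-deviation sum of 2, so
# F = (3*|5-2| + 3*|5-8|) / (2 + 2) = 18/4 = 4.5:
# calculate_F([np.array([1, 2, 3]), np.array([7, 8, 9])])  # -> 4.5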
# one-way ANOVA-like bootstrap for many groups: resamples from a common pool when
# group variances are similar, otherwise resamples within demeaned groups
# returns real_F, p_boot
def boot_anova(groups_list, num_b):
# compute the real F
real_F = calculate_F(groups_list)
faux_F = np.zeros(num_b)
# compute size and variance of groups
groups_size = [np.nan]*len(groups_list)
groups_var = [np.nan]*len(groups_list)
for g in np.arange(len(groups_list)):
groups_size[g] = groups_list[g].size
groups_var[g] = MADAM(groups_list[g], np.nanmedian(groups_list[g]))
# if the largest variance is more than 2x the smallest, resample within groups
# demean each group and sample with replacement
if max(groups_var)/min(groups_var) > 2:
# subtract the median from each group before resampling
dm_groups_list = [np.nan] * len(groups_list)
for g in np.arange(len(groups_list)):
dm_groups_list[g] = groups_list[g] - np.nanmedian(groups_list[g])
# shuffle and deal from each group with replacement
for b in np.arange(num_b):
# deal into faux groups, each one the same size as in real data
f_groups_list = [None] * len(groups_list)
for g in np.arange(len(groups_list)):
group = dm_groups_list[g]
resample = group[np.random.randint(0, group.size, size=group.size)]
f_groups_list[g] = resample
faux_F[b] = calculate_F(f_groups_list)
p_boot = np.sum(faux_F > real_F)/num_b
# if the variances are mostly the same, resample from the big box without replacement
else:
box = np.concatenate(groups_list)
for b in np.arange(num_b):
np.random.shuffle(box)
box1 = np.copy(box)
            # deal into faux groups, each one the same size as in real data
f_groups_list = list()
for g in np.arange(len(groups_list)):
f_groups_list.append(box1[0:int(groups_size[g])])
box1 = box1[int(groups_size[g]):]
faux_F[b] = calculate_F(f_groups_list)
p_boot = np.sum(faux_F > real_F)/num_b
return real_F, p_boot
# definition for a self-calculated spread measure: MADAM, the Mean Absolute
# Deviation About the Median (computed here about any descriptor)
# VERSION: accounts for nans when dividing by number of samples
def MADAM(data_pts, descriptor):
v = np.nansum(np.abs(data_pts-descriptor))/np.sum(~np.isnan(data_pts))
return v
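# worked example (illustrative): the median of [1, 2, 3, 4, 100] is 3, so
# MADAM(np.array([1, 2, 3, 4, 100]), 3)  # -> (2+1+0+1+97)/5 = 20.2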
def boot_t(t_g0, t_g1, num_b):
real_d = np.nanmedian(t_g1) - np.nanmedian(t_g0)
faux_d = np.zeros(num_b)
box = np.append(t_g0, t_g1)
for b in np.arange(num_b):
f_g0 = box[np.random.randint(0, box.size, size=t_g0.size)]
f_g1 = box[np.random.randint(0, box.size, size=t_g1.size)]
faux_d[b] = np.nanmedian(f_g1) - np.nanmedian(f_g0)
p = np.sum(np.abs(faux_d) > np.abs(real_d))/num_b
return real_d, p
def boot_pair_t(diff, num_b):
real_d = np.mean(diff)
faux_d = np.zeros(num_b)
for b in np.arange(num_b):
sample = np.random.choice([-1, 1], size = diff.size, replace=True)
faux_d[b] = np.mean(diff*sample)
p = np.sum(faux_d<real_d)/num_b
return real_d, p
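# note on the convention (illustrative self-check): p is the fraction of
# sign-flipped means that fall *below* the real mean, so a consistent negative
# shift gives p near 0 and a consistent positive shift gives p near 1:
# real_d, p = boot_pair_t(-np.ones(20) + 0.1*np.random.randn(20), 1000)
# # expect real_d ~ -1 and p ~ 0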
# definition for finding 95% confidence intervals for each bin in a histogram
# Version: for a **mean** histogram of **several** histograms
# H_array must be arranged [samples, bins]
def CI_avg_hist(H_array, num_b, CI_perc):
real_H = np.nanmean(H_array, axis=0)
faux_H = np.full([H_array.shape[1], num_b], np.nan)
for b in np.arange(num_b):
samp = np.random.randint(0, H_array.shape[0], H_array.shape[0])
faux_H[:, b] = np.nanmean(H_array[samp, :], axis=0)
CI_low, CI_high = np.nanpercentile(faux_H, [(100-CI_perc)/2, 100-((100-CI_perc)/2)],
axis=1)
return real_H, CI_high, CI_low
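# usage sketch (illustrative, synthetic data): bootstrap a 95% CI over 8 cells
# for a 10-bin mean histogram:
# H_demo = np.random.rand(8, 10)  # [samples, bins]
# mean_H, ci_hi, ci_lo = CI_avg_hist(H_demo, 1000, 95)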
# eta = event-triggered averages. Version: pads with NaNs instead of removing events near the edges
def prepare_eta(signal, ts, event_times, win):
win_npts = [ts[ts < ts[0] + np.abs(win[0])].size,
ts[ts < ts[0] + np.abs(win[1])].size]
et_ts = ts[0:np.sum(win_npts)] - ts[0] + win[0]
et_signal = np.empty(0)
if event_times.size > 0:
if signal.ndim == 1:
et_signal = np.zeros((et_ts.size, event_times.size))
for i in np.arange(event_times.size):
if np.logical_or((event_times[i]+win[0]<ts[0]), (event_times[i]+win[1]>ts[-1])):
et_signal[:, i] = np.nan*np.ones(et_ts.size)
else:
# find index of closest timestamp to the event time
ind = np.argmin(np.abs(ts-event_times[i]))
et_signal[:, i] = signal[(ind - win_npts[0]): (ind + win_npts[1])]
elif signal.ndim == 2:
et_signal = np.zeros((signal.shape[0], et_ts.size, event_times.size))
for i in np.arange(event_times.size):
if np.logical_or((event_times[i]+win[0]<ts[0]), (event_times[i]+win[1]>ts[-1])):
et_signal[:, :, i] = np.nan*np.ones([signal.shape[0], et_ts.size])
else:
# find index of closest timestamp to the event time
ind = np.argmin(np.abs(ts-event_times[i]))
et_signal[:, :, i] = signal[:, (ind - win_npts[0]):
(ind + win_npts[1])]
return et_signal, et_ts
# eta = event-triggered averages
# version for point processes: returns event-relative times rather than indices
def prepare_eta_times(pt_times, event_times, win):
et_signal = []
if (pt_times.size > 0) & (event_times.size > 0):
# find pt_times that occur within window of each event_time
for i in np.arange(event_times.size):
ts_section = pt_times[(pt_times > event_times[i] + win[0]) &
(pt_times < event_times[i] + win[1])]
ts_section = ts_section - event_times[i]
et_signal.append(ts_section)
else:
et_signal = [np.empty(0) for k in np.arange(event_times.size)]
return et_signal
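# worked example (illustrative): event-relative times within a +/-1 s window:
# prepare_eta_times(np.array([1.5, 2.5, 8.5]), np.array([2.0, 8.0]), [-1, 1])
# # -> [array([-0.5, 0.5]), array([0.5])]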
# eta = event-triggered averages. Version: skips events too close to the recording edges
def prepare_eta_skip(signal, ts, event_times, win):
win_npts = [ts[ts < ts[0] + np.abs(win[0])].size,
ts[ts < ts[0] + np.abs(win[1])].size]
et_ts = ts[0:np.sum(win_npts)] - ts[0] + win[0]
et_signal = np.empty(0)
if event_times.size > 0:
# remove any events that are too close to the beginning or end of recording
if event_times[0]+win[0] < ts[0]:
event_times = event_times[1:]
if event_times[-1]+win[1] > ts[-1]:
event_times = event_times[:-1]
if signal.ndim == 1:
et_signal = np.zeros((et_ts.size, event_times.size))
for i in np.arange(event_times.size):
# find index of closest timestamp to the event time
ind = np.argmin(np.abs(ts-event_times[i]))
et_signal[:, i] = signal[(ind - win_npts[0]): (ind + win_npts[1])]
elif signal.ndim == 2:
et_signal = np.zeros((signal.shape[0], et_ts.size, event_times.size))
for i in np.arange(event_times.size):
# find index of closest timestamp to the event time
ind = np.argmin(np.abs(ts-event_times[i]))
et_signal[:, :, i] = signal[:, (ind - win_npts[0]):
(ind + win_npts[1])]
return et_signal, et_ts
# %% load data
dataset_folder = (r'C:\Users\akees\Documents\Ashley\Papers\MIND 1\Cell Reports\Dryad upload\Dataset')
cell_files = os.listdir(dataset_folder)
data = [{} for k in np.arange(len(cell_files))]
for i in np.arange(len(cell_files)):
full_file = os.path.join(dataset_folder, cell_files[i])
data[i] = np.load(full_file, allow_pickle=True).item()
states = [{'id':'theta', 'bef':-2.5, 'aft':0.5, 'samp_time':2, 't_win':[-3, 3]},
{'id':'LIA', 'bef':-4, 'aft':-1, 'samp_time':2, 't_win':[-4, 2]}]
ntl = ['nost', 'theta', 'LIA']
# %% process data - for dVm vs dFR analysis
# for each cell, find start and stop times for unlabeled times
for i in np.arange(len(data)):
state_start = np.concatenate([data[i]['theta_start'], data[i]['LIA_start']])
state_start = np.sort(state_start)
state_stop = np.concatenate([data[i]['theta_stop'], data[i]['LIA_stop']])
state_stop = np.sort(state_stop)
data[i]['nost_start'] = np.append(data[i]['Vm_ds_ts'][0], state_stop)
data[i]['nost_stop'] = np.append(state_start, data[i]['Vm_ds_ts'][-1])
# for each cell, make a new spike_times for specifically non-spikelets
for i in np.arange(len(data)):
data[i]['spike_times'] = np.delete(data[i]['sp_times'],
data[i]['spikelets_ind'])
# for each cell, calculate the isi (inter-spike-interval)
# for true spikes only
for i in np.arange(len(data)):
if data[i]['spike_times'].size > 0:
isi0 = data[i]['spike_times'][0] - data[i]['Vm_ds_ts'][0]
data[i]['isi'] = np.ediff1d(data[i]['spike_times'], to_begin=isi0)
else:
data[i]['isi'] = np.empty(0)
# find the (true) spikes that are within bursts
burst_isi = 0.006 # seconds (Mizuseki 2012 0.006)
for i in np.arange(len(data)):
if data[i]['spike_times'].size > 0:
burst_bool = 1*(data[i]['isi'] < burst_isi)
burst_sp = np.where(data[i]['isi'] < burst_isi)[0]
burst_sp0 = np.where(np.ediff1d(burst_bool) == 1)[0]
bursts = [None]*len(burst_sp0)
if burst_sp0.size > 0:
for j in np.arange(len(burst_sp0)-1):
inds = np.append(burst_sp0[j], burst_sp[np.logical_and(burst_sp > burst_sp0[j],
burst_sp < burst_sp0[j+1])])
bursts[j] = data[i]['spike_times'][inds]
# special case for the last burst:
j = len(burst_sp0)-1
inds = np.append(burst_sp0[j], burst_sp[burst_sp > burst_sp0[j]])
bursts[j] = data[i]['spike_times'][inds]
data[i]['bursts'] = bursts
else:
data[i]['bursts'] = [None]*0
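# worked example (illustrative) of the burst detection above: with
# burst_isi = 0.006 s and a recording starting at t = 0, spike times
# [0.100, 0.104, 0.108, 0.500] give isi = [0.100, 0.004, 0.004, 0.392], so
# spikes 0-2 form one burst and data[i]['bursts'] would hold
# [array([0.100, 0.104, 0.108])]; the first spike of a burst is the one whose
# successor arrives within burst_isi.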
# add windows triggered by start of some brain states
# collect relative times for (true) spikes, singles, doublets, bursts, and CS
for l in np.arange(len(states)):
for i in np.arange(len(data)):
t_Vm, t_ts = prepare_eta(data[i]['Vm_s_ds'], data[i]['Vm_ds_ts'],
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
t_sp = prepare_eta_times(data[i]['sp_times'],
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
t_spike = prepare_eta_times(data[i]['spike_times'],
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
spikelet_times = data[i]['sp_times'][data[i]['spikelets_ind'].astype(int)]
t_spikelet = prepare_eta_times(spikelet_times,
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
single_times = data[i]['sp_times'][data[i]['singles_ind'].astype(int)]
t_single = prepare_eta_times(single_times,
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
if data[i]['doublets_ind'].size > 0:
doublet_times = data[i]['sp_times'][data[i]['doublets_ind'][0]]
else:
doublet_times = np.empty(0)
t_doublet = prepare_eta_times(doublet_times,
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
burst_times = np.array([d[0] for d in data[i]['bursts']])
t_burst = prepare_eta_times(burst_times,
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
t_CS = prepare_eta_times(data[i]['CS_start'],
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
data[i][states[l]['id']+'_Vm'] = t_Vm
data[i][states[l]['id']+'_sp'] = t_sp # all spikes and spikelets
data[i][states[l]['id']+'_spike'] = t_spike # all spikes (no spikelets)
data[i][states[l]['id']+'_spikelet'] = t_spikelet
data[i][states[l]['id']+'_single'] = t_single
data[i][states[l]['id']+'_doublet'] = t_doublet
data[i][states[l]['id']+'_burst'] = t_burst
data[i][states[l]['id']+'_CS'] = t_CS
states[l]['t_ts'] = t_ts
# add windows triggered by start of some brain states
# collect relative times for (true) spikes, singles, doublets, bursts, and CS
for l in np.arange(len(states)):
for i in np.arange(len(data)):
single_times = data[i]['sp_times'][data[i]['singles_ind'].astype(int)]
if data[i]['doublets_ind'].size > 0:
doublet_times = np.concatenate(data[i]['sp_times'][data[i]['doublets_ind']])
else:
doublet_times = np.empty(0)
nonCS_times = np.sort(np.concatenate((single_times, doublet_times)))
t_nonCS = prepare_eta_times(nonCS_times,
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
if len(data[i]['CS_ind']) > 0:
CS_times = data[i]['sp_times'][np.concatenate(data[i]['CS_ind'])]
else:
CS_times = np.empty(0)
t_CS = prepare_eta_times(CS_times,
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
data[i][states[l]['id']+'_CS_spikes'] = t_CS
data[i][states[l]['id']+'_nonCS_spikes'] = t_nonCS
# for each event in each cell, calculate the CS rate and CS index
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
CS_rate = np.full(data[i][ntl[l]+'_start'].size, np.nan)
CS_perc = np.full(data[i][ntl[l]+'_start'].size, np.nan)
for j in np.arange(data[i][ntl[l]+'_start'].size):
start = data[i][ntl[l]+'_start'][j]
stop = data[i][ntl[l]+'_stop'][j]
num_spikes = np.sum(np.logical_and(data[i]['spike_times'] > start,
data[i]['spike_times'] < stop))
if len(data[i]['CS_ind']) > 0:
CS_spike_times = data[i]['sp_times'][np.concatenate(data[i]['CS_ind'])]
num_CS_spikes = np.sum(np.logical_and(CS_spike_times > start,
CS_spike_times < stop))
num_CS = np.sum(np.logical_and(data[i]['CS_start'] > start,
data[i]['CS_start'] < stop))
else:
num_CS_spikes = 0
num_CS = 0
            CS_perc[j] = num_CS_spikes/num_spikes if num_spikes > 0 else np.nan  # guard against zero-spike events
CS_rate[j] = num_CS/(stop-start)
data[i][ntl[l]+'_CS_rate'] = CS_rate
data[i][ntl[l]+'_CS_perc'] = CS_perc
# %% event-based organization for dVm vs dFR
# make a dictionary to hold values collapsed over all cells
events = [{} for k in np.arange(len(states))]
# find Vm0, dVm and significance for each run, excluding when Ih is changed
for l in np.arange(len(states)):
all_c_p = np.empty(0)
all_Ih = np.empty(0)
all_Vm0 = np.empty(0)
all_dVm = np.empty(0)
all_dVm_p = np.empty(0)
for i in np.arange(len(data)):
samp_freq = 1/(data[i]['Vm_ds_ts'][1] - data[i]['Vm_ds_ts'][0])
num_ind = int(states[l]['samp_time']*samp_freq)
# find index of dIh_times
dIh_ind = data[i]['dIh_times']*samp_freq
dIh_ind = dIh_ind.astype(int)
c_p = np.zeros(data[i][states[l]['id']+'_start'].size)
Ih = np.zeros(data[i][states[l]['id']+'_start'].size)
Vm0 = np.zeros(data[i][states[l]['id']+'_start'].size)
dVm = np.zeros(data[i][states[l]['id']+'_start'].size)
dVm_p = np.zeros(data[i][states[l]['id']+'_start'].size)
for j in np.arange(data[i][states[l]['id']+'_start'].size):
# find indices
bef_ind = int(np.sum(data[i]['Vm_ds_ts'] <
(data[i][states[l]['id']+'_start'][j] + states[l]['bef'])))
aft_ind = int(np.sum(data[i]['Vm_ds_ts'] <
(data[i][states[l]['id']+'_start'][j] + states[l]['aft'])))
# put nan if times are straddling a time when dIh is changed
dIh_true = np.where((dIh_ind > bef_ind) & (dIh_ind < aft_ind + num_ind))[0]
if dIh_true.size > 0:
Ih[j] = np.nan
Vm0[j] = np.nan
dVm[j] = np.nan
dVm_p[j] = np.nan
else:
if np.logical_or(l==0, l==1):
c_p[j] = data[i][states[l]['id']+'_cell_p']
else:
c_p[j] = data[i]['theta_cell_p']
Ih_ind = np.searchsorted(data[i]['Vm_Ih_ts'],
data[i][states[l]['id']+'_start'][j])
Ih[j] = data[i]['Vm_Ih'][Ih_ind]
# test whether Vm values are significantly different
# Welch's t-test: normal, unequal variances, independent samp
t, p = stats.ttest_ind(data[i]['Vm_ds'][bef_ind:bef_ind+num_ind],
data[i]['Vm_ds'][aft_ind:aft_ind+num_ind],
equal_var=False, nan_policy='omit')
dVm_p[j] = p
if (np.nanmean(data[i]['Vm_ds'][aft_ind:aft_ind+num_ind]) -
np.nanmean(data[i]['Vm_ds'][bef_ind:bef_ind+num_ind])) > 0:
Vm0[j] = np.nanmin(data[i]['Vm_s_ds'][bef_ind:bef_ind+num_ind])
dVm[j] = (np.nanmax(data[i]['Vm_s_ds'][aft_ind:aft_ind+num_ind]) -
np.nanmin(data[i]['Vm_s_ds'][bef_ind:bef_ind+num_ind]))
else:
Vm0[j] = np.nanmax(data[i]['Vm_s_ds'][bef_ind:bef_ind+num_ind])
dVm[j] = (np.nanmin(data[i]['Vm_s_ds'][aft_ind:aft_ind+num_ind]) -
np.nanmax(data[i]['Vm_s_ds'][bef_ind:bef_ind+num_ind]))
data[i][states[l]['id']+'_c_p'] = c_p
data[i][states[l]['id']+'_Ih'] = Ih
data[i][states[l]['id']+'_Vm0'] = Vm0
data[i][states[l]['id']+'_dVm'] = dVm
data[i][states[l]['id']+'_dVm_p'] = dVm_p
all_c_p = np.append(all_c_p, c_p)
all_Ih = np.append(all_Ih, Ih)
all_Vm0 = np.append(all_Vm0, Vm0)
all_dVm = np.append(all_dVm, dVm)
all_dVm_p = np.append(all_dVm_p, dVm_p)
events[l]['c_p'] = all_c_p
events[l]['Ih'] = all_Ih
events[l]['Vm0'] = all_Vm0
events[l]['dVm'] = all_dVm
events[l]['dVm_p'] = all_dVm_p
# add windows triggered by start of some brain states
for l in np.arange(len(states)):
for i in np.arange(len(data)):
t_Vm, t_ts = prepare_eta(data[i]['Vm_s_ds'], data[i]['Vm_ds_ts'],
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
t_sp = prepare_eta_times(data[i]['sp_times'],
data[i][states[l]['id']+'_start'],
states[l]['t_win'])
data[i][states[l]['id']+'_Vm'] = t_Vm
data[i][states[l]['id']+'_sp'] = t_sp
states[l]['t_ts'] = t_ts
# add triggered windows to event dictionary
for l in np.arange(len(events)):
raster_sp = []
psth_sp = np.empty(0)
Vm = np.empty((states[l]['t_ts'].shape[0], 0))
duration = np.empty(0)
cell_id = np.empty(0)
for i in np.arange(len(data)):
cell_psth_sp = np.empty(0)
if data[i][states[l]['id']+'_start'].size > 0:
Vm = np.append(Vm, data[i][states[l]['id']+'_Vm'], axis=1)
duration = np.append(duration, (data[i][states[l]['id']+'_stop'] -
data[i][states[l]['id']+'_start']))
if isinstance(data[i]['cell_id'], str):
ind = data[i]['cell_id'].find('_')
cell_int = int(data[i]['cell_id'][:ind])*np.ones(data[i][states[l]['id']+'_start'].size)
cell_id = np.append(cell_id, cell_int)
else:
cell_int = data[i]['cell_id']*np.ones(data[i][states[l]['id']+'_start'].size)
cell_id = np.append(cell_id, cell_int)
for j in np.arange(data[i][states[l]['id']+'_start'].size):
psth_sp = np.append(psth_sp, data[i][states[l]['id']+'_sp'][j])
cell_psth_sp = np.append(cell_psth_sp, data[i][states[l]['id']+'_sp'][j])
raster_sp.append(data[i][states[l]['id']+'_sp'][j])
data[i][states[l]['id']+'_psth_sp'] = cell_psth_sp
# remove nans
no_nan = np.logical_and([~np.isnan(Vm).any(axis=0)],
[~np.isnan(events[l]['Vm0'])]).flatten()
events[l]['Vm'] = Vm[:, no_nan]
events[l]['cell_id'] = cell_id[no_nan]
events[l]['duration'] = duration[no_nan]
events[l]['raster_sp'] = list(compress(raster_sp, no_nan))
events[l]['c_p'] = events[l]['c_p'][no_nan]
events[l]['Ih'] = events[l]['Ih'][no_nan]
events[l]['Vm0'] = events[l]['Vm0'][no_nan]
events[l]['dVm'] = events[l]['dVm'][no_nan]
events[l]['dVm_p'] = events[l]['dVm_p'][no_nan]
# %% process data - for CS/burst analysis
# for each (true) spike, determine which state it occurs in (and those in no state)
# Version: all spikes, not just those used for spike threshold analysis
for i in np.arange(len(data)):
nost_sp = np.ones(data[i]['spike_times'].size, dtype=bool)
for l in np.arange(len(states)):
state_sp = np.zeros(data[i]['spike_times'].size, dtype=bool)
for j in np.arange(data[i][states[l]['id']+'_start'].size):
# find the spikes that occur in that event
temp_bool = np.all((data[i]['spike_times'] > data[i][states[l]['id']+'_start'][j],
data[i]['spike_times'] < data[i][states[l]['id']+'_stop'][j]),
axis=0)
state_sp = state_sp + temp_bool
data[i][states[l]['id']+'_spike_bool'] = np.squeeze(state_sp)
nost_sp = nost_sp*[state_sp == False]
data[i]['nost_spike_bool'] = np.squeeze(nost_sp)
# for each burst, determine which state it occurs in (and those in no state)
for i in np.arange(len(data)):
burst_start = np.array([d[0] for d in data[i]['bursts']])
nost_bst = np.ones(burst_start.size, dtype=bool)
for l in np.arange(len(states)):
state_bst = np.zeros(burst_start.size, dtype=bool)
for j in np.arange(data[i][states[l]['id']+'_start'].size):
# find the bursts that start during that event
temp_bool = np.all((burst_start > data[i][states[l]['id']+'_start'][j],
burst_start < data[i][states[l]['id']+'_stop'][j]),
axis=0)
state_bst = state_bst + temp_bool
data[i][states[l]['id']+'_bst_bool'] = np.squeeze(state_bst)
nost_bst = nost_bst*[state_bst == False]
data[i]['nost_bst_bool'] = np.squeeze(nost_bst)
# for each cell, determine the % of spikes in bursts for theta, LIA, nost
ntl = ['nost', 'theta', 'LIA']
for i in np.arange(len(data)):
burst_perc = np.full(3, np.nan)
sp_times = data[i]['spike_times']
if len(data[i]['bursts']) > 0:
burst_times = np.concatenate(data[i]['bursts'])
else:
        burst_times = np.empty(0)  # keep the type consistent when there are no bursts
for l in np.arange(len(ntl)):
total_spikes = 0
burst_spikes = 0
for j in np.arange(data[i][ntl[l]+'_start'].size):
start = data[i][ntl[l]+'_start'][j]
stop = data[i][ntl[l]+'_stop'][j]
spikes = np.sum(np.logical_and(sp_times > start, sp_times < stop))
bursts = np.sum(np.logical_and(burst_times > start, burst_times < stop))
total_spikes = total_spikes + spikes
burst_spikes = burst_spikes + bursts
if total_spikes != 0:
burst_perc[l] = burst_spikes/total_spikes
data[i]['burst_perc'] = burst_perc
# for each CS, determine which state it occurs in (and those in no state)
for i in np.arange(len(data)):
nost_CS = np.ones(data[i]['CS_start'].size, dtype=bool)
for l in np.arange(len(states)):
state_CS = np.zeros(data[i]['CS_start'].size, dtype=bool)
for j in np.arange(data[i][states[l]['id']+'_start'].size):
            # find the CS that start during that event
temp_bool = np.all((data[i]['CS_start'] > data[i][states[l]['id']+'_start'][j],
data[i]['CS_start'] < data[i][states[l]['id']+'_stop'][j]),
axis=0)
state_CS = state_CS + temp_bool
data[i][states[l]['id']+'_CS_bool'] = np.squeeze(state_CS)
nost_CS = nost_CS*[state_CS == False]
data[i]['nost_CS_bool'] = np.squeeze(nost_CS)
# collect the CS features divided by state
keep_cells = np.where([isinstance(d['cell_id'], int) for d in data])[0]
CS_ntl = [{} for l in np.arange(len(ntl))]
for l in np.arange(len(ntl)):
num_sp = np.empty(0)
CS_dur = np.empty(0)
CS_height_Vm = np.empty(0)
CS_rel_ahp_Vm = np.empty(0)
for k in np.arange(keep_cells.size):
i = keep_cells[k]
num_sp = np.append(num_sp, np.array([d.size for d in data[i]['CS_ind']])[data[i][ntl[l]+'_CS_bool']])
CS_dur = np.append(CS_dur, (data[i]['CS_stop'] - data[i]['CS_start'])[data[i][ntl[l]+'_CS_bool']])
CS_height_Vm = np.append(CS_height_Vm, (data[i]['CS_max_Vm'][data[i][ntl[l]+'_CS_bool']] -
data[i]['CS_start_Vm'][data[i][ntl[l]+'_CS_bool']]))
CS_rel_ahp_Vm = np.append(CS_rel_ahp_Vm, (data[i]['CS_stop_Vm'][data[i][ntl[l]+'_CS_bool']] -
data[i]['CS_start_Vm'][data[i][ntl[l]+'_CS_bool']]))
CS_ntl[l]['num_sp'] = num_sp
CS_ntl[l]['CS_dur'] = CS_dur
CS_ntl[l]['CS_height_Vm'] = CS_height_Vm
CS_ntl[l]['CS_rel_ahp_Vm'] = CS_rel_ahp_Vm
# %% set figure parameters
# set colors
# states
c_run_theta = [0.398, 0.668, 0.547]
c_nonrun_theta = [0.777, 0.844, 0.773]
c_LIA = [0.863, 0.734, 0.582]
# response type
c_hyp = [0.184, 0.285, 0.430]
c_dep = [0.629, 0.121, 0.047]
c_no = [1, 1, 1]
c_lhyp = [0.62, 0.71, 0.84]
c_ldep = [0.97, 0.71, 0.67]
# dependent variables
c_sp = [0.398, 0.461, 0.703]
c_Vm = [0.398, 0.461, 0.703]
# other
c_lgry = [0.75, 0.75, 0.75]
c_mgry = [0.5, 0.5, 0.5]
c_dgry = [0.25, 0.25, 0.25]
c_wht = [1, 1, 1]
c_blk = [0, 0, 0]
c_bwn = [0.340, 0.242, 0.125]
c_lbwn = [0.645, 0.484, 0.394]
c_grn = [0.148, 0.360, 0.000]
c_dVm = [c_hyp, c_mgry, c_dep]
c_state = [c_mgry, c_run_theta, c_lbwn]
c_state_dark = [c_dgry, c_grn, c_bwn]
c_tl = [c_run_theta, c_lbwn]
c_tnl = [c_run_theta, c_blk, c_lbwn]
# set style defaults
mpl.rcParams['font.size'] = 8
mpl.rcParams['savefig.dpi'] = 1200
mpl.rcParams['lines.linewidth'] = 1.5
mpl.rcParams['font.sans-serif'] = "Arial"
mpl.rcParams['font.family'] = "sans-serif"
mpl.rcParams['axes.spines.right'] = False
mpl.rcParams['axes.spines.top'] = False
mpl.rcParams['axes.linewidth'] = 1
mpl.rcParams['xtick.major.size'] = 4
mpl.rcParams['xtick.major.width'] = 1
mpl.rcParams['ytick.major.size'] = 4
mpl.rcParams['ytick.major.width'] = 1
mpl.rcParams['boxplot.whiskerprops.linestyle'] = '-'
mpl.rcParams['patch.force_edgecolor'] = True
mpl.rcParams['patch.facecolor'] = 'b'
# set figure output folder
fig_folder = r'C:\Users\akees\Documents\Ashley\Figures\2020-05_Paper_MIND1\FigS3'
# set which states to plot
## all states
#d_l = [0, 1, 2]
# theta only
d_l = [0, 1]
## LIA only
#d_l = [0, 2]
# %% make hist isi figure
keep_cells = [isinstance(d['cell_id'], int) for d in data]
theta_cell_p = np.array([d['theta_cell_p'] for d in data])[keep_cells]
LIA_cell_p = np.array([d['LIA_cell_p'] for d in data])[keep_cells]
c_state_hist = [c_mgry, c_grn, c_bwn]
c_state_fill = [c_lgry, c_run_theta, c_lbwn]
# prep numbers for mean hist isi - divided between states
bins = np.arange(0, 200, 1)
H = np.full([len(data), bins.size-1, len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
H[i, :, l] = np.histogram(1000*data[i]['isi'][data[i][ntl[l]+'_spike_bool']],
bins=bins, density=True)[0]
# remove extra recordings from cells
H = H[keep_cells, :, :]
# define the 95% CI for each bin by randomly selecting (with replacement) over cells
H_mean = np.full([H.shape[1], H.shape[2]], np.nan)
H_CI_high = np.full([H.shape[1], H.shape[2]], np.nan)
H_CI_low = np.full([H.shape[1], H.shape[2]], np.nan)
CI_perc = 95
num_b = 1000
for l in np.arange(len(ntl)):
real_H, CI_high, CI_low = CI_avg_hist(H[:, :, l], num_b, CI_perc)
H_mean[:, l] = real_H
H_CI_high[:, l] = CI_high
H_CI_low[:, l] = CI_low
# plot the mean hist isi
fig, ax = plt.subplots(1, figsize=[4.5, 2.2])
for l in d_l:
ax.plot(bins[:-1], H_mean[:, l], color=c_state_hist[l], zorder=2)
ax.fill_between(bins[:-1], H_CI_low[:, l], H_CI_high[:, l],
facecolor=c_state_fill[l], linewidth=0, zorder=1, alpha=0.25)
#ax.axvline(6, color=c_blk, linestyle='--')
#ax.set_xlim([0, 200])
ax.set_ylim([0, 0.27])
ax.set_yticks([0, 0.05, 0.1, 0.15, 0.2, 0.25])
ax.set_yticklabels([0, '', 0.1, '', 0.2, ''])
ax.set_ylabel('proportion')
ax.set_xscale('log')
ax.set_xlim([1, 100])
ax.set_xlabel('inter-spike interval (ms)')
fig.tight_layout()
plt.savefig(os.path.join(fig_folder, ntl[d_l[-1]]+'_hist_isi.png'), transparent=True)
# do the stats for the above figure
S = np.full([len(data), len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
isi = 1000*data[i]['isi'][data[i][ntl[l]+'_spike_bool']]
if isi.size > 10:
S[i, l] = np.nanmedian(isi)
# remove extra recordings from cells
S = S[keep_cells, :]
# do the paired boot stats
num_b = 1000
p = np.full(len(ntl) - 1, np.nan)
d = np.full(len(ntl) - 1, np.nan)
for l in np.arange(len(ntl) - 1):
dif = S[:, l+1] - S[:, 0]
# remove nans
dif = dif[~np.isnan(dif)]
d[l], p[l] = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# do the paired boot stats for theta hyp and LIA dep cells only
num_b = 1000
# theta hyp cells
dif = S[:, 1][theta_cell_p < 0.05] - S[:, 0][theta_cell_p < 0.05]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# LIA dep cells
dif = S[:, 2][LIA_cell_p > 0.95] - S[:, 0][LIA_cell_p > 0.95]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# descriptive numbers
l = 1
np.sum(~np.isnan(S[:, l]))
np.nanmedian(S[:, l])
np.nanstd(S[:, l])
MADAM(S[:, l], np.nanmedian(S[:, l]))
# %% make CS features figures - number of spikes per CS
keep_cells = [isinstance(d['cell_id'], int) for d in data]
theta_cell_p = np.array([d['theta_cell_p'] for d in data])[keep_cells]
LIA_cell_p = np.array([d['LIA_cell_p'] for d in data])[keep_cells]
c_state_hist = [c_mgry, c_grn, c_bwn]
c_state_fill = [c_lgry, c_run_theta, c_lbwn]
# prep numbers for mean hist of # spikes in CS - divided between states
bins = np.arange(0.5, 51.5, 1)
H = np.full([len(data), bins.size-1, len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
num_sp = np.array([d.size for d in data[i]['CS_ind']])[data[i][ntl[l]+'_CS_bool']]
H[i, :, l] = np.histogram(num_sp, bins=bins)[0]
# normalize to total number of CS
H[i, :, l] = H[i, :, l]/np.sum(H[i, :, l])
# remove extra recordings from cells
H = H[keep_cells, :, :]
# define the 95% CI for each bin by randomly selecting (with replacement) over cells
H_mean = np.full([H.shape[1], H.shape[2]], np.nan)
H_CI_high = np.full([H.shape[1], H.shape[2]], np.nan)
H_CI_low = np.full([H.shape[1], H.shape[2]], np.nan)
CI_perc = 95
num_b = 1000
for l in np.arange(len(ntl)):
real_H, CI_high, CI_low = CI_avg_hist(H[:, :, l], num_b, CI_perc)
H_mean[:, l] = real_H
H_CI_high[:, l] = CI_high
H_CI_low[:, l] = CI_low
# plot the mean hist isi
# create a figure with axes of defined size
fig = plt.figure(figsize=[2, 2])
# The first items are for padding and the second items are for the axes.
# sizes are in inch.
h = [Size.Fixed(0.5), Size.Fixed(1.2)]
v = [Size.Fixed(0.5), Size.Fixed(1.2)]
divider = Divider(fig, (0.0, 0.0, 1., 1.), h, v, aspect=False)
ax = Axes(fig, divider.get_position())
ax.set_axes_locator(divider.new_locator(nx=1, ny=1))
fig.add_axes(ax)
#for l in np.arange(len(ntl)):
for l in d_l:
ax.plot(np.arange(1, bins.size), H_mean[:, l], color=c_state_hist[l], zorder=2)
ax.fill_between(np.arange(1, bins.size), H_CI_low[:, l], H_CI_high[:, l],
facecolor=c_state_fill[l], linewidth=0, zorder=1, alpha=0.25)
ax.set_ylim([0, 0.4])
ax.set_yticks([0, 0.2, 0.4])
ax.set_yticklabels([0, '', 0.4])
ax.set_xlim([3, 12])
ax.set_xticks([3, 6, 9, 12])
ax.set_xlabel('number of spikes')
ax.set_ylabel('proportion')
plt.savefig(os.path.join(fig_folder, ntl[d_l[-1]]+'_CS_num_spikes.png'), transparent=True)
# do the stats for the above figure
S = np.full([len(data), len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
num_sp = np.array([d.size for d in data[i]['CS_ind']])[data[i][ntl[l]+'_CS_bool']]
if num_sp.size > 0:
#S[i, l] = stats.mode(num_sp)[0]
S[i, l] = np.nanmedian(num_sp)
# remove extra recordings from cells
S = S[keep_cells, :]
# do the paired boot stats
num_b = 1000
p = np.full(len(ntl) - 1, np.nan)
d = np.full(len(ntl) - 1, np.nan)
for l in np.arange(len(ntl) - 1):
dif = S[:, l+1] - S[:, 0]
# remove nans
dif = dif[~np.isnan(dif)]
d[l], p[l] = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# do the paired boot stats for theta hyp and LIA dep cells only
num_b = 1000
# theta hyp cells
dif = S[:, 1][theta_cell_p < 0.05] - S[:, 0][theta_cell_p < 0.05]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# LIA dep cells
dif = S[:, 2][LIA_cell_p > 0.95] - S[:, 0][LIA_cell_p > 0.95]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# descriptive numbers
l = 1
np.sum(~np.isnan(S[:, l]))
np.nanmedian(S[:, l])
np.nanstd(S[:, l])
MADAM(S[:, l], np.nanmedian(S[:, l]))
# %% make CS features figures - CS duration
# prep numbers for mean hist CS duration - divided between states
bins = np.arange(0, 0.5, 0.02)
H = np.full([len(data), bins.size-1, len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
CS_dur = (data[i]['CS_stop'] - data[i]['CS_start'])[data[i][ntl[l]+'_CS_bool']]
H[i, :, l] = np.histogram(CS_dur, bins=bins)[0]
# normalize to total number of CS
H[i, :, l] = H[i, :, l]/np.sum(H[i, :, l])
# define the 95% CI for each bin by randomly selecting (with replacement) over cells
H_mean = np.full([H.shape[1], H.shape[2]], np.nan)
H_CI_high = np.full([H.shape[1], H.shape[2]], np.nan)
H_CI_low = np.full([H.shape[1], H.shape[2]], np.nan)
CI_perc = 95
num_b = 1000
for l in np.arange(len(ntl)):
real_H, CI_high, CI_low = CI_avg_hist(H[:, :, l], num_b, CI_perc)
H_mean[:, l] = real_H
H_CI_high[:, l] = CI_high
H_CI_low[:, l] = CI_low
# plot the mean hist CS duration
# create a figure with axes of defined size
fig = plt.figure(figsize=[2, 2])
# The first items are for padding and the second items are for the axes.
# sizes are in inch.
h = [Size.Fixed(0.5), Size.Fixed(1.2)]
v = [Size.Fixed(0.5), Size.Fixed(1.2)]
divider = Divider(fig, (0.0, 0.0, 1., 1.), h, v, aspect=False)
ax = Axes(fig, divider.get_position())
ax.set_axes_locator(divider.new_locator(nx=1, ny=1))
fig.add_axes(ax)
for l in d_l:
ax.plot(bins[:-1], H_mean[:, l], color=c_state_hist[l], zorder=2)
ax.fill_between(bins[:-1], H_CI_low[:, l], H_CI_high[:, l],
facecolor=c_state_fill[l], linewidth=0, zorder=1, alpha=0.25)
ax.set_xlim([0, 0.2])
ax.set_ylim([0, 0.5])
ax.set_xticks([0, 0.1, 0.2])
ax.set_yticks([0, 0.25, 0.5])
ax.set_yticklabels([0, '', 0.5])
ax.set_xlabel('duration (s)')  # plotted durations are in seconds
plt.savefig(os.path.join(fig_folder, ntl[d_l[-1]]+'_CS_dur.png'), transparent=True)
# do the stats for the above figure
S = np.full([len(data), len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
CS_dur = 1000*(data[i]['CS_stop'] - data[i]['CS_start'])[data[i][ntl[l]+'_CS_bool']]
if CS_dur.size > 0:
S[i, l] = np.nanmedian(CS_dur)
# remove extra recordings from cells
S = S[keep_cells, :]
## do the Kruskal-Wallis test
#H, p_kw = stats.kruskal(S[:, 0], S[:, 1], S[:, 2], nan_policy='omit')
# do the paired boot stats
num_b = 1000
p = np.full(len(ntl) - 1, np.nan)
d = np.full(len(ntl) - 1, np.nan)
for l in np.arange(len(ntl) - 1):
dif = S[:, l+1] - S[:, 0]
# remove nans
dif = dif[~np.isnan(dif)]
d[l], p[l] = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# do the paired boot stats for theta hyp and LIA dep cells only
num_b = 1000
# theta hyp cells
dif = S[:, 1][theta_cell_p < 0.05] - S[:, 0][theta_cell_p < 0.05]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# LIA dep cells
dif = S[:, 2][LIA_cell_p > 0.95] - S[:, 0][LIA_cell_p > 0.95]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# LIA nondep cells
dif = S[:, 2][LIA_cell_p < 0.95] - S[:, 0][LIA_cell_p < 0.95]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# descriptive numbers
l = 1
np.sum(~np.isnan(S[:, l]))
np.nanmedian(S[:, l])
np.nanstd(S[:, l])
MADAM(S[:, l], np.nanmedian(S[:, l]))
# %% make CS features figures - subthreshold depolarization during CS
# prep numbers for mean hist CS max-start Vm - divided between states
bins = np.arange(0, 40, 2)
H = np.full([len(data), bins.size-1, len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
CS_height_Vm = (data[i]['CS_max_Vm'][data[i][ntl[l]+'_CS_bool']] -
data[i]['CS_start_Vm'][data[i][ntl[l]+'_CS_bool']])
H[i, :, l] = np.histogram(CS_height_Vm, bins=bins)[0]
# normalize to total number of CS
H[i, :, l] = H[i, :, l]/np.sum(H[i, :, l])
# remove extra recordings from cells
H = H[keep_cells, :, :]
# define the 95% CI for each bin by randomly selecting (with replacement) over cells
H_mean = np.full([H.shape[1], H.shape[2]], np.nan)
H_CI_high = np.full([H.shape[1], H.shape[2]], np.nan)
H_CI_low = np.full([H.shape[1], H.shape[2]], np.nan)
CI_perc = 95
num_b = 1000
for l in np.arange(len(ntl)):
real_H, CI_high, CI_low = CI_avg_hist(H[:, :, l], num_b, CI_perc)
H_mean[:, l] = real_H
H_CI_high[:, l] = CI_high
H_CI_low[:, l] = CI_low
# plot the mean hist isi
# create a figure with axes of defined size
fig = plt.figure(figsize=[2, 2])
# The first items are for padding and the second items are for the axes.
# sizes are in inch.
h = [Size.Fixed(0.5), Size.Fixed(1.2)]
v = [Size.Fixed(0.5), Size.Fixed(1.2)]
divider = Divider(fig, (0.0, 0.0, 1., 1.), h, v, aspect=False)
ax = Axes(fig, divider.get_position())
ax.set_axes_locator(divider.new_locator(nx=1, ny=1))
fig.add_axes(ax)
for l in d_l:
ax.plot(bins[:-1], H_mean[:, l], color=c_state_hist[l], zorder=2)
ax.fill_between(bins[:-1], H_CI_low[:, l], H_CI_high[:, l],
facecolor=c_state_fill[l], linewidth=0, zorder=1, alpha=0.25)
#ax.set_xlabel('CS height (mV)')
#ax.set_ylabel('proportion of CS')
ax.set_xlim([0, 35])
ax.set_xticks([0, 10, 20, 30])
ax.set_xlabel('subthreshold depolarization (mV)')
ax.set_ylim([0, 0.3])
ax.set_yticks([0, 0.1, 0.2, 0.3])
ax.set_yticklabels([0, '', '', 0.3])
plt.savefig(os.path.join(fig_folder, ntl[d_l[-1]]+'_CS_height.png'), transparent=True)
# do the stats for the above figure
S = np.full([len(data), len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
CS_height_Vm = (data[i]['CS_max_Vm'][data[i][ntl[l]+'_CS_bool']] -
data[i]['CS_start_Vm'][data[i][ntl[l]+'_CS_bool']])
if CS_height_Vm.size > 0:
S[i, l] = np.nanmedian(CS_height_Vm)
# remove extra recordings from cells
S = S[keep_cells, :]
# do the paired boot stats
num_b = 1000
p = np.full(len(ntl) - 1, np.nan)
d = np.full(len(ntl) - 1, np.nan)
for l in np.arange(len(ntl) - 1):
dif = S[:, l+1] - S[:, 0]
# remove nans
dif = dif[~np.isnan(dif)]
d[l], p[l] = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# do the paired boot stats for theta hyp and LIA dep cells only
num_b = 1000
# theta hyp cells
dif = S[:, 1][theta_cell_p < 0.05] - S[:, 0][theta_cell_p < 0.05]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# LIA dep cells
dif = S[:, 2][LIA_cell_p > 0.95] - S[:, 0][LIA_cell_p > 0.95]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
## LIA nondep cells
#dif = S[:, 2][LIA_cell_p < 0.95] - S[:, 0][LIA_cell_p < 0.95]
## remove nans
#dif = dif[~np.isnan(dif)]
#d, p = boot_pair_t(dif, num_b)
#print(dif.size)
#print(d)
#print(p)
# descriptive numbers
l = 1
np.sum(~np.isnan(S[:, l]))
np.nanmedian(S[:, l])
np.nanstd(S[:, l])
MADAM(S[:, l], np.nanmedian(S[:, l]))
# %% make CS features figures - after-CS hyperpolarization
# prep numbers for mean hist CS relative ahp - divided between states
bins = np.arange(-25, 10, 2)
H = np.full([len(data), bins.size-1, len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
CS_rel_ahp_Vm = (data[i]['CS_stop_Vm'][data[i][ntl[l]+'_CS_bool']] -
data[i]['CS_start_Vm'][data[i][ntl[l]+'_CS_bool']])
H[i, :, l] = np.histogram(CS_rel_ahp_Vm, bins=bins)[0]
# normalize to total number of CS
H[i, :, l] = H[i, :, l]/np.sum(H[i, :, l])
# remove extra recordings from cells
H = H[keep_cells, :, :]
# define the 95% CI for each bin by randomly selecting (with replacement) over cells
H_mean = np.full([H.shape[1], H.shape[2]], np.nan)
H_CI_high = np.full([H.shape[1], H.shape[2]], np.nan)
H_CI_low = np.full([H.shape[1], H.shape[2]], np.nan)
CI_perc = 95
num_b = 1000
for l in np.arange(len(ntl)):
real_H, CI_high, CI_low = CI_avg_hist(H[:, :, l], num_b, CI_perc)
H_mean[:, l] = real_H
H_CI_high[:, l] = CI_high
H_CI_low[:, l] = CI_low
# plot the mean hist isi
# create a figure with axes of defined size
fig = plt.figure(figsize=[2, 2])
# The first items are for padding and the second items are for the axes.
# Sizes are in inches.
h = [Size.Fixed(0.5), Size.Fixed(1.2)]
v = [Size.Fixed(0.5), Size.Fixed(1.2)]
divider = Divider(fig, (0.0, 0.0, 1., 1.), h, v, aspect=False)
ax = Axes(fig, divider.get_position())
ax.set_axes_locator(divider.new_locator(nx=1, ny=1))
fig.add_axes(ax)
for l in d_l:
ax.plot(bins[:-1], H_mean[:, l], color=c_state_hist[l], zorder=2)
ax.fill_between(bins[:-1], H_CI_low[:, l], H_CI_high[:, l],
facecolor=c_state_fill[l], linewidth=0, zorder=1, alpha=0.25)
#ax.set_xlabel('CS relative afterhyperpolarization (mV)')
#ax.set_ylabel('proportion of CS')
ax.set_xlim([-20, 3])
ax.set_xticks([-20, -10, 0])
ax.set_xlabel('hyperpolarization (mV)')
ax.set_ylim([0, 0.5])
ax.set_yticks([0, 0.25, 0.5])
ax.set_yticklabels([0, '', 0.5])
plt.savefig(os.path.join(fig_folder, ntl[d_l[-1]]+'_CS_ahp.png'), transparent=True)
# do the stats for the above figure
S = np.full([len(data), len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
CS_rel_ahp_Vm = (data[i]['CS_stop_Vm'][data[i][ntl[l]+'_CS_bool']] -
data[i]['CS_start_Vm'][data[i][ntl[l]+'_CS_bool']])
if CS_rel_ahp_Vm.size > 0:
S[i, l] = np.nanmedian(CS_rel_ahp_Vm)
# remove extra recordings from cells
S = S[keep_cells, :]
## do the friedman test (nonparametric repeated measures anova)
## remove cells that have any nans
#S_nonan = S[np.all(~np.isnan(S), axis=1), :]
#X2, p_fried = stats.friedmanchisquare(S_nonan[:, 0], S_nonan[:, 1], S_nonan[:, 2])
#X2, p_fried = stats.friedmanchisquare(S[:, 0], S[:, 1], S[:, 2])
# do the paired boot stats
num_b = 1000
p = np.full(len(ntl) - 1, np.nan)
d = np.full(len(ntl) - 1, np.nan)
for l in np.arange(len(ntl) - 1):
dif = S[:, l+1] - S[:, 0]
# remove nans
dif = dif[~np.isnan(dif)]
d[l], p[l] = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# do the paired boot stats for theta hyp and LIA dep cells only
num_b = 1000
# theta hyp cells
dif = S[:, 1][theta_cell_p < 0.05] - S[:, 0][theta_cell_p < 0.05]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# LIA dep cells
dif = S[:, 2][LIA_cell_p > 0.95] - S[:, 0][LIA_cell_p > 0.95]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# theta nonhyp cells
dif = S[:, 1][theta_cell_p > 0.05] - S[:, 0][theta_cell_p > 0.05]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# descriptive numbers
l = 1
np.sum(~np.isnan(S[:, l]))
np.nanmedian(S[:, l])
np.nanstd(S[:, l])
MADAM(S[:, l], np.nanmedian(S[:, l]))
# %% CS-based stats for the average histograms
measure = 'num_sp'
measure = 'CS_dur'
measure = 'CS_height_Vm'
measure = 'CS_rel_ahp_Vm'
num_b = 1000
g0 = CS_ntl[0][measure]
g1 = CS_ntl[1][measure]
g2 = CS_ntl[2][measure]
groups_list = [g0, g1, g2]
real_F, p_boot = boot_anova(groups_list, num_b)
# try the stats test again with a Kruskal-Wallis test (nonparametric 1-way ANOVA)
H, p_kw = stats.kruskal(g0, g1, g2, nan_policy='omit')
# do the pairwise t-tests
boot_t(g0, g1, 1000)
boot_t(g0, g2, 1000)
boot_t(g1, g2, 1000)
# do the 2-sample Kolmogorov–Smirnov test (good for bimodal distributions?)
stats.ks_2samp(g0, g1)
stats.ks_2samp(g0, g2)
stats.ks_2samp(g1, g2)
# some numbers from the histogram
l = 1
CS_ntl[l][measure].size
np.nanmedian(CS_ntl[l][measure])
np.nanstd(CS_ntl[l][measure])
MADAM(CS_ntl[l][measure], np.nanmedian(CS_ntl[l][measure]))
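# A minimal sketch of the assumed two-sample boot_t helper used above: a
# bootstrap of the difference in means under a pooled null distribution.
# The name _boot_t_sketch is hypothetical.
def _boot_t_sketch(g0, g1, num_b):
    real_d = np.nanmean(g1) - np.nanmean(g0)
    pooled = np.concatenate([g0, g1])
    boot_d = np.full(num_b, np.nan)
    for b in np.arange(num_b):
        b0 = np.random.choice(pooled, size=g0.size, replace=True)
        b1 = np.random.choice(pooled, size=g1.size, replace=True)
        boot_d[b] = np.nanmean(b1) - np.nanmean(b0)
    p = np.mean(np.abs(boot_d) >= np.abs(real_d))
    return real_d, p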
# %% make CS rate and index figures
keep_cells = [isinstance(d['cell_id'], int) for d in data]
theta_cell_p = np.array([d['theta_cell_p'] for d in data])[keep_cells]
LIA_cell_p = np.array([d['LIA_cell_p'] for d in data])[keep_cells]
## find which cells have a significant change - boot
#anova_cells = np.full(len(data), np.nan)
#t_boot_cells = np.full([len(data), len(states)], np.nan)
#real_d_cells = np.full([len(data), len(states)], np.nan)
#num_b = 1000
#for i in np.arange(len(data)):
# groups_list = [data[i]['nost_CS_rate'], data[i]['theta_CS_rate'],
# data[i]['LIA_CS_rate']]
# real_F, anova_cells[i] = boot_anova(groups_list, num_b)
# # if the anova is significant, do the adhoc stats
# if anova_cells[i] < 0.05:
# for l in np.arange(len(states)):
# real_d_cells[i, l], t_boot_cells[i, l] = boot_t(groups_list[0], groups_list[l+1], num_b)
## remove extra recordings
#anova_cells = anova_cells[keep_cells]
#t_boot_cells = t_boot_cells[keep_cells, :]
#real_d_cells = real_d_cells[keep_cells, :]
# find which cells have a significant change - nonparametric stats
p_kw = np.full(len(data), np.nan)
p_mw = np.full([len(data), len(states)], np.nan)
num_b = 1000
for i in np.arange(len(data)):
groups_list = [data[i]['nost_CS_rate'], data[i]['theta_CS_rate'],
data[i]['LIA_CS_rate']]
try:
H, p_kw[i] = stats.kruskal(groups_list[0], groups_list[1], groups_list[2],
nan_policy='omit')
except ValueError:
p_kw[i] = np.nan
# if the anova is significant, do the adhoc stats
if p_kw[i] < 0.05:
for l in np.arange(len(states)):
U, p_mw[i, l] = stats.mannwhitneyu(groups_list[0], groups_list[l+1],
alternative='two-sided')
# remove extra recordings
p_kw = p_kw[keep_cells]
p_mw = p_mw[keep_cells, :]
# each cells' average frequency of CS during theta, LIA, and no state
# Version: theta and LIA separate
# prep numbers
# only take first recording from each cell
S = np.full([len(data), len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
num_CS = np.sum([data[i][ntl[l]+'_CS_bool']])
total_time = np.sum(data[i][ntl[l]+'_stop'] - data[i][ntl[l]+'_start'])
S[i, l] = num_CS/total_time
# remove extra recordings from cells
S = S[keep_cells, :]
# plot the stack plot for cell values for each state
fig, ax = plt.subplots(1, figsize=[2.3, 2])
line_x = np.array([1.75, 3.25])
bar_x = np.array([1, 4])
y = S[:, d_l]
for i in np.arange(y.shape[0]):
ax.plot(line_x, y[i, :], color=c_lgry, zorder=1)
if d_l == [0, 1]:
if theta_cell_p[i] < 0.05:
ax.plot(line_x, y[i, :], color=rgb2hex(c_hyp), zorder=2)
if theta_cell_p[i] > 0.95:
ax.plot(line_x, y[i, :], color=rgb2hex(c_dep), zorder=2)
elif d_l == [0, 2]:
if LIA_cell_p[i] < 0.05:
ax.plot(line_x, y[i, :], color=rgb2hex(c_hyp), zorder=2)
if LIA_cell_p[i] > 0.95:
ax.plot(line_x, y[i, :], color=rgb2hex(c_dep), zorder=2)
for l in np.arange(y.shape[1]):
# remove nans
no_nan = y[:, l]
no_nan = no_nan[~np.isnan(no_nan)]
bp = ax.boxplot(no_nan, sym='', patch_artist=True,
whis=[5, 95], widths=0.75, positions=[bar_x[l]])
for element in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']:
plt.setp(bp[element], color=c_state[d_l[l]], linewidth=1.5)
for patch in bp['boxes']:
patch.set(facecolor=c_wht)
ax.set_xticks(bar_x)
ax.xaxis.set_tick_params(length=0)
ax.set_xticklabels(['unlabeled', 'theta'])
ax.set_yticks([0, 0.2, 0.4, 0.6, 0.8])
ax.set_yticklabels([0, '', 0.4, '', 0.8])
ax.set_ylabel('CS rate (Hz)')
ax.set_xlim([0, bar_x[1]+1])
ax.spines['bottom'].set_visible(False)
fig.tight_layout()
plt.savefig(os.path.join(fig_folder, ntl[d_l[-1]]+'_CS_rate.png'), transparent=True)
# do the paired boot stats
num_b = 1000
p = np.full(len(ntl) - 1, np.nan)
d = np.full(len(ntl) - 1, np.nan)
for l in np.arange(len(ntl) - 1):
dif = S[:, l+1] - S[:, 0]
# remove nans
dif = dif[~np.isnan(dif)]
d[l], p[l] = boot_pair_t(dif, num_b)
print(d)
print(p)
# do the paired boot stats for theta hyp and LIA dep cells only
num_b = 1000
# theta hyp cells
dif = S[:, 1][theta_cell_p < 0.05] - S[:, 0][theta_cell_p < 0.05]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# LIA dep cells
dif = S[:, 2][LIA_cell_p > 0.95] - S[:, 0][LIA_cell_p > 0.95]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# descriptive numbers
l = 1
np.sum(~np.isnan(S[:, l]))
np.nanmedian(S[:, l])
np.nanstd(S[:, l])
MADAM(S[:, l], np.nanmedian(S[:, l]))
# find which cells have a significant change - nonparametric stats
p_kw = np.full(len(data), np.nan)
p_mw = np.full([len(data), len(states)], np.nan)
num_b = 1000
for i in np.arange(len(data)):
groups_list = [data[i]['nost_CS_perc'], data[i]['theta_CS_perc'],
data[i]['LIA_CS_perc']]
    # do the Kruskal-Wallis test if not all the CS_perc values are nan
if ~np.all(np.isnan(np.concatenate(groups_list))):
try:
H, p_kw[i] = stats.kruskal(groups_list[0], groups_list[1], groups_list[2],
nan_policy='omit')
except ValueError:
p_kw[i] = np.nan
# if the anova is significant, do the adhoc stats
if p_kw[i] < 0.05:
for l in np.arange(len(states)):
# remove nans before running the test
g0 = groups_list[0]
g0 = g0[~np.isnan(g0)]
g1 = groups_list[l+1]
g1 = g1[~np.isnan(g1)]
U, p_mw[i, l] = stats.mannwhitneyu(g0, g1,
alternative='two-sided')
# remove extra recordings
p_kw = p_kw[keep_cells]
p_mw = p_mw[keep_cells, :]
# %% CS index
# each cells' CS index during theta, LIA, and no state
# Version: theta and LIA separate
# prep numbers
# only take first recording from each cell
S = np.full([len(data), len(ntl)], np.nan)
for i in np.arange(len(data)):
for l in np.arange(len(ntl)):
num_CS_spikes = np.sum(np.array([c.size for c in data[i]['CS_ind']])[data[i][ntl[l]+'_CS_bool']])
total_spikes = np.sum([data[i][ntl[l]+'_spike_bool']])
CS_perc = num_CS_spikes/total_spikes
if CS_perc > 1:
CS_perc = 1
S[i, l] = CS_perc
# remove extra recordings from cells
S = S[keep_cells, :]
# plot the stack plot for cell values for each state
fig, ax = plt.subplots(1, figsize=[2.3, 2])
line_x = np.array([1.75, 3.25])
bar_x = np.array([1, 4])
y = S[:, d_l]
for i in np.arange(y.shape[0]):
ax.plot(line_x, y[i, :], color=c_lgry, zorder=1)
if d_l == [0, 1]:
if theta_cell_p[i] < 0.05:
ax.plot(line_x, y[i, :], color=rgb2hex(c_hyp), zorder=2)
if theta_cell_p[i] > 0.95:
ax.plot(line_x, y[i, :], color=rgb2hex(c_dep), zorder=2)
elif d_l == [0, 2]:
if LIA_cell_p[i] < 0.05:
ax.plot(line_x, y[i, :], color=rgb2hex(c_hyp), zorder=2)
if LIA_cell_p[i] > 0.95:
ax.plot(line_x, y[i, :], color=rgb2hex(c_dep), zorder=2)
for l in np.arange(y.shape[1]):
# remove nans
no_nan = y[:, l]
no_nan = no_nan[~np.isnan(no_nan)]
bp = ax.boxplot(no_nan, sym='', patch_artist=True,
whis=[5, 95], widths=0.75, positions=[bar_x[l]])
for element in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']:
plt.setp(bp[element], color=c_state[d_l[l]], linewidth=1.5)
for patch in bp['boxes']:
patch.set(facecolor=c_wht)
ax.set_xticks(bar_x)
ax.xaxis.set_tick_params(length=0)
ax.set_xticklabels(['unlabeled', 'theta'])
ax.set_yticks([0, 0.25, 0.5, 0.75, 1])
ax.set_yticklabels([0, '', 0.5, '', 1])
ax.set_ylabel('CS index')
ax.set_xlim([0, bar_x[1]+1])
ax.spines['bottom'].set_visible(False)
fig.tight_layout()
plt.savefig(os.path.join(fig_folder, ntl[d_l[-1]]+'_CS_index.png'), transparent=True)
# do the paired boot stats
num_b = 1000
p = np.full(len(ntl) - 1, np.nan)
d = np.full(len(ntl) - 1, np.nan)
for l in np.arange(len(ntl) - 1):
dif = S[:, l+1] - S[:, 0]
# remove nans
dif = dif[~np.isnan(dif)]
d[l], p[l] = boot_pair_t(dif, num_b)
print(d)
print(p)
# do the paired boot stats for theta hyp and LIA dep cells only
num_b = 1000
# theta hyp cells
dif = S[:, 1][theta_cell_p < 0.05] - S[:, 0][theta_cell_p < 0.05]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# LIA dep cells
dif = S[:, 2][LIA_cell_p > 0.95] - S[:, 0][LIA_cell_p > 0.95]
# remove nans
dif = dif[~np.isnan(dif)]
d, p = boot_pair_t(dif, num_b)
print(dif.size)
print(d)
print(p)
# descriptive numbers
l = 1
np.sum(~np.isnan(S[:, l]))
np.nanmedian(S[:, l])
np.nanstd(S[:, l])
MADAM(S[:, l], np.nanmedian(S[:, l]))
# %% make figures - dCSI vs dVm
l = 0
state='theta'
# Event-based correlation between dVm and change in CS index
unique_cells = [isinstance(d['cell_id'], int) for d in data]
fig, ax = plt.subplots(1, figsize=[2.25, 2.25])
n = 0
for i in np.arange(len(data)):
for j in np.arange(data[i][state+'_start'].size):
x = data[i][state+'_dVm'][j]
z = data[i][state+'_dVm_p'][j]
# calculate the CS index in the before window
CS_bef = np.logical_and(data[i][state+'_CS_spikes'][j] > states[l]['bef'],
data[i][state+'_CS_spikes'][j] < states[l]['bef'] + states[l]['samp_time'])
CS_bef = np.sum(CS_bef)
nonCS_bef = np.logical_and(data[i][state+'_nonCS_spikes'][j] > states[l]['bef'],
data[i][state+'_nonCS_spikes'][j] < states[l]['bef'] + states[l]['samp_time'])
nonCS_bef = np.sum(nonCS_bef)
CSindex_bef = CS_bef/(CS_bef+nonCS_bef)
# calculate the CS index in the after window
CS_aft = np.logical_and(data[i][state+'_CS_spikes'][j] > states[l]['aft'],
data[i][state+'_CS_spikes'][j] < states[l]['aft'] + states[l]['samp_time'])
CS_aft = np.sum(CS_aft)
nonCS_aft = np.logical_and(data[i][state+'_nonCS_spikes'][j] > states[l]['aft'],
data[i][state+'_nonCS_spikes'][j] < states[l]['aft'] + states[l]['samp_time'])
nonCS_aft = np.sum(nonCS_aft)
CSindex_aft = CS_aft/(CS_aft+nonCS_aft)
if np.logical_and(CS_bef+nonCS_bef == 0, CS_aft+nonCS_aft == 0):
y = np.nan
else:
y = CSindex_aft-CSindex_bef
        if not np.isnan(y):
n = n+1
if z > 0.05:
ax.scatter(x, y, s=5, facecolors='none', edgecolors=c_mgry, alpha=1, zorder=1)
elif x < 0:
ax.scatter(x, y, s=5, facecolors=c_lhyp, edgecolors=c_lhyp, alpha=1, zorder=2)
elif x > 0:
ax.scatter(x, y, s=5, facecolors=c_ldep, edgecolors=c_ldep, alpha=1, zorder=2)
ax.axhline(0, linestyle='--', color=c_blk, zorder=1)
ax.axvline(0, linestyle='--', color=c_blk, zorder=1)
ax.set_ylim([-1.1, 1.1])
ax.set_xlim([-18, 18])
# cell-based dVm vs change in CS index
# prep numbers for dVm
all_dVm = np.array([d[state+'_mean_dVm'] for d in data])[[isinstance(d['cell_id'], int) for d in data]]
all_cell_p = np.array([d[state+'_cell_p'] for d in data])[[isinstance(d['cell_id'], int) for d in data]]
keep_cells = np.logical_or(np.isnan(all_dVm), np.isnan(all_cell_p))==0
all_dVm = all_dVm[keep_cells]
all_cell_p = all_cell_p[keep_cells]
cell_hyp_sig = all_dVm[all_cell_p < 0.05]
cell_hyp_no = all_dVm[(all_dVm < 0) & (all_cell_p >= 0.05)]
cell_dep_sig = all_dVm[all_cell_p > 0.95]
cell_dep_no = all_dVm[(all_dVm > 0) & (all_cell_p <= 0.95)]
# prep number for CS index
dCSI = np.full(len(data), np.nan)
for i in np.arange(len(data)):
dCSI_cell = np.full(data[i][state+'_start'].size, np.nan)
for j in np.arange(data[i][state+'_start'].size):
# calculate the CS index in the before window
CS_bef = np.logical_and(data[i][state+'_CS_spikes'][j] > states[l]['bef'],
data[i][state+'_CS_spikes'][j] < states[l]['bef'] + states[l]['samp_time'])
CS_bef = np.sum(CS_bef)
nonCS_bef = np.logical_and(data[i][state+'_nonCS_spikes'][j] > states[l]['bef'],
data[i][state+'_nonCS_spikes'][j] < states[l]['bef'] + states[l]['samp_time'])
nonCS_bef = np.sum(nonCS_bef)
CSindex_bef = CS_bef/(CS_bef+nonCS_bef)
# calculate the CS index in the after window
CS_aft = np.logical_and(data[i][state+'_CS_spikes'][j] > states[l]['aft'],
data[i][state+'_CS_spikes'][j] < states[l]['aft'] + states[l]['samp_time'])
CS_aft = np.sum(CS_aft)
nonCS_aft = np.logical_and(data[i][state+'_nonCS_spikes'][j] > states[l]['aft'],
data[i][state+'_nonCS_spikes'][j] < states[l]['aft'] + states[l]['samp_time'])
nonCS_aft = np.sum(nonCS_aft)
CSindex_aft = CS_aft/(CS_aft+nonCS_aft)
if np.logical_and(CS_bef+nonCS_bef == 0, CS_aft+nonCS_aft == 0):
dCSI_cell[j] = np.nan
else:
dCSI_cell[j] = CSindex_aft-CSindex_bef
dCSI[i] = np.nanmean(dCSI_cell)
dCSI = dCSI[unique_cells]
dCSI = dCSI[keep_cells]
cell_hyp_sig_dCSI = dCSI[all_cell_p < 0.05]
cell_hyp_no_dCSI = dCSI[(all_dVm < 0) & (all_cell_p >= 0.05)]
cell_dep_sig_dCSI = dCSI[all_cell_p > 0.95]
cell_dep_no_dCSI = dCSI[(all_dVm > 0) & (all_cell_p <= 0.95)]
# add the cell dots on top
s_cell = 20
ax.scatter(cell_hyp_sig, cell_hyp_sig_dCSI, s=s_cell, facecolors=c_hyp,
edgecolors=c_blk, zorder=3, alpha=1)
ax.scatter(cell_hyp_no, cell_hyp_no_dCSI, s=s_cell, facecolors='none',
edgecolors=c_blk, zorder=3, alpha=1)
ax.scatter(cell_dep_sig, cell_dep_sig_dCSI, s=s_cell, facecolors=rgb2hex(c_dep),
edgecolors=rgb2hex(c_blk), zorder=3, alpha=1)
ax.scatter(cell_dep_no, cell_dep_no_dCSI, s=s_cell, facecolors='none',
edgecolors=c_blk, zorder=3, alpha=1)
ax.set_xlabel(r'$\Delta$'+' Vm (mV)')
ax.set_ylabel(r'$\Delta$'+' CS index')
fig.tight_layout()
plt.savefig(os.path.join(fig_folder, 'dCSI_vs_dVm_'+state+'.png'), transparent=True)
|
Ashkees/Malezieux_CellRep_2020
|
figure_scripts/Malezieux_CellRep_FigS3.py
|
Malezieux_CellRep_FigS3.py
|
py
| 63,803 |
python
|
en
|
code
| 2 |
github-code
|
6
|
41465348149
|
from commands import *
from pyfiglet import Figlet
from datetime import timedelta
fterm = f = Figlet(font="term", justify="center", width=Console.width())
# fonts = Str.nl(File.read("pyfiglet_fonts.txt").strip())
def now():
return Time.datetime()
ends = []
for arg in OS.args[1:]:
ends.append(arg)
if not ends:
    print("No times given, exit")
    OS.exit(1)  # actually exit, as the message promises (OS.exit is used below)
endtimes = []
last_endtime = Time.datetime(year=1970)
for end in ends:
end = Str.get_integers(end)
new_endtime = Time.datetime(hour = end[0], minute = end[1], second = 0)
while True:
if new_endtime < last_endtime:
new_endtime = new_endtime + timedelta(days=1)
else:
break
endtimes.append(new_endtime)
last_endtime = new_endtime
#debug
# for endtime in endtimes:
# print(endtime)
#debug END
endtimes.sort()
#debug
# print()
# for endtime in endtimes:
# print(endtime)
#debug END
# cnt = Json("time_until_cnt.json")
# if not isinstance(cnt.string, int):
# cnt.string = 0
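# For context: Time.human_readable used in the loop below comes from the
# author's private "commands" module. A plausible stand-in (an assumption,
# not the real API) would format a number of seconds as h:mm:ss:
def _human_readable_sketch(seconds):
    hours, rest = divmod(int(seconds), 3600)
    minutes, secs = divmod(rest, 60)
    return f"{hours}:{minutes:02}:{secs:02}"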
while True:
Console.clean()
rebuild = True
for endtime in endtimes:
time = now()
# Print.debug(f"{endtime=}")
# Print.debug(f"{time=}")
if endtime < time:
continue
rebuild = False
seconds = int((endtime-time).total_seconds())
# Print.debug(f"{seconds=}")
human_readable = Time.human_readable(seconds)
# Print.debug(f"{human_readable=}")
# font = Random.item(fonts)
# try:
# font = fonts[cnt.string]
# except IndexError:
# cnt.string = 0
# font = fonts[cnt.string]
# font = "minecraft"
font = "minecraft_condenced"
# cnt.string += 1
# cnt.save()
# print(fterm.renderText(f"{font} {cnt.string}/{len(fonts)}"))
# print(fterm.renderText(f"{font}"))
f = Figlet(font=font, justify="center", width=Console.width())
until = f"{endtime.hour:02}:{endtime.minute:02}"
if endtime.day != time.day:
until = f"{endtime.day:02}.{endtime.month:02} {until}"
print(f.renderText(f"{human_readable} until {until}").rstrip())
# if seconds <= 0:
# Console.blink()
# break
if rebuild:
for cnt, endtime in enumerate(endtimes):
endtimes[cnt] = endtime + Time.delta(24*3600)
# OS.exit(1)
Time.sleep(1)
|
egigoka/test
|
time_until.py
|
time_until.py
|
py
| 2,474 |
python
|
en
|
code
| 2 |
github-code
|
6
|
43242806041
|
from preggy import expect
from tornado.testing import gen_test
from tests.base import TestCase
from thumbor.config import Config
from thumbor.context import Context
from thumbor.importer import Importer
class HealthcheckHandlerTestCase(TestCase):
@gen_test
async def test_can_get_healthcheck(self):
response = await self.async_get("/healthcheck")
expect(response.code).to_equal(200)
expect(response.body).to_equal("WORKING")
expect(response.headers.get("Cache-Control")).to_equal("no-cache")
@gen_test
async def test_can_head_healthcheck(self):
response = await self.async_fetch("/healthcheck", method="HEAD")
expect(response.code).to_equal(200)
expect(response.headers.get("Cache-Control")).to_equal("no-cache")
# Same test, but configured for the root URL
class HealthcheckOnRootTestCase(TestCase):
def get_context(self):
cfg = Config()
cfg.HEALTHCHECK_ROUTE = "/"
importer = Importer(cfg)
importer.import_modules()
return Context(None, cfg, importer)
@gen_test
async def test_can_get_healthcheck(self):
response = await self.async_get("/")
expect(response.code).to_equal(200)
expect(response.body).to_equal("WORKING")
@gen_test
async def test_can_head_healthcheck(self):
response = await self.async_fetch("/", method="HEAD")
expect(response.code).to_equal(200)
|
thumbor/thumbor
|
tests/handlers/test_healthcheck.py
|
test_healthcheck.py
|
py
| 1,446 |
python
|
en
|
code
| 9,707 |
github-code
|
6
|
17940251781
|
from sklearn.linear_model import LogisticRegression
import numpy as np
def logistic(train_feature_dir, train_label_dir, test_feature_dir, test_label_dir):
train_feature = np.load(train_feature_dir)
train_label = np.load(train_label_dir)
test_feature = np.load(test_feature_dir)
test_label = np.load(test_label_dir)
train_feature = np.reshape(train_feature, (len(train_feature), 1, 1, 3584))
test_feature = np.reshape(test_feature, (len(test_feature), 1, 1, 3584))
new_train_feature = []
for i in range(len(train_feature)):
new_train_feature.append(train_feature[i][0][0])
new_test_feature = []
for i in range(len(test_feature)):
new_test_feature.append(test_feature[i][0][0])
    lr = LogisticRegression(C=1000.0, random_state=0)  # C=1000.0 weakens the L2 regularization (not the defaults); random_state pinned for reproducibility
lr.fit(new_train_feature, train_label)
pre_label = lr.predict_proba(new_test_feature)
_pre_label = []
for i in range(len(test_feature)):
_pre_label.append(pre_label[i][1])
return np.array(_pre_label), test_label
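# Note: the reshape-then-unwrap loops above can be expressed directly with a
# 2-D reshape; a minimal equivalent sketch (same data, no per-row loops):
#   train_X = np.load(train_feature_dir).reshape(-1, 3584)
#   test_X = np.load(test_feature_dir).reshape(-1, 3584)
#   lr.fit(train_X, train_label)
#   pre_label = lr.predict_proba(test_X)[:, 1]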
|
jingmouren/antifraud
|
antifraud/methods/LR.py
|
LR.py
|
py
| 1,063 |
python
|
en
|
code
| 0 |
github-code
|
6
|
854022654
|
from PyQt4 import QtCore, QtGui
from dat import PipelineInformation
from dat.gui.overlays import Overlay
from dat.vistrail_data import VistrailManager
from dat.vistrails_interface import get_plot_modules
from vistrails.core.modules.module_registry import get_module_registry, \
ModuleRegistryException
from vistrails.gui.ports_pane import PortsList, PortItem
class PlotConfigOverlay(Overlay):
"""Base class for high level plot editors
Must implement setup(self, cell, plot), which is called
when the widget is shown.
"""
def setup(self, cell, plot):
raise NotImplementedError
class DefaultPlotConfigOverlay(PlotConfigOverlay):
"""Default widget for editing 'advanced' plot settings.
Shows PortList widget for each module in plot. If the module has an
advanced editor, that is shown instead.
"""
def __init__(self, cellcontainer):
Overlay.__init__(self, cellcontainer, False)
self.setSizePolicy(QtGui.QSizePolicy.Ignored,
QtGui.QSizePolicy.Ignored)
# Create tab widget
self.tabWidget = QtGui.QTabWidget()
# Create buttons
btnApply = QtGui.QPushButton("&Apply")
btnOk = QtGui.QPushButton("O&k")
btnReset = QtGui.QPushButton("&Reset")
# Connect buttons
btnApply.clicked.connect(self.applyClicked)
btnOk.clicked.connect(self.okClicked)
btnReset.clicked.connect(self.resetClicked)
# Add buttons to layout
layoutButtons = QtGui.QHBoxLayout()
layoutButtons.addWidget(btnReset)
layoutButtons.addStretch()
layoutButtons.addWidget(btnApply)
layoutButtons.addWidget(btnOk)
# Add tabwidget above buttons
vLayout = QtGui.QVBoxLayout()
vLayout.addWidget(self.tabWidget)
vLayout.addLayout(layoutButtons)
self.setLayout(vLayout)
self.cell = None
self.plot = None
def setup(self, cell, plot):
self.cell = cell
self.plot = plot
# Get pipeline of the cell
mngr = VistrailManager(cell._controller)
pipelineInfo = mngr.get_pipeline(cell.cellInfo)
# Clear old tabs
self.tabWidget.clear()
# Get all of the plot modules in the pipeline
plot_modules = get_plot_modules(
pipelineInfo,
cell._controller.current_pipeline)
registry = get_module_registry()
getter = registry.get_configuration_widget
for module in plot_modules:
widgetType = None
widget = None
# Try to get custom config widget for the module
try:
widgetType = \
getter(module.package, module.name, module.namespace)
except ModuleRegistryException:
pass
if widgetType:
# Use custom widget
widget = widgetType(module, cell._controller)
self.connect(widget, QtCore.SIGNAL("doneConfigure"),
self.configureDone)
self.connect(widget, QtCore.SIGNAL("stateChanged"),
self.stateChanged)
else:
# Use PortsList widget, only if module has ports
widget = DATPortsList(self)
widget.update_module(module)
if len(widget.port_spec_items) > 0:
widget.set_controller(cell._controller)
else:
widget = None
# Add widget in new tab
if widget:
self.tabWidget.addTab(widget, module.name)
def stateChanged(self):
pass
def configureDone(self):
pass
def applyClicked(self):
self.okClicked()
# Bring this overlay back up
self.cell._set_overlay(DefaultPlotConfigOverlay)
mngr = VistrailManager(self.cell._controller)
pipeline = mngr.get_pipeline(self.cell.cellInfo)
self.cell._overlay.setup(self.cell, pipeline.recipe.plot)
def okClicked(self):
mngr = VistrailManager(self.cell._controller)
pipeline = mngr.get_pipeline(self.cell.cellInfo)
if pipeline.version != self.cell._controller.current_version:
new_pipeline = PipelineInformation(
self.cell._controller.current_version,
pipeline.recipe,
pipeline.conn_map,
pipeline.port_map)
mngr.created_pipeline(self.cell.cellInfo, new_pipeline)
self.cell.update_pipeline(True)
else:
self.cell._set_overlay(None)
def resetClicked(self):
mngr = VistrailManager(self.cell._controller)
pipeline = mngr.get_pipeline(self.cell.cellInfo)
if pipeline.version != self.cell._controller.current_version:
self.cell._controller.change_selected_version(pipeline.version)
currentTabIndex = self.tabWidget.currentIndex()
self.setup(self.cell, self.plot)
self.tabWidget.setCurrentIndex(currentTabIndex)
class DATPortItem(PortItem):
def build_item(self, port_spec, is_connected, is_optional, is_visible,
is_editable=False):
PortItem.build_item(self, port_spec, is_connected,
is_optional, is_visible, is_editable)
self.setIcon(0, PortItem.null_icon)
self.setIcon(1, PortItem.null_icon)
class DATPortsList(PortsList):
""" Only input ports of constant type that aren't connected show up.
Visibility and linked columns are removed
"""
def __init__(self, parent=None):
PortsList.__init__(self, "input", parent)
def include_port(self, port_spec):
"""Determines whether or not a port should show up in this list.
"""
connected = port_spec.name in self.module.connected_input_ports
constant = get_module_registry().is_method(port_spec)
return not connected and constant
def create_port_item(self, port_spec, is_connected, is_optional,
is_visible, is_editable, parent=None):
"""Creates the port item
"""
return PortItem(port_spec, is_connected, True, False, False, parent)
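        # Note: DATPortItem (defined above) is never instantiated anywhere in
        # this file; it may have been intended here in place of PortItem, but
        # the original behavior is kept.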
# Override visible_clicked to prevent changing this
def visible_clicked(self, item):
pass
|
VisTrails/DAT
|
dat/gui/overlays/plot_config.py
|
plot_config.py
|
py
| 6,358 |
python
|
en
|
code
| 3 |
github-code
|
6
|
7954786176
|
import KeyHelper
import PictureHelper
import DESUnit
import pickle
def decrypt():
decrypt_keys = KeyHelper.keySelect()
print(len(decrypt_keys))
file = open("key_sequence.txt", "rb")
key_sequence = pickle.load(file)
file.close()
image_name = "en_2.bmp"
image_data = PictureHelper.read_picture(image_name)
image_data = PictureHelper.format_data(image_data)
decryption_image = []
print("processing...")
for i in range(0, len(image_data) - 1):
key = decrypt_keys[key_sequence[i] - 1]
#print("i = " + str(i))
#print("key_sequece[i] = " + str(key_sequence[i]))
des = DESUnit.DESUnit(key, "default")
des.compute_key()
decryption_image.append(des.action("decrypt", image_data[i]))
decryption_image.append(image_data[-1])
PictureHelper.reshape(decryption_image, "de_" + image_name)
if __name__ == "__main__":
decrypt()
|
2014zhouyou/ImageSharing
|
Decrypt.py
|
Decrypt.py
|
py
| 918 |
python
|
en
|
code
| 5 |
github-code
|
6
|
71168709947
|
import os
import subprocess
from django.conf import settings
from django.utils.dateparse import parse_date
from rest_framework import status
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.generics import ListCreateAPIView, RetrieveDestroyAPIView
from .models import ConvertedModel
from .serializers import ConvertedModelSerializer
class ConvertedModelListCreateView(ListCreateAPIView):
serializer_class = ConvertedModelSerializer
permission_classes = [IsAuthenticated]
pagination_class = LimitOffsetPagination
def get_queryset(self):
queryset = ConvertedModel.objects.filter(user=self.request.user)
start_date = self.request.query_params.get('start_date')
end_date = self.request.query_params.get('end_date')
if start_date:
start_date = parse_date(start_date)
queryset = queryset.filter(timestamp__gte=start_date)
if end_date:
end_date = parse_date(end_date)
queryset = queryset.filter(timestamp__lte=end_date)
return queryset
def perform_create(self, serializer):
original_file = self.request.FILES.get('original_file')
if not original_file:
return Response("No file provided", status=status.HTTP_400_BAD_REQUEST)
        # Creating the model instance directly saves the file in the right place
converted_model = ConvertedModel.objects.create(
user=self.request.user,
original_file=original_file
)
        # Get the absolute path to the uploaded file
original_file_path = converted_model.original_file.path
try:
            # Convert the file
converted_files_dir = os.path.join(settings.MEDIA_ROOT, 'converted_files')
os.makedirs(converted_files_dir, exist_ok=True)
output_file_name = os.path.splitext(original_file.name)[0] + '.glb'
output_file_path = os.path.join(converted_files_dir, output_file_name)
blender_executable = "blender"
blender_script = os.path.join('fileconverter', 'blender_script.py')
try:
subprocess.run([blender_executable, "--background", "--python", blender_script, original_file_path,
output_file_path], check=True)
except subprocess.CalledProcessError:
print(f"...")
if not os.path.exists(output_file_path):
raise Exception(f"Converted file not found: {output_file_path}")
            # Update the model instance with the converted file
converted_model.converted_file = 'converted_files/' + output_file_name
converted_model.converted_filename = output_file_name
converted_model.save()
except Exception as e:
            converted_model.delete()  # delete the record if conversion failed
return Response(str(e), status=status.HTTP_500_INTERNAL_SERVER_ERROR)
serializer.instance = converted_model
class ConvertedModelDetailView(RetrieveDestroyAPIView):
queryset = ConvertedModel.objects.all()
serializer_class = ConvertedModelSerializer
permission_classes = (IsAuthenticated,)
def get_queryset(self):
return self.queryset.filter(user=self.request.user)
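# For context, the blender_script.py invoked above is not included in this
# file. A plausible sketch (an assumption about its contents, not the actual
# script) that would match the argv usage in perform_create:
#
#   import bpy, sys
#   in_path, out_path = sys.argv[-2], sys.argv[-1]
#   bpy.ops.wm.read_factory_settings(use_empty=True)
#   bpy.ops.import_scene.obj(filepath=in_path)
#   bpy.ops.export_scene.gltf(filepath=out_path)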
|
maxcrimea/ARSY
|
web/fileconverter/views.py
|
views.py
|
py
| 3,589 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29325676672
|
import numpy as np
import warnings
import sys
import time
import random
import nna
from primitives import *
import copy
import math
LIMIT = 100
warnings.filterwarnings('ignore')
def opt(cycle, sw1, sw2):
rev = cycle[sw1:sw2+1]
rev.reverse()
cycle = cycle[:sw1]+rev+cycle[sw2+1:]
return cycle
def simulated_annealing(cycle, dists):
    now_cycle = copy.copy(cycle)
    best_cycle = copy.copy(cycle)
    now_v = best_v = score(cycle, dists)
    t_st, t_fin = 50, 10
    n = len(cycle)
    move = 0
    st = time.perf_counter()
    while time.perf_counter() - st < LIMIT:
        sw1 = random.randrange(0, n-1)
        sw2 = random.randrange(sw1+1, n)
        sw1m = sw1-1 if sw1 else n-1
        sw2p = sw2+1 if sw2 != n-1 else 0
        if not ((sw1 == 0 and sw2 == n-1) or sw2 - sw1 == 1):
            diff = dists[now_cycle[sw1]][now_cycle[sw2p]] + dists[now_cycle[sw2]][now_cycle[sw1m]] - dists[now_cycle[sw1]][now_cycle[sw1m]] - dists[now_cycle[sw2]][now_cycle[sw2p]]
        else:
            diff = 0
        temp = t_st + (t_fin - t_st) * (time.perf_counter() - st) / LIMIT
        prob = 1 - np.exp(diff / temp)
        if prob > random.random():
            # apply the 2-opt move to the current tour; the original applied it
            # to the initial tour each time and never tracked the best tour
            now_cycle = opt(now_cycle, sw1, sw2)
            now_v = score(now_cycle, dists)
            move += 1
            if now_v < best_v:
                best_v = now_v
                best_cycle = copy.copy(now_cycle)
    return best_cycle, move
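# Note: the acceptance rule above never takes uphill (worsening) moves, since
# prob = 1 - exp(diff/temp) is negative whenever diff > 0. A conventional
# Metropolis acceptance for simulated annealing would be (a sketch, not the
# author's rule):
#   accept = diff <= 0 or random.random() < math.exp(-diff / temp)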
def main(instance):
with open("instances/"+instance+".tsp") as inputfile:
try:
indexofpoint = read_points(inputfile)
except FileNotFoundError as err:
print(err)
points = np.array(list(indexofpoint.keys()))
distance_matrix = compute_distance_matrix(points)
cycle = nna.nearest_neighbor(points, distance_matrix)
st = time.perf_counter()
cycle, move = simulated_annealing(cycle, distance_matrix)
en = time.perf_counter()
return points, cycle, score(cycle, distance_matrix), en-st, move
if __name__ == "__main__":
# input
basename, extname = sys.argv[-1].split(".")
if extname == "tsp":
        points, ans, sc, com_t, move = main(basename)
print(basename, ans)
print("score", sc)
elif extname == "txt":
with open("instances/"+basename+".txt") as inputfile:
try:
for line in inputfile.read().splitlines():
                    points, ans, sc, com_t, move = main(line)
print(line, ans)
print("score:", sc)
except FileNotFoundError as err:
print(err)
|
ichiro-ss/algo_eng
|
const_algo/sa.py
|
sa.py
|
py
| 2,404 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18425266806
|
# from flask import Flask
from flask import Flask, render_template, request, redirect, url_for, session
import pymysql.cursors
import json
import pickle
from flask import jsonify
import sklearn
from flask_sqlalchemy import model
# Loading in the training models that we will be using later
bike_model = pickle.load(open('bikes_model.pkl', 'rb'))
stands_model = pickle.load(open('stands_model.pkl','rb'))
app = Flask(__name__)
# MySQL configuration
app.config['MYSQL_HOST'] = 'dbbikes.cvvkn6jkfbdf.eu-west-1.rds.amazonaws.com'
app.config['MYSQL_USER'] = 'SEGroup25'
app.config['MYSQL_PASSWORD'] = 'SEGroup25?'
app.config['MYSQL_DB'] = 'dbbikes'
app.config['MYSQL_PORT'] = 3306
# create a connection to the database
conn = pymysql.connect(
host=app.config['MYSQL_HOST'],
user=app.config['MYSQL_USER'],
password=app.config['MYSQL_PASSWORD'],
db=app.config['MYSQL_DB'],
port=app.config['MYSQL_PORT'],
cursorclass=pymysql.cursors.DictCursor
)
# Creating the route for the main page the user will access
@app.route('/')
def index():
# cursor that will execute the SQL query to the database
cursor = conn.cursor()
# Selecting the last 117 entries from the availability table
cursor.execute('SELECT * FROM availability ORDER BY last_update DESC LIMIT 117')
availability_results = cursor.fetchall()
# Selecting all the information from the stations table
cursor.execute('SELECT * FROM stations')
location_results = cursor.fetchall()
# Debugging code print statements to make sure the query executed successfully
print(availability_results)
print(location_results)
# Gathering station coordinate, name, and number values
locations = []
for location in location_results:
latitude = location['position_lat']
longitude = location['position_long']
name = location['stat_name']
number = location['number']
# Gathering available bike values and available bike stand values
bikes_available = None
for availability in availability_results:
if availability['number'] == location['number']:
bikes_available = availability['available_bikes']
bike_stands_available = availability['available_bike_stands']
break
# Adding each to the list locations
locations.append((latitude, longitude, bikes_available, name, bike_stands_available,number))
# Returning all information which will be used in conjunction with adding markers to a map and displaying window information
return render_template('index.html', API_KEY='AIzaSyCmEmTVXz4FLSsTM3JME9J3VW-WXECqmKw', locations=locations)
# Route for when the user wants to route from one station to another
@app.route('/mapping.html')
def map():
    # Exact same code as the previous route
cursor = conn.cursor()
cursor.execute('SELECT * FROM availability ORDER BY last_update DESC LIMIT 117')
availability_results = cursor.fetchall()
# Getting the locations
cursor.execute('SELECT * FROM stations')
location_results = cursor.fetchall()
print(availability_results)
print(location_results)
# extracting all the lat and long values
locations = []
for location in location_results:
latitude = location['position_lat']
longitude = location['position_long']
name = location['stat_name']
number = location['number']
bikes_available = None
for availability in availability_results:
if availability['number'] == location['number']:
bikes_available = availability['available_bikes']
bike_stands_available = availability['available_bike_stands']
break
locations.append((latitude, longitude, bikes_available, name, bike_stands_available,number))
return render_template('mapping.html', API_KEY='AIzaSyCmEmTVXz4FLSsTM3JME9J3VW-WXECqmKw', locations=locations)
# Route that will return the news portion of the site
@app.route('/news.html')
def news():
return render_template('news.html')
# Route that will return the how-to portion of the site
@app.route('/how-to.html')
def howto():
return render_template('how-to.html')
@app.route('/availability/<int:station_id>')
def predict_bikes(station_id):
from datetime import datetime
today = datetime.today()
dow,month = today.weekday(),today.month
predict_array = []
json_dict = {}
for h in range(24):
predict_array.append([station_id,month,h,0,99,99,99,99,dow])
results = bike_model.predict(predict_array).tolist()
for index,bikes in enumerate(results):
json_dict[index] = bikes
return json.dumps(json_dict)
@app.route('/standsavailability/<int:stand_id>')
def predict_stands(stand_id):
from datetime import datetime
today = datetime.today()
dow,month = today.weekday(),today.month
predict_array = []
json_dict = {}
for h in range(24):
predict_array.append([stand_id,month,h,0,99,99,99,99,dow])
results = stands_model.predict(predict_array).tolist()
for index,stands in enumerate(results):
json_dict[index] = stands
return json.dumps(json_dict)
# Start the application
if __name__ == "__main__":
app.run(host ="0.0.0.0", port =8080, debug = True)
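# Example client call (hypothetical host/port matching the app.run() above):
#   import requests
#   r = requests.get("http://localhost:8080/availability/42")
#   print(r.json())  # {"0": <predicted bikes at hour 0>, ..., "23": ...}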
|
Winnie901/Software-Engineering-Project-Git5
|
app.py
|
app.py
|
py
| 5,366 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41681774832
|
import cv2
import numpy as np
import time
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus']=False
m=2
# Initialize the membership matrix U
def Initial_U(sample_num, cluster_n):
    # sample_num is the number of samples, cluster_n the number of clusters
    U = np.random.rand(cluster_n,sample_num)
    # sum U along each column, then take the reciprocal
    col_sum = np.sum(U, axis=0)
    col_sum = 1 / col_sum
    # ensure each column of U sums to 1
    U = np.multiply(U, col_sum)
    return U
# Compute the cluster centers
def Cen_Iter( data, U, cluster_n):
    # initialize the centers
    center = np.zeros(cluster_n)
    for i in range(0, cluster_n):
        # apply the center update formula
        u_ij_m = U[i, :] ** m
        sum_u = np.sum(u_ij_m)
        # matrix multiplication
        ux = np.dot(u_ij_m, data)
        center[i] = ux / sum_u
    return center
# Update the membership matrix
def U_Iter(data,U, c):
    cluster_n,sample_num = U.shape
    for i in range(0, cluster_n):
        for j in range(0,sample_num):
            sum = 0
            # apply the membership update formula
            for k in range(0, cluster_n):
                temp = (np.linalg.norm(data[j] - c[i]) /
                        np.linalg.norm(data[j] - c[k])) ** (2 / (m - 1))
                sum = temp + sum
            U[i, j] = 1 / sum
    return U
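# The two updates above implement the standard FCM alternating scheme:
#   c_i  = sum_j(u_ij^m * x_j) / sum_j(u_ij^m)
#   u_ij = 1 / sum_k(||x_j - c_i|| / ||x_j - c_k||)^(2/(m-1))
# Note that the membership update divides by zero when a sample coincides
# exactly with a center; the code above does not guard against that case.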
def FCM(img_path,cluster_n=5,iter_num=10):  # default number of iterations is 10
    # read the image as grayscale
    start = time.time()
    img=cv2.imread(img_path,0)
    # flatten the image into a single column
    data=img.reshape(img.shape[0]*img.shape[1],1)
    print("Start clustering")
    sample_num = len(data)
    # initialize the membership matrix U
    U = Initial_U(sample_num, cluster_n)
    for i in range(0, iter_num):
        C = Cen_Iter(data, U, cluster_n)
        U = U_Iter(data,U, C)
        print("Iteration %d " % (i + 1), end="")
        print("cluster centers", C)
    # cluster labels
    label = np.argmax(U, axis=0)
    # final cluster center matrix
    center = C
    print("Clustering done, generating the image")
    # build the new image from the cluster labels and centers
    new_img=center[label]
    # reshape back to the original image shape
    new_img=np.reshape(new_img,img.shape)
    # the data must be converted to uint8 to be displayed as an image
    new_img=new_img.astype('uint8')
    plt.subplot(121)
    plt.imshow(img, cmap="gray")
    plt.title("Original")
    plt.axis('off')
    plt.subplot(122)
    plt.imshow(new_img, cmap="gray")
    plt.title("FCM, %d cluster centers"%cluster_n)
    plt.axis('off')
    end = time.time()
    print("Loop running time: %.2f s" % (end - start))
    plt.show()
    plt.imshow(new_img, cmap="gray")
    plt.axis('off')
    plt.savefig('FCM_Baboon')
FCM("photo2.png",cluster_n=4)
|
LAS1520/Image-Processing
|
final pj/codes/FCM.py
|
FCM.py
|
py
| 2,715 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71745856507
|
# AC12
import time
import os
# Part 1
def fib(n):
if n < 2:
return n
return fib(n - 2) + fib(n - 1)
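# The recursive fib above recomputes subproblems and is exponential-time; a
# hedged memoized sketch (hypothetical helper, not used by the script below):
import functools

@functools.lru_cache(maxsize=None)
def _fib_memo_sketch(n):
    if n < 2:
        return n
    return _fib_memo_sketch(n - 2) + _fib_memo_sketch(n - 1)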
def read_file(path):
with open(path, 'rb') as file:
all_size = os.path.getsize(path)
process = 0
print("{0: <9d}|{1: ^9d}|{2: ^12d}|{3: >8.2f}%|{4: ^1.8f}".format(
            all_size, 0, all_size, 0.00, time.perf_counter()))  # time.clock() was removed in Python 3.8
output = open("salida.pdf", "wb")
i = 1
while True:
to_write = file.read(fib(i))
if to_write == b"":
break
output.write(bytearray(to_write)[::-1])
i += 1
process += len(to_write)
print("{0: <9d}|{1: ^9d}|{2: ^12d}|{3: >8.2f}%|{4: ^1.8f}".format(
all_size,
process, all_size - process,
                (process / all_size) * 100, time.perf_counter()))
output.close()
return process, i - 1
def divisores(numero):
i=1
lista=list()
while i <= numero:
if numero % i == 0:
lista.append(i)
i+=1
return lista
def numeros_abundantes(numero):
abundantes = list()
i = 0
while numero > len(abundantes):
lista = divisores(i)
suma=0
for k in lista:
suma+=k
if suma > 2*i:
abundantes.append(i)
i+=1
return abundantes[numero-1]
def parte_dos(path):
with open(path, "rb") as file:
total=bytearray(file.read())
all_size = len(total)
suma=0
for i in range(os.path.getsize(path)-1):
#print("i",i)
suma+=total[i]
#caracter=i.decode("UTF-8")
# suma+=ord(caracter)
byte_total=bytearray()
for i in range(os.path.getsize(path) - 1):
# print("i",i)
byte_original=(total[i]+suma) % 256
        byte_total.append(byte_original)  # bytearray(int) would append that many zero bytes, not the byte itself
#with open("resultado","a") as result:
# result.write(byte_original.encode())
archivo_uno=bytearray()
archivo_dos=bytearray()
procesados=0
num_ab=1
print("{0: <9d}|{1: ^9d}|{2: ^12d}|{3: >8.2f}%|{4: ^1.8f}".format(
        all_size, 0, all_size, 0.00, time.perf_counter()))
while procesados < len(byte_total):
archivo_uno.extend(byte_total[:numeros_abundantes(num_ab)])
archivo_dos.extend(byte_total[:numeros_abundantes(num_ab)])
num_ab+=1
procesados+=numeros_abundantes(num_ab)
print("{0: <9d}|{1: ^9d}|{2: ^12d}|{3: >8.2f}%|{4: ^1.8f}".format(
all_size,
procesados, all_size - procesados,
            (procesados / all_size) * 100, time.perf_counter()))
if (procesados / all_size) * 100 > 100:
break
with open('archivo_1.mp3', 'wb') as arc_uno:
arc_uno.write(bytearray(archivo_uno))
    with open('archivo_2.gif', 'wb') as arc_dos:
        arc_dos.write(bytearray(archivo_dos))
return procesados, num_ab
if __name__ == "__main__":
    print("PART I")
    print("{0:9s}|{1:^9s}|{2:^12s}|{3:^9s}|{4:^12s}".format("TOTAL", "PROCESSED",
                                                            "UNPROCESSED",
                                                            "PERCENT",
                                                            "DELTATIME"))
    process_i, iter_i = read_file("Archivo1")
    print("\nPART II")
    print(
        "{0:9s}|{1:^9s}|{2:^12s}|{3:^9s}|{4:^12s}".format("TOTAL", "PROCESSED",
                                                          "UNPROCESSED",
                                                          "PERCENT",
                                                          "DELTATIME"))
    process_ii, iter_ii = parte_dos("salida.pdf")
    # summary
    print("\n\n{0:5s}|{1:^13s}|{2:^13s}".format("PART", "PROCESSED",
                                                "ITERATIONS"))
    print("{0:5s}|{1:^13d}|{2:^13d}".format("I", process_i, iter_i))
    print("{0:5s}|{1:^13d}|{2:^13d}".format("II", process_ii, iter_ii))
|
isidoravs/iic2233-2016-2
|
Actividades/AC12/AC12.py
|
AC12.py
|
py
| 4,099 |
python
|
es
|
code
| 0 |
github-code
|
6
|
72784212027
|
# https://www.codewars.com/kata/558c04ecda7fb8f48b000075
def same(arr_a, arr_b):
if len(arr_a) != len(arr_b):
return False
for a, b in zip(arr_a, arr_b):
a.sort()
b.sort()
arr_a.sort()
arr_b.sort()
    for a, b in zip(arr_a, arr_b):
        if a != b:
            return False
    return True
# clever: sorted(map(sorted, arr_a)) == sorted(map(sorted, arr_b))
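# note: the loop version above sorts its arguments in place (it mutates the
# caller's lists), while the one-liner in the comment above does not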
|
blzzua/codewars
|
6-kyu/same_array.py
|
same_array.py
|
py
| 420 |
python
|
en
|
code
| 0 |
github-code
|
6
|