import numpy as np
import gzip
from Bio import SeqIO
from pathlib import Path
import os
import subprocess
import tarfile
from io import BytesIO
#for parallel computing
from joblib import Parallel, delayed
import multiprocessing
num_cores_energy = multiprocessing.cpu_count()
from tqdm import tqdm
import pandas as pd
import sys
valid_aa = ['A','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','V','W','Y','-']
aa_3= ['ALA','CYS','ASP','GLU','PHE','GLY','HIS','ILE','LYS','LEU','MET','ASN','PRO','GLN','ARG','SER','THR','VAL','TRP','TYR','-']
d_aa_num= {a:i for i,a in enumerate(valid_aa)}
d_3to1 = {a3:a1 for a3,a1 in zip(aa_3,valid_aa)}
def read_dca_par(path_h_DCA, path_J_DCA):
''' read compressed DCA file '''
tar = tarfile.open(path_h_DCA, "r:gz")
for member in tar.getmembers():
f = tar.extractfile(member)
if f is not None:
content = f.read()
load_bytes = BytesIO(content)
h = np.load(load_bytes)
tar = tarfile.open(path_J_DCA, "r:gz")
for member in tar.getmembers():
f = tar.extractfile(member)
if f is not None:
content = f.read()
load_bytes = BytesIO(content)
J = np.load(load_bytes)
return h,J
def compute_sm_energy_dict(seq, h ,J):
''' for SINGLE MUTANTS, return a dictionary d[idx, mutated_aa] = energy - energy_wild_type
it can be VERY SLOW and d_sm can be BIG (all possible single mutants ~ 21*L)
see compute_sm_energy below for a faster, targeted alternative '''
E0 = compute_energy(seq,h,J)
d_sm = {}
for i in range(0, len(seq)):
print(i, len(seq))
#add also the gap
for aa in valid_aa:
new_seq = seq[:i] + aa + seq[(i+1):]
E = compute_energy(new_seq,h,J)
print(E)
d_sm[i,aa] = np.round(E-E0,4)
return d_sm
def compute_sm_energy(seq, h ,J, idx, aa ):
''' for SINGLE MUTANTS, given the ref_seq, h, J and idx (positions mutated), aa (mutated amino acids),
return the sum of the energies of the corresponding single mutants '''
E0 = compute_energy(seq,h,J)
E_sum_sm = 0
for i,a_i in zip(idx, aa):
new_seq = seq[:i] + a_i + seq[(i+1):]
E = compute_energy(new_seq,h,J)
E_sum_sm += E
return np.round(E_sum_sm,4)
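# compute_energy evaluates the Potts/DCA Hamiltonian used throughout this module (comment added for clarity):
#   E(s) = - sum_i h_i(s_i) - sum_{i<j} J_ij(s_i, s_j)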
def compute_energy(seq, h, J, parallel = False):
if all_standard_aa(seq):
if(parallel == True):
#NOTE: the parallel path previously disagreed with the serial one because
#compute_energy_given_ai added (rather than subtracted) the field term h; the sign is fixed below.
#Parallel execution can still be slower than serial (likely memory-bound rather than CPU-bound).
E = 0
all_ei = Parallel(n_jobs=num_cores_energy)(delayed(compute_energy_given_ai)(seq, h, J, idx_ai) for idx_ai in range(0,len(seq)))
E = np.sum(all_ei)
return E
if(parallel == False):
E = 0
for idx_aa1 in range(0, len(seq)):
aa1 = seq[idx_aa1]
E -= h[d_aa_num[aa1], idx_aa1]
for idx_aa2 in range(idx_aa1+1, len(seq)):
aa2 = seq[idx_aa2]
E -= J[d_aa_num[aa1], d_aa_num[aa2], idx_aa1, idx_aa2]
return E
def compute_energy_given_ai(seq,h,J, idx_ai):
'''e.g. idx_ai=1; computes the partial energy E_1 = -(h_1 + J_12 + J_13 + ...) (useful for parallelization)'''
ai = seq[idx_ai]
#print("**", idx_ai, ai)
ei = -h[d_aa_num[ai], idx_ai]  # negative sign to match the convention of compute_energy
for idx_aj in range(idx_ai+1, len(seq)):
aj = seq[idx_aj]
#print(idx_aj, aj)
ei -= J[d_aa_num[ai], d_aa_num[aj], idx_ai, idx_aj]
return ei
def compute_entropy_context_ind(path_msa):
''' compute context-independent entropy (from msa)'''
fi = compute_freq(path_msa)
S = compute_entropy_from_freq(fi)
return S
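# compute_entropy_from_freq implements the per-column Shannon entropy (comment added for clarity):
#   S_i = - sum_q f_i(q) * log f_i(q)      (divided by log(2) to express it in bits when base2=True)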
def compute_entropy_from_freq(fi, remove_gaps = True, base2 = True):
if remove_gaps:
fi = (fi[:20,:])/np.sum(fi[:20,:], axis = 0)
qq, N = fi.shape
S = []
for i in range(0,N):
si = 0
for q in range(0,qq):
si -= fi[q,i]*np.log(fi[q,i])
if base2:
si /= np.log(2)
S.append(si)
return S
def compute_entropy_context_dep(ref_seq, h,J ):
''' compute context-DEPENDENT entropy (from hhblits ref_seq, h, J)'''
q, N = h.shape
fi_plm = np.zeros(h.shape)
#same conventions as in Eq. 5.8 (PhD thesis)
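#the loop below computes the single-site conditional probabilities of the Potts model
#given the rest of the reference sequence (comment added for clarity):
#  f_i(a | s) = exp( h_i(a) + sum_{j!=i} J_ij(a, s_j) ) / sum_b exp( h_i(b) + sum_{j!=i} J_ij(b, s_j) )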
for i in range(0,N):
#compute denominator
denom = 0
for b in range(0,q):
arg_denom = h[b,i]
for j in range(0,N):
if(j!=i):
aj = d_aa_num[ref_seq[j]]
arg_denom += J[b, aj ,i, j]
denom += np.exp(arg_denom)
# compute numerator
for ai in range(0,q):
arg_num = h[ai,i]
for j in range(0,N):
if(j!=i):
aj = d_aa_num[ref_seq[j]]
arg_num += J[ai, aj ,i, j]
num = np.exp(arg_num)
fi_plm[ai,i] = num/denom
#return the entropy
S = compute_entropy_from_freq(fi_plm)
return S
def compute_num_gap(seq):
'''return the number of gaps in a sequence '''
num_gap = 0
for _,char in enumerate(seq):
if(char == '-'):
num_gap += 1
return num_gap
def compute_gap_fraction(seq):
num_gap = compute_num_gap(seq)
frac_gap = (num_gap+0.0)/len(seq)
return frac_gap
def compute_diff(ref_seq, seq):
''' compute the mutations between two strings, return idx_mut, aa_first_seq(wt), aa_second_seq(mutant)'''
vec_idx = []
vec_aa1 = []
vec_aa2 = []
for idx, aa in enumerate(zip(ref_seq,seq)):
aa1 = aa[0]
aa2 = aa[1]
if (aa1.lower() != aa2.lower()):
vec_idx.append(idx)
vec_aa1.append(aa1)
vec_aa2.append(aa2)
return vec_idx, vec_aa1, vec_aa2
def compute_dist(ref_seq, seq):
distance = sum([1 for x, y in zip(ref_seq, seq) if x.lower() != y.lower()])
return distance
def compute_dist_excluding_gaps(ref_seq, seq):
# distance = sum([1 for x, y in zip(ref_seq, seq) if ( x.lower() != y.lower() or x == '-' or y == '-' )])
distance = 0
for x, y in zip(ref_seq, seq):
if x == '-':
continue
elif y == '-':
continue
elif x.lower() != y.lower():
distance += 1
return distance
def compute_seqid(ref_seq, seq):
'''return the sequence identity (seqid) '''
distance = compute_dist_excluding_gaps(ref_seq,seq)
distance /= len(seq)
seqid = 1 - distance
return seqid
def compute_freq(path_msa):
''' compute single point frequencies of an MSA '''
records_msa = list(SeqIO.parse(open(path_msa,'r'), "fasta"))
fi = np.zeros(( len(d_aa_num), len(records_msa[0].seq) ))
for idx_rec, rec in enumerate(records_msa):
seq = rec.seq
for idx_aa, amino_a in enumerate(seq):
fi[d_aa_num[amino_a], idx_aa] += 1
#add (small) pseudocount to take into account 0 frequencies (0*log(0))
alpha = 0.0001
fi = (1-alpha)*fi + alpha/2
#normalize
fi /= fi.sum(axis = 0)
return fi
def all_standard_aa(seq):
'''return True if sequence contains only standard-aa'''
for char in seq:
if((char not in valid_aa) and char !='-'):
#print("seq containing non standard aa: "+char)
return False
return True
def split_proteome(path_ref_proteome, name_ref_proteome, tmp_path):
''' simple function to split the reference proteome into individual reference proteins'''
with open(os.path.join(path_ref_proteome, name_ref_proteome), "r") as input_handle:
for record_ref in SeqIO.parse(input_handle, "fasta"):
name = record_ref.id
seq_ref = str(record_ref.seq)
#save tmp file with the seq of the reference
name_tmp_file = "ref_"+name
f_tmp = open(os.path.join(tmp_path,name_tmp_file),"w")
f_tmp.write(">"+name+"\n")
f_tmp.write(seq_ref)
f_tmp.close()
return 0
def run_hhblits(path_hhblits, path_ref_prot, path_db, path_msa_out, num_cores):
''' run hhblits, get the distant homologs MSA, return the number of sequences '''
#1) run hhblits
FNULL = open(os.devnull, 'w')
subprocess.run([path_hhblits, '-i', path_ref_prot, '-d', path_db, '-oa3m', path_ref_prot+".a3m", '-cpu' , str(num_cores)], stdout=FNULL, stderr=subprocess.STDOUT)
#num of sequences
file_out = open(path_msa_out, 'w')
#2) parse and filter the hhblits msa
with open(path_ref_prot+".a3m", "r") as input_handle:
for idx_record, record in enumerate(SeqIO.parse(input_handle, "fasta")):
seq = str(record.seq)
#hhblits output is a3m format; to make it a fasta, remove dots and lowercase characters
seq = ''.join(char for char in seq if (char.isupper() or char =='-'))
# 2.1) do the filtering
records_ref = list(SeqIO.parse(open(path_ref_prot,'r'), "fasta"))
ref_seq = str(records_ref[0].seq)
# - remove sequences which are too gapped (i.e. gap fraction must be less than 10%)
# - remove sequences which are CLOSE to the reference sequence (i.e. sequence identity must be LESS than 90%)
# - remove sequences containing non standard aa
if( (compute_gap_fraction(seq) < 0.1) and (compute_seqid(ref_seq, seq) < 0.9) and all_standard_aa(seq)):
file_out.write(str(">"+record.id)+'\n')
file_out.write(str(seq)+'\n')
file_out.close()
return 0
def filterMSA(path_ref_prot, path_msa_in, path_msa_out, include_refseq=True, max_grap_fraction = 0.2, max_seq_id = 0.9):
file_out = open(path_msa_out, 'w')
# parse and filter the msa
records_ref = list(SeqIO.parse(open(path_ref_prot,'r'), "fasta"))
ref_seq = str(records_ref[0].seq)
with open(path_msa_in, "r") as input_handle:
count = 1
for idx_record, record in enumerate(SeqIO.parse(input_handle, "fasta")):
seq = str(record.seq)
#remove dot and lower
seq = ''.join(char for char in seq if (char.isupper() or char =='-'))
# do the filtering
# - remove sequences which are too gapped (i.e. gap fraction must be below max_grap_fraction)
# - remove sequences which are too close to the reference sequence (i.e. sequence identity must be below max_seq_id)
# - remove sequences containing non standard aa
if include_refseq and count == 1: # Keep the first seq, i.e., the reference sequence
file_out.write(str(">"+record.id)+'\n')
file_out.write(str(seq)+'\n')
count += 1
elif( (compute_gap_fraction(seq) < max_grap_fraction) and (compute_seqid(ref_seq, seq) < max_seq_id) and all_standard_aa(seq)):
file_out.write(str(">"+record.id)+'\n')
file_out.write(str(seq)+'\n')
file_out.close()
return 0
def filterMSA_gisaid(path_ref_prot, path_msa_in, path_msa_out, max_grap_fraction = 0.2, min_seq_id = 0.9):
file_out = open(path_msa_out, 'w')
# parse and filter the msa
records_ref = list(SeqIO.parse(open(path_ref_prot,'r'), "fasta"))
ref_seq = str(records_ref[0].seq)
with open(path_msa_in, "r") as input_handle:
count = 1
for idx_record, record in enumerate(SeqIO.parse(input_handle, "fasta")):
seq = str(record.seq)
#remove dot and lower
seq = ''.join(char for char in seq if (char.isupper() or char =='-'))
# do the filtering
# - remove sequences which are too gapped (i.e. gap fraction must be below max_grap_fraction)
# - remove sequences which are far from the reference sequence (i.e. sequence identity must be greater than min_seq_id)
# - remove sequences containing non standard aa
if count == 1: # Keep the first seq, i.e., the reference sequence
file_out.write(str(">"+record.id)+'\n')
file_out.write(str(seq)+'\n')
count += 1
elif( (compute_gap_fraction(seq) < max_grap_fraction) and (compute_seqid(ref_seq, seq) > min_seq_id) and all_standard_aa(seq)):
file_out.write(str(">"+record.id)+'\n')
file_out.write(str(seq)+'\n')
file_out.close()
return 0
def do_DCA_inference(path_msa, path_dca_par, min_num_seq, num_cores):
#1) number of lines (sequences). N.b. in Uniclust30 Meff ~ M
M = len(open(path_msa).readlines())/2
#2) do the inference with DCA
#only for msa with more than min_num_seq sequences
if( M > min_num_seq):
#TODO: tried to call julia directly from python (see 'import julia') but it did not work; fall back to a subprocess
# filename =
out_file = path_msa + '.out'
# f_julia= open(os.path.join(path_dca_par,out_file), 'a')
f_julia= open(out_file, 'w')
f_julia.write(path_msa.split("/")[-1]+".fa"+'\n')
f_julia.close() #close and re-open. Otherwise it writes the msa only at the end (after plmDCA info) (function waits subprocess to finish)
# f_julia= open(os.path.join(path_dca_par,out_file), 'a')
f_julia= open(out_file, 'a')  # re-open in append mode so the msa name written above is preserved
subprocess.run(["julia",'-p', str(num_cores), './src/plmDCA_inference.jl',path_msa, path_dca_par], stdout=f_julia, stderr=subprocess.STDOUT)
f_julia.close()
else:
print('... ERROR! too few seqs (M={0})!'.format(str(M)))
return 0
def run_phmmer(path_phmmer, path_ref_prot, path_db, path_msa_out, path_tmp_stockholm, path_tmp_msa, num_cores):
''' run phmmer, get the local homologs MSA (from E. coli strains) '''
file_out = open(path_msa_out, 'w')
#1) run phmmer
FNULL = open(os.devnull, 'w')
subprocess.run([path_phmmer, '-A', path_tmp_stockholm, '--cpu', str(num_cores), path_ref_prot, path_db], stdout=FNULL, stderr=subprocess.STDOUT)
#2) convert stockholm to fasta
subprocess.run(['./src/stockholm2fasta.pl', '-g', path_tmp_stockholm ], stdout=open(path_tmp_msa,'w'),stderr=subprocess.STDOUT)
#3) parse and filter the phmmer msa
with open(path_tmp_msa, "r") as input_handle:
for idx_record, record in enumerate(SeqIO.parse(input_handle, "fasta")):
seq = str(record.seq)
#remove dot and lower
seq = ''.join(char for char in seq if (char.isupper() or char =='-'))
# 3.1) do the filtering
records_ref = list(SeqIO.parse(open(path_ref_prot,'r'), "fasta"))
ref_seq = str(records_ref[0].seq)
# - remove sequences which are too gapped (i.e. sequence must contain fewer than 10 gaps)
# - remove sequences which are FAR from the reference sequence (i.e. sequence identity must be MORE than 90%)
# - remove sequences containing non standard aa
if( (compute_num_gap(seq) < 10) and (compute_seqid(ref_seq, seq) > 0.9) and all_standard_aa(seq)):
file_out.write(str(">"+record.id)+'\n')
file_out.write(str(seq)+'\n')
file_out.close()
#rm prefiltering msa
subprocess.run(['rm' , path_tmp_msa])
#rm stockholm (too big!)
subprocess.run(['rm' , path_tmp_stockholm])
return 0
def compute_energy_local_msa(ref_prot_file, output_file, ali_file,h,J, verbose ):
''' load DCA model, compute energy of strains (also e_sum_sm and positions mutated) '''
records_ref = list(SeqIO.parse(open(ref_prot_file,'r'), "fasta"))
ref_seq = str(records_ref[0].seq)
ref_name = str(records_ref[0].id)
E0 = 0
#0. compute energy of all single mutants -> NO need for it (and very slow); see function compute_sm_energy(seq, h, J, idx, aa)
#d_sm = compute_sm_energy(ref_seq, h, J)
path_msa_local = os.path.join(ali_file)
all_seq_name = []
all_seq_num_occurences= []
all_seq_e = []
all_seq_dist = []
all_seq_ref_prot = []
all_seq_sum_sm = []
all_seq_mut_idx= []
all_seq_mut_aa = []
E0 = compute_energy(ref_seq, h, J)
with open(path_msa_local,"r") as f:
for record in tqdm(SeqIO.parse(f,"fasta")):
seq = str(record.seq)
E = compute_energy(seq, h, J)
idx, aa1, aa2 = compute_diff(ref_seq,seq)
# sum of energies single mutants
E_sum_sm = compute_sm_energy(ref_seq, h, J, idx, aa2)
#num mutations
dist = len(idx)
name_seq_list = (str(record.id).split('/')[0]).split('-')
if(verbose == True):
for name_seq in name_seq_list:
all_seq_ref_prot.append(ref_name)
all_seq_name.append(name_seq)
all_seq_e.append(np.round(E,4))
all_seq_dist.append(int(dist))
all_seq_sum_sm.append(np.round(E_sum_sm,4))
all_seq_mut_idx.append(idx)
all_seq_mut_aa.append(aa2)
all_seq_e_e0 = np.round(all_seq_e - E0,4)
if(verbose == False):
all_seq_ref_prot.append(ref_name)
all_seq_num_occurences.append(len(name_seq_list))
all_seq_e.append(np.round(E,4))
all_seq_dist.append(int(dist))
all_seq_sum_sm.append(np.round(E_sum_sm,4))
all_seq_e_e0 = np.round(all_seq_e - E0 ,4)
all_seq_mut_idx.append(idx)
all_seq_mut_aa.append(aa2)
if(verbose == True):
df = pd.DataFrame({'ref':all_seq_ref_prot, 'seq_name':all_seq_name, 'e':all_seq_e, 'e-e0':all_seq_e_e0, 'e_sum_sm': all_seq_sum_sm, 'dist':all_seq_dist, 'idx_mut':all_seq_mut_idx, 'aa_mut': all_seq_mut_aa})
if(verbose == False):
df = pd.DataFrame({'ref':all_seq_ref_prot, 'num_occurences':all_seq_num_occurences, 'e':all_seq_e, 'e-e0':all_seq_e_e0, 'e_sum_sm': all_seq_sum_sm,'dist':all_seq_dist, 'idx_mut':all_seq_mut_idx, 'aa_mut': all_seq_mut_aa})
df.to_csv(os.path.join(output_file), index = False)
return 0
def compute_energy_ind_msa(ref_prot_file, ali_file, output_file, h,J ):
''' load DCA model, compute energy of mutations sampled from the profile model'''
records_ref = list(SeqIO.parse(open(ref_prot_file,'r'), "fasta"))
ref_seq = str(records_ref[0].seq)
ref_name = str(records_ref[0].id)
path_msa_local_ind = os.path.join(ali_file)
all_seq_ref_prot = []
all_seq_e = []
all_seq_e_e0 = []
all_seq_dist = []
E0 = compute_energy(ref_seq, h, J)
with open(path_msa_local_ind,"r") as f:
for record in tqdm(SeqIO.parse(f,"fasta")):
seq = str(record.seq)
dist = compute_dist(ref_seq,seq)
E = compute_energy(seq, h, J)
all_seq_ref_prot.append(ref_name)
all_seq_e.append(np.round(E,4))
all_seq_dist.append(int(dist))
all_seq_e_e0 = np.round(all_seq_e - E0 ,4)
df_ind = pd.DataFrame({'ref':all_seq_ref_prot, 'e':all_seq_e, 'e-e0':all_seq_e_e0, 'dist':all_seq_dist})
df_ind.to_csv(os.path.join(output_file), index = False)
return 0
def compute_energy_rand_msa(ref_prot_file, ali_file, output_file, h,J):
''' load DCA model, compute energy of mutations sampled from the random model'''
records_ref = list(SeqIO.parse(open(ref_prot_file,'r'), "fasta"))
ref_seq = str(records_ref[0].seq)
ref_name = str(records_ref[0].id)
path_msa_local_rand= os.path.join(ali_file)
all_seq_ref_prot = []
all_seq_e = []
all_seq_e_e0 = []
all_seq_dist = []
E0 = compute_energy(ref_seq, h, J)
with open(path_msa_local_rand,"r") as f:
for record in tqdm(SeqIO.parse(f,"fasta")):
seq = str(record.seq)
dist = compute_dist(ref_seq,seq)
E = compute_energy(seq, h, J)
all_seq_ref_prot.append(ref_name)
all_seq_e.append(np.round(E,4))
all_seq_dist.append(int(dist))
all_seq_e_e0 = np.round(all_seq_e - E0 ,4)
df_rand = pd.DataFrame({'ref':all_seq_ref_prot, 'e':all_seq_e, 'e-e0':all_seq_e_e0, 'dist':all_seq_dist})
df_rand.to_csv(os.path.join(output_file, 'e_'+ref_name+'_rand.csv'), index = False)
return 0
def compute_all_entropies(ref_prot_file, ali_file, ali_file_local, output_file, h, J ):
''' compute s_ind, s_dep , s_strains '''
records_ref = list(SeqIO.parse(open(ref_prot_file,'r'), "fasta"))
ref_seq = str(records_ref[0].seq)
ref_name = str(records_ref[0].id)
####################################################################################################
#context IND entropy (from msa_hhblits)
path_msa_hhblits = os.path.join(ali_file)
S_ind = np.round(compute_entropy_context_ind(path_msa_hhblits),4)
#context DEP entropy (from ref_seq, h, J)
S_dep = np.round(compute_entropy_context_dep(ref_seq, h,J),4)
#compute entropy in MSA_local (hhblits) (i.e. observed polymorphism?)
path_msa_local = os.path.join(ali_file_local)
S_local_obs = np.round(compute_entropy_context_ind(path_msa_local),4)
all_seq_ref_prot = [ref_name for i in range(0,len(ref_seq))]
all_seq_idx= [i for i in range(0,len(ref_seq))]
df_s = pd.DataFrame({'ref':all_seq_ref_prot, 'idx': all_seq_idx, 's_ind':S_ind, 's_dep':S_dep, 's_local_obs':S_local_obs})
df_s.to_csv(os.path.join(output_file), index = False)
return 0
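# Illustrative usage sketch (added for clarity; the file names below are placeholders, not files
# shipped with this code): load the plmDCA parameters, then score every strain sequence of a
# local MSA against the reference protein with the DCA energy defined above.
if __name__ == "__main__":
    h_dca, J_dca = read_dca_par("example_h.tar.gz", "example_J.tar.gz")
    compute_energy_local_msa("example_ref_protein.fasta",  # reference protein (fasta)
                             "example_energies.csv",       # output csv
                             "example_msa_local.fasta",    # local MSA of strains
                             h_dca, J_dca, verbose=False)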
|
[
"tarfile.open",
"subprocess.run",
"os.path.join",
"io.BytesIO",
"multiprocessing.cpu_count",
"numpy.log",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"joblib.Parallel",
"Bio.SeqIO.parse",
"pandas.DataFrame",
"joblib.delayed",
"numpy.load",
"numpy.round"
]
|
import os
import random
import argparse
import time
from datetime import datetime
from tqdm import tqdm
import paddle
paddle.disable_static()
import paddle.nn.functional as F
import paddle.optimizer as optim
from pgl.utils.data import Dataloader
import numpy as np
from models import DeepFRI
from data_preprocessing import MyDataset
from custom_metrics import do_compute_metrics
from utils import add_saved_args_and_params
def do_compute(model, batch):
logits = model(*batch[:-1])
return logits, batch[-1]
def run_batch(model, data_loader, desc):
logits_list = []
ground_truth = []
for batch in tqdm(data_loader, desc=f"{desc}"):
logits, labels = do_compute(model, batch)
logits_list.append(F.sigmoid(logits).tolist())
ground_truth.append(labels.tolist())
logits_list = np.concatenate(logits_list)
ground_truth = np.concatenate(ground_truth)
metrics = do_compute_metrics(ground_truth, logits_list)
return metrics
def test(model, test_data_loader):
model.eval()
with paddle.no_grad():
test_metrics = run_batch(model, test_data_loader, "test")
print(f"#### Test results")
print("f_max: {0:.4f}, auprc: {1:.4f}".format(*test_metrics))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--cuda", type=str, default="0", help="GPU ID to train on.")
parser.add_argument("-bs", "--batch_size", type=int, default=64, help="Batch size.")
parser.add_argument(
"--test_file",
type=str,
default="data/nrPDB-GO_2019.06.18_test.txt",
help="File with list of protein chains for training.",
)
parser.add_argument(
"--protein_chain_graphs",
type=str,
default="data/chain_graphs",
help="Path to graph reprsentations of proteins.",
)
parser.add_argument(
"--model_name",
type=str,
required=True,
help="Path to saved/trained methods with parameters.",
)
parser.add_argument(
"--label_data_path",
type=str,
required=True,
help="Mapping containing protein chains with associated their labels. Choose from [molecular_function.npz, cellular_component.npz, biological_process.npz]",
)
parser.add_argument(
"-lm",
"--lm_model_name",
type=str,
help="Path to the pre-trained LSTM-Language Model.",
)
parser.add_argument(
"--use_cache",
type=int,
default=0,
choices=[0, 1],
help="Whether to save protein graph in memory for fast reading.",
)
args = parser.parse_args()
args.use_cache = bool(args.use_cache)
if int(args.cuda) == -1:
paddle.set_device("cpu")
else:
paddle.set_device("gpu:%s" % args.cuda)
test_chain_list = [p.strip() for p in open(args.test_file)]
saved_state_dict = paddle.load(args.model_name)
# In-place assignment
add_saved_args_and_params(args, saved_state_dict)
test_dataset = MyDataset(
test_chain_list,
args.n_channels,
args.pad_len,
args.protein_chain_graphs,
args.cmap_thresh,
args.label_data_path,
args.use_cache,
)
test_loader = Dataloader(
test_dataset, batch_size=args.batch_size, collate_fn=test_dataset.collate_fn
)
args.n_labels = test_dataset.n_labels
model = DeepFRI(args)
model.set_state_dict(saved_state_dict["model"])
model.eval()
print(f"\n{args.task}: Testing on {len(test_dataset)} protein samples.")
print(f"Starting at {datetime.now()}\n")
print(args)
test(model, test_loader)
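# Example invocation (the script name and file paths are hypothetical; the flags come from the
# argument parser defined above):
#   python test_deepfri.py --cuda 0 --batch_size 64 \
#       --model_name saved_models/deepfri_mf.pdparams \
#       --label_data_path data/molecular_function.npz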
|
[
"custom_metrics.do_compute_metrics",
"paddle.no_grad",
"argparse.ArgumentParser",
"paddle.nn.functional.sigmoid",
"tqdm.tqdm",
"utils.add_saved_args_and_params",
"datetime.datetime.now",
"paddle.disable_static",
"data_preprocessing.MyDataset",
"numpy.concatenate",
"paddle.load",
"models.DeepFRI",
"paddle.set_device",
"pgl.utils.data.Dataloader"
]
|
"""
Kravatte Achouffe Cipher Suite: Encryption, Decryption, and Authentication Tools based on the Farfalle modes
Copyright 2018 <NAME>
see LICENSE file
"""
from multiprocessing import Pool
from math import floor, ceil, log2
from typing import Tuple
from os import cpu_count
from ctypes import memset
import numpy as np
KravatteTagOutput = Tuple[bytes, bytes]
KravatteValidatedOutput = Tuple[bytes, bool]
class Kravatte(object):
"""Implementation of the Farfalle Pseudo-Random Function (PRF) construct utilizing the
Keccak-1600 permutation.
"""
KECCAK_BYTES = 200
'''Number of Bytes in Keccak-1600 state'''
KECCAK_LANES = 25
'''Number of 8-Byte lanes in Keccak-1600 state'''
KECCAK_PLANES_SLICES = 5
''' Size of x/y dimensions of Keccak lane array '''
THETA_REORDER = ((4, 0, 1, 2, 3), (1, 2, 3, 4, 0))
IOTA_CONSTANTS = np.array([0x000000000000800A, 0x800000008000000A, 0x8000000080008081,
0x8000000000008080, 0x0000000080000001, 0x8000000080008008],
dtype=np.uint64)
'''Iota Step Round Constants For Keccak-p(1600, 4) and Keccak-p(1600, 6)'''
RHO_SHIFTS = np.array([[0, 36, 3, 41, 18],
[1, 44, 10, 45, 2],
[62, 6, 43, 15, 61],
[28, 55, 25, 21, 56],
[27, 20, 39, 8, 14]], dtype=np.uint64)
'''Lane Shifts for Rho Step'''
CHI_REORDER = ((1, 2, 3, 4, 0), (2, 3, 4, 0, 1))
'''Lane Re-order Mapping for Chi Step'''
PI_ROW_REORDER = np.array([[0, 3, 1, 4, 2],
[1, 4, 2, 0, 3],
[2, 0, 3, 1, 4],
[3, 1, 4, 2, 0],
[4, 2, 0, 3, 1]])
'''Row Re-order Mapping for Pi Step'''
PI_COLUMN_REORDER = np.array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]])
'''Column Re-order Mapping for Pi Step'''
COMPRESS_ROW_REORDER = np.array([[0, 0, 0, 0, 1],
[1, 1, 1, 1, 2],
[2, 2, 2, 2, 3],
[3, 3, 3, 3, 4],
[4, 4, 4, 4, 0]])
'''Row Re-order Mapping for Compress Step'''
COMPRESS_COLUMN_REORDER = np.array([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]])
'''Column Re-order Mapping for Compress Step'''
EXPAND_ROW_REORDER = np.array([[0, 0, 0, 1, 1],
[1, 1, 1, 2, 2],
[2, 2, 2, 3, 3],
[3, 3, 3, 4, 4],
[4, 4, 4, 0, 0]])
'''Row Re-order Mapping for Expand Step'''
EXPAND_COLUMN_REORDER = np.array([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 4, 4]])
'''Column Re-order Mapping for Expand Step'''
def __init__(self, key: bytes=b'', workers: int=None, mp_input: bool=True, mp_output: bool=True):
"""
Initialize Kravatte with user key
Inputs:
key (bytes): encryption/authentication key
workers (int): parallel processes to use in compression/expansion operations
mp_input (bool): Enable multi-processing for calculations on input data
mp_output (bool): Enable multi-processing for calculations on output data
"""
self.update_key(key)
self.reset_state()
# Enable Standard or Optimized Multi-process codepaths
if workers is not None:
self.collect_message = self._collect_message_mp if mp_input else self._collect_message_sp
self.generate_digest = self._generate_digest_mp if mp_output else self._generate_digest_sp
self.workers = cpu_count() if workers == 0 else workers
else:
self.collect_message = self._collect_message_sp
self.generate_digest = self._generate_digest_sp
self.workers = None
def update_key(self, key: bytes) -> None:
"""
Pad and compute new Kravatte base key from bytes source.
Inputs:
key (bytes): user provided bytes to be padded (if necessary) and computed into Kravatte base key
"""
key_pad = self._pad_10_append(key, self.KECCAK_BYTES)
key_array = np.frombuffer(key_pad, dtype=np.uint64, count=self.KECCAK_LANES,
offset=0).reshape([self.KECCAK_PLANES_SLICES,
self.KECCAK_PLANES_SLICES], order='F')
self.kra_key = self._keccak(key_array)
def reset_state(self) -> None:
"""
Clear existing Farfalle/Kravatte state and prepares for new input message collection.
Elements reset include:
- Message block collector
- Rolling key
- Currently stored output digest
- Digest Active and New Collector Flags
Inputs:
None
"""
self.roll_key = np.copy(self.kra_key)
self.collector = np.zeros([5, 5], dtype=np.uint64)
self.digest = bytearray(b'')
self.digest_active = False
self.new_collector = True
def _generate_absorb_queue(self, absorb_steps: int, kra_msg: bytes):
"""
Generator for Keccak-sized blocks of input message for Farfalle compression
Inputs:
absorb_steps (int): Number of blocks to generate for absorption
kra_msg (bytes): padded input message ready for slicing into input blocks
"""
for msg_block in range(absorb_steps):
yield (np.frombuffer(kra_msg, dtype=np.uint64, count=25, offset=msg_block * self.KECCAK_BYTES).reshape([5, 5], order='F') ^ self.roll_key)
self.roll_key = self._kravatte_roll_compress(self.roll_key)
def _collect_message_sp(self, message: bytes, append_bits: int=0, append_bit_count: int=0) -> None:
"""
Pad and Process Blocks of Message into Kravatte collector state
Inputs:
message (bytes): arbitrary number of bytes to be padded into Keccak blocks and absorbed into the collector
append_bits (int): bits to append to the message before padding. Required for more advanced Kravatte modes.
append_bit_count (int): number of bits to append
"""
if self.digest_active:
self.reset_state()
if self.new_collector:
self.new_collector = False
else:
self.roll_key = self._kravatte_roll_compress(self.roll_key)
# Pad Message
msg_len = len(message)
kra_msg = self._pad_10_append(message, msg_len + (self.KECCAK_BYTES - (msg_len % self.KECCAK_BYTES)), append_bits, append_bit_count)
absorb_steps = len(kra_msg) // self.KECCAK_BYTES
# Absorb into Collector
for msg_block in range(absorb_steps):
m = np.frombuffer(kra_msg, dtype=np.uint64, count=25, offset=msg_block * self.KECCAK_BYTES).reshape([5, 5], order='F')
m_k = m ^ self.roll_key
self.roll_key = self._kravatte_roll_compress(self.roll_key)
self.collector = self.collector ^ self._keccak(m_k)
def _collect_message_mp(self, message: bytes, append_bits: int=0, append_bit_count: int=0) -> None:
"""
Pad and Process Blocks of Message into Kravatte collector state - Multi-process Aware Variant
Inputs:
message (bytes): arbitrary number of bytes to be padded into Keccak blocks and absorbed into the collector
append_bits (int): bits to append to the message before padding. Required for more advanced Kravatte modes.
append_bit_count (int): number of bits to append
"""
if self.digest_active:
self.reset_state()
if self.new_collector:
self.new_collector = False
else:
self.roll_key = self._kravatte_roll_compress(self.roll_key)
# Pad Message
msg_len = len(message)
kra_msg = self._pad_10_append(message, msg_len + (self.KECCAK_BYTES - (msg_len % self.KECCAK_BYTES)), append_bits, append_bit_count)
absorb_steps = len(kra_msg) // self.KECCAK_BYTES
workload = 1 if (absorb_steps // self.workers) == 0 else (absorb_steps // self.workers)
with Pool(processes=self.workers) as kravatte_pool:
for output_element in kravatte_pool.imap_unordered(self._keccak, self._generate_absorb_queue(absorb_steps, kra_msg), chunksize=workload):
self.collector ^= output_element
def _generate_digest_sp(self, output_size: int, short_kravatte: bool=False) -> None:
"""
Squeeze an arbitrary number of bytes from collector state
Inputs:
output_size (int): Number of bytes to generate and store in Kravatte digest parameter
short_kravatte (bool): Enable/disable the short Kravatte variant required by other Kravatte modes
"""
if not self.digest_active:
self.collector = self.collector if short_kravatte else self._keccak(self.collector)
self.roll_key = self._kravatte_roll_compress(self.roll_key)
self.digest_active = True
self.digest = bytearray(b'')
full_output_size = output_size + (200 - (output_size % 200)) if output_size % 200 else output_size
generate_steps = full_output_size // 200
for _ in range(generate_steps):
collector_squeeze = self._keccak(self.collector)
self.collector = self._kravatte_roll_expand(self.collector)
self.digest.extend((collector_squeeze ^ self.roll_key).tobytes('F'))
self.digest = self.digest[:output_size]
def _generate_squeeze_queue(self, generate_steps: int):
"""
Generator for Keccak-sized blocks of expanded collector state for output squeezing
Inputs:
generate_steps (int): Number of output blocks to generate for squeezing
"""
for _ in range(generate_steps):
yield self.collector
self.collector = self._kravatte_roll_expand(self.collector)
def _generate_digest_mp(self, output_size: int, short_kravatte: bool=False) -> None:
"""
Squeeze an arbitrary number of bytes from collector state - Multi-process Aware Variant
Inputs:
output_size (int): Number of bytes to generate and store in Kravatte digest parameter
short_kravatte (bool): Enable/disable the short Kravatte variant required by other Kravatte modes
"""
if not self.digest_active:
self.collector = self.collector if short_kravatte else self._keccak(self.collector)
self.roll_key = self._kravatte_roll_compress(self.roll_key)
self.digest_active = True
self.digest = bytearray(b'')
full_output_size = output_size + (200 - (output_size % 200)) if output_size % 200 else output_size
generate_steps = full_output_size // 200
workload = 1 if (generate_steps // self.workers) == 0 else (generate_steps // self.workers)
with Pool(processes=self.workers) as kravatte_pool:
for digest_block in kravatte_pool.imap(self._keccak_xor_key, self._generate_squeeze_queue(generate_steps), chunksize=workload):
self.digest.extend(digest_block.tobytes('F'))
self.digest = self.digest[:output_size]
def _keccak(self, input_array):
"""
Implementation of Keccak-1600 PRF defined in FIPS 202
Inputs:
input_array (numpy array): Keccak compatible state array: 200-byte as 5x5 64-bit lanes
Return:
numpy array: Keccak compatible state array: 200-byte as 5x5 64-bit lanes
"""
state = np.copy(input_array)
for round_num in range(6):
# theta_step:
# Exclusive-or each slice-lane by state based permutation value
array_shift = state << 1 | state >> 63
state ^= np.bitwise_xor.reduce(state[self.THETA_REORDER[0], ], 1, keepdims=True) ^ np.bitwise_xor.reduce(array_shift[self.THETA_REORDER[1], ], 1, keepdims=True)
# rho_step:
# Left Rotate each lane by pre-calculated value
state = state << self.RHO_SHIFTS | state >> np.uint64(64 - self.RHO_SHIFTS)
# pi_step:
# Shuffle lanes to pre-calculated positions
state = state[self.PI_ROW_REORDER, self.PI_COLUMN_REORDER]
# chi_step:
# Exclusive-or each individual lane based on and/invert permutation
state ^= ~state[self.CHI_REORDER[0], ] & state[self.CHI_REORDER[1], ]
# iota_step:
# Exclusive-or first lane of state with round constant
state[0, 0] ^= self.IOTA_CONSTANTS[round_num]
return state
def _keccak_xor_key(self, input_array):
"""
Implementation of Keccak-1600 PRF defined in FIPS 202 plus an XOR with the current key state
Inputs:
input_array (numpy array): Keccak compatible state array: 200-byte as 5x5 64-bit lanes
Return:
numpy array: Keccak compatible state array: 200-byte as 5x5 64-bit lanes
"""
state = np.copy(input_array)
for round_num in range(6):
# theta_step:
# Exclusive-or each slice-lane by state based permutation value
array_shift = state << 1 | state >> 63
state ^= np.bitwise_xor.reduce(state[self.THETA_REORDER[0], ], 1, keepdims=True) ^ np.bitwise_xor.reduce(array_shift[self.THETA_REORDER[1], ], 1, keepdims=True)
# rho_step:
# Left Rotate each lane by pre-calculated value
state = state << self.RHO_SHIFTS | state >> np.uint64(64 - self.RHO_SHIFTS)
# pi_step:
# Shuffle lanes to pre-calculated positions
state = state[self.PI_ROW_REORDER, self.PI_COLUMN_REORDER]
# chi_step:
# Exclusive-or each individual lane based on and/invert permutation
state ^= ~state[self.CHI_REORDER[0], ] & state[self.CHI_REORDER[1], ]
# iota_step:
# Exclusive-or first lane of state with round constant
state[0, 0] ^= self.IOTA_CONSTANTS[round_num]
return state ^ self.roll_key
def scrub(self):
"""
Explicitly zero out both the key and collector array states. Use prior to reinitialization of
key or when finished with object to help avoid leaving secret/interim data in memory.
WARNING: Does not guarantee other copies of these arrays are not present elsewhere in memory
Not applicable in multi-process mode.
Inputs:
None
Return:
None
"""
# Clear collector array
collector_location = self.collector.ctypes.data
memset(collector_location, 0x00, self.KECCAK_BYTES)
# Clear Kravatte base key array
key_location = self.kra_key.ctypes.data
memset(key_location, 0x00, self.KECCAK_BYTES)
# Clear Kravatte rolling key array
key_location = self.roll_key.ctypes.data
memset(key_location, 0x00, self.KECCAK_BYTES)
def _kravatte_roll_compress(self, input_array):
"""
Kravatte defined roll function for compression side of Farfalle PRF
Inputs:
input_array (numpy array): Keccak compatible state array: 200-byte as 5x5 64-bit lanes
Return:
numpy array: Keccak compatible state array: 200-byte as 5x5 64-bit lanes
"""
state = input_array[self.COMPRESS_ROW_REORDER, self.COMPRESS_COLUMN_REORDER]
state[4, 4] = ((state[4, 4] << np.uint64(7)) | (state[4, 4] >> np.uint64(57))) ^ \
(state[0, 4]) ^ \
(state[0, 4] >> np.uint64(3))
return state
def _kravatte_roll_expand(self, input_array):
"""
Kravatte defined roll function for expansion side of Farfalle PRF
Inputs:
input_array (numpy array): Keccak compatible state array: 200-byte as 5x5 64-bit lanes
Return:
numpy array: Keccak compatible state array: 200-byte as 5x5 64-bit lanes
"""
state = input_array[self.EXPAND_ROW_REORDER, self.EXPAND_COLUMN_REORDER]
state[4, 4] = ((input_array[0, 3] << np.uint64(7)) | (input_array[0, 3] >> np.uint64(57))) ^ \
((input_array[1, 3] << np.uint64(18)) | (input_array[1, 3] >> np.uint64(46))) ^ \
((input_array[1, 3] >> np.uint64(1)) & input_array[2, 3])
return state
@staticmethod
def _pad_10_append(input_bytes: bytes, desired_length: int, append_bits: int=0, append_bit_count: int=0) -> bytes:
"""
Farfalle defined padding function. Limited to byte divisible inputs only
Inputs:
input_bytes (bytes): Collection of bytes
desired_length (int): Number of bytes to pad input len out to
append_bits (int): one or more bits to be inserted before the padding starts. Allows
"appending" bits as required by several Kravatte modes
append_bit_count (int): number of bits to append
Return:
bytes: input bytes with padding applied
"""
start_len = len(input_bytes)
if start_len == desired_length:
return input_bytes
head_pad_byte = bytes([(0b01 << append_bit_count) | (((2**append_bit_count) - 1) & append_bits)])
pad_len = desired_length - (start_len % desired_length)
padded_bytes = input_bytes + head_pad_byte + (b'\x00' * (pad_len - 1))
return padded_bytes
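# Worked example (illustrative, added for clarity): Kravatte._pad_10_append(b'\xaa', 8)
#   -> b'\xaa' + b'\x01' + b'\x00' * 6   (a single 1 bit, byte aligned, followed by zero padding)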
@staticmethod
def compare_bytes(a: bytes, b: bytes) -> bool:
"""
Time Constant Byte Comparison Function
Inputs:
a (bytes): first set of bytes
b (bytes): second set of bytes
Return:
boolean
"""
compare = True
if len(a) != len(b):
return False
for (element_a, element_b) in zip(a, b):
compare = compare and (element_a == element_b)
return compare
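# Illustrative direct use of the Kravatte object (example values, not official test vectors):
# >>> k = Kravatte(b'my secret key')
# >>> k.collect_message(b'first chunk of data')
# >>> k.collect_message(b'second chunk of data')
# >>> k.generate_digest(64)
# >>> len(k.digest)
# 64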
def mac(key: bytes, message: bytes, output_size: int, workers: int=None, mp_input: bool=True,
mp_output: bool=True) -> bytearray:
"""
Kravatte Message Authentication Code Generation of given length from a message
based on a user provided key
Args:
key (bytes): User authentication key (0 - 200 bytes)
message (bytes): User message
output_size (int): Size of authenticated digest in bytes
workers (int): parallel processes to use in compression/expansion operations
mp_input (bool): Enable multi-processing for calculations on input data
mp_output (bool): Enable multi-processing for calculations on output data
Returns:
bytes: message authentication bytes of length output_size
"""
kravatte_mac_gen = Kravatte(key, workers=workers, mp_input=mp_input, mp_output=mp_output)
kravatte_mac_gen.collect_message(message)
kravatte_mac_gen.generate_digest(output_size)
kravatte_mac_gen.scrub()
return kravatte_mac_gen.digest
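# Illustrative usage of mac() (example values, not official test vectors):
# >>> tag = mac(b'0123456789abcdef', b'hello kravatte', 32)
# >>> len(tag)
# 32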
def siv_wrap(key: bytes, message: bytes, metadata: bytes, tag_size: int=32, workers: int=None,
mp_input: bool=True, mp_output: bool=True) -> KravatteTagOutput:
"""
Authenticated Encryption with Associated Data (AEAD) of a provided plaintext using a key and
metadata using the Synthetic Initialization Vector method described in the Farfalle/Kravatte
spec. Generates ciphertext (of equivalent length to the plaintext) and verification tag. Inverse
of siv_unwrap function.
Args:
key (bytes): Encryption key; 0-200 bytes in length
message (bytes): Plaintext message for encryption
metadata (bytes): Nonce/Seed value for authenticated encryption
tag_size (int, optional): The tag size in bytes. Defaults to 32 bytes as defined in the
Kravatte spec
workers (int): parallel processes to use in compression/expansion operations
mp_input (bool): Enable multi-processing for calculations on input data
mp_output (bool): Enable multi-processing for calculations on output data
Returns:
tuple (bytes, bytes): Bytes of ciphertext and tag
"""
# Initialize Kravatte
kravatte_siv_wrap = Kravatte(key, workers=workers, mp_input=mp_input, mp_output=mp_output)
# Generate Tag From Metadata and Plaintext
kravatte_siv_wrap.collect_message(metadata)
kravatte_siv_wrap.collect_message(message)
kravatte_siv_wrap.generate_digest(tag_size)
siv_tag = kravatte_siv_wrap.digest
# Generate Key Stream
kravatte_siv_wrap.collect_message(metadata)
kravatte_siv_wrap.collect_message(siv_tag)
kravatte_siv_wrap.generate_digest(len(message))
ciphertext = bytes([p_text ^ key_stream for p_text, key_stream in zip(message, kravatte_siv_wrap.digest)])
kravatte_siv_wrap.scrub()
return ciphertext, siv_tag
def siv_unwrap(key: bytes, ciphertext: bytes, siv_tag: bytes, metadata: bytes, workers: int=None,
mp_input: bool=True, mp_output: bool=True) -> KravatteValidatedOutput:
"""
Decryption of Synthetic Initialization Vector method described in the Farfalle/Kravatte
spec. Given a key, metadata, and validation tag, generates plaintext (of equivalent length to
the ciphertext) and validates message based on included tag, metadata, and key. Inverse of
siv_wrap function.
Args:
key (bytes): Encryption key; 0-200 bytes in length
ciphertext (bytes): Ciphertext SIV Message
siv_tag (bytes): Authenticating byte string
metadata (bytes): Metadata used to encrypt message and generate tag
workers (int): parallel processes to use in compression/expansion operations
mp_input (bool): Enable multi-processing for calculations on input data
mp_output (bool): Enable multi-processing for calculations on output data
Returns:
tuple (bytes, boolean): Bytes of plaintext and message validation boolean
"""
# Initialize Kravatte
kravatte_siv_unwrap = Kravatte(key, workers=workers, mp_input=mp_input, mp_output=mp_output)
# Re-Generate Key Stream
kravatte_siv_unwrap.collect_message(metadata)
kravatte_siv_unwrap.collect_message(siv_tag)
kravatte_siv_unwrap.generate_digest(len(ciphertext))
siv_plaintext = bytes([p_text ^ key_stream for p_text, key_stream in zip(ciphertext, kravatte_siv_unwrap.digest)])
# Re-Generate Tag From Metadata and Recovered Plaintext
kravatte_siv_unwrap.collect_message(metadata)
kravatte_siv_unwrap.collect_message(siv_plaintext)
kravatte_siv_unwrap.generate_digest(len(siv_tag))
generated_tag = kravatte_siv_unwrap.digest
# Check if tag matches provided tag matches reconstituted tag
valid_tag = kravatte_siv_unwrap.compare_bytes(siv_tag, generated_tag)
kravatte_siv_unwrap.scrub()
return siv_plaintext, valid_tag
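# Illustrative SIV round trip (example values, not official test vectors): wrapping and then
# unwrapping with the same key and metadata recovers the plaintext and validates the tag.
# >>> ct, tag = siv_wrap(b'my secret key', b'attack at dawn', b'message metadata')
# >>> pt, ok = siv_unwrap(b'my secret key', ct, tag, b'message metadata')
# >>> pt == b'attack at dawn', ok
# (True, True)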
class KravatteSAE(Kravatte):
"""
An authenticated encryption mode designed to track a session consisting of a series of messages
and an initialization nonce. ** DEPRECATED in favor of KravatteSANE **
"""
TAG_SIZE = 16
OFFSET = TAG_SIZE
def __init__(self, nonce: bytes, key: bytes=b'', workers: int=None, mp_input: bool=True,
mp_output: bool=True):
"""
Initialize KravatteSAE with user key and nonce
Args:
nonce (bytes) - random unique value to initialize the session with
key (bytes) - secret key for encrypting session messages
workers (int): parallel processes to use in compression/expansion operations
mp_input (bool): Enable multi-processing for calculations on input data
mp_output (bool): Enable multi-processing for calculations on output data
"""
super(KravatteSAE, self).__init__(key, workers, mp_input, mp_output)
self.initialize_history(nonce)
def initialize_history(self, nonce: bytes) -> None:
"""
Initialize session history by storing Keccak collector state and current internal key
Args:
nonce (bytes): user provided bytes used to initialize the session history
"""
self.collect_message(nonce)
self.history_collector = np.copy(self.collector)
self.history_key = np.copy(self.roll_key)
self.generate_digest(self.TAG_SIZE)
self.tag = self.digest.copy()
def wrap(self, plaintext: bytes, metadata: bytes) -> KravatteTagOutput:
"""
Encrypt an arbitrary plaintext message using the included metadata as part of an on-going
session. Creates authentication tag for validation during decryption.
Args:
plaintext (bytes): user plaintext of arbitrary length
metadata (bytes): associated data to ensure a unique encryption permutation
Returns:
(bytes, bytes): encrypted cipher text and authentication tag
"""
# Restore Kravatte State to When Latest History was Absorbed
self.collector = np.copy(self.history_collector)
self.roll_key = np.copy(self.history_key)
self.digest = bytearray(b'')
self.digest_active = False
# Generate/Apply Key Stream
self.generate_digest(len(plaintext) + self.OFFSET)
ciphertext = bytes([p_text ^ key_stream for p_text, key_stream in zip(plaintext, self.digest[self.OFFSET:])])
# Update History
if len(metadata) > 0 or len(plaintext) == 0:
self._append_to_history(metadata, 0)
if len(plaintext) > 0:
self._append_to_history(ciphertext, 1)
self.history_collector = np.copy(self.collector)
self.history_key = np.copy(self.roll_key)
# Generate Tag
self.generate_digest(self.TAG_SIZE)
return ciphertext, self.digest
def unwrap(self, ciphertext: bytes, metadata: bytes, validation_tag: bytes) -> KravatteValidatedOutput:
"""
Decrypt an arbitrary ciphertext message using the included metadata as part of an on-going
session. Creates authentication tag for validation during decryption.
Args:
ciphertext (bytes): user ciphertext of arbitrary length
metadata (bytes): associated data from encryption
validation_tag (bytes): collection of bytes that authenticates the decrypted plaintext as
being encrypted with the same secret key
Returns:
(bytes, bool): decrypted plaintext and boolean indicating in decryption was authenticated against secret key
"""
# Restore Kravatte State to When Latest History was Absorbed
self.collector = np.copy(self.history_collector)
self.roll_key = np.copy(self.history_key)
self.digest = bytearray(b'')
self.digest_active = False
# Generate/Apply Key Stream
self.generate_digest(len(ciphertext) + self.OFFSET)
plaintext = bytes([p_text ^ key_stream for p_text, key_stream in zip(ciphertext, self.digest[self.OFFSET:])])
# Update History
if len(metadata) > 0 or len(ciphertext) == 0:
self._append_to_history(metadata, 0)
if len(ciphertext) > 0:
self._append_to_history(ciphertext, 1)
self.history_collector = np.copy(self.collector)
self.history_key = np.copy(self.roll_key)
# Generate Tag
self.generate_digest(self.TAG_SIZE)
# Store Generated Tag and Validate
self.tag = self.digest.copy()
valid_tag = self.compare_bytes(self.tag, validation_tag)
return plaintext, valid_tag
def _append_to_history(self, message: bytes, pad_bit: int) -> None:
"""
Update history collector state with provided message.
Args:
message (bytes): arbitrary number of bytes to be padded into Keccak blocks and absorbed into the collector
pad_bit (int): Either 1 or 0 to append to the end of the regular message before padding
"""
if self.digest_active:
self.collector = np.copy(self.history_collector)
self.roll_key = np.copy(self.history_key)
self.digest = bytearray(b'')
self.digest_active = False
self.roll_key = self._kravatte_roll_compress(self.roll_key)
# Pad message with the provided bit followed by standard 10* padding up to a whole Keccak block
start_len = len(message)
padded_len = start_len + (self.KECCAK_BYTES - (start_len % self.KECCAK_BYTES))
padded_bytes = self._pad_10_append(message, padded_len, pad_bit, 1)
absorb_steps = len(padded_bytes) // self.KECCAK_BYTES
# Absorb into Collector
for msg_block in range(absorb_steps):
m = np.frombuffer(padded_bytes, dtype=np.uint64, count=25, offset=msg_block * self.KECCAK_BYTES).reshape([5, 5], order='F')
m_k = m ^ self.roll_key
self.roll_key = self._kravatte_roll_compress(self.roll_key)
self.collector = self.collector ^ self._keccak(m_k)
class KravatteSANE(Kravatte):
"""
An authenticated encryption mode designed to track a session consisting of a series of messages,
metadata, and an initialization nonce. A replacement for KravatteSAE
"""
TAG_SIZE = 16
OFFSET = TAG_SIZE
"""
An authenticated encryption mode designed to track a session consisting of a series of messages
and an initialization nonce. A replacement for KravatteSAE
"""
def __init__(self, nonce: bytes, key: bytes=b'', workers: int=None, mp_input: bool=True,
mp_output: bool=True):
"""
Initialize KravatteSANE with user key and nonce
Args:
nonce (bytes) - random unique value to initialize the session with
key (bytes) - secret key for encrypting session messages
workers (int): parallel processes to use in compression/expansion operations
mp_input (bool): Enable multi-processing for calculations on input data
mp_output (bool): Enable multi-processing for calculations on output data
"""
super(KravatteSANE, self).__init__(key, workers, mp_input, mp_output)
self.initialize_history(nonce, False)
def initialize_history(self, nonce: bytes, reinitialize: bool=True) -> None:
"""
Initialize session history. Session history is stored pre-compressed within the Keccak collector
and current matching internal key state. Kravatte-SANE session history starts with the user
provided nonce.
Args:
nonce (bytes): user provided bytes to initialize the session history
reinitialize (bool): perform a full reset of the Keccak state when manually restarting the history log
"""
if reinitialize:
self.reset_state()
self.collect_message(nonce)
self.history_collector = np.copy(self.collector)
self.history_key = np.copy(self.roll_key)
self.generate_digest(self.TAG_SIZE)
self.tag = self.digest.copy()
self.e_attr = 0
def wrap(self, plaintext: bytes, metadata: bytes) -> KravatteTagOutput:
"""
Encrypt an arbitrary plaintext message using the included metadata as part of an on-going
session. Creates authentication tag for validation during decryption.
Args:
plaintext (bytes): user plaintext of arbitrary length
metadata (bytes): associated data to ensure a unique encryption permutation
Returns:
(bytes, bytes): encrypted cipher text and authentication tag
"""
# Restore Kravatte State to When Latest History was Absorbed
self.collector = np.copy(self.history_collector)
self.roll_key = np.copy(self.history_key)
self.digest = bytearray(b'')
self.digest_active = False
# Generate/Apply Key Stream
self.generate_digest(len(plaintext) + self.OFFSET)
ciphertext = bytes([p_text ^ key_stream for p_text, key_stream in zip(plaintext, self.digest[self.OFFSET:])])
# Restore/Update History States if required
self._restore_history()
if len(metadata) > 0 or len(plaintext) == 0:
self._append_to_history(metadata, (self.e_attr << 1) | 0, 2)
if len(plaintext) > 0:
self._append_to_history(ciphertext, (self.e_attr << 1) | 1, 2)
# Toggle e attribute for the next message
self.e_attr ^= 1
# Generate Tag
self.generate_digest(self.TAG_SIZE)
return ciphertext, self.digest
def unwrap(self, ciphertext: bytes, metadata: bytes, validation_tag: bytes) -> KravatteValidatedOutput:
"""
Decrypt an arbitrary ciphertext message using the included metadata as part of an on-going
session. Validates decryption based on the provided authentication tag.
Args:
ciphertext (bytes): user ciphertext of arbitrary length
metadata (bytes): associated data from encryption
validation_tag (bytes): collection of bytes that authenticates the decrypted plaintext as
being encrypted with the same secret key
Returns:
(bytes, bool): decrypted plaintext and boolean indicating if the decryption was authenticated against the secret key
"""
# Restore Kravatte State to When Latest History was Absorbed
self.collector = np.copy(self.history_collector)
self.roll_key = np.copy(self.history_key)
self.digest = bytearray(b'')
self.digest_active = False
# Generate/Apply Key Stream
self.generate_digest(len(ciphertext) + self.OFFSET)
plaintext = bytes([p_text ^ key_stream for p_text, key_stream in zip(ciphertext, self.digest[self.OFFSET:])])
# Restore/Update History States if required
self._restore_history()
if len(metadata) > 0 or len(ciphertext) == 0:
self._append_to_history(metadata, (self.e_attr << 1) | 0, 2)
if len(ciphertext) > 0:
self._append_to_history(ciphertext, (self.e_attr << 1) | 1, 2)
# Toggle e attribute for the next message
self.e_attr ^= 1
# Generate Tag
self.generate_digest(self.TAG_SIZE)
# Store Generated Tag and Validate
self.tag = self.digest.copy()
valid_tag = self.compare_bytes(self.tag, validation_tag)
return plaintext, valid_tag
def _append_to_history(self, message: bytes, pad_bits: int, pad_size: int) -> None:
"""
Update history collector state with provided message.
Args:
message (bytes): arbitrary number of bytes to be padded into Keccak blocks and absorbed into the collector
pad_bits (int): Up to 6 additional bits added to the end of the regular message before padding
pad_size (int): Number of bits to append
"""
self.collect_message(message, pad_bits, pad_size)
self.history_collector = np.copy(self.collector)
self.history_key = np.copy(self.roll_key)
def _restore_history(self) -> None:
"""
Restore the internal kravatte state to the previously saved history state
Args:
None
"""
self.collector = np.copy(self.history_collector)
self.roll_key = np.copy(self.history_key)
self.digest = bytearray(b'')
self.digest_active = False
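# Illustrative usage sketch: a sender/receiver pair of KravatteSANE sessions initialized
# with the same key and nonce stays synchronized, so the receiver can validate each
# wrapped message. The key, nonce, message, and metadata values below are hypothetical.
def _example_sane_session():
    key, nonce = b'\x01' * 32, b'session-nonce'
    sender = KravatteSANE(nonce, key)
    receiver = KravatteSANE(nonce, key)
    ciphertext, tag = sender.wrap(b'first message', b'header-1')
    plaintext, valid = receiver.unwrap(ciphertext, b'header-1', tag)
    return plaintext, valid  # (b'first message', True)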
class KravatteSANSE(Kravatte):
"""
A nonce-less authenticated encryption mode designed to track a session consisting of a series of
messages and metadata. A replacement for Kravatte-SIV
"""
TAG_SIZE = 32
def __init__(self, key: bytes=b'', workers: int=None, mp_input: bool=True, mp_output: bool=True):
"""
Initialize KravatteSANSE with user key
Args:
key (bytes) - secret key for encrypting/decrypting session messages
workers (int): parallel processes to use in compression/expansion operations
mp_input (bool): Enable multi-processing for calculations on input data
mp_output (bool): Enable multi-processing for calculations on output data
"""
super(KravatteSANSE, self).__init__(key, workers, mp_input, mp_output)
self.initialize_history(False)
def initialize_history(self, reinitialize: bool=True) -> None:
"""
Initialize session history. Session history is stored pre-compressed within the Keccak collector
and current matching internal key state. Kravatte-SANSE session history starts empty.
Args:
reinitialize (bool): perform a full reset of the Keccak state when manually restarting the history log
"""
if reinitialize:
self.reset_state()
self.history_collector = np.copy(self.collector)
self.history_key = np.copy(self.roll_key)
self.history_collector_state = np.copy(self.new_collector)
self.e_attr = 0
def wrap(self, plaintext: bytes, metadata: bytes) -> KravatteTagOutput:
"""
Encrypt an arbitrary plaintext message using the included metadata as part of an on-going
session. Creates authentication tag for validation during decryption.
Args:
plaintext (bytes): user plaintext of arbitrary length
metadata (bytes): associated data to ensure a unique encryption permutation
Returns:
(bytes, bytes): encrypted cipher text and authentication tag
"""
# Restore Kravatte State to When Latest History was Absorbed
self._restore_history()
# Update History
if len(metadata) > 0 or len(plaintext) == 0:
self._append_to_history(metadata, (self.e_attr << 1) | 0, 2)
if len(plaintext) > 0:
# Generate Tag
self.collect_message(plaintext, (self.e_attr << 2) | 0b10, 3)
self.generate_digest(self.TAG_SIZE)
tag = self.digest
# Reset History State and Generate/Apply Key Stream
self._restore_history()
self.collect_message(tag, ((self.e_attr << 2) | 0b11), 3)
self.generate_digest(len(plaintext))
ciphertext = bytes([p_text ^ key_stream for p_text, key_stream in zip(plaintext, self.digest)])
# Reset History State and Update it with Plaintext and Padding
self._restore_history()
self._append_to_history(plaintext, (self.e_attr << 2) | 0b10, 3)
else:
ciphertext = b''
self.generate_digest(self.TAG_SIZE)
tag = self.digest
self.e_attr ^= 1
return ciphertext, tag
def unwrap(self, ciphertext: bytes, metadata: bytes, validation_tag: bytes) -> KravatteValidatedOutput:
"""
Decrypt an arbitrary ciphertext message using the included metadata as part of an on-going
session. Validates decryption based on the provided authentication tag.
Args:
ciphertext (bytes): user ciphertext of arbitrary length
metadata (bytes): associated data from encryption
validation_tag (bytes): collection of bytes that authenticates the decrypted plaintext as
being encrypted with the same secret key
Returns:
(bytes, bool): decrypted plaintext and boolean indicating if the decryption was authenticated against the secret key
"""
# Restore Kravatte State to When Latest History was Absorbed
self._restore_history()
if len(metadata) > 0 or len(ciphertext) == 0:
self._append_to_history(metadata, (self.e_attr << 1) | 0, 2)
if len(ciphertext) > 0:
self.collect_message(validation_tag, ((self.e_attr << 2) | 0b11), 3)
self.generate_digest(len(ciphertext))
plaintext = bytes([p_text ^ key_stream for p_text, key_stream in zip(ciphertext, self.digest)])
# Update History
self._restore_history()
self._append_to_history(plaintext, (self.e_attr << 2) | 0b10, 3)
else:
plaintext = b''
# Generate Tag
self.generate_digest(self.TAG_SIZE)
self.e_attr ^= 1
# Store Generated Tag and Validate
self.tag = self.digest.copy()
valid_tag = self.compare_bytes(self.tag, validation_tag)
return plaintext, valid_tag
def _append_to_history(self, message: bytes, pad_bits: int, pad_size: int) -> None:
"""
Update history collector state with provided message. Save the new history state.
Args:
message (bytes): arbitrary number of bytes to be padded into Keccak blocks and absorbed into the collector
pad_bits (int): Up to 6 additional bits added to the end of the regular message before padding
pad_size (int): Number of bits to append
"""
self.collect_message(message, pad_bits, pad_size)
self.history_collector = np.copy(self.collector)
self.history_key = np.copy(self.roll_key)
self.history_collector_state = np.copy(self.new_collector)
def _restore_history(self) -> None:
"""
Restore the internal kravatte state to the previously saved history state
Args:
None
"""
self.collector = np.copy(self.history_collector)
self.roll_key = np.copy(self.history_key)
self.new_collector = np.copy(self.history_collector_state)
self.digest = bytearray(b'')
self.digest_active = False
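# Illustrative usage sketch: the nonce-less KravatteSANSE mode only requires both ends to
# share the key and process messages in the same order. The key, message, and metadata
# values below are hypothetical.
def _example_sanse_session():
    key = b'\x02' * 32
    sender, receiver = KravatteSANSE(key), KravatteSANSE(key)
    ciphertext, tag = sender.wrap(b'attack at dawn', b'metadata')
    plaintext, valid = receiver.unwrap(ciphertext, b'metadata', tag)
    return plaintext, valid  # (b'attack at dawn', True)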
class KravatteWBC(Kravatte):
""" Configurable Wide Block Cipher encryption mode with customization tweak """
SPLIT_THRESHOLD = 398
def __init__(self, block_cipher_size: int, tweak: bytes=b'', key: bytes=b'', workers: int=None,
mp_input: bool=True, mp_output: bool=True):
"""
Initialize KravatteWBC object
Inputs:
block_cipher_size (int) - size of block cipher in bytes
tweak (bytes) - arbitrary value to customize cipher output
key (bytes) - secret key for encrypting message blocks
workers (int): parallel processes to use in compression/expansion operations
mp_input (bool): Enable multi-processing for calculations on input data
mp_output (bool): Enable multi-processing for calculations on output data
"""
super(KravatteWBC, self).__init__(key, workers, mp_input, mp_output)
self.split_bytes(block_cipher_size)
self.tweak = tweak
def split_bytes(self, message_size_bytes: int) -> None:
"""
Calculates the size (in bytes) of the "left" and "right" components of the block
encryption/decryption process. Based on the algorithm given in the Farfalle spec.
Inputs:
message_size_bytes (int): user defined block size for this instance of KravatteWBC
"""
if message_size_bytes <= self.SPLIT_THRESHOLD:
nL = ceil(message_size_bytes / 2)
else:
q = floor(((message_size_bytes + 1) / self.KECCAK_BYTES)) + 1
x = floor(log2(q - 1))
nL = ((q - (2**x)) * self.KECCAK_BYTES) - 1
self.size_L = nL
self.size_R = message_size_bytes - nL
def encrypt(self, message: bytes) -> bytes:
"""
Encrypt a user message using KravatteWBC mode
Inputs:
message (bytes): plaintext message to encrypt. Length should be <= the block cipher size
defined in the KravatteWBC object
Returns:
bytes: encrypted block same length as message
"""
L = message[0:self.size_L]
R = message[self.size_L:]
# R0 ← R0 + HK(L||0), with R0 the first min(b, |R|) bits of R
self.collect_message(L, append_bits=0b0, append_bit_count=1)
self.generate_digest(min(self.KECCAK_BYTES, self.size_R), short_kravatte=True)
extended_digest = self.digest + ((self.size_R - len(self.digest)) * b'\x00')
R = bytes([p_text ^ key_stream for p_text, key_stream in zip(R, extended_digest)])
# L ← L + GK (R||1 ◦ W)
self.collect_message(self.tweak)
self.collect_message(R, append_bits=0b1, append_bit_count=1)
self.generate_digest(self.size_L)
L = bytes([p_text ^ key_stream for p_text, key_stream in zip(L, self.digest)])
# R ← R + GK (L||0 ◦ W)
self.collect_message(self.tweak)
self.collect_message(L, append_bits=0b0, append_bit_count=1)
self.generate_digest(self.size_R)
R = bytes([p_text ^ key_stream for p_text, key_stream in zip(R, self.digest)])
# L0 ← L0 + HK(R||1), with L0 the first min(b, |L|) bits of L
self.collect_message(R, append_bits=0b1, append_bit_count=1)
self.generate_digest(min(self.KECCAK_BYTES, self.size_L), short_kravatte=True)
extended_digest = self.digest + ((self.size_L - len(self.digest)) * b'\x00')
L = bytes([p_text ^ key_stream for p_text, key_stream in zip(L, extended_digest)])
# C ← the concatenation of L and R
return L + R
def decrypt(self, ciphertext: bytes) -> bytes:
"""
Decrypt a user message using KravatteWBC mode
Args:
ciphertext (bytes): ciphertext message to decrypt.
Returns:
bytes: decrypted block same length as ciphertext
"""
L = ciphertext[0:self.size_L]
R = ciphertext[self.size_L:]
# L0 ← L0 + HK(R||1), with L0 the first min(b, |L|) bits of L
self.collect_message(R, append_bits=0b1, append_bit_count=1)
self.generate_digest(min(self.KECCAK_BYTES, self.size_L), short_kravatte=True)
extended_digest = self.digest + ((self.size_L - len(self.digest)) * b'\x00')
L = bytes([c_text ^ key_stream for c_text, key_stream in zip(L, extended_digest)])
# R ← R + GK (L||0 ◦ W)
self.collect_message(self.tweak)
self.collect_message(L, append_bits=0b0, append_bit_count=1)
self.generate_digest(self.size_R)
R = bytes([c_text ^ key_stream for c_text, key_stream in zip(R, self.digest)])
# L ← L + GK (R||1 ◦ W)
self.collect_message(self.tweak)
self.collect_message(R, append_bits=0b1, append_bit_count=1)
self.generate_digest(self.size_L)
L = bytes([c_text ^ key_stream for c_text, key_stream in zip(L, self.digest)])
# R0 ← R0 + HK(L||0), with R0 the first min(b, |R|) bits of R
self.collect_message(L, append_bits=0b0, append_bit_count=1)
self.generate_digest(min(self.KECCAK_BYTES, self.size_R), short_kravatte=True)
extended_digest = self.digest + ((self.size_R - len(self.digest)) * b'\x00')
R = bytes([c_text ^ key_stream for c_text, key_stream in zip(R, extended_digest)])
# P ← the concatenation of L and R
return L + R
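# Illustrative usage sketch: a KravatteWBC instance is configured for one fixed block
# size and encrypts/decrypts blocks of exactly that many bytes. The key, tweak, and
# message values below are hypothetical.
def _example_wbc_round_trip():
    wbc = KravatteWBC(64, tweak=b'tweak', key=b'\x03' * 32)
    message = bytes(range(64))
    ciphertext = wbc.encrypt(message)
    return wbc.decrypt(ciphertext) == message  # True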
class KravatteWBC_AE(KravatteWBC):
""" Authentication with associated metadata version Kravatte Wide Block Cipher encryption mode """
WBC_AE_TAG_LEN = 16
def __init__(self, block_cipher_size: int, key: bytes=b'', workers: int=None,
mp_input: bool=True, mp_output: bool=True):
"""
Initialize KravatteWBC_AE object
Args:
block_cipher_size (int) - size of block cipher in bytes
key (bytes) - secret key for encrypting message blocks
workers (int): parallel processes to use in compression/expansion operations
mp_input (bool): Enable multi-processing for calculations on input data
mp_output (bool): Enable multi-processing for calculations on output data
"""
super(KravatteWBC_AE, self).__init__(block_cipher_size + self.WBC_AE_TAG_LEN, b'', key=key,
workers=workers, mp_input=mp_input,
mp_output=mp_output)
def wrap(self, message: bytes, metadata: bytes) -> bytes:
"""
Encrypt a user message and generate included authenticated data. Requires metadata input
in lieu of customization tweak.
Args:
message (bytes): User message same length as configured object block size
metadata (bytes): associated metadata to ensure unique output
Returns:
bytes: authenticated encrypted block
"""
self.tweak = metadata # metadata treated as tweak
padded_message = message + (self.WBC_AE_TAG_LEN * b'\x00')
return self.encrypt(padded_message)
def unwrap(self, ciphertext: bytes, metadata: bytes) -> KravatteValidatedOutput:
"""
Decrypt a ciphertext block and validate included authenticated data. Requires metadata input
in lieu of customization tweak.
Args:
ciphertext (bytes): ciphertext same length as configured object block size
metadata (bytes): associated metadata to ensure unique output
Returns:
(bytes, bool): plaintext bytes and decryption valid flag
"""
L = ciphertext[0:self.size_L]
R = ciphertext[self.size_L:]
self.tweak = metadata
# L0 ← L0 + HK(R||1), with L0 the first min(b, |L|) bits of L
self.collect_message(R, append_bits=0b1, append_bit_count=1)
self.generate_digest(min(self.KECCAK_BYTES, self.size_L), short_kravatte=True)
extended_digest = self.digest + ((self.size_L - len(self.digest)) * b'\x00')
L = bytes([c_text ^ key_stream for c_text, key_stream in zip(L, extended_digest)])
# R ← R + GK (L||0 ◦ A)
self.collect_message(self.tweak)
self.collect_message(L, append_bits=0b0, append_bit_count=1)
self.generate_digest(self.size_R)
R = bytes([c_text ^ key_stream for c_text, key_stream in zip(R, self.digest)])
# |R| ≥ b+t
if self.size_R >= self.KECCAK_BYTES + self.WBC_AE_TAG_LEN:
# if the last t bytes of R ̸= 0t then return error!
valid_plaintext = True if R[-self.WBC_AE_TAG_LEN:] == (self.WBC_AE_TAG_LEN * b'\x00') else False
# L ← L + GK (R||1 ◦ A)
self.collect_message(self.tweak)
self.collect_message(R, append_bits=0b1, append_bit_count=1)
self.generate_digest(self.size_L)
L = bytes([c_text ^ key_stream for c_text, key_stream in zip(L, self.digest)])
# R0 ← R0 + HK(L||0), with R0 the first b bytes of R
self.collect_message(L, append_bits=0b0, append_bit_count=1)
self.generate_digest(self.KECCAK_BYTES, short_kravatte=True)
extended_digest = self.digest + ((self.size_R - len(self.digest)) * b'\x00')
R = bytes([c_text ^ key_stream for c_text, key_stream in zip(R, extended_digest)])
else:
# L ← L + GK (R||1 ◦ A)
self.collect_message(self.tweak)
self.collect_message(R, append_bits=0b1, append_bit_count=1)
self.generate_digest(self.size_L)
L = bytes([c_text ^ key_stream for c_text, key_stream in zip(L, self.digest)])
# R0 ← R0 + HK(L||0), with R0 the first min(b, |R|) bytes of R
self.collect_message(L, append_bits=0b0, append_bit_count=1)
self.generate_digest(min(self.KECCAK_BYTES, self.size_R), short_kravatte=True)
extended_digest = self.digest + ((self.size_R - len(self.digest)) * b'\x00')
R = bytes([c_text ^ key_stream for c_text, key_stream in zip(R, extended_digest)])
# if the last t bytes of L||R ̸= 0t then return error!
valid_plaintext = True if (L + R)[-self.WBC_AE_TAG_LEN:] == (self.WBC_AE_TAG_LEN * b'\x00') else False
# P′ ← L||R
return (L + R)[:-self.WBC_AE_TAG_LEN], valid_plaintext
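# Illustrative usage sketch: KravatteWBC_AE reserves WBC_AE_TAG_LEN extra bytes for the
# authentication field, so the constructor argument is the plaintext block size and
# unwrap reports whether that field verified. The key, message, and metadata values
# below are hypothetical.
def _example_wbc_ae_round_trip():
    wbc_ae = KravatteWBC_AE(32, key=b'\x04' * 32)
    ciphertext = wbc_ae.wrap(b'B' * 32, b'associated data')
    plaintext, valid = wbc_ae.unwrap(ciphertext, b'associated data')
    return plaintext, valid  # (b'B' * 32, True)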
class KravatteOracle(Kravatte):
"""Pseudo-random byte stream generator. Accepts an authentication key and arbitrary sized seed
input. Once initialized, the random method can return an arbitrary amount of random output bytes
for each call. Generator collector state can be reinitialized at anytime with the seed_generator
method
"""
def __init__(self, seed: bytes=b'', key: bytes=b'', workers: int=None, mp_input: bool=True,
mp_output: bool=True):
"""
Initialize KravatteOracle with user key and seed.
Inputs:
seed (bytes) - random unique value to initialize the oracle object with
key (bytes) - secret key for authenticating generator
workers (int): parallel processes to use in compression/expansion operations
mp_input (bool): Enable multi-processing for calculations on input data
mp_output (bool): Enable multi-processing for calculations on output data
"""
super(KravatteOracle, self).__init__(key, workers, mp_input, mp_output)
self.seed_generator(seed)
def seed_generator(self, seed: bytes):
"""
Re-seed Kravatte collector state with new seed data.
Input:
seed (bytes): Collection of seed bytes that are absorbed as a single message
"""
self.collect_message(seed)
def random(self, output_size: int) -> bytearray:
"""
Generates a stream of pseudo-random bytes from the current state of the Kravatte collector
state
Input:
output_size (int): Number of bytes to return
Returns:
bytearray: Pseudo-random Kravatte squeezed collector output
"""
self.generate_digest(output_size)
return self.digest
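# Illustrative usage sketch: the oracle is deterministic for a given key and seed, so two
# identically seeded instances emit the same byte stream. The key and seed values below
# are hypothetical.
def _example_oracle_stream():
    stream_a = KravatteOracle(seed=b'seed', key=b'\x05' * 32).random(64)
    stream_b = KravatteOracle(seed=b'seed', key=b'\x05' * 32).random(64)
    return bytes(stream_a) == bytes(stream_b)  # True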
if __name__ == "__main__":
from time import perf_counter
import hashlib
from binascii import hexlify
import os
my_key = b'\xFF' * 32
my_message = bytes([x % 256 for x in range(4 * 1024 * 1024)])
print("Normal Message MAC Generation")
start = perf_counter()
my_kra = mac(my_key, my_message, 1024 * 1024 * 4)
stop = perf_counter()
print("Process Time:", stop - start)
a1 = hashlib.md5()
a1.update(my_kra)
print(hexlify(a1.digest()))
print("%d Process/Core Message MAC Generation" % os.cpu_count())
start = perf_counter()
my_kra = mac(my_key, my_message, 1024 * 1024 * 4, workers=os.cpu_count())
stop = perf_counter()
print("Process Time:", stop - start)
a2 = hashlib.md5()
a2.update(my_kra)
print(hexlify(a2.digest()))
assert a1.digest() == a2.digest()
|
[
"numpy.copy",
"math.ceil",
"hashlib.md5",
"numpy.bitwise_xor.reduce",
"math.floor",
"math.log2",
"time.perf_counter",
"numpy.array",
"numpy.zeros",
"numpy.uint64",
"multiprocessing.Pool",
"os.cpu_count",
"numpy.frombuffer",
"ctypes.memset"
] |
[((870, 1005), 'numpy.array', 'np.array', (['[32778, 9223372039002259466, 9223372039002292353, 9223372036854808704, \n 2147483649, 9223372039002292232]'], {'dtype': 'np.uint64'}), '([32778, 9223372039002259466, 9223372039002292353, \n 9223372036854808704, 2147483649, 9223372039002292232], dtype=np.uint64)\n', (878, 1005), True, 'import numpy as np\n'), ((1177, 1312), 'numpy.array', 'np.array', (['[[0, 36, 3, 41, 18], [1, 44, 10, 45, 2], [62, 6, 43, 15, 61], [28, 55, 25, \n 21, 56], [27, 20, 39, 8, 14]]'], {'dtype': 'np.uint64'}), '([[0, 36, 3, 41, 18], [1, 44, 10, 45, 2], [62, 6, 43, 15, 61], [28,\n 55, 25, 21, 56], [27, 20, 39, 8, 14]], dtype=np.uint64)\n', (1185, 1312), True, 'import numpy as np\n'), ((1573, 1673), 'numpy.array', 'np.array', (['[[0, 3, 1, 4, 2], [1, 4, 2, 0, 3], [2, 0, 3, 1, 4], [3, 1, 4, 2, 0], [4, 2,\n 0, 3, 1]]'], {}), '([[0, 3, 1, 4, 2], [1, 4, 2, 0, 3], [2, 0, 3, 1, 4], [3, 1, 4, 2, 0\n ], [4, 2, 0, 3, 1]])\n', (1581, 1673), True, 'import numpy as np\n'), ((1861, 1961), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2], [3, 3, 3, 3, 3], [4, 4,\n 4, 4, 4]]'], {}), '([[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2], [3, 3, 3, 3, 3\n ], [4, 4, 4, 4, 4]])\n', (1869, 1961), True, 'import numpy as np\n'), ((2167, 2267), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 1], [1, 1, 1, 1, 2], [2, 2, 2, 2, 3], [3, 3, 3, 3, 4], [4, 4,\n 4, 4, 0]]'], {}), '([[0, 0, 0, 0, 1], [1, 1, 1, 1, 2], [2, 2, 2, 2, 3], [3, 3, 3, 3, 4\n ], [4, 4, 4, 4, 0]])\n', (2175, 2267), True, 'import numpy as np\n'), ((2491, 2591), 'numpy.array', 'np.array', (['[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1,\n 2, 3, 4]]'], {}), '([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4\n ], [0, 1, 2, 3, 4]])\n', (2499, 2591), True, 'import numpy as np\n'), ((2825, 2925), 'numpy.array', 'np.array', (['[[0, 0, 0, 1, 1], [1, 1, 1, 2, 2], [2, 2, 2, 3, 3], [3, 3, 3, 4, 4], [4, 4,\n 4, 0, 0]]'], {}), '([[0, 0, 0, 1, 1], [1, 1, 1, 2, 2], [2, 2, 2, 3, 3], [3, 3, 3, 4, 4\n ], [4, 4, 4, 0, 0]])\n', (2833, 2925), True, 'import numpy as np\n'), ((3137, 3237), 'numpy.array', 'np.array', (['[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1,\n 2, 4, 4]]'], {}), '([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4\n ], [0, 1, 2, 4, 4]])\n', (3145, 3237), True, 'import numpy as np\n'), ((54652, 54666), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (54664, 54666), False, 'from time import perf_counter\n'), ((54732, 54746), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (54744, 54746), False, 'from time import perf_counter\n'), ((54797, 54810), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (54808, 54810), False, 'import hashlib\n'), ((54947, 54961), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (54959, 54961), False, 'from time import perf_counter\n'), ((55051, 55065), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (55063, 55065), False, 'from time import perf_counter\n'), ((55116, 55129), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (55127, 55129), False, 'import hashlib\n'), ((5558, 5579), 'numpy.copy', 'np.copy', (['self.kra_key'], {}), '(self.kra_key)\n', (5565, 5579), True, 'import numpy as np\n'), ((5605, 5638), 'numpy.zeros', 'np.zeros', (['[5, 5]'], {'dtype': 'np.uint64'}), '([5, 5], dtype=np.uint64)\n', (5613, 5638), True, 'import numpy as np\n'), ((12299, 12319), 'numpy.copy', 'np.copy', (['input_array'], {}), '(input_array)\n', (12306, 12319), True, 'import numpy as 
np\n'), ((13771, 13791), 'numpy.copy', 'np.copy', (['input_array'], {}), '(input_array)\n', (13778, 13791), True, 'import numpy as np\n'), ((15407, 15455), 'ctypes.memset', 'memset', (['collector_location', '(0)', 'self.KECCAK_BYTES'], {}), '(collector_location, 0, self.KECCAK_BYTES)\n', (15413, 15455), False, 'from ctypes import memset\n'), ((15556, 15598), 'ctypes.memset', 'memset', (['key_location', '(0)', 'self.KECCAK_BYTES'], {}), '(key_location, 0, self.KECCAK_BYTES)\n', (15562, 15598), False, 'from ctypes import memset\n'), ((15703, 15745), 'ctypes.memset', 'memset', (['key_location', '(0)', 'self.KECCAK_BYTES'], {}), '(key_location, 0, self.KECCAK_BYTES)\n', (15709, 15745), False, 'from ctypes import memset\n'), ((25002, 25025), 'numpy.copy', 'np.copy', (['self.collector'], {}), '(self.collector)\n', (25009, 25025), True, 'import numpy as np\n'), ((25053, 25075), 'numpy.copy', 'np.copy', (['self.roll_key'], {}), '(self.roll_key)\n', (25060, 25075), True, 'import numpy as np\n'), ((25789, 25820), 'numpy.copy', 'np.copy', (['self.history_collector'], {}), '(self.history_collector)\n', (25796, 25820), True, 'import numpy as np\n'), ((25845, 25870), 'numpy.copy', 'np.copy', (['self.history_key'], {}), '(self.history_key)\n', (25852, 25870), True, 'import numpy as np\n'), ((26402, 26425), 'numpy.copy', 'np.copy', (['self.collector'], {}), '(self.collector)\n', (26409, 26425), True, 'import numpy as np\n'), ((26453, 26475), 'numpy.copy', 'np.copy', (['self.roll_key'], {}), '(self.roll_key)\n', (26460, 26475), True, 'import numpy as np\n'), ((27451, 27482), 'numpy.copy', 'np.copy', (['self.history_collector'], {}), '(self.history_collector)\n', (27458, 27482), True, 'import numpy as np\n'), ((27507, 27532), 'numpy.copy', 'np.copy', (['self.history_key'], {}), '(self.history_key)\n', (27514, 27532), True, 'import numpy as np\n'), ((28067, 28090), 'numpy.copy', 'np.copy', (['self.collector'], {}), '(self.collector)\n', (28074, 28090), True, 'import numpy as np\n'), ((28118, 28140), 'numpy.copy', 'np.copy', (['self.roll_key'], {}), '(self.roll_key)\n', (28125, 28140), True, 'import numpy as np\n'), ((31648, 31671), 'numpy.copy', 'np.copy', (['self.collector'], {}), '(self.collector)\n', (31655, 31671), True, 'import numpy as np\n'), ((31699, 31721), 'numpy.copy', 'np.copy', (['self.roll_key'], {}), '(self.roll_key)\n', (31706, 31721), True, 'import numpy as np\n'), ((32459, 32490), 'numpy.copy', 'np.copy', (['self.history_collector'], {}), '(self.history_collector)\n', (32466, 32490), True, 'import numpy as np\n'), ((32515, 32540), 'numpy.copy', 'np.copy', (['self.history_key'], {}), '(self.history_key)\n', (32522, 32540), True, 'import numpy as np\n'), ((34187, 34218), 'numpy.copy', 'np.copy', (['self.history_collector'], {}), '(self.history_collector)\n', (34194, 34218), True, 'import numpy as np\n'), ((34243, 34268), 'numpy.copy', 'np.copy', (['self.history_key'], {}), '(self.history_key)\n', (34250, 34268), True, 'import numpy as np\n'), ((35753, 35776), 'numpy.copy', 'np.copy', (['self.collector'], {}), '(self.collector)\n', (35760, 35776), True, 'import numpy as np\n'), ((35804, 35826), 'numpy.copy', 'np.copy', (['self.roll_key'], {}), '(self.roll_key)\n', (35811, 35826), True, 'import numpy as np\n'), ((36031, 36062), 'numpy.copy', 'np.copy', (['self.history_collector'], {}), '(self.history_collector)\n', (36038, 36062), True, 'import numpy as np\n'), ((36087, 36112), 'numpy.copy', 'np.copy', (['self.history_key'], {}), '(self.history_key)\n', (36094, 36112), True, 'import numpy as 
np\n'), ((37567, 37590), 'numpy.copy', 'np.copy', (['self.collector'], {}), '(self.collector)\n', (37574, 37590), True, 'import numpy as np\n'), ((37618, 37640), 'numpy.copy', 'np.copy', (['self.roll_key'], {}), '(self.roll_key)\n', (37625, 37640), True, 'import numpy as np\n'), ((37680, 37707), 'numpy.copy', 'np.copy', (['self.new_collector'], {}), '(self.new_collector)\n', (37687, 37707), True, 'import numpy as np\n'), ((41753, 41776), 'numpy.copy', 'np.copy', (['self.collector'], {}), '(self.collector)\n', (41760, 41776), True, 'import numpy as np\n'), ((41804, 41826), 'numpy.copy', 'np.copy', (['self.roll_key'], {}), '(self.roll_key)\n', (41811, 41826), True, 'import numpy as np\n'), ((41866, 41893), 'numpy.copy', 'np.copy', (['self.new_collector'], {}), '(self.new_collector)\n', (41873, 41893), True, 'import numpy as np\n'), ((42098, 42129), 'numpy.copy', 'np.copy', (['self.history_collector'], {}), '(self.history_collector)\n', (42105, 42129), True, 'import numpy as np\n'), ((42154, 42179), 'numpy.copy', 'np.copy', (['self.history_key'], {}), '(self.history_key)\n', (42161, 42179), True, 'import numpy as np\n'), ((42209, 42246), 'numpy.copy', 'np.copy', (['self.history_collector_state'], {}), '(self.history_collector_state)\n', (42216, 42246), True, 'import numpy as np\n'), ((8874, 8902), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'self.workers'}), '(processes=self.workers)\n', (8878, 8902), False, 'from multiprocessing import Pool\n'), ((11644, 11672), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'self.workers'}), '(processes=self.workers)\n', (11648, 11672), False, 'from multiprocessing import Pool\n'), ((28846, 28877), 'numpy.copy', 'np.copy', (['self.history_collector'], {}), '(self.history_collector)\n', (28853, 28877), True, 'import numpy as np\n'), ((28906, 28931), 'numpy.copy', 'np.copy', (['self.history_key'], {}), '(self.history_key)\n', (28913, 28931), True, 'import numpy as np\n'), ((43752, 43780), 'math.ceil', 'ceil', (['(message_size_bytes / 2)'], {}), '(message_size_bytes / 2)\n', (43756, 43780), False, 'from math import floor, ceil, log2\n'), ((54919, 54933), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (54931, 54933), False, 'import os\n'), ((55024, 55038), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (55036, 55038), False, 'import os\n'), ((4318, 4329), 'os.cpu_count', 'cpu_count', ([], {}), '()\n', (4327, 4329), False, 'from os import cpu_count\n'), ((4869, 4943), 'numpy.frombuffer', 'np.frombuffer', (['key_pad'], {'dtype': 'np.uint64', 'count': 'self.KECCAK_LANES', 'offset': '(0)'}), '(key_pad, dtype=np.uint64, count=self.KECCAK_LANES, offset=0)\n', (4882, 4943), True, 'import numpy as np\n'), ((12531, 12601), 'numpy.bitwise_xor.reduce', 'np.bitwise_xor.reduce', (['state[self.THETA_REORDER[0],]', '(1)'], {'keepdims': '(True)'}), '(state[self.THETA_REORDER[0],], 1, keepdims=True)\n', (12552, 12601), True, 'import numpy as np\n'), ((12605, 12681), 'numpy.bitwise_xor.reduce', 'np.bitwise_xor.reduce', (['array_shift[self.THETA_REORDER[1],]', '(1)'], {'keepdims': '(True)'}), '(array_shift[self.THETA_REORDER[1],], 1, keepdims=True)\n', (12626, 12681), True, 'import numpy as np\n'), ((14003, 14073), 'numpy.bitwise_xor.reduce', 'np.bitwise_xor.reduce', (['state[self.THETA_REORDER[0],]', '(1)'], {'keepdims': '(True)'}), '(state[self.THETA_REORDER[0],], 1, keepdims=True)\n', (14024, 14073), True, 'import numpy as np\n'), ((14077, 14153), 'numpy.bitwise_xor.reduce', 'np.bitwise_xor.reduce', (['array_shift[self.THETA_REORDER[1],]', '(1)'], {'keepdims': 
'(True)'}), '(array_shift[self.THETA_REORDER[1],], 1, keepdims=True)\n', (14098, 14153), True, 'import numpy as np\n'), ((16373, 16385), 'numpy.uint64', 'np.uint64', (['(3)'], {}), '(3)\n', (16382, 16385), True, 'import numpy as np\n'), ((43811, 43862), 'math.floor', 'floor', (['((message_size_bytes + 1) / self.KECCAK_BYTES)'], {}), '((message_size_bytes + 1) / self.KECCAK_BYTES)\n', (43816, 43862), False, 'from math import floor, ceil, log2\n'), ((43891, 43902), 'math.log2', 'log2', (['(q - 1)'], {}), '(q - 1)\n', (43895, 43902), False, 'from math import floor, ceil, log2\n'), ((7459, 7551), 'numpy.frombuffer', 'np.frombuffer', (['kra_msg'], {'dtype': 'np.uint64', 'count': '(25)', 'offset': '(msg_block * self.KECCAK_BYTES)'}), '(kra_msg, dtype=np.uint64, count=25, offset=msg_block * self.\n KECCAK_BYTES)\n', (7472, 7551), True, 'import numpy as np\n'), ((12824, 12855), 'numpy.uint64', 'np.uint64', (['(64 - self.RHO_SHIFTS)'], {}), '(64 - self.RHO_SHIFTS)\n', (12833, 12855), True, 'import numpy as np\n'), ((14296, 14327), 'numpy.uint64', 'np.uint64', (['(64 - self.RHO_SHIFTS)'], {}), '(64 - self.RHO_SHIFTS)\n', (14305, 14327), True, 'import numpy as np\n'), ((17107, 17119), 'numpy.uint64', 'np.uint64', (['(1)'], {}), '(1)\n', (17116, 17119), True, 'import numpy as np\n'), ((29484, 29580), 'numpy.frombuffer', 'np.frombuffer', (['padded_bytes'], {'dtype': 'np.uint64', 'count': '(25)', 'offset': '(msg_block * self.KECCAK_BYTES)'}), '(padded_bytes, dtype=np.uint64, count=25, offset=msg_block *\n self.KECCAK_BYTES)\n', (29497, 29580), True, 'import numpy as np\n'), ((16243, 16255), 'numpy.uint64', 'np.uint64', (['(7)'], {}), '(7)\n', (16252, 16255), True, 'import numpy as np\n'), ((16275, 16288), 'numpy.uint64', 'np.uint64', (['(57)'], {}), '(57)\n', (16284, 16288), True, 'import numpy as np\n'), ((16900, 16912), 'numpy.uint64', 'np.uint64', (['(7)'], {}), '(7)\n', (16909, 16912), True, 'import numpy as np\n'), ((16938, 16951), 'numpy.uint64', 'np.uint64', (['(57)'], {}), '(57)\n', (16947, 16951), True, 'import numpy as np\n'), ((17003, 17016), 'numpy.uint64', 'np.uint64', (['(18)'], {}), '(18)\n', (17012, 17016), True, 'import numpy as np\n'), ((17042, 17055), 'numpy.uint64', 'np.uint64', (['(46)'], {}), '(46)\n', (17051, 17055), True, 'import numpy as np\n'), ((6171, 6263), 'numpy.frombuffer', 'np.frombuffer', (['kra_msg'], {'dtype': 'np.uint64', 'count': '(25)', 'offset': '(msg_block * self.KECCAK_BYTES)'}), '(kra_msg, dtype=np.uint64, count=25, offset=msg_block * self.\n KECCAK_BYTES)\n', (6184, 6263), True, 'import numpy as np\n')]
|
import numpy as np
import gym
poleThetaSpace = np.linspace(-0.209, 0.209, 10)
poleThetaVelSpace = np.linspace(-4, 4, 10)
cartPosSpace = np.linspace(-2.4, 2.4, 10)
cartVelSpace = np.linspace(-4, 4, 10)
def get_state(observation):
cartX, cartXdot, cartTheta, cartThetaDot = observation
cartX = int(np.digitize(cartX, cartPosSpace))
cartXdot = int(np.digitize(cartXdot, cartVelSpace))
cartTheta = int(np.digitize(cartTheta, poleThetaSpace))
cartThetaDot = int(np.digitize(cartThetaDot, poleThetaVelSpace))
return (cartX, cartXdot, cartTheta, cartThetaDot)
def choose_action(q, obs, eps, n_actions=2):
state = get_state(obs)
if np.random.random() < eps:
action = np.random.choice([i for i in range(n_actions)])
else:
action_values = [q[(state, a)] for a in range(n_actions)]
action = np.argmax(action_values)
return action
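# Illustrative helper: the n-step return assembled inline in the training loop below is a
# discounted sum of up to n rewards plus an optional bootstrapped Q-value. This standalone
# sketch (hypothetical name and arguments) makes the formula explicit:
# G = sum_k gamma**k * R_{tau+1+k} + gamma**n * Q(s_{tau+n}, a_{tau+n})
def n_step_return(rewards, gamma, bootstrap=0.0):
    g = sum(gamma**k * r for k, r in enumerate(rewards))
    return g + gamma**len(rewards) * bootstrap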
if __name__ == '__main__':
env = gym.make('CartPole-v0')
alpha = 0.1
gamma = 0.9
epsilon = 1.0
states = []
for i in range(len(cartPosSpace)+1):
for j in range(len(cartVelSpace)+1):
for k in range(len(poleThetaSpace)+1):
for l in range(len(poleThetaVelSpace)+1):
states.append((i,j,k,l))
Q = {}
for s in states:
for a in range(2):
Q[(s, a)] = 0.0
n = 16
state_memory = np.zeros((n, 4))
action_memory = np.zeros(n)
reward_memory = np.zeros(n)
scores = []
n_episodes = 50000
for i in range(n_episodes):
done = False
score = 0
t = 0
T = np.inf
observation = env.reset()
action = choose_action(Q, observation, epsilon)
action_memory[t%n] = action
state_memory[t%n] = observation
while not done:
observation, reward, done, info = env.step(action)
score += reward
state_memory[(t+1)%n] = observation
reward_memory[(t+1)%n] = reward
if done:
T = t + 1
#print('episode ends at step', t)
action = choose_action(Q, observation, epsilon)
action_memory[(t+1)%n] = action
tau = t - n + 1
if tau >= 0:
G = [gamma**(j-tau-1)*reward_memory[j%n] \
for j in range(tau+1, min(tau+n, T)+1)]
G = np.sum(G)
if tau + n < T:
s = get_state(state_memory[(tau+n)%n])
a = int(action_memory[(tau+n)%n])
G += gamma**n * Q[(s,a)]
s = get_state(state_memory[tau%n])
a = action_memory[tau%n]
Q[(s,a)] += alpha*(G-Q[(s,a)])
#print('tau ', tau, '| Q %.2f' % \
# Q[(get_state(state_memory[tau%n]), action_memory[tau%n])])
t += 1
for tau in range(t-n+1, T):
G = [gamma**(j-tau-1)*reward_memory[j%n] \
for j in range(tau+1, min(tau+n, T)+1)]
G = np.sum(G)
if tau + n < T:
s = get_state(state_memory[(tau+n)%n])
a = int(action_memory[(tau+n)%n])
G += gamma**n * Q[(s,a)]
s = get_state(state_memory[tau%n])
a = action_memory[tau%n]
Q[(s,a)] += alpha*(G-Q[(s,a)])
#print('tau ', tau, '| Q %.2f' % \
# Q[(get_state(state_memory[tau%n]), action_memory[tau%n])])
scores.append(score)
avg_score = np.mean(scores[-1000:])
epsilon = epsilon - 2 / n_episodes if epsilon > 0 else 0
if i % 1000 == 0:
print('episode ', i, 'avg_score %.1f' % avg_score,
'epsilon %.2f' % epsilon)
|
[
"numpy.mean",
"numpy.digitize",
"numpy.random.random",
"numpy.argmax",
"numpy.sum",
"numpy.linspace",
"numpy.zeros",
"gym.make"
] |
[((48, 78), 'numpy.linspace', 'np.linspace', (['(-0.209)', '(0.209)', '(10)'], {}), '(-0.209, 0.209, 10)\n', (59, 78), True, 'import numpy as np\n'), ((99, 121), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(10)'], {}), '(-4, 4, 10)\n', (110, 121), True, 'import numpy as np\n'), ((137, 163), 'numpy.linspace', 'np.linspace', (['(-2.4)', '(2.4)', '(10)'], {}), '(-2.4, 2.4, 10)\n', (148, 163), True, 'import numpy as np\n'), ((179, 201), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(10)'], {}), '(-4, 4, 10)\n', (190, 201), True, 'import numpy as np\n'), ((925, 948), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (933, 948), False, 'import gym\n'), ((1375, 1391), 'numpy.zeros', 'np.zeros', (['(n, 4)'], {}), '((n, 4))\n', (1383, 1391), True, 'import numpy as np\n'), ((1412, 1423), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1420, 1423), True, 'import numpy as np\n'), ((1444, 1455), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1452, 1455), True, 'import numpy as np\n'), ((306, 338), 'numpy.digitize', 'np.digitize', (['cartX', 'cartPosSpace'], {}), '(cartX, cartPosSpace)\n', (317, 338), True, 'import numpy as np\n'), ((359, 394), 'numpy.digitize', 'np.digitize', (['cartXdot', 'cartVelSpace'], {}), '(cartXdot, cartVelSpace)\n', (370, 394), True, 'import numpy as np\n'), ((416, 454), 'numpy.digitize', 'np.digitize', (['cartTheta', 'poleThetaSpace'], {}), '(cartTheta, poleThetaSpace)\n', (427, 454), True, 'import numpy as np\n'), ((479, 523), 'numpy.digitize', 'np.digitize', (['cartThetaDot', 'poleThetaVelSpace'], {}), '(cartThetaDot, poleThetaVelSpace)\n', (490, 523), True, 'import numpy as np\n'), ((660, 678), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (676, 678), True, 'import numpy as np\n'), ((844, 868), 'numpy.argmax', 'np.argmax', (['action_values'], {}), '(action_values)\n', (853, 868), True, 'import numpy as np\n'), ((3507, 3530), 'numpy.mean', 'np.mean', (['scores[-1000:]'], {}), '(scores[-1000:])\n', (3514, 3530), True, 'import numpy as np\n'), ((3024, 3033), 'numpy.sum', 'np.sum', (['G'], {}), '(G)\n', (3030, 3033), True, 'import numpy as np\n'), ((2370, 2379), 'numpy.sum', 'np.sum', (['G'], {}), '(G)\n', (2376, 2379), True, 'import numpy as np\n')]
|
from __future__ import division
import copy
from functools import reduce
import numpy
import six
from mpilot import params
from mpilot.commands import Command
from mpilot.libraries.eems.exceptions import (
MismatchedWeights,
MixedArrayLengths,
DuplicateRawValues,
)
from mpilot.libraries.eems.mixins import SameArrayShapeMixin
from mpilot.utils import insure_fuzzy
class Copy(Command):
"""Copies the data from another field"""
display_name = "Copy"
inputs = {"InFieldName": params.ResultParameter(params.DataParameter())}
output = params.DataParameter()
def execute(self, **kwargs):
return numpy.copy(kwargs["InFieldName"].result)
class AMinusB(SameArrayShapeMixin, Command):
"""Performs A - B"""
display_name = "A Minus B"
inputs = {
"A": params.ResultParameter(params.DataParameter(), is_fuzzy=False),
"B": params.ResultParameter(params.DataParameter(), is_fuzzy=False),
}
output = params.DataParameter()
def execute(self, **kwargs):
a = kwargs["A"].result
b = kwargs["B"].result
self.validate_array_shapes([a, b], lineno=self.lineno)
return a - b
class Sum(SameArrayShapeMixin, Command):
"""Sums input variables"""
display_name = "Sum"
inputs = {
"InFieldNames": params.ListParameter(
params.ResultParameter(params.DataParameter(), is_fuzzy=False)
)
}
output = params.DataParameter()
def execute(self, **kwargs):
arrays = [c.result for c in kwargs["InFieldNames"]]
self.validate_array_shapes(arrays, lineno=self.lineno)
result = arrays[0].copy()
for arr in arrays[1:]:
result += arr
return result
class WeightedSum(SameArrayShapeMixin, Command):
"""Takes the weighted sum of input variables"""
display_name = "Weighted Sum"
inputs = {
"InFieldNames": params.ListParameter(
params.ResultParameter(params.DataParameter(), is_fuzzy=False)
),
"Weights": params.ListParameter(params.NumberParameter()),
}
output = params.DataParameter()
def execute(self, **kwargs):
weights = kwargs["Weights"]
arrays = [c.result for c in kwargs["InFieldNames"]]
if len(weights) != len(arrays):
raise MismatchedWeights(len(weights), len(arrays))
self.validate_array_shapes(arrays, lineno=self.lineno)
result = arrays[0] * weights[0]
for weight, arr in zip(weights[1:], arrays[1:]):
result += arr * weight
return result
class Multiply(SameArrayShapeMixin, Command):
"""Multiplies input variables"""
display_name = "Multiply"
inputs = {
"InFieldNames": params.ListParameter(
params.ResultParameter(params.DataParameter(), is_fuzzy=False)
)
}
output = params.DataParameter()
def execute(self, **kwargs):
arrays = [c.result for c in kwargs["InFieldNames"]]
self.validate_array_shapes(arrays, lineno=self.lineno)
result = numpy.copy(arrays[0])
for arr in arrays[1:]:
result *= arr
return result
class ADividedByB(SameArrayShapeMixin, Command):
"""Performs A / B"""
display_name = "A Divided By B"
inputs = {
"A": params.ResultParameter(params.DataParameter(), is_fuzzy=False),
"B": params.ResultParameter(params.DataParameter(), is_fuzzy=False),
}
output = params.DataParameter()
def execute(self, **kwargs):
a = kwargs["A"].result
b = kwargs["B"].result
self.validate_array_shapes([a, b], lineno=self.lineno)
return a / b
class Minimum(SameArrayShapeMixin, Command):
"""Takes the minimum input variables"""
display_name = "Minimum"
inputs = {
"InFieldNames": params.ListParameter(
params.ResultParameter(params.DataParameter(), is_fuzzy=False)
)
}
output = params.DataParameter()
def execute(self, **kwargs):
arrays = [c.result for c in kwargs["InFieldNames"]]
self.validate_array_shapes(arrays, lineno=self.lineno)
return reduce(lambda x, y: numpy.ma.minimum(x, y), arrays)
class Maximum(SameArrayShapeMixin, Command):
"""Takes the maximum input variables"""
display_name = "Maximum"
inputs = {
"InFieldNames": params.ListParameter(
params.ResultParameter(params.DataParameter(), is_fuzzy=False)
)
}
output = params.DataParameter()
def execute(self, **kwargs):
arrays = [c.result for c in kwargs["InFieldNames"]]
self.validate_array_shapes(arrays, lineno=self.lineno)
return reduce(lambda x, y: numpy.ma.maximum(x, y), arrays)
class Mean(SameArrayShapeMixin, Command):
"""Mean of input variables"""
display_name = "Mean"
inputs = {
"InFieldNames": params.ListParameter(
params.ResultParameter(params.DataParameter(), is_fuzzy=False)
)
}
output = params.DataParameter()
def execute(self, **kwargs):
arrays = [c.result for c in kwargs["InFieldNames"]]
self.validate_array_shapes(arrays, lineno=self.lineno)
return sum(arrays) / len(arrays)
class WeightedMean(SameArrayShapeMixin, Command):
"""Takes the weighted mean of input variables"""
display_name = "Weighted Mean"
inputs = {
"InFieldNames": params.ListParameter(
params.ResultParameter(params.DataParameter(), is_fuzzy=False)
),
"Weights": params.ListParameter(params.NumberParameter()),
}
output = params.DataParameter()
def execute(self, **kwargs):
weights = kwargs["Weights"]
arrays = [c.result for c in kwargs["InFieldNames"]]
if len(weights) != len(arrays):
raise MismatchedWeights(len(weights), len(arrays))
self.validate_array_shapes(arrays, lineno=self.lineno)
result = arrays[0] * weights[0]
for weight, arr in zip(weights[1:], arrays[1:]):
result += arr * weight
return result / sum(weights)
class Normalize(Command):
"""Normalizes the data from another field to range (default 0:1)"""
display_name = "Normalize"
inputs = {
"InFieldName": params.ResultParameter(params.DataParameter(), is_fuzzy=False),
"StartVal": params.NumberParameter(required=False),
"EndVal": params.NumberParameter(required=False),
}
output = params.DataParameter()
def execute(self, **kwargs):
arr = kwargs["InFieldName"].result
start = kwargs.get("StartVal", 0)
end = kwargs.get("EndVal", 1)
arr_min = arr.min()
arr_max = arr.max()
return (arr - arr_min) * (start - end) / (arr_min - arr_max) + start
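# Illustrative check: the linear rescale above maps arr.min() to StartVal and arr.max()
# to EndVal. This sketch verifies the endpoints on a plain (unmasked) array with
# hypothetical sample values.
def _example_normalize_endpoints():
    arr = numpy.array([2.0, 5.0, 11.0])
    start, end = 0.0, 1.0
    rescaled = (arr - arr.min()) * (start - end) / (arr.min() - arr.max()) + start
    return rescaled  # array([0., 0.333..., 1.])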
class NormalizeZScore(Command):
"""Converts input values into normalized values using linear interpolation based on Z Score"""
display_name = "Normalize by Z Score"
inputs = {
"InFieldName": params.ResultParameter(params.DataParameter(), is_fuzzy=False),
"TrueThresholdZScore": params.NumberParameter(required=False),
"FalseThresholdZScore": params.NumberParameter(required=False),
"StartVal": params.NumberParameter(required=False),
"EndVal": params.NumberParameter(required=False),
}
output = params.DataParameter()
def execute(self, **kwargs):
arr = kwargs["InFieldName"].result
true_threshold = float(kwargs.get("TrueThresholdZScore", 0))
false_threshold = float(kwargs.get("FalseThresholdZScore", 1))
start = kwargs.get("StartVal", 0)
end = kwargs.get("EndVal", 1)
raw_mean = numpy.ma.mean(arr)
raw_std = numpy.ma.std(arr)
x1 = raw_mean + raw_std * true_threshold
x2 = raw_mean + raw_std * false_threshold
y1 = end
y2 = start
result = arr.copy()
result -= x1
result *= y2 - y1
result /= x2 - x1
result += y1
# despite the name, `insure_fuzzy` works to constrain values to any range
return insure_fuzzy(result, start, end)
class NormalizeCat(Command):
"""Converts integer input values into narmalized values based on user specification"""
display_name = "Normalize by Category"
inputs = {
"InFieldName": params.ResultParameter(params.DataParameter(), is_fuzzy=False),
"RawValues": params.ListParameter(params.NumberParameter()),
"NormalValues": params.ListParameter(params.NumberParameter()),
"DefaultNormalValue": params.NumberParameter(),
}
output = params.DataParameter()
def execute(self, **kwargs):
arr = kwargs["InFieldName"].result
raw_values = kwargs["RawValues"]
normal_values = kwargs["NormalValues"]
default_normal_value = kwargs["DefaultNormalValue"]
if len(raw_values) != len(normal_values):
raise MixedArrayLengths(
len(raw_values), len(normal_values), lineno=self.lineno
)
if len(raw_values) != len(set(raw_values)):
raise DuplicateRawValues(lineno=self.argument_lines.get("RawValues"))
result = numpy.ma.array(
numpy.full(arr.shape, default_normal_value, dtype=float)
)
for raw, normal in zip(raw_values, normal_values):
result[arr.data == raw] = normal
return result
class NormalizeCurve(Command):
"""Converts input values into normalized values based on user-defined curve"""
display_name = "Normalize Curve"
inputs = {
"InFieldName": params.ResultParameter(params.DataParameter(), is_fuzzy=False),
"RawValues": params.ListParameter(params.NumberParameter()),
"NormalValues": params.ListParameter(params.NumberParameter()),
}
output = params.DataParameter()
def execute(self, **kwargs):
arr = kwargs["InFieldName"].result
raw_values = kwargs["RawValues"]
normal_values = kwargs["NormalValues"]
if len(raw_values) != len(normal_values):
raise MixedArrayLengths(
len(raw_values), len(normal_values), lineno=self.lineno
)
if len(raw_values) != len(set(raw_values)):
raise DuplicateRawValues(lineno=self.argument_lines.get("RawValues"))
result = numpy.ma.empty(arr.shape, dtype=float)
value_pairs = sorted(zip(raw_values, normal_values))
# For raw values less than the lowest raw value, set them to the corresponding normal value
result[arr <= value_pairs[0][0]] = value_pairs[0][1]
# Assign normal values for each of the line segments that approximate the curve
for i, (raw, normal) in list(enumerate(value_pairs))[1:]:
prev_raw = value_pairs[i - 1][0]
prev_normal = value_pairs[i - 1][1]
m = (normal - prev_normal) / (raw - prev_raw)
b = prev_normal - m * prev_raw
where_idx = numpy.where(
numpy.logical_and(arr.data > prev_raw, arr.data <= raw)
)
result[where_idx] = arr.data[where_idx]
result[where_idx] *= m
result[where_idx] += b
# For raw values greater than the highest raw value, set them to the corresponding normal value
result[arr > value_pairs[-1][0]] = value_pairs[-1][1]
result.mask = arr.mask.copy()
return result
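# Illustrative sketch: for unmasked data, the piecewise-linear mapping built by
# NormalizeCurve behaves like numpy.interp, which likewise clamps values outside the raw
# range to the first/last normal value. The sample values below are hypothetical.
def _example_normalize_curve():
    raw_values = [0.0, 10.0, 20.0]
    normal_values = [0.0, 0.5, 1.0]
    data = numpy.array([-5.0, 5.0, 15.0, 25.0])
    return numpy.interp(data, raw_values, normal_values)  # [0.0, 0.25, 0.75, 1.0]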
class NormalizeMeanToMid(NormalizeCurve):
"""Uses "NormalizeCurve" to create a non-linear transformation that is a good match for the input data"""
display_name = "Mean to Mid"
inputs = {
"InFieldName": params.ResultParameter(params.DataParameter(), is_fuzzy=False),
"IgnoreZeros": params.BooleanParameter(),
"NormalValues": params.ListParameter(params.NumberParameter()),
}
output = params.DataParameter()
def execute(self, **kwargs):
arr = kwargs["InFieldName"].result
ignore_zeros = kwargs["IgnoreZeros"]
low_value = arr.min()
high_value = arr.max()
if ignore_zeros:
arr = arr[arr != 0]
mean_value = arr.mean()
below_mean = arr[arr <= mean_value]
above_mean = arr[arr > mean_value]
high_mean = above_mean.compressed().mean()
low_mean = below_mean.compressed().mean()
raw_values = [low_value, low_mean, mean_value, high_mean, high_value]
normal_values = kwargs["NormalValues"][:]
if raw_values[-1] == raw_values[-2]:
del raw_values[-2]
del normal_values[-2]
if raw_values[0] == raw_values[1]:
del raw_values[1]
del normal_values[1]
kwargs = copy.copy(kwargs)
kwargs["RawValues"] = raw_values
kwargs["NormalValues"] = normal_values
return super(NormalizeMeanToMid, self).execute(**kwargs)
class NormalizeCurveZScore(Command):
"""Converts input values into narmalized values based on user-defined curve"""
display_name = "Normalize Curve by Z Score"
inputs = {
"InFieldName": params.ResultParameter(params.DataParameter(), is_fuzzy=False),
"ZScoreValues": params.ListParameter(params.NumberParameter()),
"NormalValues": params.ListParameter(params.NumberParameter()),
}
output = params.DataParameter()
def execute(self, **kwargs):
arr = kwargs["InFieldName"].result
z_score_values = kwargs["ZScoreValues"]
normal_values = kwargs["NormalValues"]
if len(z_score_values) != len(normal_values):
raise MixedArrayLengths(
len(z_score_values), len(normal_values), lineno=self.lineno
)
raw_mean = numpy.ma.mean(arr)
raw_std = numpy.ma.std(arr)
raw_values = [raw_mean + value * raw_std for value in z_score_values]
result = numpy.ma.empty(arr.shape, dtype=float)
value_pairs = sorted(zip(raw_values, normal_values))
# For raw values less than the lowest raw value, set them to the corresponding normal value
result[arr <= value_pairs[0][0]] = value_pairs[0][1]
# Assign normal values for each of the line segments that approximate the curve
for i, (raw, normal) in list(enumerate(value_pairs))[1:]:
prev_raw = value_pairs[i - 1][0]
prev_normal = value_pairs[i - 1][1]
m = (normal - prev_normal) / (raw - prev_raw)
b = prev_normal - m * prev_raw
where_idx = numpy.where(
numpy.logical_and(arr.data > prev_raw, arr.data <= raw)
)
result[where_idx] = arr.data[where_idx]
result[where_idx] *= m
result[where_idx] += b
# For raw values greater than the highest raw value, set them to the corresponding normal value
result[arr > value_pairs[-1][0]] = value_pairs[-1][1]
result.mask = arr.mask.copy()
return result
class PrintVars(Command):
"""Prints each variable in a list of variable names."""
display_name = "Print variable(s) to screen or file"
inputs = {
"InFieldNames": params.ListParameter(params.ResultParameter()),
"OutFileName": params.PathParameter(must_exist=False, required=False),
}
output = params.BooleanParameter()
def execute(self, **kwargs):
commands = kwargs["InFieldNames"]
out_path = kwargs.get("OutFileName")
if out_path:
with open(out_path, "w") as f_out:
f_out.write(
"\n".join(
"{}: {}".format(c.result_name, c.result) for c in commands
)
)
else:
for command in kwargs["InFieldNames"]:
print("{}: {}".format(command.result_name, command.result))
return True
|
[
"numpy.copy",
"mpilot.params.ResultParameter",
"numpy.ma.std",
"numpy.ma.mean",
"mpilot.utils.insure_fuzzy",
"numpy.full",
"mpilot.params.PathParameter",
"mpilot.params.NumberParameter",
"numpy.ma.minimum",
"numpy.ma.maximum",
"numpy.logical_and",
"mpilot.params.DataParameter",
"numpy.ma.empty",
"copy.copy",
"mpilot.params.BooleanParameter"
] |
[((565, 587), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (585, 587), False, 'from mpilot import params\n'), ((970, 992), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (990, 992), False, 'from mpilot import params\n'), ((1439, 1461), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (1459, 1461), False, 'from mpilot import params\n'), ((2106, 2128), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (2126, 2128), False, 'from mpilot import params\n'), ((2864, 2886), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (2884, 2886), False, 'from mpilot import params\n'), ((3465, 3487), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (3485, 3487), False, 'from mpilot import params\n'), ((3955, 3977), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (3975, 3977), False, 'from mpilot import params\n'), ((4489, 4511), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (4509, 4511), False, 'from mpilot import params\n'), ((5007, 5029), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (5027, 5029), False, 'from mpilot import params\n'), ((5603, 5625), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (5623, 5625), False, 'from mpilot import params\n'), ((6466, 6488), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (6486, 6488), False, 'from mpilot import params\n'), ((7339, 7361), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (7359, 7361), False, 'from mpilot import params\n'), ((8608, 8630), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (8628, 8630), False, 'from mpilot import params\n'), ((9822, 9844), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (9842, 9844), False, 'from mpilot import params\n'), ((11855, 11877), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (11875, 11877), False, 'from mpilot import params\n'), ((13314, 13336), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (13334, 13336), False, 'from mpilot import params\n'), ((15280, 15305), 'mpilot.params.BooleanParameter', 'params.BooleanParameter', ([], {}), '()\n', (15303, 15305), False, 'from mpilot import params\n'), ((637, 677), 'numpy.copy', 'numpy.copy', (["kwargs['InFieldName'].result"], {}), "(kwargs['InFieldName'].result)\n", (647, 677), False, 'import numpy\n'), ((3062, 3083), 'numpy.copy', 'numpy.copy', (['arrays[0]'], {}), '(arrays[0])\n', (3072, 3083), False, 'import numpy\n'), ((6349, 6387), 'mpilot.params.NumberParameter', 'params.NumberParameter', ([], {'required': '(False)'}), '(required=False)\n', (6371, 6387), False, 'from mpilot import params\n'), ((6407, 6445), 'mpilot.params.NumberParameter', 'params.NumberParameter', ([], {'required': '(False)'}), '(required=False)\n', (6429, 6445), False, 'from mpilot import params\n'), ((7090, 7128), 'mpilot.params.NumberParameter', 'params.NumberParameter', ([], {'required': '(False)'}), '(required=False)\n', (7112, 7128), False, 'from mpilot import params\n'), ((7162, 7200), 'mpilot.params.NumberParameter', 'params.NumberParameter', ([], {'required': '(False)'}), '(required=False)\n', (7184, 7200), False, 'from mpilot import params\n'), ((7222, 7260), 'mpilot.params.NumberParameter', 'params.NumberParameter', ([], {'required': '(False)'}), '(required=False)\n', (7244, 7260), False, 
'from mpilot import params\n'), ((7280, 7318), 'mpilot.params.NumberParameter', 'params.NumberParameter', ([], {'required': '(False)'}), '(required=False)\n', (7302, 7318), False, 'from mpilot import params\n'), ((7679, 7697), 'numpy.ma.mean', 'numpy.ma.mean', (['arr'], {}), '(arr)\n', (7692, 7697), False, 'import numpy\n'), ((7716, 7733), 'numpy.ma.std', 'numpy.ma.std', (['arr'], {}), '(arr)\n', (7728, 7733), False, 'import numpy\n'), ((8091, 8123), 'mpilot.utils.insure_fuzzy', 'insure_fuzzy', (['result', 'start', 'end'], {}), '(result, start, end)\n', (8103, 8123), False, 'from mpilot.utils import insure_fuzzy\n'), ((8563, 8587), 'mpilot.params.NumberParameter', 'params.NumberParameter', ([], {}), '()\n', (8585, 8587), False, 'from mpilot import params\n'), ((10337, 10375), 'numpy.ma.empty', 'numpy.ma.empty', (['arr.shape'], {'dtype': 'float'}), '(arr.shape, dtype=float)\n', (10351, 10375), False, 'import numpy\n'), ((11737, 11762), 'mpilot.params.BooleanParameter', 'params.BooleanParameter', ([], {}), '()\n', (11760, 11762), False, 'from mpilot import params\n'), ((12706, 12723), 'copy.copy', 'copy.copy', (['kwargs'], {}), '(kwargs)\n', (12715, 12723), False, 'import copy\n'), ((13711, 13729), 'numpy.ma.mean', 'numpy.ma.mean', (['arr'], {}), '(arr)\n', (13724, 13729), False, 'import numpy\n'), ((13748, 13765), 'numpy.ma.std', 'numpy.ma.std', (['arr'], {}), '(arr)\n', (13760, 13765), False, 'import numpy\n'), ((13863, 13901), 'numpy.ma.empty', 'numpy.ma.empty', (['arr.shape'], {'dtype': 'float'}), '(arr.shape, dtype=float)\n', (13877, 13901), False, 'import numpy\n'), ((15205, 15259), 'mpilot.params.PathParameter', 'params.PathParameter', ([], {'must_exist': '(False)', 'required': '(False)'}), '(must_exist=False, required=False)\n', (15225, 15259), False, 'from mpilot import params\n'), ((527, 549), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (547, 549), False, 'from mpilot import params\n'), ((833, 855), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (853, 855), False, 'from mpilot import params\n'), ((910, 932), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (930, 932), False, 'from mpilot import params\n'), ((2060, 2084), 'mpilot.params.NumberParameter', 'params.NumberParameter', ([], {}), '()\n', (2082, 2084), False, 'from mpilot import params\n'), ((3328, 3350), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (3348, 3350), False, 'from mpilot import params\n'), ((3405, 3427), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (3425, 3427), False, 'from mpilot import params\n'), ((5557, 5581), 'mpilot.params.NumberParameter', 'params.NumberParameter', ([], {}), '()\n', (5579, 5581), False, 'from mpilot import params\n'), ((6288, 6310), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (6308, 6310), False, 'from mpilot import params\n'), ((7018, 7040), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (7038, 7040), False, 'from mpilot import params\n'), ((8351, 8373), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (8371, 8373), False, 'from mpilot import params\n'), ((8434, 8458), 'mpilot.params.NumberParameter', 'params.NumberParameter', ([], {}), '()\n', (8456, 8458), False, 'from mpilot import params\n'), ((8506, 8530), 'mpilot.params.NumberParameter', 'params.NumberParameter', ([], {}), '()\n', (8528, 8530), False, 'from mpilot import params\n'), ((9211, 9267), 'numpy.full', 
'numpy.full', (['arr.shape', 'default_normal_value'], {'dtype': 'float'}), '(arr.shape, default_normal_value, dtype=float)\n', (9221, 9267), False, 'import numpy\n'), ((9621, 9643), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (9641, 9643), False, 'from mpilot import params\n'), ((9704, 9728), 'mpilot.params.NumberParameter', 'params.NumberParameter', ([], {}), '()\n', (9726, 9728), False, 'from mpilot import params\n'), ((9776, 9800), 'mpilot.params.NumberParameter', 'params.NumberParameter', ([], {}), '()\n', (9798, 9800), False, 'from mpilot import params\n'), ((11673, 11695), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (11693, 11695), False, 'from mpilot import params\n'), ((11809, 11833), 'mpilot.params.NumberParameter', 'params.NumberParameter', ([], {}), '()\n', (11831, 11833), False, 'from mpilot import params\n'), ((13110, 13132), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (13130, 13132), False, 'from mpilot import params\n'), ((13196, 13220), 'mpilot.params.NumberParameter', 'params.NumberParameter', ([], {}), '()\n', (13218, 13220), False, 'from mpilot import params\n'), ((13268, 13292), 'mpilot.params.NumberParameter', 'params.NumberParameter', ([], {}), '()\n', (13290, 13292), False, 'from mpilot import params\n'), ((15155, 15179), 'mpilot.params.ResultParameter', 'params.ResultParameter', ([], {}), '()\n', (15177, 15179), False, 'from mpilot import params\n'), ((1370, 1392), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (1390, 1392), False, 'from mpilot import params\n'), ((1969, 1991), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (1989, 1991), False, 'from mpilot import params\n'), ((2795, 2817), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (2815, 2817), False, 'from mpilot import params\n'), ((3886, 3908), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (3906, 3908), False, 'from mpilot import params\n'), ((4171, 4193), 'numpy.ma.minimum', 'numpy.ma.minimum', (['x', 'y'], {}), '(x, y)\n', (4187, 4193), False, 'import numpy\n'), ((4420, 4442), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (4440, 4442), False, 'from mpilot import params\n'), ((4705, 4727), 'numpy.ma.maximum', 'numpy.ma.maximum', (['x', 'y'], {}), '(x, y)\n', (4721, 4727), False, 'import numpy\n'), ((4938, 4960), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (4958, 4960), False, 'from mpilot import params\n'), ((5466, 5488), 'mpilot.params.DataParameter', 'params.DataParameter', ([], {}), '()\n', (5486, 5488), False, 'from mpilot import params\n'), ((11003, 11058), 'numpy.logical_and', 'numpy.logical_and', (['(arr.data > prev_raw)', '(arr.data <= raw)'], {}), '(arr.data > prev_raw, arr.data <= raw)\n', (11020, 11058), False, 'import numpy\n'), ((14529, 14584), 'numpy.logical_and', 'numpy.logical_and', (['(arr.data > prev_raw)', '(arr.data <= raw)'], {}), '(arr.data > prev_raw, arr.data <= raw)\n', (14546, 14584), False, 'import numpy\n')]
|
from os import listdir
from os.path import isfile, join
import numpy as np
import matplotlib.pyplot as plt
from magenta.models.nsynth.wavenet import fastgen
import sys
# Change path back to /src to load other modules
sys.path.insert(0, '/home/ubuntu/DeepBass/src')
from ingestion.IO_utils import Load, Save
from preprocess.SilenceRemoval import SR
import streamlit as st
import time
import math
###############################################################################
def LinearFade(length):
fadein = np.linspace(0, 1, length).reshape(1, -1, 1)
return fadein
###############################################################################
def HannFade(length):
fadein = (0.5 * (1.0 - np.cos(math.pi * np.arange(length) /
float(length)))).reshape(1, -1, 1)
return fadein
###############################################################################
def fade(encoding, fade_type, mode='in'):
length = encoding.shape[1]
method = globals().copy().get(fade_type)
if not method:
raise NotImplementedError("Fade %s not implemented" % fade_type)
fadein = method(length)
if mode == 'in':
return fadein * encoding
else:
return (1.0 - fadein) * encoding
###############################################################################
def crossfade(encoding1, encoding2, fade_type):
return fade(encoding1, fade_type, 'out') + fade(encoding2, fade_type, 'in')
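###############################################################################
# Added illustrative check (hypothetical names _demo_a/_demo_b; not part of
# the original demo): for any fade curve with values in [0, 1], the 'out' and
# 'in' weights sum to one at every frame, so crossfade() mixes two encodings
# without changing their overall scale.
_demo_a = np.ones((1, 8, 2))   # dummy encoding of shape (batch, frames, channels)
_demo_b = np.zeros((1, 8, 2))
assert np.allclose(fade(_demo_a, 'LinearFade', 'in')
                   + fade(_demo_a, 'LinearFade', 'out'), _demo_a)
assert crossfade(_demo_a, _demo_b, 'LinearFade').shape == _demo_a.shape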
###############################################################################
"""Demo of cross fading in the NSynth embedding space
User Parameters:
    t_len (float): Amount of time (in seconds) for reconstruction
silence_len1 (float) : Skip this many seconds of the ending that is silent
silence_len2 (float) : Skip this many seconds of the beginning that is silent
AUDIO_DIR (str) : Directory of the audio files
output_dir (str) : Directory to save the reconstruction
model_dir (str) : Directory of the pretrained model (tf checkpoint)
Returns:
Streamlit notebook
Crossfaded audio in the form of a wav file
Notes:
sr must be 16 kHz per the model architecture
"""
# Directory where mp3 are stored.
AUDIO_DIR = '/home/ubuntu/test'
filenames = [f for f in listdir(AUDIO_DIR) if isfile(join(AUDIO_DIR, f))]
sr = 16000
# magenta also uses librosa for loading
FirstSong_fname = filenames[1]
SecondSong_fname = filenames[0]
FirstSong, _ = Load(AUDIO_DIR, FirstSong_fname , sr=sr)
SecondSong, _ = Load(AUDIO_DIR, SecondSong_fname, sr=sr)
# Remove any silence at the end of the first song
# and the beginning of the second song
t_snip = 30 # interrogation length in seconds
end_index = SR(FirstSong, 'end', t_snip=t_snip)
end_index = int(t_snip*sr - end_index) # change index reference frame
start_index = SR(SecondSong, 'begin', t_snip=t_snip)
FirstSong = FirstSong[:-end_index]
SecondSong = SecondSong[start_index:]
# Trim to t_len seconds
t_len = 5
sample_length = t_len*sr
FirstSong_end = FirstSong[-sample_length:]
SecondSong_begin = SecondSong[0:sample_length]
# Plot PCM of both snippets
fig, axs = plt.subplots(2, 1, figsize=(10, 5))
axs[0].plot(FirstSong_end)
axs[0].set_title('First Song')
axs[1].plot(SecondSong_begin)
axs[1].set_title('Second Song')
st.pyplot()
# Save original snippets
output_dir = '/home/ubuntu/DeepBass/src/notebooks/'
output_name1 = 'originalend_' + FirstSong_fname + '.wav'
Save(output_dir, output_name1, FirstSong_end, sr)
output_name2 = 'originalbegin_' + SecondSong_fname + '.wav'
Save(output_dir, output_name2, SecondSong_begin, sr)
model_dir = '/home/ubuntu/DeepBass/src/notebooks/wavenet-ckpt/model.ckpt-200000'
# Create encodings
start = time.time()
enc1 = fastgen.encode(FirstSong_end, model_dir, sample_length)
enc2 = fastgen.encode(SecondSong_begin, model_dir, sample_length)
end = time.time()
st.write('Encoding took ' + str((end-start)) + ' seconds')
# Create cross fading in the latent space
fade_type = 'LinearFade'
xfade_encoding = crossfade(enc1, enc2, fade_type)
fig, axs = plt.subplots(3, 1, figsize=(10, 7))
axs[0].plot(enc1[0])
axs[0].set_title('Encoding 1')
axs[1].plot(enc2[0])
axs[1].set_title('Encoding 2')
axs[2].plot(xfade_encoding[0])
axs[2].set_title('Crossfade')
st.pyplot()
start = time.time()
@st.cache
def synth():
fastgen.synthesize(xfade_encoding, checkpoint_path = model_dir,
save_paths=['enc_' + fade_type + '_' + FirstSong_fname + \
SecondSong_fname],
samples_per_save=sample_length)
return None
synth()
end = time.time()
st.write('Decoding took ' + str((end-start)) + ' seconds')
xfade_audio, _ = Load(output_dir, 'enc_' + fade_type + '_' + FirstSong_fname + \
SecondSong_fname, sr=sr)
fig, ax = plt.subplots(figsize=(10, 7))
ax.plot(xfade_audio)
ax.set_title('Crossfaded audio')
st.pyplot()
|
[
"magenta.models.nsynth.wavenet.fastgen.encode",
"sys.path.insert",
"streamlit.pyplot",
"os.listdir",
"os.path.join",
"preprocess.SilenceRemoval.SR",
"numpy.linspace",
"ingestion.IO_utils.Load",
"magenta.models.nsynth.wavenet.fastgen.synthesize",
"ingestion.IO_utils.Save",
"time.time",
"matplotlib.pyplot.subplots",
"numpy.arange"
] |
[((217, 264), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/home/ubuntu/DeepBass/src"""'], {}), "(0, '/home/ubuntu/DeepBass/src')\n", (232, 264), False, 'import sys\n'), ((2443, 2482), 'ingestion.IO_utils.Load', 'Load', (['AUDIO_DIR', 'FirstSong_fname'], {'sr': 'sr'}), '(AUDIO_DIR, FirstSong_fname, sr=sr)\n', (2447, 2482), False, 'from ingestion.IO_utils import Load, Save\n'), ((2500, 2540), 'ingestion.IO_utils.Load', 'Load', (['AUDIO_DIR', 'SecondSong_fname'], {'sr': 'sr'}), '(AUDIO_DIR, SecondSong_fname, sr=sr)\n', (2504, 2540), False, 'from ingestion.IO_utils import Load, Save\n'), ((2689, 2724), 'preprocess.SilenceRemoval.SR', 'SR', (['FirstSong', '"""end"""'], {'t_snip': 't_snip'}), "(FirstSong, 'end', t_snip=t_snip)\n", (2691, 2724), False, 'from preprocess.SilenceRemoval import SR\n'), ((2809, 2847), 'preprocess.SilenceRemoval.SR', 'SR', (['SecondSong', '"""begin"""'], {'t_snip': 't_snip'}), "(SecondSong, 'begin', t_snip=t_snip)\n", (2811, 2847), False, 'from preprocess.SilenceRemoval import SR\n'), ((3111, 3146), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(10, 5)'}), '(2, 1, figsize=(10, 5))\n', (3123, 3146), True, 'import matplotlib.pyplot as plt\n'), ((3267, 3278), 'streamlit.pyplot', 'st.pyplot', ([], {}), '()\n', (3276, 3278), True, 'import streamlit as st\n'), ((3414, 3463), 'ingestion.IO_utils.Save', 'Save', (['output_dir', 'output_name1', 'FirstSong_end', 'sr'], {}), '(output_dir, output_name1, FirstSong_end, sr)\n', (3418, 3463), False, 'from ingestion.IO_utils import Load, Save\n'), ((3524, 3576), 'ingestion.IO_utils.Save', 'Save', (['output_dir', 'output_name2', 'SecondSong_begin', 'sr'], {}), '(output_dir, output_name2, SecondSong_begin, sr)\n', (3528, 3576), False, 'from ingestion.IO_utils import Load, Save\n'), ((3687, 3698), 'time.time', 'time.time', ([], {}), '()\n', (3696, 3698), False, 'import time\n'), ((3706, 3761), 'magenta.models.nsynth.wavenet.fastgen.encode', 'fastgen.encode', (['FirstSong_end', 'model_dir', 'sample_length'], {}), '(FirstSong_end, model_dir, sample_length)\n', (3720, 3761), False, 'from magenta.models.nsynth.wavenet import fastgen\n'), ((3769, 3827), 'magenta.models.nsynth.wavenet.fastgen.encode', 'fastgen.encode', (['SecondSong_begin', 'model_dir', 'sample_length'], {}), '(SecondSong_begin, model_dir, sample_length)\n', (3783, 3827), False, 'from magenta.models.nsynth.wavenet import fastgen\n'), ((3834, 3845), 'time.time', 'time.time', ([], {}), '()\n', (3843, 3845), False, 'import time\n'), ((4035, 4070), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(10, 7)'}), '(3, 1, figsize=(10, 7))\n', (4047, 4070), True, 'import matplotlib.pyplot as plt\n'), ((4236, 4247), 'streamlit.pyplot', 'st.pyplot', ([], {}), '()\n', (4245, 4247), True, 'import streamlit as st\n'), ((4257, 4268), 'time.time', 'time.time', ([], {}), '()\n', (4266, 4268), False, 'import time\n'), ((4583, 4594), 'time.time', 'time.time', ([], {}), '()\n', (4592, 4594), False, 'import time\n'), ((4672, 4762), 'ingestion.IO_utils.Load', 'Load', (['output_dir', "('enc_' + fade_type + '_' + FirstSong_fname + SecondSong_fname)"], {'sr': 'sr'}), "(output_dir, 'enc_' + fade_type + '_' + FirstSong_fname +\n SecondSong_fname, sr=sr)\n", (4676, 4762), False, 'from ingestion.IO_utils import Load, Save\n'), ((4793, 4822), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (4805, 4822), True, 'import matplotlib.pyplot as plt\n'), ((4877, 4888), 'streamlit.pyplot', 'st.pyplot', 
([], {}), '()\n', (4886, 4888), True, 'import streamlit as st\n'), ((4296, 4474), 'magenta.models.nsynth.wavenet.fastgen.synthesize', 'fastgen.synthesize', (['xfade_encoding'], {'checkpoint_path': 'model_dir', 'save_paths': "['enc_' + fade_type + '_' + FirstSong_fname + SecondSong_fname]", 'samples_per_save': 'sample_length'}), "(xfade_encoding, checkpoint_path=model_dir, save_paths=[\n 'enc_' + fade_type + '_' + FirstSong_fname + SecondSong_fname],\n samples_per_save=sample_length)\n", (4314, 4474), False, 'from magenta.models.nsynth.wavenet import fastgen\n'), ((2263, 2281), 'os.listdir', 'listdir', (['AUDIO_DIR'], {}), '(AUDIO_DIR)\n', (2270, 2281), False, 'from os import listdir\n'), ((514, 539), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'length'], {}), '(0, 1, length)\n', (525, 539), True, 'import numpy as np\n'), ((2292, 2310), 'os.path.join', 'join', (['AUDIO_DIR', 'f'], {}), '(AUDIO_DIR, f)\n', (2296, 2310), False, 'from os.path import isfile, join\n'), ((728, 745), 'numpy.arange', 'np.arange', (['length'], {}), '(length)\n', (737, 745), True, 'import numpy as np\n')]
|
"""
Adapted from https://github.com/hovinh/DeCNN
"""
import numpy as np
from keras import backend as K
class Backpropagation():
def __init__(self, model, layer_name, input_data, layer_idx=None, masking=None):
"""
@params:
- model: a Keras Model.
- layer_name: name of layer to be backpropagated, can be determined by
model.layers[layer_idx].name.
        - input_data: the input to be inspected; it must already be in the
        proper format expected by the model.
- layer_idx: equivalent to layer_name.
        - masking: determines which units in the chosen layer are backpropagated;
        a numpy array with the same shape as the chosen layer's output.
"""
self.model = model
self.layer_name = layer_name
self.layer = model.get_layer(layer_name)
self.input_data = input_data
if layer_idx is None:
for i, layer in enumerate(self.model.layers):
if layer.name == self.layer_name:
self.layer_idx = i
break
if masking is None:
shape = [1] + list(self.layer.output_shape[1:])
masking = np.ones(shape, 'float32')
self.masking = masking
def compute(self):
"""
@returns:
- output_data: obtained heatmap.
        - func: a reusable function to compute backpropagation in the same setting.
"""
loss = K.mean(self.layer.output * self.masking)
gradients = K.gradients(loss, self.model.input)[0]
func = K.function([self.model.input], [gradients])
output_data = func([self.input_data])[0]
output_data = self.filter_gradient(output_data)
return output_data, func
def filter_gradient(self, x):
"""
        Reduce the gradients to non-negative values for visualization:
        absolute value followed by a channel-wise maximum.
"""
x_abs = np.abs(x)
x_max = np.amax(x_abs, axis=-1)
return x_max
class SmoothGrad(Backpropagation):
def __init__(self, model, layer_name, input_data, layer_idx=None, masking=None):
"""
For parameters, please refer to Backpropagation()
"""
super(SmoothGrad, self).__init__(model, layer_name, input_data, layer_idx, masking)
def compute(self, n_samples=50, batch_size=4):
"""
@params:
        - n_samples: number of noise-perturbed copies of the input to average over.
        - batch_size: must be <= n_samples. If n_samples is too big there may not be
        enough memory to process all copies at once, so they are processed
        iteratively batch-by-batch.
@returns:
- smooth_gradients: obtained heatmap.
"""
_, func = super().compute()
shape = [n_samples] + list(self.model.input.shape[1:])
new_gradients = np.zeros(shape)
for start_idx in range(0, n_samples, batch_size):
if n_samples >= start_idx + batch_size:
end_idx = start_idx + batch_size
else:
end_idx = n_samples
shape = [end_idx - start_idx] + list(self.model.input.shape[1:])
random_noise = np.random.random(shape)
# random_noise = np.random.random(self.input_data.shape)
new_images = random_noise + self.input_data
gradients = func([new_images])[0]
new_gradients[start_idx:end_idx, ...] = gradients
smooth_gradients = np.expand_dims(np.mean(new_gradients, axis=0), axis=0)
smooth_gradients = self.filter_gradient(smooth_gradients)
return smooth_gradients
class GuidedBackprop(Backpropagation):
def __init__(self, model, layer_name, input_data, layer_idx=None, masking=None):
"""
For parameters, please refer to Backpropagation()
"""
super(GuidedBackprop, self).__init__(model, layer_name, input_data, layer_idx, masking)
def compute(self):
"""
@returns:
- gradients_input: obtained heatmap.
"""
forward_values = [self.input_data] + self.feed_forward()
forward_values_dict = {self.model.layers[i].name: forward_values[i] for i in range(self.layer_idx + 1)}
gradients = self.masking
for layer_idx in range(self.layer_idx - 1, -1, -1):
layer_cur = self.model.layers[layer_idx + 1].output
layer_prev = self.model.layers[layer_idx].output
layer_prev_name = self.model.layers[layer_idx].name
gradients_cur = gradients
gate_b = (gradients_cur > 0.) * gradients_cur
gradients = self.guided_backprop_adjacent(layer_cur,
layer_prev,
forward_values_dict[layer_prev_name],
gate_b)
if gradients.min() != gradients.max():
gradients = self.normalize_gradient(gradients)
gradients_input = gradients
gradients_input = self.filter_gradient(gradients_input)
return gradients_input
def guided_backprop_adjacent(self, layer_cur, layer_prev, values_prev, gate_b):
loss = K.mean(layer_cur * gate_b)
gradients = K.gradients(loss, layer_prev)[0]
gate_f = K.cast(values_prev > 0., 'float32')
guided_gradients = gradients * gate_f
func = K.function([self.model.input], [guided_gradients])
output_data = func([self.input_data])[0]
return output_data
def feed_forward(self):
forward_layers = [layer.output for layer in self.model.layers[1:self.layer_idx + 1]]
func = K.function([self.model.input], forward_layers)
self.forward_values = func([self.input_data])
return self.forward_values
def normalize_gradient(self, img):
"""
        Computed gradients tend to become very small, especially after many layers,
        so after each layer they are multiplied by a constant to keep them in an
        acceptable range (when applicable).
"""
gap = img.max() - img.min()
if abs(gap) > 1.:
return img
amplitude = 1. / gap
img *= amplitude
return img
class DeconvNet(GuidedBackprop):
def __init__(self, model, layer_name, input_data, layer_idx=None, masking=None):
"""
For parameters, please refer to Backpropagation()
"""
super(DeconvNet, self).__init__(model, layer_name, input_data, layer_idx, masking)
def compute(self):
"""
@returns:
- gradients_input: obtained heatmap.
"""
gradients = self.masking
for layer_idx in range(self.layer_idx - 1, -1, -1):
layer_prev = self.model.layers[layer_idx].output
layer_cur = self.model.layers[layer_idx + 1].output
forward_values_prev = np.ones(
[self.input_data.shape[0]] + list(self.model.layers[layer_idx].output_shape[1:]))
gradients_cur = gradients
gate_b = (gradients_cur > 0.) * gradients_cur
gradients = self.guided_backprop_adjacent(layer_cur,
layer_prev,
forward_values_prev,
gate_b)
if gradients.min() != gradients.max():
gradients = self.normalize_gradient(gradients)
gradients_input = gradients
gradients_input = self.filter_gradient(gradients_input)
return gradients_input
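if __name__ == '__main__':
    # Added usage sketch, not part of the original module: builds a tiny
    # functional-API Keras model (layer names 'conv1'/'conv2' and the input
    # shape are illustrative assumptions) and runs the visualizers on a random
    # input. The classes expect model.layers[0] to be the InputLayer, which is
    # why the functional API is used here.
    from keras.models import Model
    from keras.layers import Input, Conv2D

    inp = Input(shape=(28, 28, 1))
    hidden = Conv2D(8, 3, activation='relu', name='conv1')(inp)
    out = Conv2D(4, 3, activation='relu', name='conv2')(hidden)
    toy_model = Model(inp, out)

    x = np.random.random((1, 28, 28, 1))
    bp_map, _ = Backpropagation(toy_model, 'conv2', x).compute()
    sg_map = SmoothGrad(toy_model, 'conv2', x).compute(n_samples=8, batch_size=4)
    gb_map = GuidedBackprop(toy_model, 'conv2', x).compute()
    dc_map = DeconvNet(toy_model, 'conv2', x).compute()
    print(bp_map.shape, sg_map.shape, gb_map.shape, dc_map.shape)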
|
[
"numpy.abs",
"numpy.mean",
"keras.backend.cast",
"numpy.ones",
"numpy.random.random",
"keras.backend.mean",
"keras.backend.gradients",
"numpy.zeros",
"keras.backend.function",
"numpy.amax"
] |
[((1485, 1525), 'keras.backend.mean', 'K.mean', (['(self.layer.output * self.masking)'], {}), '(self.layer.output * self.masking)\n', (1491, 1525), True, 'from keras import backend as K\n'), ((1600, 1643), 'keras.backend.function', 'K.function', (['[self.model.input]', '[gradients]'], {}), '([self.model.input], [gradients])\n', (1610, 1643), True, 'from keras import backend as K\n'), ((1920, 1929), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (1926, 1929), True, 'import numpy as np\n'), ((1946, 1969), 'numpy.amax', 'np.amax', (['x_abs'], {'axis': '(-1)'}), '(x_abs, axis=-1)\n', (1953, 1969), True, 'import numpy as np\n'), ((2874, 2889), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (2882, 2889), True, 'import numpy as np\n'), ((5257, 5283), 'keras.backend.mean', 'K.mean', (['(layer_cur * gate_b)'], {}), '(layer_cur * gate_b)\n', (5263, 5283), True, 'from keras import backend as K\n'), ((5354, 5390), 'keras.backend.cast', 'K.cast', (['(values_prev > 0.0)', '"""float32"""'], {}), "(values_prev > 0.0, 'float32')\n", (5360, 5390), True, 'from keras import backend as K\n'), ((5452, 5502), 'keras.backend.function', 'K.function', (['[self.model.input]', '[guided_gradients]'], {}), '([self.model.input], [guided_gradients])\n', (5462, 5502), True, 'from keras import backend as K\n'), ((5716, 5762), 'keras.backend.function', 'K.function', (['[self.model.input]', 'forward_layers'], {}), '([self.model.input], forward_layers)\n', (5726, 5762), True, 'from keras import backend as K\n'), ((1212, 1237), 'numpy.ones', 'np.ones', (['shape', '"""float32"""'], {}), "(shape, 'float32')\n", (1219, 1237), True, 'import numpy as np\n'), ((1546, 1581), 'keras.backend.gradients', 'K.gradients', (['loss', 'self.model.input'], {}), '(loss, self.model.input)\n', (1557, 1581), True, 'from keras import backend as K\n'), ((3210, 3233), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (3226, 3233), True, 'import numpy as np\n'), ((3510, 3540), 'numpy.mean', 'np.mean', (['new_gradients'], {'axis': '(0)'}), '(new_gradients, axis=0)\n', (3517, 3540), True, 'import numpy as np\n'), ((5304, 5333), 'keras.backend.gradients', 'K.gradients', (['loss', 'layer_prev'], {}), '(loss, layer_prev)\n', (5315, 5333), True, 'from keras import backend as K\n')]
|
import torch
import numpy as np
from rlbot.agents.base_agent import SimpleControllerState, BaseAgent
class OutputFormatter():
"""
A class to format model output
"""
def transform_action(self, action):
"""
Transforms the action into a controller state.
"""
action = action[0].detach().cpu().numpy()
# Convert the last 3 actions to their boolean values
action = np.concatenate((action[:5], (action[5:] >= 0)), axis = 0)
controller_out = BaseAgent.convert_output_to_v4(self, action)
return controller_out
def transform_output(self, model_output):
"""
Transforms the output to the new controller state and the action or
state value.
"""
action, val = model_output
action = self.transform_action(action)
val = val.detach()
return action, val
@staticmethod
def action_space():
"""
Returns the number of output actions.
"""
return 8
class RecurrentOutputFormatter(OutputFormatter):
def transform_action(self, action):
return OutputFormatter.transform_action(self, action[0])
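# Added note (inferred from the slicing in transform_action; not stated in the
# original): the model is expected to emit 8 values per sample, where the
# first 5 are continuous controls passed through unchanged and the last 3 are
# thresholded at 0 to become boolean button presses. For OutputFormatter this
# means e.g. a torch tensor of shape (1, 8); RecurrentOutputFormatter expects
# a sequence whose first element has that shape.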
|
[
"rlbot.agents.base_agent.BaseAgent.convert_output_to_v4",
"numpy.concatenate"
] |
[((428, 481), 'numpy.concatenate', 'np.concatenate', (['(action[:5], action[5:] >= 0)'], {'axis': '(0)'}), '((action[:5], action[5:] >= 0), axis=0)\n', (442, 481), True, 'import numpy as np\n'), ((512, 556), 'rlbot.agents.base_agent.BaseAgent.convert_output_to_v4', 'BaseAgent.convert_output_to_v4', (['self', 'action'], {}), '(self, action)\n', (542, 556), False, 'from rlbot.agents.base_agent import SimpleControllerState, BaseAgent\n')]
|
import numpy as np
from sklearn import svm
from data_loader import data_loader
N = 100
NUM_CLASS = 4
data_dir = "C:\\Users\\wsy\\Documents\\Audio\\*.m4a"
data_X, data_Y = data_loader(data_dir)
print(len(data_X))
clf_list = []
for idx in range(NUM_CLASS):
for i, X in enumerate(data_X):
        if len(X) == 0:
continue
Y = data_Y[i][:, idx]
clf = svm.LinearSVC()
clf.fit(X, Y)
# validation
predicted = clf.predict(X[0:1, :])
print("Predict:", predicted, "\t Ground Truth:", Y[0])
clf_list.append(clf)
X = data_X[0]
test_X = X[3:4, :]
test_Y = data_Y[0][3:4, :]
test_predict = np.zeros(NUM_CLASS)
for idx in range(NUM_CLASS):
clf = clf_list[idx]
test_predict[idx] = clf.predict(test_X)
print("Predict:", test_predict, "\n Ground Truth:", test_Y)
|
[
"data_loader.data_loader",
"numpy.zeros",
"sklearn.svm.LinearSVC"
] |
[((181, 202), 'data_loader.data_loader', 'data_loader', (['data_dir'], {}), '(data_dir)\n', (192, 202), False, 'from data_loader import data_loader\n'), ((681, 700), 'numpy.zeros', 'np.zeros', (['NUM_CLASS'], {}), '(NUM_CLASS)\n', (689, 700), True, 'import numpy as np\n'), ((397, 412), 'sklearn.svm.LinearSVC', 'svm.LinearSVC', ([], {}), '()\n', (410, 412), False, 'from sklearn import svm\n')]
|
import numpy as np
#create array of weekly vaccination numbers from https://opendata-geohive.hub.arcgis.com/datasets/0101ed10351e42968535bb002f94c8c6_0.csv?outSR=%7B%22latestWkid%22%3A3857%2C%22wkid%22%3A102100%7D
a= np.array([3946,
43856,
52659,
49703,
51381,
56267,
32176,
86434,
88578,
88294,
91298,
64535,
133195,
139946,
131038,
155716,
188626,
211497,
245947,
323166,
331292,
305479,
277195,
290362,
357077,
370059,
370544,
390891,
373319,
336086,
300378,
232066,
232234,
229694,
183158,
121650,
108327,
95192,
63718,
43289,
23643,
21081,
24567,
22115,
18434,
15138,
21262,
21259,
19713,
14174,
14862,
])
#print(a.shape)
#print(np.mean(a))
#print(np.median(a))
#print(np.max(a))
#Generate min and % of adult population vaccinated per week
def uptake(value):
WkTotal = np.min(value)
WkTotalStr = "% s" % WkTotal
Str1="Minimum Uptake "
print(Str1 + WkTotalStr)
Str2="% of Population Vaccinated per Week"
print(Str2)
    for i in value:
print(i / 4000000 * 100)
uptake(a)
|
[
"numpy.array",
"numpy.min"
] |
[((219, 633), 'numpy.array', 'np.array', (['[3946, 43856, 52659, 49703, 51381, 56267, 32176, 86434, 88578, 88294, 91298,\n 64535, 133195, 139946, 131038, 155716, 188626, 211497, 245947, 323166, \n 331292, 305479, 277195, 290362, 357077, 370059, 370544, 390891, 373319,\n 336086, 300378, 232066, 232234, 229694, 183158, 121650, 108327, 95192, \n 63718, 43289, 23643, 21081, 24567, 22115, 18434, 15138, 21262, 21259, \n 19713, 14174, 14862]'], {}), '([3946, 43856, 52659, 49703, 51381, 56267, 32176, 86434, 88578, \n 88294, 91298, 64535, 133195, 139946, 131038, 155716, 188626, 211497, \n 245947, 323166, 331292, 305479, 277195, 290362, 357077, 370059, 370544,\n 390891, 373319, 336086, 300378, 232066, 232234, 229694, 183158, 121650,\n 108327, 95192, 63718, 43289, 23643, 21081, 24567, 22115, 18434, 15138, \n 21262, 21259, 19713, 14174, 14862])\n', (227, 633), True, 'import numpy as np\n'), ((786, 799), 'numpy.min', 'np.min', (['value'], {}), '(value)\n', (792, 799), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
data = pd.DataFrame(data=pd.read_csv('enjoysport.csv'))
concepts = np.array(data.iloc[:,0:-1])
print('Concepts:', concepts)
target = np.array(data.iloc[:,-1])
print('Target:', target)
def learn(concepts, target):
print("Initialization of specific_h and general_h")
specific_h = concepts[0].copy()
print('\t specific_h:', specific_h)
general_h = [["?" for i in range(len(specific_h))] for i in range(len(specific_h))]
print('\t general_h:', general_h)
for i, h in enumerate(concepts):
if target[i] == "yes":
for x in range(len(specific_h)):
if h[x]!= specific_h[x]:
specific_h[x] ='?'
general_h[x][x] ='?'
if target[i] == "no":
for x in range(len(specific_h)):
if h[x]!= specific_h[x]:
general_h[x][x] = specific_h[x]
else:
general_h[x][x] = '?'
print("\n Steps of Candidate Elimination Algorithm",i+1)
print('\t specific_h', specific_h)
print('\t general_h:', general_h)
indices = [i for i, val in enumerate(general_h) if val == ['?', '?', '?', '?', '?', '?']]
for i in indices:
general_h.remove(['?', '?', '?', '?', '?', '?'])
return specific_h, general_h
s_final, g_final = learn(concepts, target)
print("\n Final specific_h:", s_final, sep="\n")
print("\n Final general_h:", g_final, sep="\n")
|
[
"numpy.array",
"pandas.read_csv"
] |
[((111, 139), 'numpy.array', 'np.array', (['data.iloc[:, 0:-1]'], {}), '(data.iloc[:, 0:-1])\n', (119, 139), True, 'import numpy as np\n'), ((180, 206), 'numpy.array', 'np.array', (['data.iloc[:, -1]'], {}), '(data.iloc[:, -1])\n', (188, 206), True, 'import numpy as np\n'), ((67, 96), 'pandas.read_csv', 'pd.read_csv', (['"""enjoysport.csv"""'], {}), "('enjoysport.csv')\n", (78, 96), True, 'import pandas as pd\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#######################################
#-------------------------------------#
# Module: Efficient Frontier          #
#-------------------------------------#
# Created:                            #
#    20. 04. 2019                     #
# Last modified:                      #
#    23. 04. 2019                     #
#-------------------------------------#
# Author: <NAME>                      #
#-------------------------------------#
#-------------------------------------#
#-------------------------------------#
#-------------------------------------#
#######################################
import pandas as pd
import numpy as np
import seaborn as sns
sns.set(font_scale=1.5)
import datetime as dt
import matplotlib.pylab as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
import os
import pywt
from statsmodels.robust import mad
import statsmodels.formula.api as sm
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
import scipy.optimize as sco
#########################################
#---------------------------------------#
# Aux. functions
#---------------------------------------#
#########################################
def alpha_0( Num_Days ):
return 2./(Num_Days + 1.0)
def STDerror( m, b, sdata ):
time = [t for t in range(0,len(sdata))]
x = [(m*t + b) for t in time]
mt = np.mean(time)
num_slope = 0
den_slope = 0
for i in range(0,len(sdata)):
num_slope += (sdata[i] - x[i])**2
den_slope += (i - mt)**2
num_slope = np.sqrt(num_slope/(len(sdata)-2))
den1= np.sqrt(den_slope)
den2 = np.sqrt(len(sdata)*den_slope)
return [num_slope/den1, num_slope/den2]
def Slope(x1, y1, x2, y2):
slope = (y2-y1)/(x2-x1)
return slope
def YInt(x1, y1, x2, y2):
m = Slope(x1, y1, x2, y2)
return y1 - m*x1
#########################################
# END: Aux. functions #
#########################################
#########################################
#---------------------------------------#
# getData #
#---------------------------------------#
#########################################
class getData:
def __init__( self, file ):
self.file = file
# ----- #
df = pd.read_csv(self.file, index_col = 0)
df = self.index_to_datetime(df)
self.n = 22 # Days to ATR
# ----- #
self.timeseries = df
self.truerange = self.truerange()
self.atr = self.atr()
self.atr_return = self.atr_return()
self.cum_sum = self.cum_sum()
self.dataframe = self.dataframe()
def index_to_datetime( self, df ):
df.index = df.index.astype('str')
df.index = df.index.to_datetime()
return df
def truerange( self ):
adf = self.timeseries
s1 = pd.Series(np.abs(adf.DHigh - adf.DLow))
s2 = pd.Series(np.abs(adf.DHigh - adf.DClose.shift()))
s3 = pd.Series(np.abs(adf.DLow - adf.DClose.shift()))
TR = pd.Series(pd.concat([s1,s2,s3],axis=1).max(axis=1), name = 'TrueRange')
return TR
def atr( self ):
n = self.n
TR = self.truerange
ATR = pd.Series(pd.ewma(TR, span = n, min_periods = n), name = 'ATR_{}'.format(n))
return ATR
def atr_return( self ):
tday = self.timeseries.DClose
yday = self.timeseries.DClose.shift()
atryday = self.atr.shift()
atr_ret = (tday - yday) / atryday
atr_ret = atr_ret.rename('ATR_RET')
return atr_ret
def cum_sum( self ):
atr_ret = self.atr_return
cum_sum = atr_ret.cumsum(axis = 0)
cum_sum = cum_sum.rename('PATR')
return cum_sum
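	# Added reference comments (summarizing the methods above; n = 22 days):
	#   TrueRange_t = max(High_t - Low_t, |High_t - Close_{t-1}|, |Low_t - Close_{t-1}|)
	#   ATR_t       = exponentially weighted mean of TrueRange with span n
	#   ATR_RET_t   = (Close_t - Close_{t-1}) / ATR_{t-1}
	#   PATR_t      = cumulative sum of ATR_RET (the series used for the regressions below)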
def dataframe( self ):
cols = ['DOpen', 'DHigh', 'DLow', 'DClose', 'TrueRange', 'ATR_{}'.format(22)]
cols += ['ATR_RET', 'PATR']
adf = self.timeseries.join([self.truerange,self.atr,self.atr_return,self.cum_sum])
adf = adf[cols]
return adf
def plot( self, Series, *args):
fig, ax = plt.subplots(1,figsize=(10, 7))
ser = self.dataframe[Series]
ser.plot()
plt.xlabel('Year')
plt.ylabel(Series)
if len(args) != 0:
plt.title(args[0])
plt.show()
#########################################
# END: getData #
#########################################
#########################################
#---------------------------------------#
# Regression #
#---------------------------------------#
#########################################
class Regression:
def __init__( self, data ):
self.time = range(0,len(data))
self.data = data
self.simple = self.SimpleRegression(self.time, self.data)
def Results( self ):
txts = 'Simple:\n\tSlope: {0:.5f}\tIntercept: {1:.5f}\n'.format(self.simple.slope, self.simple.intercept)
txts += '\tSSE: {0:.5f}\tISE: {1:.5f}\n\t'.format(self.simple.sse, self.simple.ise)
print ( txts )
class SimpleRegression:
def __init__(self, time, data):
X = data
y = [t for t in range(0,len(data))]
df = pd.concat([pd.Series(y,index=X.index,name='time'),X],axis=1)
model = sm.ols(formula='time ~ PATR', data=df)
result = model.fit()
self.slope = result.params[1]
self.intercept = result.params[0]
self.sse = STDerror(self.slope, self.intercept, data)[0] # Compared to the initial data
self.ise = STDerror(self.slope, self.intercept, data)[1] # Compared to the initial data
#########################################
# END: Regression #
#########################################
#########################################
#---------------------------------------#
# RegressionML #
#---------------------------------------#
#########################################
class RegressionML:
def __init__( self, data ):
self.time = range(0,len(data))
self.data = data
self.model = linear_model.LinearRegression()
self.simple = self.SimpleRegression(self.model, self.time, self.data)
def Results( self ):
txts = 'Simple Regression:\n\tSlope: {0:.5f}\tIntercept: {1:.5f}\n'.format(self.simple.slope, self.simple.intercept)
txts += '\tSSE: {0:.5f}\tISE: {1:.5f}\n\t'.format(self.simple.sse, self.simple.ise)
print ( txts )
def Plot( self, *args ):
fig, ax1 = plt.subplots(1,figsize=(10, 7))
ax1.plot(self.data,linestyle='-.',color='g',label='ATR Return (cumsum)')
ti = self.data.index[0]
tf = self.data.index[-1]
if len(args) == 0:
plt.xticks(rotation=30)
plt.legend()
plt.show()
else:
if args[0] == 's':
yi = self.simple.intercept
yf = self.simple.slope*(len(self.data)) + self.simple.intercept
ax1.plot([ti,tf],[yi,yf],color='r', label = 'Simple Regression')
plt.xticks(rotation=30)
plt.legend()
plt.show()
class SimpleRegression:
def __init__(self, model, time, data):
t = time
x = data
X_train, X_test, y_train, y_test = train_test_split(t, x, test_size=0., random_state=1)
X_train = [[i] for i in X_train]
model.fit(X_train,y_train)
self.slope = model.coef_[0]
self.intercept = model.intercept_
self.sse = STDerror(self.slope, self.intercept, data)[0] # Compared to the initial data
self.ise = STDerror(self.slope, self.intercept, data)[1] # Compared to the initial data
#########################################
# END: RegressionML #
#########################################
#######################################
#-------------------------------------#
# Portfolio #
#-------------------------------------#
#######################################
def portfolio( weights, mean_ret, cov_mat, riskfreerate):
mu = mean_ret.dot(weights)*250
sigma = np.sqrt(weights.dot(cov_mat.dot(weights)))*np.sqrt(250)
sharpe = (mu-riskfreerate)/sigma
return mu, sigma, sharpe # Expected value, Volatility, Sharpe ratio
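# Added reference comment (not in the original): with daily data the function
# above annualizes using ~250 trading days, i.e.
#   mu     = 250 * w . mean_ret
#   sigma  = sqrt(250) * sqrt(w^T . cov_mat . w)
#   sharpe = (mu - riskfreerate) / sigma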
def getReturn( weights, mean_ret, cov_mat, riskfreerate):
return portfolio(weights,mean_ret,cov_mat,riskfreerate)[0]
def getVolatility( weights, mean_ret, cov_mat, riskfreerate):
return portfolio(weights,mean_ret,cov_mat,riskfreerate)[1]
def negSharpeRatio( weights, mean_ret, cov_mat, riskfreerate):
return -portfolio(weights,mean_ret,cov_mat,riskfreerate)[2]
def random_weights(n):
k = np.random.random(n)
return k / sum(k)
#######################################
#-------------------------------------#
#######################################
#######################################
#-------------------------------------#
# Simulation #
#-------------------------------------#
#######################################
class simulation:
def __init__( self, stocks, data, riskfreerate, n_portfolios ):
self.stocks = stocks
self.rfr = riskfreerate
self.data = data
self.n_portfolios = n_portfolios
self.returns = data.pct_change()[1:]
self.mean_ret = self.returns.mean()
self.cov_mat = self.returns.cov()
self.simulation = self.do_simulation()
self.results = self.simulation[0]
self.max_sharpe_portfolio = self.simulation[1]
self.min_volatility_portfolio = self.simulation[2]
def do_simulation( self ):
means,stdvs,shrps,weights = [],[],[],[]
for i in range(self.n_portfolios):
w = random_weights(len(self.stocks))
p = portfolio(w,self.mean_ret,self.cov_mat,self.rfr)
means.append(p[0])
stdvs.append(p[1])
shrps.append(p[2])
weights.append(w)
# Convert to DataFrame
wght = {}
for i in range(len(self.stocks)):
wght[self.stocks[i]] = [j[i] for j in weights]
aux = {'Returns': means,'Volatility': stdvs,'Sharpe Ratio': shrps}
results = {**wght, **aux}
df = pd.DataFrame.from_dict(results)
max_sha_port = df.iloc[df['Sharpe Ratio'].idxmax()]
min_vol_port = df.iloc[df['Volatility'].idxmin()]
return df, max_sha_port, min_vol_port
def plot( self ):
df = self.simulation[0]
max_sh = self.simulation[1]
min_vol= self.simulation[2]
# Scatter plot colored by Sharpe Ratio
plt.style.use('seaborn-dark')
fig, ax = plt.subplots(figsize=(10,7))
df.plot(ax= ax, kind='scatter',x='Volatility', y='Returns', c='Sharpe Ratio', cmap='RdYlGn', edgecolors='black', grid=True, label = 'MC Simulation')
# Maximum Sharpe Ratio
ax.scatter(x=max_sh['Volatility'],y=max_sh['Returns'],marker='D',c='r',s=100,label='Maximum Sharpe Ratio')
# Minimum variance
ax.scatter(x=min_vol['Volatility'],y=min_vol['Returns'],marker='D',c='b',s=100,label='Minimum Volatility')
plt.legend()
ax.set_xlabel('Volatility (Std. Deviation)', fontsize=15)
ax.set_ylabel('Expected Returns', fontsize=15)
ax.set_title('Efficient Frontier', fontsize=22)
plt.show()
def print( self ):
max_sh = self.simulation[1]
min_vol= self.simulation[2]
print('Maximum Sharpe Ratio:\n{}'.format(
max_sh[max_sh.index.tolist()[0:len(self.stocks)]].to_frame(name='Weights').T))
print('{}'.format(max_sh[max_sh.index.tolist()[len(self.stocks):]].to_frame(name='Results').T))
print('\nMinimum Volatility:\n{}'.format(
min_vol[min_vol.index.tolist()[0:len(self.stocks)]].to_frame(name='Weights').T))
print('{}'.format(min_vol[min_vol.index.tolist()[len(self.stocks):]].to_frame(name='Results').T))
#######################################
# END: Simulation #
#######################################
#######################################
#-------------------------------------#
# Theory #
#-------------------------------------#
#######################################
def MaxSharpeRatio(meanReturns, covMatrix, riskFreeRate):
numAssets = len(meanReturns)
args = (meanReturns, covMatrix, riskFreeRate)
constraints = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1})
bounds = tuple( (0,1) for asset in range(numAssets))
opts = sco.minimize(negSharpeRatio, numAssets*[1./numAssets,], args=args, method='SLSQP', bounds=bounds, constraints=constraints)
return opts
def MinVolatility(meanReturns, covMatrix, riskFreeRate):
numAssets = len(meanReturns)
args = (meanReturns, covMatrix, riskFreeRate)
constraints = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1})
bounds = tuple( (0,1) for asset in range(numAssets))
opts = sco.minimize(getVolatility, numAssets*[1./numAssets,], args=args, method='SLSQP', bounds=bounds, constraints=constraints)
return opts
def EfficientReturn(meanReturns, covMatrix, riskFreeRate, targetReturn):
numAssets = len(meanReturns)
args = (meanReturns, covMatrix, riskFreeRate)
def getPortfolioReturn(weights):
return portfolio(weights, meanReturns, covMatrix, riskFreeRate)[0]
constraints = ({'type': 'eq', 'fun': lambda x: getPortfolioReturn(x) - targetReturn},
{'type': 'eq', 'fun': lambda x: np.sum(x) - 1})
bounds = tuple((0,1) for asset in range(numAssets))
opts = sco.minimize(getVolatility, numAssets*[1./numAssets,], args=args, method='SLSQP', bounds=bounds, constraints=constraints)
return opts
def EfficientFrontier(meanReturns, covMatrix, riskFreeRate, rangeOfReturns):
efficientPortfolios = []
for ret in rangeOfReturns:
efficientPortfolios.append(EfficientReturn(meanReturns, covMatrix, riskFreeRate, ret))
return efficientPortfolios
class theory:
def __init__( self, stocks, data, riskfreerate, n_portfolios ):
self.stocks = stocks
self.rfr = riskfreerate
self.data = data
self.n_portfolios = n_portfolios
self.returns = data.pct_change()[1:]
self.mean_ret = self.returns.mean()
self.cov_mat = self.returns.cov()
self.theory = self.do_theory()
self.results = self.theory[0]
self.max_sharpe_portfolio = self.theory[1]
self.min_volatility_portfolio = self.theory[2]
def do_theory( self ):
target = np.linspace(0.01, 0.30, self.n_portfolios)
eff_front = EfficientFrontier(self.mean_ret, self.cov_mat, self.rfr, target)
x = np.array([p['fun'] for p in eff_front])
df = pd.DataFrame({'Volatility':x, 'Returns':target})
# Create max_sharpe_port
max_sh = MaxSharpeRatio(self.mean_ret, self.cov_mat, self.rfr)['x']
x = dict(zip(self.stocks,max_sh))
port_max = portfolio(max_sh,self.mean_ret, self.cov_mat, self.rfr)
y = {'Returns' : port_max[0], 'Sharpe Ratio' : port_max[2], 'Volatility' : port_max[1]}
z = {**x, **y}
max_sharpe_port = pd.Series(z)
# Create min_vol_port
min_vo = MinVolatility(self.mean_ret, self.cov_mat, self.rfr)['x']
x_vo = dict(zip(self.stocks,min_vo))
port_min = portfolio(min_vo,self.mean_ret, self.cov_mat, self.rfr)
y_vo = {'Returns' : port_min[0], 'Sharpe Ratio' : port_min[2], 'Volatility' : port_min[1]}
z_vo = {**x_vo, **y_vo}
min_vol_port = pd.Series(z_vo)
return df, max_sharpe_port, min_vol_port
def plot( self ):
df = self.theory[0]
df = df.loc[df['Volatility'] < np.round(df['Volatility'].iloc[-1],7)]
max_sh = self.theory[1]
min_vol= self.theory[2]
# Scatter plot colored by Sharpe Ratio
plt.style.use('seaborn-dark')
fig, ax = plt.subplots(figsize=(10,7))
df.plot(ax= ax, kind='scatter',x='Volatility', y='Returns',edgecolors='black', grid=True, label = 'Theory')
# Maximum Sharpe Ratio
ax.scatter(x=max_sh['Volatility'],y=max_sh['Returns'],marker='o',c='r',s=100,label='Maximum Sharpe Ratio')
# Minimum variance
ax.scatter(x=min_vol['Volatility'],y=min_vol['Returns'],marker='o',c='b',s=100,label='Minimum Volatility')
plt.legend()
ax.set_xlabel('Volatility (Std. Deviation)', fontsize=15)
ax.set_ylabel('Expected Returns', fontsize=15)
ax.set_title('Efficient Frontier', fontsize=22)
plt.show()
def print( self ):
max_sh = self.theory[1]
min_vol= self.theory[2]
print('Maximum Sharpe Ratio:\n{}'.format(
max_sh[max_sh.index.tolist()[0:len(self.stocks)]].to_frame(name='Weights').T))
print('{}'.format(max_sh[max_sh.index.tolist()[len(self.stocks):]].to_frame(name='Results').T))
print('\nMinimum Volatility:\n{}'.format(
min_vol[min_vol.index.tolist()[0:len(self.stocks)]].to_frame(name='Weights').T))
print('{}'.format(min_vol[min_vol.index.tolist()[len(self.stocks):]].to_frame(name='Results').T))
#######################################
# END: Theory #
#######################################
#-------------------------------------#
# Plot All #
#-------------------------------------#
def plot_all( simulation, theory ):
# Scatter plot colored by Sharpe Ratio
plt.style.use('seaborn-dark')
fig, ax = plt.subplots(figsize=(10,7))
# Simulation
df = simulation.results
max_sh = simulation.max_sharpe_portfolio
min_vol= simulation.min_volatility_portfolio
df.plot(ax= ax, kind='scatter',x='Volatility', y='Returns', c='Sharpe Ratio', cmap='RdYlGn', edgecolors='black', grid=True, label = 'MC Simulation',alpha=0.5)
# Maximum Sharpe Ratio
ax.scatter(x=max_sh['Volatility'],y=max_sh['Returns'],marker='D',c='r',s=100,label='Maximum Sharpe Ratio (MC)')
# Minimum variance
ax.scatter(x=min_vol['Volatility'],y=min_vol['Returns'],marker='D',c='b',s=100,label='Minimum Volatility (MC)')
# Theory
df = theory.results
df = df.loc[df['Volatility'] < np.round(df['Volatility'].iloc[-1],7)]
max_sh = theory.max_sharpe_portfolio
min_vol= theory.min_volatility_portfolio
df.plot(ax= ax, kind='scatter',x='Volatility', y='Returns',edgecolors='black', label = 'Theory', grid=True)
# Maximum Sharpe Ratio
ax.scatter(x=max_sh['Volatility'],y=max_sh['Returns'],marker='o',c='r',s=100,label='Maximum Sharpe Ratio (theory)',alpha=0.5)
# Minimum variance
ax.scatter(x=min_vol['Volatility'],y=min_vol['Returns'],marker='o',c='b',s=100,label='Minimum Volatility (theory)',alpha=0.5)
plt.legend(loc=7)
ax.set_xlabel('Volatility (Std. Deviation)', fontsize=15)
ax.set_ylabel('Expected Returns', fontsize=15)
ax.set_title('Efficient Frontier', fontsize=22)
plt.show()
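if __name__ == '__main__':
    # Added usage sketch, not part of the original module: the tickers and the
    # synthetic random-walk prices below are purely illustrative stand-ins for
    # a real price DataFrame with one column per asset.
    np.random.seed(0)
    tickers = ['AAA', 'BBB', 'CCC']
    prices = pd.DataFrame(
        100 * np.exp(np.cumsum(np.random.normal(0.0005, 0.01, (500, 3)), axis=0)),
        columns=tickers)
    sim = simulation(tickers, prices, riskfreerate=0.01, n_portfolios=2000)
    sim.print()
    th = theory(tickers, prices, riskfreerate=0.01, n_portfolios=30)
    th.print()
    plot_all(sim, th)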
|
[
"matplotlib.pylab.xticks",
"matplotlib.pylab.subplots",
"numpy.sqrt",
"pandas.read_csv",
"numpy.array",
"matplotlib.pylab.show",
"pandas.ewma",
"numpy.mean",
"seaborn.set",
"numpy.random.random",
"matplotlib.pylab.legend",
"matplotlib.pylab.title",
"pandas.DataFrame.from_dict",
"numpy.linspace",
"matplotlib.pylab.style.use",
"pandas.DataFrame",
"warnings.simplefilter",
"numpy.round",
"numpy.abs",
"sklearn.model_selection.train_test_split",
"scipy.optimize.minimize",
"matplotlib.pylab.xlabel",
"sklearn.linear_model.LinearRegression",
"warnings.filterwarnings",
"pandas.Series",
"numpy.sum",
"statsmodels.formula.api.ols",
"pandas.concat",
"matplotlib.pylab.ylabel"
] |
[((695, 718), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1.5)'}), '(font_scale=1.5)\n', (702, 718), True, 'import seaborn as sns\n'), ((968, 1030), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (989, 1030), False, 'import warnings\n'), ((1031, 1119), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""', 'module': '"""scipy"""', 'message': '"""^internal gelsd"""'}), "(action='ignore', module='scipy', message=\n '^internal gelsd')\n", (1054, 1119), False, 'import warnings\n'), ((1498, 1511), 'numpy.mean', 'np.mean', (['time'], {}), '(time)\n', (1505, 1511), True, 'import numpy as np\n'), ((1692, 1710), 'numpy.sqrt', 'np.sqrt', (['den_slope'], {}), '(den_slope)\n', (1699, 1710), True, 'import numpy as np\n'), ((8040, 8059), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (8056, 8059), True, 'import numpy as np\n'), ((11494, 11624), 'scipy.optimize.minimize', 'sco.minimize', (['negSharpeRatio', '(numAssets * [1.0 / numAssets])'], {'args': 'args', 'method': '"""SLSQP"""', 'bounds': 'bounds', 'constraints': 'constraints'}), "(negSharpeRatio, numAssets * [1.0 / numAssets], args=args,\n method='SLSQP', bounds=bounds, constraints=constraints)\n", (11506, 11624), True, 'import scipy.optimize as sco\n'), ((11891, 12020), 'scipy.optimize.minimize', 'sco.minimize', (['getVolatility', '(numAssets * [1.0 / numAssets])'], {'args': 'args', 'method': '"""SLSQP"""', 'bounds': 'bounds', 'constraints': 'constraints'}), "(getVolatility, numAssets * [1.0 / numAssets], args=args,\n method='SLSQP', bounds=bounds, constraints=constraints)\n", (11903, 12020), True, 'import scipy.optimize as sco\n'), ((12481, 12610), 'scipy.optimize.minimize', 'sco.minimize', (['getVolatility', '(numAssets * [1.0 / numAssets])'], {'args': 'args', 'method': '"""SLSQP"""', 'bounds': 'bounds', 'constraints': 'constraints'}), "(getVolatility, numAssets * [1.0 / numAssets], args=args,\n method='SLSQP', bounds=bounds, constraints=constraints)\n", (12493, 12610), True, 'import scipy.optimize as sco\n'), ((16010, 16039), 'matplotlib.pylab.style.use', 'plt.style.use', (['"""seaborn-dark"""'], {}), "('seaborn-dark')\n", (16023, 16039), True, 'import matplotlib.pylab as plt\n'), ((16051, 16080), 'matplotlib.pylab.subplots', 'plt.subplots', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (16063, 16080), True, 'import matplotlib.pylab as plt\n'), ((17227, 17244), 'matplotlib.pylab.legend', 'plt.legend', ([], {'loc': '(7)'}), '(loc=7)\n', (17237, 17244), True, 'import matplotlib.pylab as plt\n'), ((17402, 17412), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (17410, 17412), True, 'import matplotlib.pylab as plt\n'), ((2332, 2367), 'pandas.read_csv', 'pd.read_csv', (['self.file'], {'index_col': '(0)'}), '(self.file, index_col=0)\n', (2343, 2367), True, 'import pandas as pd\n'), ((3878, 3910), 'matplotlib.pylab.subplots', 'plt.subplots', (['(1)'], {'figsize': '(10, 7)'}), '(1, figsize=(10, 7))\n', (3890, 3910), True, 'import matplotlib.pylab as plt\n'), ((3956, 3974), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""Year"""'], {}), "('Year')\n", (3966, 3974), True, 'import matplotlib.pylab as plt\n'), ((3977, 3995), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['Series'], {}), '(Series)\n', (3987, 3995), True, 'import matplotlib.pylab as plt\n'), ((4041, 4051), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (4049, 4051), True, 'import matplotlib.pylab as plt\n'), 
((5684, 5715), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (5713, 5715), False, 'from sklearn import linear_model\n'), ((6073, 6105), 'matplotlib.pylab.subplots', 'plt.subplots', (['(1)'], {'figsize': '(10, 7)'}), '(1, figsize=(10, 7))\n', (6085, 6105), True, 'import matplotlib.pylab as plt\n'), ((7510, 7522), 'numpy.sqrt', 'np.sqrt', (['(250)'], {}), '(250)\n', (7517, 7522), True, 'import numpy as np\n'), ((9407, 9438), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['results'], {}), '(results)\n', (9429, 9438), True, 'import pandas as pd\n'), ((9737, 9766), 'matplotlib.pylab.style.use', 'plt.style.use', (['"""seaborn-dark"""'], {}), "('seaborn-dark')\n", (9750, 9766), True, 'import matplotlib.pylab as plt\n'), ((9779, 9808), 'matplotlib.pylab.subplots', 'plt.subplots', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (9791, 9808), True, 'import matplotlib.pylab as plt\n'), ((10225, 10237), 'matplotlib.pylab.legend', 'plt.legend', ([], {}), '()\n', (10235, 10237), True, 'import matplotlib.pylab as plt\n'), ((10399, 10409), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (10407, 10409), True, 'import matplotlib.pylab as plt\n'), ((13389, 13430), 'numpy.linspace', 'np.linspace', (['(0.01)', '(0.3)', 'self.n_portfolios'], {}), '(0.01, 0.3, self.n_portfolios)\n', (13400, 13430), True, 'import numpy as np\n'), ((13517, 13556), 'numpy.array', 'np.array', (["[p['fun'] for p in eff_front]"], {}), "([p['fun'] for p in eff_front])\n", (13525, 13556), True, 'import numpy as np\n'), ((13564, 13614), 'pandas.DataFrame', 'pd.DataFrame', (["{'Volatility': x, 'Returns': target}"], {}), "({'Volatility': x, 'Returns': target})\n", (13576, 13614), True, 'import pandas as pd\n'), ((13942, 13954), 'pandas.Series', 'pd.Series', (['z'], {}), '(z)\n', (13951, 13954), True, 'import pandas as pd\n'), ((14292, 14307), 'pandas.Series', 'pd.Series', (['z_vo'], {}), '(z_vo)\n', (14301, 14307), True, 'import pandas as pd\n'), ((14560, 14589), 'matplotlib.pylab.style.use', 'plt.style.use', (['"""seaborn-dark"""'], {}), "('seaborn-dark')\n", (14573, 14589), True, 'import matplotlib.pylab as plt\n'), ((14602, 14631), 'matplotlib.pylab.subplots', 'plt.subplots', ([], {'figsize': '(10, 7)'}), '(figsize=(10, 7))\n', (14614, 14631), True, 'import matplotlib.pylab as plt\n'), ((15007, 15019), 'matplotlib.pylab.legend', 'plt.legend', ([], {}), '()\n', (15017, 15019), True, 'import matplotlib.pylab as plt\n'), ((15181, 15191), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (15189, 15191), True, 'import matplotlib.pylab as plt\n'), ((2829, 2857), 'numpy.abs', 'np.abs', (['(adf.DHigh - adf.DLow)'], {}), '(adf.DHigh - adf.DLow)\n', (2835, 2857), True, 'import numpy as np\n'), ((3136, 3170), 'pandas.ewma', 'pd.ewma', (['TR'], {'span': 'n', 'min_periods': 'n'}), '(TR, span=n, min_periods=n)\n', (3143, 3170), True, 'import pandas as pd\n'), ((4020, 4038), 'matplotlib.pylab.title', 'plt.title', (['args[0]'], {}), '(args[0])\n', (4029, 4038), True, 'import matplotlib.pylab as plt\n'), ((4941, 4979), 'statsmodels.formula.api.ols', 'sm.ols', ([], {'formula': '"""time ~ PATR"""', 'data': 'df'}), "(formula='time ~ PATR', data=df)\n", (4947, 4979), True, 'import statsmodels.formula.api as sm\n'), ((6257, 6280), 'matplotlib.pylab.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (6267, 6280), True, 'import matplotlib.pylab as plt\n'), ((6284, 6296), 'matplotlib.pylab.legend', 'plt.legend', ([], {}), '()\n', (6294, 6296), True, 'import 
matplotlib.pylab as plt\n'), ((6300, 6310), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (6308, 6310), True, 'import matplotlib.pylab as plt\n'), ((6698, 6751), 'sklearn.model_selection.train_test_split', 'train_test_split', (['t', 'x'], {'test_size': '(0.0)', 'random_state': '(1)'}), '(t, x, test_size=0.0, random_state=1)\n', (6714, 6751), False, 'from sklearn.model_selection import train_test_split\n'), ((16700, 16738), 'numpy.round', 'np.round', (["df['Volatility'].iloc[-1]", '(7)'], {}), "(df['Volatility'].iloc[-1], 7)\n", (16708, 16738), True, 'import numpy as np\n'), ((6513, 6536), 'matplotlib.pylab.xticks', 'plt.xticks', ([], {'rotation': '(30)'}), '(rotation=30)\n', (6523, 6536), True, 'import matplotlib.pylab as plt\n'), ((6541, 6553), 'matplotlib.pylab.legend', 'plt.legend', ([], {}), '()\n', (6551, 6553), True, 'import matplotlib.pylab as plt\n'), ((6558, 6568), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (6566, 6568), True, 'import matplotlib.pylab as plt\n'), ((11416, 11425), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (11422, 11425), True, 'import numpy as np\n'), ((11813, 11822), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (11819, 11822), True, 'import numpy as np\n'), ((14426, 14464), 'numpy.round', 'np.round', (["df['Volatility'].iloc[-1]", '(7)'], {}), "(df['Volatility'].iloc[-1], 7)\n", (14434, 14464), True, 'import numpy as np\n'), ((2990, 3021), 'pandas.concat', 'pd.concat', (['[s1, s2, s3]'], {'axis': '(1)'}), '([s1, s2, s3], axis=1)\n', (2999, 3021), True, 'import pandas as pd\n'), ((4880, 4920), 'pandas.Series', 'pd.Series', (['y'], {'index': 'X.index', 'name': '"""time"""'}), "(y, index=X.index, name='time')\n", (4889, 4920), True, 'import pandas as pd\n'), ((12404, 12413), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (12410, 12413), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import norm as lpnorm
if __name__ == "__main__":
N = 1000 # Precision
p = 0.5 # p-norm
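    # Note: for p < 1 the p-"norm" is not a true norm (the triangle inequality
    # fails) and the plotted unit "circle" is star-shaped; p = 2 gives the
    # ordinary circle and p = 1 a diamond.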
# Discretize unit-circle
angles = np.linspace(0, 2*np.pi, N)
# Create unit-circle points
points = np.stack((np.cos(angles), np.sin(angles)), 1)
# Normalize them with p-norm
points = (points.T / np.array([lpnorm(point, p) for point in points])).T
# Plot
plt.plot(points[:, 0], points[:, 1], linestyle='-')
plt.gca().set_aspect('equal', adjustable='box')
plt.gca().set_title('Unit Circle: p = ' + str(p))
plt.show()
|
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.plot",
"numpy.linspace",
"numpy.cos",
"scipy.linalg.norm",
"numpy.sin",
"matplotlib.pyplot.show"
] |
[((215, 243), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'N'], {}), '(0, 2 * np.pi, N)\n', (226, 243), True, 'import numpy as np\n'), ((460, 511), 'matplotlib.pyplot.plot', 'plt.plot', (['points[:, 0]', 'points[:, 1]'], {'linestyle': '"""-"""'}), "(points[:, 0], points[:, 1], linestyle='-')\n", (468, 511), True, 'import matplotlib.pyplot as plt\n'), ((622, 632), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (630, 632), True, 'import matplotlib.pyplot as plt\n'), ((297, 311), 'numpy.cos', 'np.cos', (['angles'], {}), '(angles)\n', (303, 311), True, 'import numpy as np\n'), ((313, 327), 'numpy.sin', 'np.sin', (['angles'], {}), '(angles)\n', (319, 327), True, 'import numpy as np\n'), ((516, 525), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (523, 525), True, 'import matplotlib.pyplot as plt\n'), ((568, 577), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (575, 577), True, 'import matplotlib.pyplot as plt\n'), ((402, 418), 'scipy.linalg.norm', 'lpnorm', (['point', 'p'], {}), '(point, p)\n', (408, 418), True, 'from scipy.linalg import norm as lpnorm\n')]
|
import numpy as np
from .. import T
from ..layer import ShapedLayer
from ..initialization import initialize_weights
from .full import Linear
from .. import stats
__all__ = ['Gaussian', 'Bernoulli', 'IdentityVariance']
class Gaussian(Linear):
def __init__(self, *args, **kwargs):
self.cov_type = kwargs.pop('cov_type', 'diagonal')
self.min_stdev = kwargs.pop('min_stdev', 1e-2)
super(Gaussian, self).__init__(*args, **kwargs)
assert not self.elementwise
def initialize(self):
if not self.elementwise:
dim_in, dim_out = self.get_dim_in()[-1], self.get_dim_out()[-1]
left = initialize_weights(self.initialization, [dim_in, dim_out // 2])
right = T.zeros([dim_in, dim_out // 2])
self.create_parameter('W', [dim_in, dim_out], initial_value=(
T.concatenate([
right, left
], -1)
))
self.create_parameter('b', [dim_out], initial_value=np.zeros([dim_out]))
def get_dim_out(self):
return [self.dim_out[0] * 2]
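    # The layer emits 2 * dim_out features: activate() splits them into a diagonal
    # scale (first half, passed through softplus) and a mean (second half).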
def activate(self, X):
if self.cov_type == 'diagonal':
scale_diag, mu = T.split(X, 2, axis=-1)
if hasattr(self, 'min_stdev'):
scale_diag = T.softplus(scale_diag) + self.min_stdev
else:
scale_diag = T.softplus(scale_diag) + 1e-5
return stats.GaussianScaleDiag([scale_diag, mu], parameter_type='regular')
raise Exception("Undefined covariance type: %s" % self.cov_type)
def __str__(self):
return "Gaussian(%s)" % self.dim_out
class Bernoulli(Linear):
def __init__(self, *args, **kwargs):
self.parameter_type = kwargs.pop('parameter_type', 'natural')
super(Bernoulli, self).__init__(*args, **kwargs)
def activate(self, X):
if self.elementwise:
return stats.Bernoulli(X, parameter_type=self.parameter_type)
return stats.Bernoulli(X, parameter_type=self.parameter_type)
def __str__(self):
return "Bernoulli(%s)" % self.dim_out
class IdentityVariance(ShapedLayer):
def __init__(self, variance=1e-4, *args, **kwargs):
self.variance = variance
super(IdentityVariance, self).__init__(*args, **kwargs)
def initialize(self):
pass
def get_parameters(self):
return []
def infer_shape(self, shape):
if shape is None: return
if self.elementwise:
self.dim_in = shape
self.dim_out = shape
return
if self.dim_in is None:
self.dim_in = shape
def forward(self, X):
return stats.GaussianScaleDiag([np.sqrt(self.variance) * T.ones_like(X), X])
|
[
"numpy.zeros",
"numpy.sqrt"
] |
[((1005, 1024), 'numpy.zeros', 'np.zeros', (['[dim_out]'], {}), '([dim_out])\n', (1013, 1024), True, 'import numpy as np\n'), ((2690, 2712), 'numpy.sqrt', 'np.sqrt', (['self.variance'], {}), '(self.variance)\n', (2697, 2712), True, 'import numpy as np\n')]
|
import numpy as np
#DEFINE INNER FUNCTIONS
def inv_log_func(x, a, b):
return ((a * starting_score) / (2 + np.log(b * x)))
def bump_func(x,e):
return (e * np.sin(x - np.pi / 2)) + e
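# bump_func(x, e) simplifies to e * (1 - cos(x)): a smooth bump that rises from 0
# at x = 0 to 2e at x = pi and returns to 0 at 2*pi; sin_vals samples it over one
# full period so it can be added onto a segment of a curve.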
def sin_vals(ampl,steps):
if (steps < 1): steps = 1
sin_step = (np.pi * 2.0) / steps
x_range = np.arange(0,np.pi * 2.0 + 0.1,sin_step)
sin_vals = [bump_func(x,ampl) for x in x_range]
return sin_vals
def make_line_trio_data(ami,a,ama,bmi,b,bma):
min_data = [int(inv_log_func(x,ami,bmi)) if x <= 360 else int(inv_log_func(x,ami,bmi) + 0.003 * x) for x in data_range]
mid_data = [int(inv_log_func(x,a,b)) if x < 120 else int(inv_log_func(x,a,b) - 0.017 * x) for x in data_range]
max_data = [int(inv_log_func(x,ama,bma) - 0.029 * x) for x in data_range]
return min_data,mid_data,max_data
def add_bump_func(vals,ampl,frm,to):
bump_vals = sin_vals(ampl,to - frm)
sini = 0
for i in range(frm,to):
sini += 1
vals[i] += bump_vals[sini] * 0.5
vals[i] = int(vals[i])
def add_bump_trio(ampl,xmi,x,xma,frm,to):
add_bump_func(xmi,ampl,frm,to)
add_bump_func(x,ampl,frm,to)
add_bump_func(xma,ampl,frm,to)
def make_trio(a,b,c,d,ampl):
mini,mid,maxi = make_line_trio_data(a[0],a[1],a[2],b[0],b[1],b[2])
bump_start = int(len(mid) * c)
bump_end = len(mid) - d
add_bump_trio(ampl,mini,mid,maxi,bump_start,bump_end)
return [a[0]] + mini,[a[1]] + mid,[a[2]] + maxi
def get_az():
ami = np.random.uniform(low=0.39, high=0.7)
amd = np.random.uniform(low=0.71, high=0.98)
ama = np.random.uniform(low=0.985, high=1.205)
return [ami,amd,ama]
def get_bz():
bmi = b * np.random.uniform(low=0.9, high=0.95)
bmd = b * np.random.uniform(low=0.96, high=1.1)
bma = b * np.random.uniform(low=1.15, high=1.67)
return [bmi,bmd,bma]
def make_trios(count):
all_lines = []
az = [a_min,a,a_max]
bz = [b_min,b,b_max]
c = 0.62
d = 6
e = 10
for i in range(count):
all_lines.extend(make_trio(az,bz,c,d,e))
az = get_az()
bz = get_bz()
c = np.random.uniform(low=0.21, high=0.9)
d = int(np.random.uniform(low=1, high=30))
e = int(np.random.uniform(low=5, high=13))
return all_lines
#DEFINE STARTING VALUES
starting_score = 342.5
a = 1.0
b = 0.025
a_max = 1.2
b_max = b * 1.15
a_min = 0.8
b_min = b * 0.85
e = 10
line_count = 150
range_start = 60
range_end = 1200
step = 15
data_range = np.arange(range_start,range_end,step)
rand_seed = 21
np.random.seed(rand_seed)
csv_path = r'W:\Datasets\synth_scoring\lines.csv'  # raw string so the backslashes are kept literally
#PUT IT ALL TOGETHER
all_lines = make_trios(line_count)
for line in all_lines:
line = np.asarray(line)
all_lines = np.asarray(all_lines)
np.savetxt(csv_path,all_lines)
|
[
"numpy.log",
"numpy.asarray",
"numpy.random.seed",
"numpy.savetxt",
"numpy.random.uniform",
"numpy.sin",
"numpy.arange"
] |
[((2457, 2496), 'numpy.arange', 'np.arange', (['range_start', 'range_end', 'step'], {}), '(range_start, range_end, step)\n', (2466, 2496), True, 'import numpy as np\n'), ((2511, 2536), 'numpy.random.seed', 'np.random.seed', (['rand_seed'], {}), '(rand_seed)\n', (2525, 2536), True, 'import numpy as np\n'), ((2709, 2730), 'numpy.asarray', 'np.asarray', (['all_lines'], {}), '(all_lines)\n', (2719, 2730), True, 'import numpy as np\n'), ((2731, 2762), 'numpy.savetxt', 'np.savetxt', (['csv_path', 'all_lines'], {}), '(csv_path, all_lines)\n', (2741, 2762), True, 'import numpy as np\n'), ((299, 340), 'numpy.arange', 'np.arange', (['(0)', '(np.pi * 2.0 + 0.1)', 'sin_step'], {}), '(0, np.pi * 2.0 + 0.1, sin_step)\n', (308, 340), True, 'import numpy as np\n'), ((1467, 1504), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.39)', 'high': '(0.7)'}), '(low=0.39, high=0.7)\n', (1484, 1504), True, 'import numpy as np\n'), ((1515, 1553), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.71)', 'high': '(0.98)'}), '(low=0.71, high=0.98)\n', (1532, 1553), True, 'import numpy as np\n'), ((1564, 1604), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.985)', 'high': '(1.205)'}), '(low=0.985, high=1.205)\n', (1581, 1604), True, 'import numpy as np\n'), ((2679, 2695), 'numpy.asarray', 'np.asarray', (['line'], {}), '(line)\n', (2689, 2695), True, 'import numpy as np\n'), ((1659, 1696), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.9)', 'high': '(0.95)'}), '(low=0.9, high=0.95)\n', (1676, 1696), True, 'import numpy as np\n'), ((1711, 1748), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.96)', 'high': '(1.1)'}), '(low=0.96, high=1.1)\n', (1728, 1748), True, 'import numpy as np\n'), ((1763, 1801), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(1.15)', 'high': '(1.67)'}), '(low=1.15, high=1.67)\n', (1780, 1801), True, 'import numpy as np\n'), ((2086, 2123), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.21)', 'high': '(0.9)'}), '(low=0.21, high=0.9)\n', (2103, 2123), True, 'import numpy as np\n'), ((111, 124), 'numpy.log', 'np.log', (['(b * x)'], {}), '(b * x)\n', (117, 124), True, 'import numpy as np\n'), ((164, 185), 'numpy.sin', 'np.sin', (['(x - np.pi / 2)'], {}), '(x - np.pi / 2)\n', (170, 185), True, 'import numpy as np\n'), ((2140, 2173), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(1)', 'high': '(30)'}), '(low=1, high=30)\n', (2157, 2173), True, 'import numpy as np\n'), ((2191, 2224), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(5)', 'high': '(13)'}), '(low=5, high=13)\n', (2208, 2224), True, 'import numpy as np\n')]
|
#################################################################################################
# #
# MULTI-ARMED BANDITS ---- 10-ARM TESTBED SOFTMAX METHOD #
# #
# Author: <NAME> #
# #
# References: #
# 1) Sutton, R.S. and Barto, A.G., 2018. Reinforcement learning: An introduction. MIT press #
# 2) GitHub: #
# i) Sahana Ramnath - https://github.com/SahanaRamnath/MultiArmedBandit_RL #
# ii) <NAME> - https://github.com/jettdlee/10_armed_bandit #
# #
#################################################################################################
import numpy as np
import matplotlib.pyplot as plt
import random
import time
#Begin time counter
start_time = time.time()
n = 2000 # Number of bandit problems
k = 10 # Number of Arms
p = 1000 # Number of plays
T = [0.01,0.2,1,10] # Set of Temperature values
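# Softmax (Boltzmann) action selection: each arm is pulled with probability
# P(a) = exp(Q(a)/T) / sum_b exp(Q(b)/T). A small temperature T behaves almost
# greedily, while a large T approaches uniform random exploration.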
#Expected Reward for a selected action
q_t = np.random.normal(0,1,(n,k)) # q(a) = E [ R | A = a ]
#Optimal Action
A_t = np.argmax(q_t,1) # A_t = argmax [ Q_t (a) ]
#Initialize Plots
f1 = plt.figure().add_subplot(111)
f2 = plt.figure().add_subplot(111)
f1.title.set_text(r'Softmax (temperature $T$): Average Reward vs Steps for 10 arms')
f1.set_ylabel('Average Reward')
f1.set_xlabel('Steps')
f2.title.set_text(r'Softmax (temperature $T$): $\%$ Optimal Action vs Steps for 10 arms')
f2.set_ylabel(r'$\%$ Optimal Action')
f2.set_xlabel('Steps')
f2.set_ylim(0,100)
Legend_Entries1 = []
Legend_Text1 = []
Legend_Entries2 = []
Legend_Text2 = []
for temp in range(len(T)):
print('Start trials for temperature = ', T[temp])
time_e = time.time()
# Initialize Matrices
Q = np.zeros((n,k)) # Estimated Reward
N = np.ones((n,k)) # Number of Times each Arm was chosen
    # Pull each arm at least once: N starts at one and Q_i below holds an initial reward draw for every arm
#Initial pull for all arms
Q_i = np.random.normal(q_t,1)
    R_t = [0] # Initialize the vector of average rewards for this temperature
R_t.append(np.mean(Q_i))
    R_t_opt = [] # Percentage of optimal-arm pulls for this temperature
for pull in range(2, p + 1):
#print(pull)
R_p = [] # Initialize vector for all rewards for the pull
count_opt_arm_pulls = 0 # Initialize counter for counting number of pulls of the optimal pulls
for i in range(n):
#print(pull)
#print(i)
Q_ex = np.exp(Q[i]/T[temp])
Q_softmax = Q_ex/np.sum(Q_ex)
j = np.random.choice(range(k),1,p=Q_softmax)
temp_R = np.random.normal(q_t[i][j],1)
R_p.append(temp_R)
if j == A_t[i]:
count_opt_arm_pulls = count_opt_arm_pulls + 1
N[i][j] = N[i][j] + 1
Q[i][j] = Q[i][j] + (temp_R - Q[i][j]) / N[i][j]
R_p_avg = np.mean(R_p)
R_t.append(R_p_avg)
R_t_opt.append(float(count_opt_arm_pulls)*100/n)
f1.plot(range(0,p+1),R_t)
f2.plot(range(2,p+1),R_t_opt)
p1 = f1.plot(range(0,p+1),R_t)
p2 = f2.plot(range(2,p+1),R_t_opt)
Legend_Entries1.append(p1)
Legend_Entries2.append(p2)
if (T[temp] == 0):
print("Temperature = 0")
Legend_Text1.append(r"$T = $"+str(T[temp])+" (greedy) ")
Legend_Text2.append(r"$T = $"+str(T[temp])+" (greedy) ")
else:
Legend_Text1.append(r"$T = $"+str(T[temp]))
Legend_Text2.append(r"$T = $"+str(T[temp]))
#print(Legend_Text1)
print('Trials done for temperature = ', T[temp])
print("Execution Time for temperature " + str(T[temp]) + " = %s" % (time.time() - time_e) )
print("Total Execution time: %s seconds" % (time.time() - start_time))
f1.legend((Legend_Text1),loc='best')
f2.legend((Legend_Text2),loc='best')
plt.show()
|
[
"numpy.random.normal",
"numpy.mean",
"numpy.ones",
"numpy.argmax",
"numpy.exp",
"numpy.sum",
"numpy.zeros",
"matplotlib.pyplot.figure",
"time.time",
"matplotlib.pyplot.show"
] |
[((843, 854), 'time.time', 'time.time', ([], {}), '()\n', (852, 854), False, 'import time\n'), ((1058, 1088), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(n, k)'], {}), '(0, 1, (n, k))\n', (1074, 1088), True, 'import numpy as np\n'), ((1135, 1152), 'numpy.argmax', 'np.argmax', (['q_t', '(1)'], {}), '(q_t, 1)\n', (1144, 1152), True, 'import numpy as np\n'), ((3619, 3629), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3627, 3629), True, 'import matplotlib.pyplot as plt\n'), ((1736, 1747), 'time.time', 'time.time', ([], {}), '()\n', (1745, 1747), False, 'import time\n'), ((1778, 1794), 'numpy.zeros', 'np.zeros', (['(n, k)'], {}), '((n, k))\n', (1786, 1794), True, 'import numpy as np\n'), ((1821, 1836), 'numpy.ones', 'np.ones', (['(n, k)'], {}), '((n, k))\n', (1828, 1836), True, 'import numpy as np\n'), ((1992, 2016), 'numpy.random.normal', 'np.random.normal', (['q_t', '(1)'], {}), '(q_t, 1)\n', (2008, 2016), True, 'import numpy as np\n'), ((1207, 1219), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1217, 1219), True, 'import matplotlib.pyplot as plt\n'), ((1242, 1254), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1252, 1254), True, 'import matplotlib.pyplot as plt\n'), ((2100, 2112), 'numpy.mean', 'np.mean', (['Q_i'], {}), '(Q_i)\n', (2107, 2112), True, 'import numpy as np\n'), ((2764, 2776), 'numpy.mean', 'np.mean', (['R_p'], {}), '(R_p)\n', (2771, 2776), True, 'import numpy as np\n'), ((2434, 2456), 'numpy.exp', 'np.exp', (['(Q[i] / T[temp])'], {}), '(Q[i] / T[temp])\n', (2440, 2456), True, 'import numpy as np\n'), ((2550, 2580), 'numpy.random.normal', 'np.random.normal', (['q_t[i][j]', '(1)'], {}), '(q_t[i][j], 1)\n', (2566, 2580), True, 'import numpy as np\n'), ((3518, 3529), 'time.time', 'time.time', ([], {}), '()\n', (3527, 3529), False, 'import time\n'), ((2475, 2487), 'numpy.sum', 'np.sum', (['Q_ex'], {}), '(Q_ex)\n', (2481, 2487), True, 'import numpy as np\n'), ((3449, 3460), 'time.time', 'time.time', ([], {}), '()\n', (3458, 3460), False, 'import time\n')]
|
import os
import sys
import tempfile
import subprocess
import cv2
import pymesh
import numpy as np
import torch
import triangle as tr
from tridepth import BaseMesh
from tridepth.extractor import calculate_canny_edges
from tridepth.extractor import SVGReader
from tridepth.extractor import resolve_self_intersection, cleanup
from tridepth.extractor import add_frame
class Mesh2DExtractor:
def __init__(self, canny_params={"denoise": False}, at_params={"filter_itr": 4, "error_thresh": 0.01}):
self.canny_params = canny_params # TODO
self.autotrace_cmd = ['autotrace',
'--centerline',
'--remove-adjacent-corners',
'--filter-iterations', str(at_params["filter_itr"]),
'--error-threshold', str(at_params["error_thresh"]),
'--input-format=bmp',
'--output-format=svg']
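        # autotrace is an external command-line tool that vectorises the bitmap
        # edge map into SVG centerline paths; _execute_autotrace runs it via subprocess.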
def _execute_autotrace(self, filename, debug=False):
"""Execute autotrace with input (bmp-file)
- https://github.com/autotrace/autotrace
Returns:
svg_string: string starting from '<svg/>'
"""
# Execute autotrace
p = subprocess.Popen(self.autotrace_cmd + [filename], stdout=subprocess.PIPE)
# Read the converted svg contents
svg_string = p.communicate()[0]
if not len(svg_string):
print("autotrace_cmd: " + ' '.join(self.autotrace_cmd + [filename]), file=sys.stderr)
print("ERROR: returned nothing, leaving tmp bmp file around for you to debug", file=sys.stderr)
sys.exit(1)
else:
if debug:
print(filename)
sys.exit(1)
else:
                os.unlink(filename) # Remove the temporary file
return svg_string
def _read_polygon_from_svg(self, svg_string):
"""
"""
# Extract polygon information from svg-string
# - https://github.com/guyc/scadtrace/blob/master/svg.py
svg_reader = SVGReader(svg_string)
verts_2d, edges = svg_reader.run()
# Store polygons as wire-format (w/ cleaning)
# - https://github.com/PyMesh/PyMesh/blob/master/scripts/svg_to_mesh.py
if verts_2d.shape[0] == 0:
wires = pymesh.wires.WireNetwork.create_empty()
else:
wires = pymesh.wires.WireNetwork.create_from_data(verts_2d, edges)
wires = resolve_self_intersection(wires, min_edge_size=1.5)
wires = cleanup(wires)
return wires
def _triangulation(self, np_edge, wires, output_size, debug=False):
"""
"""
height, width = output_size
# We use cython wrapper of Triangle,
# since other implementations (Pymesh) can't output edges :(
# - https://github.com/drufat/triangle
input_dic = {}
input_dic["vertices"] = wires.vertices.copy()
input_dic["segments"] = wires.edges.copy()
# [Options]
# p: Triangulates a Planar Straight Line Graph.
# q: no angles smaller than 20 degrees
try:
t = tr.triangulate(input_dic, 'pq')
except:
import uuid
unique_filename = str(uuid.uuid4()) + ".png"
print(wires.vertices.shape, wires.edges.shape)
cv2.imwrite(unique_filename, np_edge)
exit()
if debug:
import matplotlib.pyplot as plt
plt.gca().invert_yaxis()
# plt.imshow(np_edge)
for edge in wires.edges:
v1x, v1y = wires.vertices[edge[0]]
v2x, v2y = wires.vertices[edge[1]]
plt.plot([v1x, v2x], [v1y, v2y], 'k-', color='r', linewidth=1.0)
for tri in t['triangles']:
v1x, v1y = t['vertices'][tri[0]]
v2x, v2y = t['vertices'][tri[1]]
v3x, v3y = t['vertices'][tri[2]]
plt.plot([v1x, v2x], [v1y, v2y], 'k-', color='black', linewidth=1.0)
plt.plot([v2x, v3x], [v2y, v3y], 'k-', color='black', linewidth=1.0)
plt.plot([v3x, v1x], [v3y, v1y], 'k-', color='black', linewidth=1.0)
plt.scatter(wires.vertices[:, 0], wires.vertices[:, 1], s=3.0, c="black")
plt.show()
print(t['vertices'].shape, t['triangles'].shape)
exit()
# Normalize (range=[0,1])
vertices = t["vertices"]
t["vertices"] = np.concatenate((vertices[:, :1] / width,
vertices[:, 1:2] / height,
vertices[:, 2:]), 1)
t["edgemap"] = np_edge
return t
def __call__(self, np_scene):
"""
Args:
np_scene: [H,W,3] (ndarray, uint8)
"""
height, width, _ = np_scene.shape
# Calculate canny edge
np_edge, _ = calculate_canny_edges(np_scene, denoise=self.canny_params["denoise"])
# Save into temp file as bmp-format
with tempfile.NamedTemporaryFile(suffix='.bmp', delete=False) as temp:
cv2.imwrite(temp.name, np_edge)
# Execute vectorization (by Autotrace)
svg_string = self._execute_autotrace(temp.name)
# Extract polygon information
wires = self._read_polygon_from_svg(svg_string)
# Triangulation
wires = add_frame(wires, output_size=(height, width))
mesh_dic = self._triangulation(np_edge, wires, output_size=(height, width))
# Finally integrate all the information, and create disconnected mesh
mesh = BaseMesh(mesh_dic)
return mesh
|
[
"pymesh.wires.WireNetwork.create_empty",
"pymesh.wires.WireNetwork.create_from_data",
"triangle.triangulate",
"tridepth.BaseMesh",
"sys.exit",
"subprocess.Popen",
"tridepth.extractor.calculate_canny_edges",
"matplotlib.pyplot.plot",
"os.unlink",
"numpy.concatenate",
"tempfile.NamedTemporaryFile",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.gca",
"uuid.uuid4",
"matplotlib.pyplot.show",
"tridepth.extractor.add_frame",
"tridepth.extractor.SVGReader",
"cv2.imwrite",
"tridepth.extractor.cleanup",
"tridepth.extractor.resolve_self_intersection"
] |
[((1252, 1325), 'subprocess.Popen', 'subprocess.Popen', (['(self.autotrace_cmd + [filename])'], {'stdout': 'subprocess.PIPE'}), '(self.autotrace_cmd + [filename], stdout=subprocess.PIPE)\n', (1268, 1325), False, 'import subprocess\n'), ((2094, 2115), 'tridepth.extractor.SVGReader', 'SVGReader', (['svg_string'], {}), '(svg_string)\n', (2103, 2115), False, 'from tridepth.extractor import SVGReader\n'), ((4529, 4621), 'numpy.concatenate', 'np.concatenate', (['(vertices[:, :1] / width, vertices[:, 1:2] / height, vertices[:, 2:])', '(1)'], {}), '((vertices[:, :1] / width, vertices[:, 1:2] / height,\n vertices[:, 2:]), 1)\n', (4543, 4621), True, 'import numpy as np\n'), ((4962, 5031), 'tridepth.extractor.calculate_canny_edges', 'calculate_canny_edges', (['np_scene'], {'denoise': "self.canny_params['denoise']"}), "(np_scene, denoise=self.canny_params['denoise'])\n", (4983, 5031), False, 'from tridepth.extractor import calculate_canny_edges\n'), ((5440, 5485), 'tridepth.extractor.add_frame', 'add_frame', (['wires'], {'output_size': '(height, width)'}), '(wires, output_size=(height, width))\n', (5449, 5485), False, 'from tridepth.extractor import add_frame\n'), ((5664, 5682), 'tridepth.BaseMesh', 'BaseMesh', (['mesh_dic'], {}), '(mesh_dic)\n', (5672, 5682), False, 'from tridepth import BaseMesh\n'), ((1659, 1670), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1667, 1670), False, 'import sys\n'), ((2351, 2390), 'pymesh.wires.WireNetwork.create_empty', 'pymesh.wires.WireNetwork.create_empty', ([], {}), '()\n', (2388, 2390), False, 'import pymesh\n'), ((2425, 2483), 'pymesh.wires.WireNetwork.create_from_data', 'pymesh.wires.WireNetwork.create_from_data', (['verts_2d', 'edges'], {}), '(verts_2d, edges)\n', (2466, 2483), False, 'import pymesh\n'), ((2504, 2555), 'tridepth.extractor.resolve_self_intersection', 'resolve_self_intersection', (['wires'], {'min_edge_size': '(1.5)'}), '(wires, min_edge_size=1.5)\n', (2529, 2555), False, 'from tridepth.extractor import resolve_self_intersection, cleanup\n'), ((2576, 2590), 'tridepth.extractor.cleanup', 'cleanup', (['wires'], {}), '(wires)\n', (2583, 2590), False, 'from tridepth.extractor import resolve_self_intersection, cleanup\n'), ((3194, 3225), 'triangle.triangulate', 'tr.triangulate', (['input_dic', '"""pq"""'], {}), "(input_dic, 'pq')\n", (3208, 3225), True, 'import triangle as tr\n'), ((4260, 4333), 'matplotlib.pyplot.scatter', 'plt.scatter', (['wires.vertices[:, 0]', 'wires.vertices[:, 1]'], {'s': '(3.0)', 'c': '"""black"""'}), "(wires.vertices[:, 0], wires.vertices[:, 1], s=3.0, c='black')\n", (4271, 4333), True, 'import matplotlib.pyplot as plt\n'), ((4346, 4356), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4354, 4356), True, 'import matplotlib.pyplot as plt\n'), ((5090, 5146), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".bmp"""', 'delete': '(False)'}), "(suffix='.bmp', delete=False)\n", (5117, 5146), False, 'import tempfile\n'), ((5168, 5199), 'cv2.imwrite', 'cv2.imwrite', (['temp.name', 'np_edge'], {}), '(temp.name, np_edge)\n', (5179, 5199), False, 'import cv2\n'), ((1755, 1766), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1763, 1766), False, 'import sys\n'), ((1801, 1820), 'os.unlink', 'os.unlink', (['filename'], {}), '(filename)\n', (1810, 1820), False, 'import os\n'), ((3394, 3431), 'cv2.imwrite', 'cv2.imwrite', (['unique_filename', 'np_edge'], {}), '(unique_filename, np_edge)\n', (3405, 3431), False, 'import cv2\n'), ((3740, 3804), 'matplotlib.pyplot.plot', 'plt.plot', (['[v1x, v2x]', 
'[v1y, v2y]', '"""k-"""'], {'color': '"""r"""', 'linewidth': '(1.0)'}), "([v1x, v2x], [v1y, v2y], 'k-', color='r', linewidth=1.0)\n", (3748, 3804), True, 'import matplotlib.pyplot as plt\n'), ((4008, 4076), 'matplotlib.pyplot.plot', 'plt.plot', (['[v1x, v2x]', '[v1y, v2y]', '"""k-"""'], {'color': '"""black"""', 'linewidth': '(1.0)'}), "([v1x, v2x], [v1y, v2y], 'k-', color='black', linewidth=1.0)\n", (4016, 4076), True, 'import matplotlib.pyplot as plt\n'), ((4093, 4161), 'matplotlib.pyplot.plot', 'plt.plot', (['[v2x, v3x]', '[v2y, v3y]', '"""k-"""'], {'color': '"""black"""', 'linewidth': '(1.0)'}), "([v2x, v3x], [v2y, v3y], 'k-', color='black', linewidth=1.0)\n", (4101, 4161), True, 'import matplotlib.pyplot as plt\n'), ((4178, 4246), 'matplotlib.pyplot.plot', 'plt.plot', (['[v3x, v1x]', '[v3y, v1y]', '"""k-"""'], {'color': '"""black"""', 'linewidth': '(1.0)'}), "([v3x, v1x], [v3y, v1y], 'k-', color='black', linewidth=1.0)\n", (4186, 4246), True, 'import matplotlib.pyplot as plt\n'), ((3526, 3535), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3533, 3535), True, 'import matplotlib.pyplot as plt\n'), ((3300, 3312), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3310, 3312), False, 'import uuid\n')]
|
"""
## Minería de textos
Universidad de Alicante, curso 2021-2022
Esta documentación forma parte de la práctica "[Lectura y documentación de un sistema de
extracción de entidades](https://jaspock.github.io/mtextos2122/bloque2_practica.html)" y se
basa en el código del curso [CS230](https://github.com/cs230-stanford/cs230-code-examples)
de la Universidad de Stanford.
**Autores de los comentarios:** <NAME> & <NAME>
Este módulo define la red neuronal, la función de pérdida y la métrica de aciertos
para la evaluación del modelo. Se hace uso de la libería torch.
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
"""
### Clase 'Net'
Definición de la clase red neuronal
"""
def __init__(self, params):
"""
### Constructor
Se define una red neuronal recurrente para la obtención de entidades
nombradas de un texto. Se compone de tres capas: capa lineal de embedding,
capa LSTM y capa 'fully-connceted'.
#### Parámetros:
* 'params': parámetros con 'vocab_size', 'embedding_dim' y 'lstm_hidden_dim'
#### Devuelve:
* Tres capas para la red nuronal
"""
"""
Llama al constructor de la clase 'Params', se construye su clase y a
continuación la clase hija 'Net'
"""
super(Net, self).__init__()
"""
Se le da el tamaño del vocabulario y las dimensiones del embedding
a la capa de embedding
"""
self.embedding = nn.Embedding(params.vocab_size, params.embedding_dim)
"""
Capa LSTM que recibe como parámetros las dimensiones del embedding
y las dimensiones del estado 'hidden' que no tienen porqué coincidir
batch_first = True -> hace que los tensores de entrada y salida se den
de forma batch,seq,feature
"""
self.lstm = nn.LSTM(params.embedding_dim,
params.lstm_hidden_dim, batch_first=True)
"""
Capa 'fully-connected', es la capa que da el output final, me dice la
probabilidad de que la palabra sea una ner (named entitty recognition) tag
de cierto tipo (nombre, tiempo, lugar...)
"""
self.fc = nn.Linear(params.lstm_hidden_dim, params.number_of_tags)
"""
En resumen la primera capa, dada una palabra, me da su embedding, en la segunda ese embedding
se lleva a otros espacio de embeddings que no tiene porque tener la misma dimension, y la tercera
capa se lleva este nuevo embedding a otro espacio, el número de etiqueta
"""
def forward(self, s):
"""
### Función 'forward'
A partir de un batch input obtiene las probablidades logits de los tokens
#### Parámetros:
* 's': argumento con un 'lote' de oraciones organizados en filas
y de dimensión tamaño del batch x longitud frase más larga. A las
frases más cortas se le aplica padding.
#### Devuelve:
* probabilidades logits de los tokens
"""
"""
aplicamos una capa de embedding
las dimensiones resultantes son(x,dimension de los embeddings)
"""
s = self.embedding(s)
"""
Aplicación de la LSTM
"""
s, _ = self.lstm(s)
"""
Se hace una copia del tensor en memoria
"""
s = s.contiguous()
"""
Cambiamos la forma de la variable s (es una matriz) de tal manera que cada fila tiene un token.
Con el -1 le indicamos que calcule la dimensión automáticamente para obtener dos dimensiones. Y el
s.shape[2] es lstm_hidden_dim. Se le pone el [2] porque el [0] es el tamaño de batch y el [1] es
el máximo de la secuencia
"""
s = s.view(-1, s.shape[2])
"""
Última capa 'fully-connected'proyecta el nuevo embedding hacia un espacio con el número de etqiuetas
"""
s = self.fc(s)
"""
No obstante, aun no tenemos probabilidades hay que aplicar una softmax. Por una mayor
eficiencia se aplica un log(softmax) por lo que las probabilidades de 0 a 1 pasan a ser
negativas. Cuanto más cerca estemos del cero más alta es la probabilidad.
"""
return F.log_softmax(s, dim=1)
def loss_fn(outputs, labels):
"""
### Función 'loss_fn'
Método función de pérdida
#### Parámetros:
* 'outputs': resultados del modelo
* 'labels': las etiqeutas para evaluar la pérdida
#### Devuelve:
* La entro`pía cruzada de todos los tokens, menos los de padding
"""
"""
aplana la variable
"""
labels = labels.view(-1)
"""
Los inputs de una red neuronal deben tener la misma forma y tamaño, para que esto sea así al pasar oraciones
se hace 'padding', que añade ceros a las secuencias o corta oraciones largas. Estos token tienen -1 como etiqueta,
por lo que podemos usar una máscara que los excluya del cálculo de la función de pérdida.
"""
mask = (labels >= 0).float()
"""
Conversión de las etiquetas en positivas (por los padding tokens)
"""
labels = labels % outputs.shape[1]
num_tokens = int(torch.sum(mask))
return -torch.sum(outputs[range(outputs.shape[0]), labels]*mask)/num_tokens
"""
Se devuelve la entropía cruzada de todos los tokens, menos los de padding, mediante el uso
de la variable 'mask' que hace de máscara, la cual hemos definido antes
"""
def accuracy(outputs, labels):
"""
### Función 'accuracy'
Cálculo de la precisión a partir de las etiquetas y las salidas teniendo en cuenta los términos
de padding
#### Parámetros:
* 'outputs': resultados del modelo
* 'labels': las etiqeutas para evaluar la pérdida
#### Devuelve:
* Tasa de acierto
"""
"""
Aplanamiento de la variable
"""
labels = labels.ravel()
"""
Máscara similar al anterior método 'loss_fn'
"""
mask = (labels >= 0)
"""
Índices con los mayores valores, es decir, obtención de las clases más probables de cada token
"""
outputs = np.argmax(outputs, axis=1)
return np.sum(outputs == labels)/float(np.sum(mask))
metrics = {
'accuracy': accuracy,
}
|
[
"torch.nn.LSTM",
"numpy.argmax",
"numpy.sum",
"torch.sum",
"torch.nn.functional.log_softmax",
"torch.nn.Linear",
"torch.nn.Embedding"
] |
[((6365, 6391), 'numpy.argmax', 'np.argmax', (['outputs'], {'axis': '(1)'}), '(outputs, axis=1)\n', (6374, 6391), True, 'import numpy as np\n'), ((1609, 1662), 'torch.nn.Embedding', 'nn.Embedding', (['params.vocab_size', 'params.embedding_dim'], {}), '(params.vocab_size, params.embedding_dim)\n', (1621, 1662), True, 'import torch.nn as nn\n'), ((1975, 2046), 'torch.nn.LSTM', 'nn.LSTM', (['params.embedding_dim', 'params.lstm_hidden_dim'], {'batch_first': '(True)'}), '(params.embedding_dim, params.lstm_hidden_dim, batch_first=True)\n', (1982, 2046), True, 'import torch.nn as nn\n'), ((2331, 2387), 'torch.nn.Linear', 'nn.Linear', (['params.lstm_hidden_dim', 'params.number_of_tags'], {}), '(params.lstm_hidden_dim, params.number_of_tags)\n', (2340, 2387), True, 'import torch.nn as nn\n'), ((4439, 4462), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['s'], {'dim': '(1)'}), '(s, dim=1)\n', (4452, 4462), True, 'import torch.nn.functional as F\n'), ((5401, 5416), 'torch.sum', 'torch.sum', (['mask'], {}), '(mask)\n', (5410, 5416), False, 'import torch\n'), ((6405, 6430), 'numpy.sum', 'np.sum', (['(outputs == labels)'], {}), '(outputs == labels)\n', (6411, 6430), True, 'import numpy as np\n'), ((6437, 6449), 'numpy.sum', 'np.sum', (['mask'], {}), '(mask)\n', (6443, 6449), True, 'import numpy as np\n')]
|
import olll
import numpy as np
test1 = [[1,0,0,1,1,0,1],[0,1,0,5,0,0,0],[0,0,1,0,5,0,5]]
test2 = [[1,0,0,2,-1,1],[0,1,0,3,-4,-2],[0,0,1,5,-10,-8]]
test3 = [[1,0,0,1,1,0,1], [0,1,0,4,-1,0,-1], [0,0,1,1,1,0,1]]
test4 = [[1,0,0,2,5,3],[0,1,0,1,1,1,],[0,0,1,4,-2,0]]
test5 = [[1,0,0,0,0,0,2,1,1,2],[0,1,0,0,0,0,1,1,-1,-1],[0,0,1,0,0,0,-1,0,-2,-3],[0,0,0,1,0,0,1,-1,1,-1],[0,0,0,0,1,0,-1,2,-4,-3],[0,0,0,0,0,1,1,0,0,1]]
test6 = [[1, 0, 0, 5, 0, 0, 0],[0, 1, 0, 0, 5, 0, 5],[0, 0, 1, 1, 1, 0, 1]]
test7 = [[1, 0, 0, 20, 0, 0, 0],[0, 1, 0, 0, 20, 0, 20],[0, 0, 1, 4, 4, 0, 4]]
test8 = [[1, 0, 0, 10, 0, 0, 0],[0, 1, 0, 0, 10, 0, 10],[0, 0, 1, 2, 2, 0, 2]]
n = input("Please enter n: \n")
n = int(n)
k = input("Please enter k: \n")
k = int(k)
p = input("Please enter p: \n")
p = int(p)
id = np.identity(k)
A = [[]] * k
print("Please enter the generating set:\n")
for i in range(k):
print("\nEnter the generator a[",i,"]: ")
a = list(map(int,input().strip().split()))[:n]
#print(i, a)
a = [x * (2**p) for x in a]
y = list(id[i])
print(y[i],type(y[i]))
A[i] = y+a
print(A[i], type(A[i]))
print(A, type(A))
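# NOTE: the reduction below runs on the hard-coded basis test7, not on the
# user-entered basis A built above.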
print(test7, type(test7))
rb = olll.reduction(test7,0.75)
print("Basis: ", rb)
|
[
"numpy.identity",
"olll.reduction"
] |
[((787, 801), 'numpy.identity', 'np.identity', (['k'], {}), '(k)\n', (798, 801), True, 'import numpy as np\n'), ((1169, 1196), 'olll.reduction', 'olll.reduction', (['test7', '(0.75)'], {}), '(test7, 0.75)\n', (1183, 1196), False, 'import olll\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets
from mesostat.utils.opencv_helper import cvWriter
from mesostat.utils.arrays import numpy_merge_dimensions
from sklearn.decomposition import PCA
def distance_matrix(data):
nDim, nTime = data.shape
dataExtr = np.repeat(data[..., None], nTime, axis=2)
delta = dataExtr - dataExtr.transpose((0,2,1))
return np.linalg.norm(delta, axis=0)
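# A recurrence plot marks the pairs of time points (i, j) whose states are closer
# than a threshold; in the classes below the threshold is taken as the w-th
# percentile of all pairwise distances, so w sweeps the plot from empty to full.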
def decorrelate(data):
# Leading dimension must be channels
if data.ndim > 2:
dataEff = numpy_merge_dimensions(data, 1, data.ndim+1)
else:
dataEff = data
pca = PCA(n_components=48)
rez = pca.fit_transform(dataEff.T)
print(rez.shape)
rez /= np.std(rez, axis=0)
return rez.T.reshape(data.shape)
class RecurrencePlot:
def __init__(self, data, w0):
self.nPoint, self.nTime = data.shape
# Compute the recurrence plots with no threshold
self.dist = distance_matrix(data)
# Plot first
picdata = self.get_plotdata(w0)
self.fig, self.ax = plt.subplots(figsize=(5, 5))
self.pic = self.ax.imshow(picdata, cmap='binary', origin='lower')
self.ax.set_title('Recurrence Plot', fontsize=16)
def get_plotdata(self, w):
thr = np.percentile(self.dist.flatten(), w)
return self.dist <= thr
def update(self, w):
picnew = self.get_plotdata(w)
self.pic.set_data(picnew)
self.fig.canvas.draw_idle()
def interact(self):
ipywidgets.interact(self.update, w=(0, 100, 1))
def write_video(self, fname, frate=10.0, codec='XVID'):
frameDim = (self.nTime, self.nTime)
with cvWriter(fname, frameDim, frate=frate, codec=codec) as writer:
for w in range(101):
data = self.get_plotdata(w)
writer.write(data)
class RecurrencePlotMultitrial:
def __init__(self, data3D, w0):
self.nTrial, self.nPoint, self.nTime = data3D.shape
binarize = lambda dist, w: dist <= np.percentile(dist, w)
# Compute the recurrence plots with no threshold
self.rezMat = np.zeros((101, self.nTime, self.nTime), dtype=float)
for data in data3D:
dist = distance_matrix(data)
for w in range(101):
self.rezMat[w] += binarize(dist, w).astype(float)
self.rezMat /= self.nTrial
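        # After averaging, rezMat[w][i, j] is the fraction of trials in which time
        # points i and j are recurrent at threshold percentile w.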
# Plot first
self.fig, self.ax = plt.subplots(figsize=(5, 5))
self.pic = self.ax.imshow(self.rezMat[w0], cmap='viridis', origin='lower', vmin=0, vmax=1)
self.ax.set_title('Recurrence Plot', fontsize=16)
def update(self, w):
self.pic.set_data(self.rezMat[w])
self.fig.canvas.draw_idle()
def interact(self):
ipywidgets.interact(self.update, w=(0, 100, 1))
def write_video(self, fname, frate=10.0, codec='XVID'):
frameDim = (self.nTime, self.nTime)
with cvWriter(fname, frameDim, frate=frate, codec=codec) as writer:
for w in range(101):
writer.write(self.rezMat[w])
|
[
"numpy.repeat",
"sklearn.decomposition.PCA",
"numpy.linalg.norm",
"numpy.zeros",
"mesostat.utils.opencv_helper.cvWriter",
"ipywidgets.interact",
"numpy.std",
"numpy.percentile",
"mesostat.utils.arrays.numpy_merge_dimensions",
"matplotlib.pyplot.subplots"
] |
[((288, 329), 'numpy.repeat', 'np.repeat', (['data[..., None]', 'nTime'], {'axis': '(2)'}), '(data[..., None], nTime, axis=2)\n', (297, 329), True, 'import numpy as np\n'), ((392, 421), 'numpy.linalg.norm', 'np.linalg.norm', (['delta'], {'axis': '(0)'}), '(delta, axis=0)\n', (406, 421), True, 'import numpy as np\n'), ((617, 637), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(48)'}), '(n_components=48)\n', (620, 637), False, 'from sklearn.decomposition import PCA\n'), ((711, 730), 'numpy.std', 'np.std', (['rez'], {'axis': '(0)'}), '(rez, axis=0)\n', (717, 730), True, 'import numpy as np\n'), ((528, 574), 'mesostat.utils.arrays.numpy_merge_dimensions', 'numpy_merge_dimensions', (['data', '(1)', '(data.ndim + 1)'], {}), '(data, 1, data.ndim + 1)\n', (550, 574), False, 'from mesostat.utils.arrays import numpy_merge_dimensions\n'), ((1061, 1089), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (1073, 1089), True, 'import matplotlib.pyplot as plt\n'), ((1505, 1552), 'ipywidgets.interact', 'ipywidgets.interact', (['self.update'], {'w': '(0, 100, 1)'}), '(self.update, w=(0, 100, 1))\n', (1524, 1552), False, 'import ipywidgets\n'), ((2123, 2175), 'numpy.zeros', 'np.zeros', (['(101, self.nTime, self.nTime)'], {'dtype': 'float'}), '((101, self.nTime, self.nTime), dtype=float)\n', (2131, 2175), True, 'import numpy as np\n'), ((2430, 2458), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (2442, 2458), True, 'import matplotlib.pyplot as plt\n'), ((2753, 2800), 'ipywidgets.interact', 'ipywidgets.interact', (['self.update'], {'w': '(0, 100, 1)'}), '(self.update, w=(0, 100, 1))\n', (2772, 2800), False, 'import ipywidgets\n'), ((1672, 1723), 'mesostat.utils.opencv_helper.cvWriter', 'cvWriter', (['fname', 'frameDim'], {'frate': 'frate', 'codec': 'codec'}), '(fname, frameDim, frate=frate, codec=codec)\n', (1680, 1723), False, 'from mesostat.utils.opencv_helper import cvWriter\n'), ((2920, 2971), 'mesostat.utils.opencv_helper.cvWriter', 'cvWriter', (['fname', 'frameDim'], {'frate': 'frate', 'codec': 'codec'}), '(fname, frameDim, frate=frate, codec=codec)\n', (2928, 2971), False, 'from mesostat.utils.opencv_helper import cvWriter\n'), ((2020, 2042), 'numpy.percentile', 'np.percentile', (['dist', 'w'], {}), '(dist, w)\n', (2033, 2042), True, 'import numpy as np\n')]
|
import sys
import os
import numpy as np
import scipy.io as sio
import random
from decimal import Decimal
import argparse
import csv
from keras.models import load_model
import f_model
from f_preprocess import fill_length
# Usage: python rematch_challenge.py test_file_path
def arg_parse():
"""
Parse arguements
"""
parser = argparse.ArgumentParser(description='Rematch test of ECG Contest')
parser.add_argument("--test_path", dest='test_path', help=
"the file path of Test Data",
default="your test_path", type=str)
#You need to write your test data path with the argparse parameter.
#For your convenience when testing with local data, you can write your local test set path to default
return parser.parse_args()
def main():
args = arg_parse()
test_path = args.test_path
print(test_path)
## Add your codes to classify normal and diseases.
model01_path = './model/model_01.h5'
#model02_path = '/media/uuser/data/final_run/model/model_02.h5'
#modelxg_path = './model/model.pkl'
#feature_path = './data/feature.csv'
keysname = ('I','II','III','aVR','aVL','aVF', \
'V1','V2','V3','V4','V5','V6','age','sex')
t_len = 25000
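    # Every lead of every recording is brought to a fixed length of t_len samples
    # (via fill_length below) so that a single fixed input shape can be fed to the model.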
len_target=t_len
model_01 = f_model.build_model_01(num_classes=10,len_target=len_target)
model_01.load_weights(model01_path)
    ## Classify the samples of the test set and write the results into answers.csv,
    ## with each row representing the prediction for one sample.
## Here we use random numbers as prediction labels as an example and
## you should replace it with your own results.
Data_list = os.listdir(test_path)
classes = np.asarray([1,1,2,3,4,5,6,7,8,9])
with open('answers.csv', 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['File_name', 'label1', 'label2', 'label3', 'label4', 'label5', 'label6', 'label7', 'label8', 'label9', 'label10'])
for file_name in Data_list:
if file_name.endswith('.mat'):
answer = []
record_name = file_name.strip('.mat')
answer.append(record_name)
# model 01
ecg = np.empty([t_len,12])
mypath=test_path+file_name
data = sio.loadmat(mypath)
# read 12 leads
for lead in range(12):
temp=data[keysname[lead]]
ecg[:,lead] = fill_length(temp,t_len)
data_x = ecg.reshape((1,t_len,12))
pred_1 = model_01.predict(data_x)
# model 02
# model xgboost
preds = pred_1
preds[preds>=0.5] = 1
preds[preds<0.5] = 0
pred_out = preds * classes
y_out =[]
for i in range(10):
if pred_out[0][i]==classes[i]:
y_out.append(i)
for x in range(10-len(y_out)):
y_out.append('')
writer.writerow(answer+y_out)
csvfile.close()
if __name__ == "__main__":
main()
|
[
"f_preprocess.fill_length",
"os.listdir",
"argparse.ArgumentParser",
"csv.writer",
"numpy.asarray",
"scipy.io.loadmat",
"numpy.empty",
"f_model.build_model_01"
] |
[((343, 409), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Rematch test of ECG Contest"""'}), "(description='Rematch test of ECG Contest')\n", (366, 409), False, 'import argparse\n'), ((1293, 1354), 'f_model.build_model_01', 'f_model.build_model_01', ([], {'num_classes': '(10)', 'len_target': 'len_target'}), '(num_classes=10, len_target=len_target)\n', (1315, 1354), False, 'import f_model\n'), ((1685, 1706), 'os.listdir', 'os.listdir', (['test_path'], {}), '(test_path)\n', (1695, 1706), False, 'import os\n'), ((1721, 1763), 'numpy.asarray', 'np.asarray', (['[1, 1, 2, 3, 4, 5, 6, 7, 8, 9]'], {}), '([1, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n', (1731, 1763), True, 'import numpy as np\n'), ((1818, 1837), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (1828, 1837), False, 'import csv\n'), ((2232, 2253), 'numpy.empty', 'np.empty', (['[t_len, 12]'], {}), '([t_len, 12])\n', (2240, 2253), True, 'import numpy as np\n'), ((2319, 2338), 'scipy.io.loadmat', 'sio.loadmat', (['mypath'], {}), '(mypath)\n', (2330, 2338), True, 'import scipy.io as sio\n'), ((2490, 2514), 'f_preprocess.fill_length', 'fill_length', (['temp', 't_len'], {}), '(temp, t_len)\n', (2501, 2514), False, 'from f_preprocess import fill_length\n')]
|
import csv
import numpy as np
import time
from pathlib import Path
from Panalyzer.utils.wr_extractor import wr_extractor
from Panalyzer.TraceParser.logic_masking import *
def arm32buffered_csv2np(fcsv, buffersize, num_reg):
detailded_info = {'wr': None, 'regval': None, 'tick': None, 'masking': None, 'src1': None, 'src2': None,
'op': None}
tick_list = np.zeros([buffersize], dtype=np.int64)
wr_list = np.full([num_reg, 2, buffersize], False, dtype=bool)
reg_val_table = np.zeros([num_reg, buffersize], dtype=np.int64)
op_list = []
src1_list = []
src2_list = []
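    # The buffers above are re-allocated every `buffersize` rows (see the
    # idx % buffersize check below), so memory stays bounded for long traces;
    # note that only the ticks of the last chunk are returned at the end.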
with open(fcsv, mode='r') as infocsv:
info_reader = csv.reader(infocsv)
buffer_idx = 0
chunk_counter = 0
for idx, row in enumerate(info_reader):
if idx % buffersize == 0:
buffer_idx = 0
chunk_counter = chunk_counter + 1
print(chunk_counter)
tick_list = np.zeros([buffersize], dtype=np.int64)
wr_list = np.full([num_reg, 2, buffersize], False, dtype=bool)
reg_val_table = np.zeros([num_reg, buffersize], dtype=np.int64)
op_list = []
src1_list = []
src2_list = []
else:
buffer_idx = buffer_idx + 1
tick_list[buffer_idx] = row[0] # Tick number list: an 1 x line_number np array
op_id = row[3]
op_list.append(op_id) # Opname is just a simple list of strings
# Variables required for utility.wr_extractor, feed into the function, then abstract the required
# data structure
op_dst1 = row[4]
op_dst2 = row[5]
op_src1 = row[6]
op_src2 = row[7]
src1_list.append(op_src1)
src2_list.append(op_src2)
data = row[-1]
for k in range(num_reg): # kth register
val_prev = reg_val_table[k, buffer_idx - 1]
reg_name = 'r' + str(k) # fp, lr, sp ,pc are renamed, simply
wr_list[k, 0, buffer_idx] = \
wr_extractor(reg_name, op_dst1, op_dst2, op_src1, op_src2, op_id, data, val_prev)[0]
wr_list[k, 1, buffer_idx] = \
wr_extractor(reg_name, op_dst1, op_dst2, op_src1, op_src2, op_id, data, val_prev)[1]
reg_val_table[k, buffer_idx] = \
wr_extractor(reg_name, op_dst1, op_dst2, op_src1, op_src2, op_id, data, val_prev)[2]
return tick_list
if __name__ == "__main__":
project_dir = Path(__file__).resolve().parent.parent
csv_dir = project_dir.joinpath('tempcsv')
fname = "fftbaseline.csv"
start_time = time.perf_counter() # Time counter starts
T = arm32buffered_csv2np(csv_dir / fname, 10000, 16)
elapsed_time_pandas = time.perf_counter() - start_time # Stop point of the timer
# tickexample = T['tick']
# wrexample = T['wr']
# regvalexample = T['regval']
# masking_table = T['masking']
# ops_list = T['op']
#
# # print('tick \n', tickexample, '\n wr: \n', wrexample, '\n regval:\n', regvalexample)
# print(ops_list)
|
[
"pathlib.Path",
"Panalyzer.utils.wr_extractor.wr_extractor",
"time.perf_counter",
"numpy.zeros",
"numpy.full",
"csv.reader"
] |
[((402, 440), 'numpy.zeros', 'np.zeros', (['[buffersize]'], {'dtype': 'np.int64'}), '([buffersize], dtype=np.int64)\n', (410, 440), True, 'import numpy as np\n'), ((456, 508), 'numpy.full', 'np.full', (['[num_reg, 2, buffersize]', '(False)'], {'dtype': 'bool'}), '([num_reg, 2, buffersize], False, dtype=bool)\n', (463, 508), True, 'import numpy as np\n'), ((530, 577), 'numpy.zeros', 'np.zeros', (['[num_reg, buffersize]'], {'dtype': 'np.int64'}), '([num_reg, buffersize], dtype=np.int64)\n', (538, 577), True, 'import numpy as np\n'), ((2905, 2924), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2922, 2924), False, 'import time\n'), ((706, 725), 'csv.reader', 'csv.reader', (['infocsv'], {}), '(infocsv)\n', (716, 725), False, 'import csv\n'), ((3033, 3052), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3050, 3052), False, 'import time\n'), ((1017, 1055), 'numpy.zeros', 'np.zeros', (['[buffersize]'], {'dtype': 'np.int64'}), '([buffersize], dtype=np.int64)\n', (1025, 1055), True, 'import numpy as np\n'), ((1083, 1135), 'numpy.full', 'np.full', (['[num_reg, 2, buffersize]', '(False)'], {'dtype': 'bool'}), '([num_reg, 2, buffersize], False, dtype=bool)\n', (1090, 1135), True, 'import numpy as np\n'), ((1169, 1216), 'numpy.zeros', 'np.zeros', (['[num_reg, buffersize]'], {'dtype': 'np.int64'}), '([num_reg, buffersize], dtype=np.int64)\n', (1177, 1216), True, 'import numpy as np\n'), ((2768, 2782), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2772, 2782), False, 'from pathlib import Path\n'), ((2281, 2366), 'Panalyzer.utils.wr_extractor.wr_extractor', 'wr_extractor', (['reg_name', 'op_dst1', 'op_dst2', 'op_src1', 'op_src2', 'op_id', 'data', 'val_prev'], {}), '(reg_name, op_dst1, op_dst2, op_src1, op_src2, op_id, data,\n val_prev)\n', (2293, 2366), False, 'from Panalyzer.utils.wr_extractor import wr_extractor\n'), ((2442, 2527), 'Panalyzer.utils.wr_extractor.wr_extractor', 'wr_extractor', (['reg_name', 'op_dst1', 'op_dst2', 'op_src1', 'op_src2', 'op_id', 'data', 'val_prev'], {}), '(reg_name, op_dst1, op_dst2, op_src1, op_src2, op_id, data,\n val_prev)\n', (2454, 2527), False, 'from Panalyzer.utils.wr_extractor import wr_extractor\n'), ((2606, 2691), 'Panalyzer.utils.wr_extractor.wr_extractor', 'wr_extractor', (['reg_name', 'op_dst1', 'op_dst2', 'op_src1', 'op_src2', 'op_id', 'data', 'val_prev'], {}), '(reg_name, op_dst1, op_dst2, op_src1, op_src2, op_id, data,\n val_prev)\n', (2618, 2691), False, 'from Panalyzer.utils.wr_extractor import wr_extractor\n')]
|
import os
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from loguru import logger
import config
from Train import train
from Model import EEGNet
class OptunaTrainer:
def __init__(self, checkpointPath, epochs, batchsize, logPath=None):
self.checkpointPath = checkpointPath
self.logpath = logPath
self.epochs = epochs
self.batchsize = batchsize
def __call__(self, trial, dataset, crossVal=False, **kwargs):
if isinstance(dataset, tuple):
dataset = {
"noname": dataset
}
info = "Trial #{} metric values:\n".format(trial.number)
metrics = []
for key, value in dataset.items():
if "augmenter" in kwargs:
kwargs["augmenter"].setState(False)
shape = list(value[0].shape[-2:])
shape[1] = int(config.window[1] * config.sampleRate) - int(config.window[0] * config.sampleRate)
model = self.buildModel(trial, shape)
auc, precision = train(
model=model,
dataset=value,
weightsPath=self.checkpointPath,
epochs=self.epochs,
batchsize=self.batchsize,
crossVal=crossVal,
**kwargs
)
info += "{}: auc {:.2f} pr {:.2f}\t".format(key, auc, precision)
metrics.append((auc, precision))
metrics = np.array(metrics)
mean = np.mean(metrics, axis=0).round(2)
median = np.median(metrics, axis=0).round(2)
for i, metric in enumerate(["auc", "precision"]):
info += "\nMetric - {}. Mean: {}\tMedian: {}".format(metric, mean[i], median[i])
logger.info(info)
logger.info(trial.params)
return mean[0]
@staticmethod
def chooseOptimizer(trial):
kwargs = {}
optimizer_options = ["RMSprop", "Adam"]
optimizer_selected = trial.suggest_categorical("optimizer", optimizer_options)
if optimizer_selected == "RMSprop":
kwargs["learning_rate"] = trial.suggest_loguniform("rmsprop_learning_rate", 1e-5, 1e-1)
kwargs["decay"] = trial.suggest_discrete_uniform("rmsprop_decay", 0.85, 0.99, 0.01)
kwargs["momentum"] = trial.suggest_loguniform("rmsprop_momentum", 1e-5, 1e-1)
elif optimizer_selected == "Adam":
kwargs["learning_rate"] = trial.suggest_loguniform("adam_learning_rate", 1e-5, 1e-1)
optimizer = getattr(tf.optimizers, optimizer_selected)(**kwargs)
return optimizer
@staticmethod
def chooseLoss(trial):
loss_functions = {
"binary_crossentropy": tf,
"sigmoid_focal_crossentropy": tfa
}
loss_selected = trial.suggest_categorical("loss", list(loss_functions.keys()))
loss = getattr(loss_functions[loss_selected].losses, loss_selected)
return loss
def buildModel(self, trial, shape):
samples = shape[-1]
assert samples // 2 > 16
temporalLength = int(trial.suggest_discrete_uniform("temporal_length", 16, samples // 2, 4))
dropoutRate = trial.suggest_discrete_uniform("dropout_rate", 0.1, 0.5, 0.05)
D = trial.suggest_int("depth_multiplier", 1, 4)
poolKernel = int(trial.suggest_discrete_uniform("pool_kernel", 4, 16, 2))
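        # Each Optuna trial re-samples these hyperparameters from the ranges
        # declared via the suggest_* calls; the sampled values are recorded in
        # trial.params and logged by studyInfo().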
model = EEGNet(
categoriesN=2,
electrodes=shape[0],
samples=shape[1],
temporalLength=temporalLength,
dropoutRate=dropoutRate,
D=D,
poolPad="same",
poolKernel=poolKernel
)
optimizer = self.chooseOptimizer(trial)
loss = self.chooseLoss(trial)
model.compile(
loss=loss,
optimizer=optimizer,
metrics=["accuracy"]
)
return model
def studyInfo(study, bestN=7, file=None):
logger.info("Number of finished trials: {}", len(study.trials))
logger.info("Best {} trials:".format(bestN))
trials = sorted(study.trials, key=lambda elem: elem.value, reverse=True)[:bestN]
for i, trial in enumerate(trials):
logger.info("Trial {}", i)
logger.info("\tValue: {:.2f}", trial.value)
logger.info("\tParams: ")
for key, value in trial.params.items():
logger.info("\t\t{}: {}", key, value)
if file is not None:
os.makedirs(os.path.dirname(file), exist_ok=True)
studyDF = study.trials_dataframe()
studyDF.to_csv(file)
logger.info("Study file has been written to {}", file)
|
[
"numpy.mean",
"numpy.median",
"loguru.logger.info",
"Train.train",
"os.path.dirname",
"numpy.array",
"Model.EEGNet"
] |
[((1200, 1217), 'numpy.array', 'np.array', (['metrics'], {}), '(metrics)\n', (1208, 1217), True, 'import numpy as np\n'), ((1449, 1466), 'loguru.logger.info', 'logger.info', (['info'], {}), '(info)\n', (1460, 1466), False, 'from loguru import logger\n'), ((1469, 1494), 'loguru.logger.info', 'logger.info', (['trial.params'], {}), '(trial.params)\n', (1480, 1494), False, 'from loguru import logger\n'), ((2908, 3077), 'Model.EEGNet', 'EEGNet', ([], {'categoriesN': '(2)', 'electrodes': 'shape[0]', 'samples': 'shape[1]', 'temporalLength': 'temporalLength', 'dropoutRate': 'dropoutRate', 'D': 'D', 'poolPad': '"""same"""', 'poolKernel': 'poolKernel'}), "(categoriesN=2, electrodes=shape[0], samples=shape[1], temporalLength\n =temporalLength, dropoutRate=dropoutRate, D=D, poolPad='same',\n poolKernel=poolKernel)\n", (2914, 3077), False, 'from Model import EEGNet\n'), ((3549, 3575), 'loguru.logger.info', 'logger.info', (['"""Trial {}"""', 'i'], {}), "('Trial {}', i)\n", (3560, 3575), False, 'from loguru import logger\n'), ((3578, 3621), 'loguru.logger.info', 'logger.info', (['"""\tValue: {:.2f}"""', 'trial.value'], {}), "('\\tValue: {:.2f}', trial.value)\n", (3589, 3621), False, 'from loguru import logger\n'), ((3625, 3650), 'loguru.logger.info', 'logger.info', (['"""\tParams: """'], {}), "('\\tParams: ')\n", (3636, 3650), False, 'from loguru import logger\n'), ((3872, 3926), 'loguru.logger.info', 'logger.info', (['"""Study file has been written to {}"""', 'file'], {}), "('Study file has been written to {}', file)\n", (3883, 3926), False, 'from loguru import logger\n'), ((907, 1053), 'Train.train', 'train', ([], {'model': 'model', 'dataset': 'value', 'weightsPath': 'self.checkpointPath', 'epochs': 'self.epochs', 'batchsize': 'self.batchsize', 'crossVal': 'crossVal'}), '(model=model, dataset=value, weightsPath=self.checkpointPath, epochs=\n self.epochs, batchsize=self.batchsize, crossVal=crossVal, **kwargs)\n', (912, 1053), False, 'from Train import train\n'), ((3696, 3733), 'loguru.logger.info', 'logger.info', (['"""\t\t{}: {}"""', 'key', 'value'], {}), "('\\t\\t{}: {}', key, value)\n", (3707, 3733), False, 'from loguru import logger\n'), ((3771, 3792), 'os.path.dirname', 'os.path.dirname', (['file'], {}), '(file)\n', (3786, 3792), False, 'import os\n'), ((1228, 1252), 'numpy.mean', 'np.mean', (['metrics'], {'axis': '(0)'}), '(metrics, axis=0)\n', (1235, 1252), True, 'import numpy as np\n'), ((1273, 1299), 'numpy.median', 'np.median', (['metrics'], {'axis': '(0)'}), '(metrics, axis=0)\n', (1282, 1299), True, 'import numpy as np\n')]
|
import numpy
import sympy
from matplotlib import pyplot
from sympy.utilities.lambdify import lambdify
# Set the font family and size to use for Matplotlib figures.
pyplot.rcParams['font.family'] = 'serif'
pyplot.rcParams['font.size'] = 16
sympy.init_printing()
x, nu, t = sympy.symbols('x nu t')
phi = (sympy.exp(-(x - 4 * t)**2 / (4 * nu * (t + 1))) +
sympy.exp(-(x - 4 * t - 2 * numpy.pi)**2 / (4 * nu * (t + 1))))
phiprime = phi.diff(x)
u = -2 * nu * (phiprime / phi) + 4
u_lamb = lambdify((t, x, nu), u)
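# u is the analytical solution of the 1D Burgers' equation obtained through the
# Cole-Hopf transformation; lambdify compiles the symbolic expression into a plain
# numerical function of (t, x, nu) for fast evaluation.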
# Set parameters.
nx = 101 # number of spatial grid points
L = 2.0 * numpy.pi # length of the domain
dx = L / (nx - 1) # spatial grid size
nu = 0.07 # viscosity
nt = 100 # number of time steps to compute
sigma = 0.1 # CFL limit
dt = sigma * dx**2 / nu # time-step size
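# The time step follows the diffusive stability constraint dt <= sigma * dx**2 / nu,
# with sigma acting as a CFL-like safety factor for the explicit scheme.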
# Discretize the domain.
x = numpy.linspace(0.0, L, num=nx)
# Set initial conditions.
t = 0.0
u0 = numpy.array([u_lamb(t, xi, nu) for xi in x])
# Integrate the Burgers' equation in time.
u = u0.copy()
for n in range(nt):
un = u.copy()
# Update all interior points.
u[1:-1] = (un[1:-1] -
un[1:-1] * dt / dx * (un[1:-1] - un[:-2]) +
nu * dt / dx**2 * (un[2:] - 2 * un[1:-1] + un[:-2]))
# Update boundary points.
u[0] = (un[0] -
un[0] * dt / dx * (un[0] - un[-1]) +
nu * dt / dx**2 * (un[1] - 2 * un[0] + un[-1]))
u[-1] = (un[-1] -
un[-1] * dt / dx * (un[-1] - un[-2]) +
nu * dt / dx**2 * (un[0] - 2 * un[-1] + un[-2]))
# Compute the analytical solution.
u_analytical = numpy.array([u_lamb(nt * dt, xi, nu) for xi in x])
# Plot the numerical solution along with the analytical solution.
pyplot.figure(figsize=(6.0, 4.0))
pyplot.xlabel('x')
pyplot.ylabel('u')
pyplot.grid()
pyplot.plot(x, u, label='Numerical',
color='C0', linestyle='-', linewidth=2)
pyplot.plot(x, u_analytical, label='Analytical',
color='C1', linestyle='--', linewidth=2)
pyplot.legend()
pyplot.xlim(0.0, L)
pyplot.ylim(0.0, 10.0)
pyplot.show()
pyplot.clf()
|
[
"sympy.utilities.lambdify.lambdify",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"sympy.init_printing",
"sympy.symbols",
"numpy.linspace",
"matplotlib.pyplot.figure",
"sympy.exp",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((241, 262), 'sympy.init_printing', 'sympy.init_printing', ([], {}), '()\n', (260, 262), False, 'import sympy\n'), ((275, 298), 'sympy.symbols', 'sympy.symbols', (['"""x nu t"""'], {}), "('x nu t')\n", (288, 298), False, 'import sympy\n'), ((496, 519), 'sympy.utilities.lambdify.lambdify', 'lambdify', (['(t, x, nu)', 'u'], {}), '((t, x, nu), u)\n', (504, 519), False, 'from sympy.utilities.lambdify import lambdify\n'), ((827, 857), 'numpy.linspace', 'numpy.linspace', (['(0.0)', 'L'], {'num': 'nx'}), '(0.0, L, num=nx)\n', (841, 857), False, 'import numpy\n'), ((1690, 1723), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {'figsize': '(6.0, 4.0)'}), '(figsize=(6.0, 4.0))\n', (1703, 1723), False, 'from matplotlib import pyplot\n'), ((1724, 1742), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""x"""'], {}), "('x')\n", (1737, 1742), False, 'from matplotlib import pyplot\n'), ((1743, 1761), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""u"""'], {}), "('u')\n", (1756, 1761), False, 'from matplotlib import pyplot\n'), ((1762, 1775), 'matplotlib.pyplot.grid', 'pyplot.grid', ([], {}), '()\n', (1773, 1775), False, 'from matplotlib import pyplot\n'), ((1776, 1852), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'u'], {'label': '"""Numerical"""', 'color': '"""C0"""', 'linestyle': '"""-"""', 'linewidth': '(2)'}), "(x, u, label='Numerical', color='C0', linestyle='-', linewidth=2)\n", (1787, 1852), False, 'from matplotlib import pyplot\n'), ((1865, 1958), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'u_analytical'], {'label': '"""Analytical"""', 'color': '"""C1"""', 'linestyle': '"""--"""', 'linewidth': '(2)'}), "(x, u_analytical, label='Analytical', color='C1', linestyle='--',\n linewidth=2)\n", (1876, 1958), False, 'from matplotlib import pyplot\n'), ((1967, 1982), 'matplotlib.pyplot.legend', 'pyplot.legend', ([], {}), '()\n', (1980, 1982), False, 'from matplotlib import pyplot\n'), ((1983, 2002), 'matplotlib.pyplot.xlim', 'pyplot.xlim', (['(0.0)', 'L'], {}), '(0.0, L)\n', (1994, 2002), False, 'from matplotlib import pyplot\n'), ((2003, 2025), 'matplotlib.pyplot.ylim', 'pyplot.ylim', (['(0.0)', '(10.0)'], {}), '(0.0, 10.0)\n', (2014, 2025), False, 'from matplotlib import pyplot\n'), ((2027, 2040), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (2038, 2040), False, 'from matplotlib import pyplot\n'), ((2041, 2053), 'matplotlib.pyplot.clf', 'pyplot.clf', ([], {}), '()\n', (2051, 2053), False, 'from matplotlib import pyplot\n'), ((306, 355), 'sympy.exp', 'sympy.exp', (['(-(x - 4 * t) ** 2 / (4 * nu * (t + 1)))'], {}), '(-(x - 4 * t) ** 2 / (4 * nu * (t + 1)))\n', (315, 355), False, 'import sympy\n'), ((363, 427), 'sympy.exp', 'sympy.exp', (['(-(x - 4 * t - 2 * numpy.pi) ** 2 / (4 * nu * (t + 1)))'], {}), '(-(x - 4 * t - 2 * numpy.pi) ** 2 / (4 * nu * (t + 1)))\n', (372, 427), False, 'import sympy\n')]
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for reservoir_nn.keras.rewiring."""
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from reservoir_nn.keras import rewiring
import tensorflow as tf
class AdaptiveSparseReservoirTest(parameterized.TestCase):
def test_layer_with_num_connections_works(self):
x = tf.constant([1.])
layer = rewiring.AdaptiveSparseReservoir(
units=10,
reservoir_initializer=10,
)
layer(x)
def test_layer_with_large_num_connections_fails(self):
x = tf.constant([1.])
layer = rewiring.AdaptiveSparseReservoir(
units=10,
reservoir_initializer=100,
)
with self.assertRaisesRegex(ValueError,
"Cannot build layer.*(100).*(1).*(10)"):
layer(x)
def test_layer_with_reservoir_works(self):
initial_reservoir = np.arange(10).reshape(1, 10)
x = tf.constant([1.])
layer = rewiring.AdaptiveSparseReservoir(
units=10,
reservoir_initializer=initial_reservoir,
)
np.testing.assert_array_equal(layer(x), np.arange(10))
def test_layer_with_misshaped_reservoir_fails(self):
initial_reservoir = np.arange(10).reshape(2, 5)
x = tf.constant([1.])
layer = rewiring.AdaptiveSparseReservoir(
units=10,
reservoir_initializer=initial_reservoir,
)
with self.assertRaisesRegex(
ValueError,
r"Reservoir has a shape of \(2, 5\), but the layer expects \(1, 10\)"):
layer(x)
def test_get_coo_weight_matrix_works(self):
initial_reservoir = np.arange(10).reshape(2, 5)
x = tf.keras.Input(shape=(2,))
layer = rewiring.AdaptiveSparseReservoir(
units=5,
reservoir_initializer=initial_reservoir,
)
layer(x)
coo = layer.get_coo_weight_matrix().toarray()
np.testing.assert_array_equal(coo, initial_reservoir)
def test_get_coo_age_matrix_works(self):
initial_reservoir = np.arange(10).reshape(2, 5)
x = tf.keras.Input(shape=(2,))
layer = rewiring.AdaptiveSparseReservoir(
units=5,
reservoir_initializer=initial_reservoir,
)
layer(x)
coo = layer.get_coo_age_matrix().toarray()
np.testing.assert_array_equal(coo, np.zeros((2, 5)))
policy = rewiring.MutationPolicy(
candidate_fraction=0.0,
candidate_mutation_rate=1.0,
)
policy.mutation_step(layer)
coo = layer.get_coo_age_matrix().toarray()
np.testing.assert_array_equal(
coo,
[[0., 1, 1, 1, 1], [1., 1, 1, 1, 1]],
)
def test_apply_global_policy_works(self):
policy = rewiring.MutationPolicy(
candidate_fraction=0.5,
candidate_mutation_rate=0.5,
)
gpolicy = rewiring.GlobalPolicy(
scale_candidate_fraction=0.5, scale_candidate_mutation_rate=0.5)
policy = policy.apply_global_policy(gpolicy)
self.assertEqual(
policy,
rewiring.MutationPolicy(
candidate_fraction=0.25, candidate_mutation_rate=0.25))
def test_compute_mutation_probability_works(self):
initial_reservoir = np.arange(10).reshape(2, 5)
x = tf.keras.Input(shape=(2,))
layer = rewiring.AdaptiveSparseReservoir(
units=5,
reservoir_initializer=initial_reservoir,
)
layer(x)
policy = rewiring.MutationPolicy(
candidate_fraction=1.0,
candidate_mutation_rate=1.0,
)
p = policy.compute_mutation_probability(
sparse_values=layer.sparse_values.value(),
sparse_ages=layer.sparse_ages.value(),
)
coo = layer.get_coo_weight_matrix().copy()
coo.data[:] = p
np.testing.assert_allclose(
coo.toarray(), [
[0., 0.7, 0.3, 0, 0],
[0., 0, 0, 0, 0],
], atol=0.1)
def test_mutation_works(self):
initial_reservoir = np.arange(10).reshape(2, 5)
x = tf.keras.Input(shape=(2,))
layer = rewiring.AdaptiveSparseReservoir(
units=5,
reservoir_initializer=initial_reservoir,
)
layer(x)
coo = layer.get_coo_weight_matrix()
np.testing.assert_allclose(coo.toarray(), [
[0., 1, 2, 3, 4],
[5., 6, 7, 8, 9],
])
rng = np.random.RandomState(1234)
policy = rewiring.MutationPolicy(
candidate_fraction=0.2,
candidate_mutation_rate=1.0,
)
policy.mutation_step(layer, rng)
coo = layer.get_coo_weight_matrix()
    # the least active connections are replaced with zeros with 100%
    # probability.
np.testing.assert_allclose(coo.toarray(), [
[0., 0, 2, 3, 4],
[5., 6, 7, 8, 9],
])
class SparseEvolutionEnd2EndTest(parameterized.TestCase):
def test_fit_small_model_works(self):
"""Testing fitting a 4x4 sparse reservoir with 4 truth connections."""
nunits = 4
inputs = tf.keras.Input(shape=(nunits,))
policy = rewiring.MutationPolicy(
candidate_fraction=0.5,
candidate_mutation_rate=0.1,
)
layer = rewiring.AdaptiveSparseReservoir(
units=nunits,
reservoir_initializer=2 * nunits,
# regularizer helps sparsify the redundant connections.
kernel_regularizer=tf.keras.regularizers.l2(1e-2),
)
outputs = layer(inputs)
rng = np.random.RandomState(1333)
model = tf.keras.Model(inputs, outputs)
    # Low-dimensional model prefers SGD:
model.compile(
optimizer=tf.keras.optimizers.SGD(lr=0.1, nesterov=True),
loss="mse",
metrics=["mse"])
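    # the target simply reverses the feature order, so the ideal weight matrix
    # is the anti-diagonal identity np.eye(4)[::-1]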
x = rng.uniform(size=(1000000, nunits)) - 0.5
y = x[:, ::-1].copy()
truth = np.eye(4)[::-1]
def mutation_schedule(epoch):
del epoch
return rewiring.GlobalPolicy()
model.fit(
x,
y,
batch_size=int(len(x) / 100),
epochs=10,
verbose=False,
callbacks=rewiring.MutationCallback(
policy={layer: policy},
mutation_schedule=mutation_schedule,
rng=rng,
verbose=1))
connection = layer.get_coo_weight_matrix().toarray()
# Use truth * 0.65 here because L2 biases the fit towards zero.
np.testing.assert_allclose(connection, truth * 0.65, atol=0.10)
def test_fit_large_model_works(self):
"""Testing fitting a 100x100 sparse reservoir with 100 truth connections."""
nunits = 100
inputs = tf.keras.Input(shape=(nunits,))
policy = rewiring.MutationPolicy(
candidate_fraction=0.2,
candidate_mutation_rate=0.8,
)
layer = rewiring.AdaptiveSparseReservoir(
units=nunits,
reservoir_initializer=2 * nunits,
)
outputs = layer(inputs)
model = tf.keras.Model(inputs, outputs)
model.compile(loss="mse", metrics=["mse"])
x = np.random.uniform(size=(100000, nunits)) - 0.5
truth = np.eye(nunits)[::-1].copy()
y = np.einsum("ij,jk->ik", x, truth)
class Reporter(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
connection = layer.get_coo_weight_matrix().toarray()
ages = layer.get_coo_age_matrix().toarray()
cross = np.abs(connection) * truth
print(sorted(zip(*np.nonzero(cross))))
significant_elements = np.sum(cross > 0.02)
print(significant_elements)
print(ages[cross > 0.02])
model.fit(
x,
y,
batch_size=1600,
epochs=20,
verbose=True,
callbacks=[
rewiring.MutationCallback(policy=policy, verbose=1),
Reporter()
])
connection = layer.get_coo_weight_matrix().todense()
# We shall have some correlation with the truth after training for a while.
cross = np.abs(connection) * truth
np.testing.assert_allclose(np.sum(cross > 0.02), 80, atol=20)
if __name__ == "__main__":
absltest.main()
|
[
"numpy.einsum",
"reservoir_nn.keras.rewiring.AdaptiveSparseReservoir",
"numpy.arange",
"numpy.random.RandomState",
"numpy.testing.assert_allclose",
"tensorflow.keras.optimizers.SGD",
"reservoir_nn.keras.rewiring.GlobalPolicy",
"reservoir_nn.keras.rewiring.MutationPolicy",
"numpy.testing.assert_array_equal",
"numpy.abs",
"numpy.eye",
"absl.testing.absltest.main",
"tensorflow.keras.Input",
"numpy.nonzero",
"reservoir_nn.keras.rewiring.MutationCallback",
"numpy.sum",
"numpy.zeros",
"tensorflow.constant",
"numpy.random.uniform",
"tensorflow.keras.Model",
"tensorflow.keras.regularizers.l2"
] |
[((8276, 8291), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (8289, 8291), False, 'from absl.testing import absltest\n'), ((899, 917), 'tensorflow.constant', 'tf.constant', (['[1.0]'], {}), '([1.0])\n', (910, 917), True, 'import tensorflow as tf\n'), ((929, 997), 'reservoir_nn.keras.rewiring.AdaptiveSparseReservoir', 'rewiring.AdaptiveSparseReservoir', ([], {'units': '(10)', 'reservoir_initializer': '(10)'}), '(units=10, reservoir_initializer=10)\n', (961, 997), False, 'from reservoir_nn.keras import rewiring\n'), ((1100, 1118), 'tensorflow.constant', 'tf.constant', (['[1.0]'], {}), '([1.0])\n', (1111, 1118), True, 'import tensorflow as tf\n'), ((1130, 1199), 'reservoir_nn.keras.rewiring.AdaptiveSparseReservoir', 'rewiring.AdaptiveSparseReservoir', ([], {'units': '(10)', 'reservoir_initializer': '(100)'}), '(units=10, reservoir_initializer=100)\n', (1162, 1199), False, 'from reservoir_nn.keras import rewiring\n'), ((1464, 1482), 'tensorflow.constant', 'tf.constant', (['[1.0]'], {}), '([1.0])\n', (1475, 1482), True, 'import tensorflow as tf\n'), ((1494, 1582), 'reservoir_nn.keras.rewiring.AdaptiveSparseReservoir', 'rewiring.AdaptiveSparseReservoir', ([], {'units': '(10)', 'reservoir_initializer': 'initial_reservoir'}), '(units=10, reservoir_initializer=\n initial_reservoir)\n', (1526, 1582), False, 'from reservoir_nn.keras import rewiring\n'), ((1778, 1796), 'tensorflow.constant', 'tf.constant', (['[1.0]'], {}), '([1.0])\n', (1789, 1796), True, 'import tensorflow as tf\n'), ((1808, 1896), 'reservoir_nn.keras.rewiring.AdaptiveSparseReservoir', 'rewiring.AdaptiveSparseReservoir', ([], {'units': '(10)', 'reservoir_initializer': 'initial_reservoir'}), '(units=10, reservoir_initializer=\n initial_reservoir)\n', (1840, 1896), False, 'from reservoir_nn.keras import rewiring\n'), ((2172, 2198), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(2,)'}), '(shape=(2,))\n', (2186, 2198), True, 'import tensorflow as tf\n'), ((2211, 2298), 'reservoir_nn.keras.rewiring.AdaptiveSparseReservoir', 'rewiring.AdaptiveSparseReservoir', ([], {'units': '(5)', 'reservoir_initializer': 'initial_reservoir'}), '(units=5, reservoir_initializer=\n initial_reservoir)\n', (2243, 2298), False, 'from reservoir_nn.keras import rewiring\n'), ((2385, 2438), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['coo', 'initial_reservoir'], {}), '(coo, initial_reservoir)\n', (2414, 2438), True, 'import numpy as np\n'), ((2544, 2570), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(2,)'}), '(shape=(2,))\n', (2558, 2570), True, 'import tensorflow as tf\n'), ((2583, 2670), 'reservoir_nn.keras.rewiring.AdaptiveSparseReservoir', 'rewiring.AdaptiveSparseReservoir', ([], {'units': '(5)', 'reservoir_initializer': 'initial_reservoir'}), '(units=5, reservoir_initializer=\n initial_reservoir)\n', (2615, 2670), False, 'from reservoir_nn.keras import rewiring\n'), ((2821, 2897), 'reservoir_nn.keras.rewiring.MutationPolicy', 'rewiring.MutationPolicy', ([], {'candidate_fraction': '(0.0)', 'candidate_mutation_rate': '(1.0)'}), '(candidate_fraction=0.0, candidate_mutation_rate=1.0)\n', (2844, 2897), False, 'from reservoir_nn.keras import rewiring\n'), ((3004, 3078), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['coo', '[[0.0, 1, 1, 1, 1], [1.0, 1, 1, 1, 1]]'], {}), '(coo, [[0.0, 1, 1, 1, 1], [1.0, 1, 1, 1, 1]])\n', (3033, 3078), True, 'import numpy as np\n'), ((3158, 3234), 'reservoir_nn.keras.rewiring.MutationPolicy', 'rewiring.MutationPolicy', ([], 
{'candidate_fraction': '(0.5)', 'candidate_mutation_rate': '(0.5)'}), '(candidate_fraction=0.5, candidate_mutation_rate=0.5)\n', (3181, 3234), False, 'from reservoir_nn.keras import rewiring\n'), ((3272, 3362), 'reservoir_nn.keras.rewiring.GlobalPolicy', 'rewiring.GlobalPolicy', ([], {'scale_candidate_fraction': '(0.5)', 'scale_candidate_mutation_rate': '(0.5)'}), '(scale_candidate_fraction=0.5,\n scale_candidate_mutation_rate=0.5)\n', (3293, 3362), False, 'from reservoir_nn.keras import rewiring\n'), ((3673, 3699), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(2,)'}), '(shape=(2,))\n', (3687, 3699), True, 'import tensorflow as tf\n'), ((3712, 3799), 'reservoir_nn.keras.rewiring.AdaptiveSparseReservoir', 'rewiring.AdaptiveSparseReservoir', ([], {'units': '(5)', 'reservoir_initializer': 'initial_reservoir'}), '(units=5, reservoir_initializer=\n initial_reservoir)\n', (3744, 3799), False, 'from reservoir_nn.keras import rewiring\n'), ((3846, 3922), 'reservoir_nn.keras.rewiring.MutationPolicy', 'rewiring.MutationPolicy', ([], {'candidate_fraction': '(1.0)', 'candidate_mutation_rate': '(1.0)'}), '(candidate_fraction=1.0, candidate_mutation_rate=1.0)\n', (3869, 3922), False, 'from reservoir_nn.keras import rewiring\n'), ((4402, 4428), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(2,)'}), '(shape=(2,))\n', (4416, 4428), True, 'import tensorflow as tf\n'), ((4441, 4528), 'reservoir_nn.keras.rewiring.AdaptiveSparseReservoir', 'rewiring.AdaptiveSparseReservoir', ([], {'units': '(5)', 'reservoir_initializer': 'initial_reservoir'}), '(units=5, reservoir_initializer=\n initial_reservoir)\n', (4473, 4528), False, 'from reservoir_nn.keras import rewiring\n'), ((4718, 4745), 'numpy.random.RandomState', 'np.random.RandomState', (['(1234)'], {}), '(1234)\n', (4739, 4745), True, 'import numpy as np\n'), ((4760, 4836), 'reservoir_nn.keras.rewiring.MutationPolicy', 'rewiring.MutationPolicy', ([], {'candidate_fraction': '(0.2)', 'candidate_mutation_rate': '(1.0)'}), '(candidate_fraction=0.2, candidate_mutation_rate=1.0)\n', (4783, 4836), False, 'from reservoir_nn.keras import rewiring\n'), ((5338, 5369), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(nunits,)'}), '(shape=(nunits,))\n', (5352, 5369), True, 'import tensorflow as tf\n'), ((5384, 5460), 'reservoir_nn.keras.rewiring.MutationPolicy', 'rewiring.MutationPolicy', ([], {'candidate_fraction': '(0.5)', 'candidate_mutation_rate': '(0.1)'}), '(candidate_fraction=0.5, candidate_mutation_rate=0.1)\n', (5407, 5460), False, 'from reservoir_nn.keras import rewiring\n'), ((5762, 5789), 'numpy.random.RandomState', 'np.random.RandomState', (['(1333)'], {}), '(1333)\n', (5783, 5789), True, 'import numpy as np\n'), ((5803, 5834), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (5817, 5834), True, 'import tensorflow as tf\n'), ((6623, 6685), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['connection', '(truth * 0.65)'], {'atol': '(0.1)'}), '(connection, truth * 0.65, atol=0.1)\n', (6649, 6685), True, 'import numpy as np\n'), ((6839, 6870), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(nunits,)'}), '(shape=(nunits,))\n', (6853, 6870), True, 'import tensorflow as tf\n'), ((6884, 6960), 'reservoir_nn.keras.rewiring.MutationPolicy', 'rewiring.MutationPolicy', ([], {'candidate_fraction': '(0.2)', 'candidate_mutation_rate': '(0.8)'}), '(candidate_fraction=0.2, candidate_mutation_rate=0.8)\n', (6907, 6960), False, 'from reservoir_nn.keras import 
rewiring\n'), ((6996, 7081), 'reservoir_nn.keras.rewiring.AdaptiveSparseReservoir', 'rewiring.AdaptiveSparseReservoir', ([], {'units': 'nunits', 'reservoir_initializer': '(2 * nunits)'}), '(units=nunits, reservoir_initializer=2 * nunits\n )\n', (7028, 7081), False, 'from reservoir_nn.keras import rewiring\n'), ((7141, 7172), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (7155, 7172), True, 'import tensorflow as tf\n'), ((7326, 7358), 'numpy.einsum', 'np.einsum', (['"""ij,jk->ik"""', 'x', 'truth'], {}), "('ij,jk->ik', x, truth)\n", (7335, 7358), True, 'import numpy as np\n'), ((1646, 1659), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1655, 1659), True, 'import numpy as np\n'), ((2789, 2805), 'numpy.zeros', 'np.zeros', (['(2, 5)'], {}), '((2, 5))\n', (2797, 2805), True, 'import numpy as np\n'), ((3465, 3543), 'reservoir_nn.keras.rewiring.MutationPolicy', 'rewiring.MutationPolicy', ([], {'candidate_fraction': '(0.25)', 'candidate_mutation_rate': '(0.25)'}), '(candidate_fraction=0.25, candidate_mutation_rate=0.25)\n', (3488, 3543), False, 'from reservoir_nn.keras import rewiring\n'), ((6095, 6104), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (6101, 6104), True, 'import numpy as np\n'), ((6175, 6198), 'reservoir_nn.keras.rewiring.GlobalPolicy', 'rewiring.GlobalPolicy', ([], {}), '()\n', (6196, 6198), False, 'from reservoir_nn.keras import rewiring\n'), ((7230, 7270), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(100000, nunits)'}), '(size=(100000, nunits))\n', (7247, 7270), True, 'import numpy as np\n'), ((8152, 8170), 'numpy.abs', 'np.abs', (['connection'], {}), '(connection)\n', (8158, 8170), True, 'import numpy as np\n'), ((8210, 8230), 'numpy.sum', 'np.sum', (['(cross > 0.02)'], {}), '(cross > 0.02)\n', (8216, 8230), True, 'import numpy as np\n'), ((1426, 1439), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1435, 1439), True, 'import numpy as np\n'), ((1741, 1754), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1750, 1754), True, 'import numpy as np\n'), ((2135, 2148), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (2144, 2148), True, 'import numpy as np\n'), ((2507, 2520), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (2516, 2520), True, 'import numpy as np\n'), ((3636, 3649), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (3645, 3649), True, 'import numpy as np\n'), ((4365, 4378), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (4374, 4378), True, 'import numpy as np\n'), ((5685, 5715), 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (5709, 5715), True, 'import tensorflow as tf\n'), ((5912, 5958), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'lr': '(0.1)', 'nesterov': '(True)'}), '(lr=0.1, nesterov=True)\n', (5935, 5958), True, 'import tensorflow as tf\n'), ((6335, 6446), 'reservoir_nn.keras.rewiring.MutationCallback', 'rewiring.MutationCallback', ([], {'policy': '{layer: policy}', 'mutation_schedule': 'mutation_schedule', 'rng': 'rng', 'verbose': '(1)'}), '(policy={layer: policy}, mutation_schedule=\n mutation_schedule, rng=rng, verbose=1)\n', (6360, 6446), False, 'from reservoir_nn.keras import rewiring\n'), ((7687, 7707), 'numpy.sum', 'np.sum', (['(cross > 0.02)'], {}), '(cross > 0.02)\n', (7693, 7707), True, 'import numpy as np\n'), ((7289, 7303), 'numpy.eye', 'np.eye', (['nunits'], {}), '(nunits)\n', (7295, 7303), True, 'import numpy as np\n'), ((7582, 7600), 'numpy.abs', 'np.abs', 
(['connection'], {}), '(connection)\n', (7588, 7600), True, 'import numpy as np\n'), ((7914, 7965), 'reservoir_nn.keras.rewiring.MutationCallback', 'rewiring.MutationCallback', ([], {'policy': 'policy', 'verbose': '(1)'}), '(policy=policy, verbose=1)\n', (7939, 7965), False, 'from reservoir_nn.keras import rewiring\n'), ((7635, 7652), 'numpy.nonzero', 'np.nonzero', (['cross'], {}), '(cross)\n', (7645, 7652), True, 'import numpy as np\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from progress.bar import Bar
import torch
import math
import copy
from .base_debugger import BaseDebugger
from models.utils import _tranpose_and_gather_feat, _gather_feat
from models.decode import _topk_original, _topk, _topk_channel, _nms
from datasets.dataset.utils import _bbox_overlaps
from utils.image import transform_preds
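# NOTE: merge_outputs() below calls soft_nms(), which is not imported in this
# snippet; it is assumed to be provided by the repository's NMS extension module.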
class TriCtdetDebugger(BaseDebugger):
def __init__(self, opt):
super(TriCtdetDebugger, self).__init__(opt)
def forward(self, images):
with torch.no_grad():
output = self.model(images)[-1]
tl = output['tl'].sigmoid_()
bl = output['bl'].sigmoid_()
br = output['br'].sigmoid_()
ct = output['ct'].sigmoid_()
tl_tag = output['tl_tag']
bl_tag = output['bl_tag']
br_tag = output['br_tag']
tl_reg = output['tl_reg']
bl_reg = output['bl_reg']
br_reg = output['br_reg']
ct_reg = output['ct_reg']
detections = {'tl_heatmap':tl, 'bl_heatmap':bl, 'br_heatmap':br, 'ct_heatmap':ct,
'tl_reg':tl_reg, 'bl_reg':bl_reg, 'br_reg':br_reg, 'ct_reg':ct_reg,
'tl_tag':tl_tag, 'bl_tag':bl_tag, 'br_tag':br_tag}
return detections
def debug(self, detections, targets, ae_threshold):
tl_heat = detections['tl_heatmap']
bl_heat = detections['bl_heatmap']
br_heat = detections['br_heatmap']
ct_heat = detections['ct_heatmap']
targets['tl_tag'] = targets['tl_tag'][targets['reg_mask']].unsqueeze(0)
targets['bl_tag'] = targets['bl_tag'][targets['reg_mask']].unsqueeze(0)
targets['br_tag'] = targets['br_tag'][targets['reg_mask']].unsqueeze(0)
targets['ct_tag'] = targets['ct_tag'][targets['reg_mask']].unsqueeze(0)
targets['tl_reg'] = targets['tl_reg'][targets['reg_mask']].unsqueeze(0)
targets['bl_reg'] = targets['bl_reg'][targets['reg_mask']].unsqueeze(0)
targets['br_reg'] = targets['br_reg'][targets['reg_mask']].unsqueeze(0)
targets['ct_reg'] = targets['ct_reg'][targets['reg_mask']].unsqueeze(0)
batch, cat, height, width = tl_heat.size()
# tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = _topk(tl_heat, K=256)
# bl_scores, bl_inds, bl_clses, bl_ys, bl_xs = _topk(bl_heat, K=256)
# br_scores, br_inds, br_clses, br_ys, br_xs = _topk(br_heat, K=256)
# ct_scores, ct_inds, ct_clses, ct_ys, ct_xs = _topk(ct_heat, K=256)
tl_tag = detections['tl_tag']
bl_tag = detections['bl_tag']
br_tag = detections['br_tag']
tl_reg = detections['tl_reg']
bl_reg = detections['bl_reg']
br_reg = detections['br_reg']
ct_reg = detections['ct_reg']
# gather by gt
tl_tag = _tranpose_and_gather_feat(tl_tag, targets['tl_tag'].to(torch.device("cuda")))
bl_tag = _tranpose_and_gather_feat(bl_tag, targets['bl_tag'].to(torch.device("cuda")))
br_tag = _tranpose_and_gather_feat(br_tag, targets['br_tag'].to(torch.device("cuda")))
# gather by top k
# tl_tag = _tranpose_and_gather_feat(tl_tag, tl_inds)
# bl_tag = _tranpose_and_gather_feat(bl_tag, bl_inds)
# br_tag = _tranpose_and_gather_feat(br_tag, br_inds)
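    # associative-embedding grouping: each corner tag should lie close to the
    # mean tag of its (tl, bl, br) triplet, so measure the per-corner deviation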
avg_tag = (tl_tag + bl_tag + br_tag) / 3
dists_tl = torch.abs(avg_tag - tl_tag).to(torch.device("cpu")).numpy()
dists_bl = torch.abs(bl_tag - avg_tag).to(torch.device("cpu")).numpy()
dists_br = torch.abs(avg_tag - br_tag).to(torch.device("cpu")).numpy()
dists_avg = (dists_tl.sum() + dists_bl.sum() + dists_br.sum()) / dists_tl.shape[1] / 3
min_tl = dists_tl.min()
max_tl = dists_tl.max()
min_bl = dists_bl.min()
max_bl = dists_bl.max()
min_br = dists_br.min()
max_br = dists_br.max()
# gather by gt
tl_reg = _tranpose_and_gather_feat(tl_reg, targets['tl_tag'].to(torch.device("cuda")))
bl_reg = _tranpose_and_gather_feat(bl_reg, targets['bl_tag'].to(torch.device("cuda")))
br_reg = _tranpose_and_gather_feat(br_reg, targets['br_tag'].to(torch.device("cuda")))
ct_reg = _tranpose_and_gather_feat(ct_reg, targets['ct_tag'].to(torch.device("cuda")))
# reg_diff_tl = tl_reg - targets['tl_reg'].to(torch.device("cuda"))
# reg_diff_tl = torch.sqrt(reg_diff_tl[..., 0]*reg_diff_tl[..., 0] + reg_diff_tl[..., 1]*reg_diff_tl[..., 1])
# reg_diff_bl = bl_reg - targets['bl_reg'].to(torch.device("cuda"))
# reg_diff_bl = torch.sqrt(reg_diff_bl[..., 0] * reg_diff_bl[..., 0] + reg_diff_bl[..., 1] * reg_diff_bl[..., 1])
# reg_diff_br = br_reg - targets['br_reg'].to(torch.device("cuda"))
# reg_diff_br = torch.sqrt(reg_diff_br[..., 0] * reg_diff_br[..., 0] + reg_diff_br[..., 1] * reg_diff_br[..., 1])
# reg_diff_ct = ct_reg - targets['ct_reg'].to(torch.device("cuda"))
# reg_diff_ct = torch.sqrt(reg_diff_ct[..., 0] * reg_diff_ct[..., 0] + reg_diff_ct[..., 1] * reg_diff_ct[..., 1])
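    # recover (x, y) grid coordinates from the flattened ground-truth indices
    # (index = y * width + x), then add the sub-pixel offsets below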
tl_xs = ((targets['tl_tag'] % (width * height)) % width).int().float().to(torch.device("cuda"))
tl_ys = ((targets['tl_tag'] % (width * height)) / width).int().float().to(torch.device("cuda"))
bl_xs = ((targets['bl_tag'] % (width * height)) % width).int().float().to(torch.device("cuda"))
bl_ys = ((targets['bl_tag'] % (width * height)) / width).int().float().to(torch.device("cuda"))
br_xs = ((targets['br_tag'] % (width * height)) % width).int().float().to(torch.device("cuda"))
br_ys = ((targets['br_tag'] % (width * height)) / width).int().float().to(torch.device("cuda"))
ct_xs = ((targets['ct_tag'] % (width * height)) % width).int().float().to(torch.device("cuda"))
ct_ys = ((targets['ct_tag'] % (width * height)) / width).int().float().to(torch.device("cuda"))
tl_xs_pr = (tl_xs + tl_reg[..., 0]).squeeze(0).to(torch.device("cpu")).numpy()
tl_ys_pr = (tl_ys + tl_reg[..., 1]).squeeze(0).to(torch.device("cpu")).numpy()
bl_xs_pr = (bl_xs + bl_reg[..., 0]).squeeze(0).to(torch.device("cpu")).numpy()
bl_ys_pr = (bl_ys + bl_reg[..., 1]).squeeze(0).to(torch.device("cpu")).numpy()
br_xs_pr = (br_xs + br_reg[..., 0]).squeeze(0).to(torch.device("cpu")).numpy()
br_ys_pr = (br_ys + br_reg[..., 1]).squeeze(0).to(torch.device("cpu")).numpy()
ct_xs_pr = (ct_xs + ct_reg[..., 0]).squeeze(0).to(torch.device("cpu")).numpy()
ct_ys_pr = (ct_ys + ct_reg[..., 1]).squeeze(0).to(torch.device("cpu")).numpy()
tl_xs_gt = (tl_xs + targets['tl_reg'][..., 0].to(torch.device("cuda"))).squeeze(0).to(torch.device("cpu")).numpy()
tl_ys_gt = (tl_ys + targets['tl_reg'][..., 1].to(torch.device("cuda"))).squeeze(0).to(torch.device("cpu")).numpy()
bl_xs_gt = (bl_xs + targets['bl_reg'][..., 0].to(torch.device("cuda"))).squeeze(0).to(torch.device("cpu")).numpy()
bl_ys_gt = (bl_ys + targets['bl_reg'][..., 1].to(torch.device("cuda"))).squeeze(0).to(torch.device("cpu")).numpy()
br_xs_gt = (br_xs + targets['br_reg'][..., 0].to(torch.device("cuda"))).squeeze(0).to(torch.device("cpu")).numpy()
br_ys_gt = (br_ys + targets['br_reg'][..., 1].to(torch.device("cuda"))).squeeze(0).to(torch.device("cpu")).numpy()
ct_xs_gt = (ct_xs + targets['ct_reg'][..., 0].to(torch.device("cuda"))).squeeze(0).to(torch.device("cpu")).numpy()
ct_ys_gt = (ct_ys + targets['ct_reg'][..., 1].to(torch.device("cuda"))).squeeze(0).to(torch.device("cpu")).numpy()
bboxes_gt = targets['bbox'][targets['reg_mask']]
    nm_instances = tl_xs_pr.shape[0]
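    # for each annotated instance, rebuild an (x1, y1, x2, y2, angle) box from
    # the predicted corners and from the ground-truth corners, then check for a
    # sufficiently overlapping pair with a small angle difference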
for i in range(nm_instances):
bbox_gt = bboxes_gt[i, :]
# prediction
bbox_coord_pr = []
tl_x_pr = tl_xs_pr[i]
tl_y_pr = tl_ys_pr[i]
bl_x_pr = bl_xs_pr[i]
bl_y_pr = bl_ys_pr[i]
br_x_pr = br_xs_pr[i]
br_y_pr = br_ys_pr[i]
# center
x_c = (tl_x_pr + br_x_pr) / 2.
y_c = (tl_y_pr + br_y_pr) / 2.
if bl_x_pr == br_x_pr:
p_y = tl_y_pr
p_x = br_x_pr
if br_y_pr > bl_y_pr:
angle = np.pi / 2.
else:
angle = -np.pi / 2.
elif bl_y_pr == br_y_pr:
p_x = tl_x_pr
p_y = br_y_pr
angle = 0.
else:
# angle
angle = math.atan2(-(br_y_pr - bl_y_pr), br_x_pr - bl_x_pr)
# find intersected point
a = (br_x_pr - bl_x_pr) / (br_y_pr - bl_y_pr)
b = br_y_pr - a * br_x_pr
delta_x = br_x_pr - bl_x_pr
delta_y = br_y_pr - bl_y_pr
p_x = (delta_x * tl_x_pr + delta_y * tl_y_pr - delta_x * b) / (delta_x + delta_x * a)
p_y = a * p_x + b
# w, h
w = np.sqrt((br_x_pr - p_x) * (br_x_pr - p_x) + (br_y_pr - p_y) * (br_y_pr - p_y))
h = np.sqrt((tl_x_pr - p_x) * (tl_x_pr - p_x) + (tl_y_pr - p_y) * (tl_y_pr - p_y))
bbox_coord_pr.append([x_c - w / 2, y_c - h / 2, x_c + w / 2, y_c + h / 2, angle])
bbox_coord_pr = np.array(bbox_coord_pr)
# groundtruth
boxes_coord_gt = []
tl_x_gt = tl_xs_gt[i]
tl_y_gt = tl_ys_gt[i]
bl_x_gt = bl_xs_gt[i]
bl_y_gt = bl_ys_gt[i]
br_x_gt = br_xs_gt[i]
br_y_gt = br_ys_gt[i]
if bl_x_gt == br_x_gt:
p_y = tl_y_gt
p_x = bl_x_gt
if br_y_gt > bl_y_gt:
angle = np.pi / 4
else:
angle = -np.pi / 4
else:
# center
x_c = (tl_x_gt + br_x_gt) / 2.
y_c = (tl_y_gt + br_y_gt) / 2.
# angle
angle = math.atan(-(br_y_gt - bl_y_gt)/(br_x_gt - bl_x_gt))
# find intersected point
a = (br_y_gt - bl_y_gt) / (br_x_gt - bl_x_gt)
b = br_y_gt - a * br_x_gt
delta_x = br_x_gt - bl_x_gt
delta_y = br_y_gt - bl_y_gt
p_x = (delta_x * tl_x_gt + delta_y * tl_y_gt - delta_y * b) / (delta_x + delta_y * a)
p_y = a * p_x + b
# w, h
w = np.sqrt((br_x_gt - p_x) * (br_x_gt - p_x) + (br_y_gt - p_y) * (br_y_gt - p_y))
h = np.sqrt((tl_x_gt - p_x) * (tl_x_gt - p_x) + (tl_y_gt - p_y) * (tl_y_gt - p_y))
boxes_coord_gt.append([x_c - w / 2, y_c - h / 2, x_c + w / 2, y_c + h / 2, angle])
boxes_coord_gt = np.array(boxes_coord_gt)
# print(np.array_equal(bbox_gt, boxes_coord_gt))
overlaps = _bbox_overlaps(np.ascontiguousarray(bbox_coord_pr[:, :4], dtype=np.float32),
np.ascontiguousarray(boxes_coord_gt[:, :4], dtype=np.float32),
bbox_coord_pr[:, -1], boxes_coord_gt[:, -1],
128, 128)
flag_suc = False
flag_exit = 0
for i in range(overlaps.shape[0]):
for j in range(overlaps.shape[1]):
value_overlap = overlaps[i, j]
angle_diff = math.fabs(bbox_coord_pr[i, -1] - boxes_coord_gt[j, -1])
if value_overlap > 0.25 and angle_diff < np.pi / 6:
flag_suc = True
flag_exit = 1
break
if flag_exit:
break
if flag_exit:
break
return min_tl, max_tl, min_bl, max_bl, min_br, max_br, dists_avg, flag_suc
def process(self, images, kernel=1, ae_threshold=1, K=100, num_dets=100):
with torch.no_grad():
output = self.model(images)[-1]
tl_heat = output['tl'].sigmoid_()
bl_heat = output['bl'].sigmoid_()
br_heat = output['br'].sigmoid_()
ct_heat = output['ct'].sigmoid_()
tl_tag = output['tl_tag']
bl_tag = output['bl_tag']
br_tag = output['br_tag']
tl_reg = output['tl_reg']
bl_reg = output['bl_reg']
br_reg = output['br_reg']
ct_reg = output['ct_reg']
batch, cat, height, width = tl_heat.size()
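    # keep only local maxima of the corner/center heatmaps before taking top-K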
tl_heat = _nms(tl_heat, kernel=3)
bl_heat = _nms(bl_heat, kernel=3)
br_heat = _nms(br_heat, kernel=3)
ct_heat = _nms(ct_heat, kernel=3)
tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = _topk(tl_heat, K=K)
bl_scores, bl_inds, bl_clses, bl_ys, bl_xs = _topk(bl_heat, K=K)
br_scores, br_inds, br_clses, br_ys, br_xs = _topk(br_heat, K=K)
ct_scores, ct_inds, ct_clses, ct_ys, ct_xs = _topk(ct_heat, K=K)
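    # broadcast the top-K corners of each type so that every (tl, bl, br)
    # combination forms a candidate triplet of shape (batch, K, K, K)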
tl_ys = tl_ys.view(batch, K, 1, 1).expand(batch, K, K, K)
tl_xs = tl_xs.view(batch, K, 1, 1).expand(batch, K, K, K)
bl_ys = bl_ys.view(batch, 1, K, 1).expand(batch, K, K, K)
bl_xs = bl_xs.view(batch, 1, K, 1).expand(batch, K, K, K)
br_ys = br_ys.view(batch, 1, 1, K).expand(batch, K, K, K)
br_xs = br_xs.view(batch, 1, 1, K).expand(batch, K, K, K)
ct_ys = ct_ys.view(batch, 1, K).expand(batch, K, K)
ct_xs = ct_xs.view(batch, 1, K).expand(batch, K, K)
if tl_reg is not None and bl_reg is not None and br_reg is not None:
tl_reg = _tranpose_and_gather_feat(tl_reg, tl_inds)
tl_reg = tl_reg.view(batch, K, 1, 1, 2)
bl_reg = _tranpose_and_gather_feat(bl_reg, bl_inds)
bl_reg = bl_reg.view(batch, 1, K, 1, 2)
br_reg = _tranpose_and_gather_feat(br_reg, br_inds)
br_reg = br_reg.view(batch, 1, 1, K, 2)
ct_reg = _tranpose_and_gather_feat(ct_reg, ct_inds)
ct_reg = ct_reg.view(batch, 1, K, 2)
tl_xs = tl_xs + tl_reg[..., 0]
tl_ys = tl_ys + tl_reg[..., 1]
bl_xs = bl_xs + bl_reg[..., 0]
bl_ys = bl_ys + bl_reg[..., 1]
br_xs = br_xs + br_reg[..., 0]
br_ys = br_ys + br_reg[..., 1]
ct_xs = ct_xs + ct_reg[..., 0]
ct_ys = ct_ys + ct_reg[..., 1]
# all possible boxes based on top k corners (ignoring class)
bboxes = torch.stack((tl_xs, tl_ys, bl_xs, bl_ys, br_xs, br_ys), dim=4)
tl_tag = _tranpose_and_gather_feat(tl_tag, tl_inds)
tl_tag = tl_tag.view(batch, K, 1, 1)
bl_tag = _tranpose_and_gather_feat(bl_tag, bl_inds)
bl_tag = bl_tag.view(batch, 1, K, 1)
br_tag = _tranpose_and_gather_feat(br_tag, br_inds)
br_tag = br_tag.view(batch, 1, 1, K)
avg_tag = (tl_tag + bl_tag + br_tag) / 3
dists = (torch.abs(tl_tag - avg_tag) + torch.abs(bl_tag - avg_tag) + torch.abs(br_tag - avg_tag)) / 3
tl_scores = tl_scores.view(batch, K, 1, 1).expand(batch, K, K, K)
bl_scores = bl_scores.view(batch, 1, K, 1).expand(batch, K, K, K)
br_scores = br_scores.view(batch, 1, 1, K).expand(batch, K, K, K)
# reject boxes based on corner scores
# sc_inds = (tl_scores < scores_thresh) | (bl_scores < scores_thresh) | (br_scores < scores_thresh)
scores = (tl_scores + bl_scores + br_scores) / 3
# reject boxes based on classes
tl_clses = tl_clses.view(batch, K, 1, 1).expand(batch, K, K, K)
bl_clses = bl_clses.view(batch, 1, K, 1).expand(batch, K, K, K)
br_clses = br_clses.view(batch, 1, 1, K).expand(batch, K, K, K)
cls_inds = (tl_clses != bl_clses) | (bl_clses != br_clses) | (tl_clses != br_clses)
# reject boxes based on distances
dist_inds = (dists > ae_threshold)
scores[cls_inds] = -1
scores[dist_inds] = -1
# scores[sc_inds] = -1
scores = scores.view(batch, -1)
scores, inds = torch.topk(scores, num_dets)
scores = scores.unsqueeze(2)
bboxes = bboxes.view(batch, -1, 6)
bboxes = _gather_feat(bboxes, inds)
clses = bl_clses.contiguous().view(batch, -1, 1)
clses = _gather_feat(clses, inds).float()
tl_scores = tl_scores.contiguous().view(batch, -1, 1)
tl_scores = _gather_feat(tl_scores, inds).float()
bl_scores = bl_scores.contiguous().view(batch, -1, 1)
bl_scores = _gather_feat(bl_scores, inds).float()
br_scores = br_scores.contiguous().view(batch, -1, 1)
br_scores = _gather_feat(br_scores, inds).float()
ct_xs = ct_xs[:, 0, :]
ct_ys = ct_ys[:, 0, :]
centers = torch.cat([ct_xs.unsqueeze(2), ct_ys.unsqueeze(2), ct_clses.float().unsqueeze(2), ct_scores.unsqueeze(2)], dim=2)
detections = torch.cat([bboxes, scores, tl_scores, bl_scores, br_scores, clses], dim=2)
# tl_heat = output['tl'].sigmoid_()
# bl_heat = output['bl'].sigmoid_()
# br_heat = output['br'].sigmoid_()
# ct_heat = output['ct'].sigmoid_()
#
# tl_tag = output['tl_tag']
# bl_tag = output['bl_tag']
# br_tag = output['br_tag']
#
# tl_reg = output['tl_reg']
# bl_reg = output['bl_reg']
# br_reg = output['br_reg']
# ct_reg = output['ct_reg']
#
# kernel = self.opt.nms_kernel
# ae_threshold = self.opt.ae_threshold
# K = self.opt.K
#
# batch, cat, height, width = tl_heat.size()
#
# # perform nms on heatmaps
# tl_heat = _nms(tl_heat, kernel=kernel)
# bl_heat = _nms(bl_heat, kernel=kernel)
# br_heat = _nms(br_heat, kernel=kernel)
# ct_heat = _nms(ct_heat, kernel=kernel)
#
# tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = _topk(tl_heat, K=K)
# bl_scores, bl_inds, bl_clses, bl_ys, bl_xs = _topk(bl_heat, K=K)
# br_scores, br_inds, br_clses, br_ys, br_xs = _topk(br_heat, K=K)
# ct_scores, ct_inds, ct_clses, ct_ys, ct_xs = _topk(ct_heat, K=K)
#
# tl_ys = tl_ys.view(batch, K, 1, 1).expand(batch, K, K, K)
# tl_xs = tl_xs.view(batch, K, 1, 1).expand(batch, K, K, K)
# bl_ys = bl_ys.view(batch, 1, K, 1).expand(batch, K, K, K)
# bl_xs = bl_xs.view(batch, 1, K, 1).expand(batch, K, K, K)
# br_ys = br_ys.view(batch, 1, 1, K).expand(batch, K, K, K)
# br_xs = br_xs.view(batch, 1, 1, K).expand(batch, K, K, K)
# ct_ys = ct_ys.view(batch, 1, K).expand(batch, K, K)
# ct_xs = ct_xs.view(batch, 1, K).expand(batch, K, K)
#
# if tl_reg is not None and bl_reg is not None and br_reg is not None:
# tl_reg = _tranpose_and_gather_feat(tl_reg, tl_inds)
# tl_reg = tl_reg.view(batch, K, 1, 1, 2)
# bl_reg = _tranpose_and_gather_feat(bl_reg, bl_inds)
# bl_reg = bl_reg.view(batch, 1, K, 1, 2)
# br_reg = _tranpose_and_gather_feat(br_reg, br_inds)
# br_reg = br_reg.view(batch, 1, 1, K, 2)
# ct_reg = _tranpose_and_gather_feat(ct_reg, ct_inds)
# ct_reg = ct_reg.view(batch, 1, K, 2)
#
# tl_xs = tl_xs + tl_reg[..., 0]
# tl_ys = tl_ys + tl_reg[..., 1]
# bl_xs = bl_xs + bl_reg[..., 0]
# bl_ys = bl_ys + bl_reg[..., 1]
# br_xs = br_xs + br_reg[..., 0]
# br_ys = br_ys + br_reg[..., 1]
# ct_xs = ct_xs + ct_reg[..., 0]
# ct_ys = ct_ys + ct_reg[..., 1]
#
# # all possible boxes based on top k corners (ignoring class)
# bboxes = torch.stack((tl_xs, tl_ys, bl_xs, bl_ys, br_xs, br_ys), dim=4)
#
# tl_tag = _tranpose_and_gather_feat(tl_tag, tl_inds)
# tl_tag = tl_tag.view(batch, K, 1, 1).expand(batch, K, K, K)
# bl_tag = _tranpose_and_gather_feat(bl_tag, bl_inds)
# bl_tag = bl_tag.view(batch, 1, K, 1).expand(batch, K, K, K)
# br_tag = _tranpose_and_gather_feat(br_tag, br_inds)
# br_tag = br_tag.view(batch, 1, 1, K).expand(batch, K, K, K)
# avg_tag = (tl_tag + bl_tag + br_tag) / 3
# dists = (torch.abs(tl_tag - avg_tag) + torch.abs(bl_tag - avg_tag) + torch.abs(br_tag - avg_tag)) / 3
#
# tl_scores = tl_scores.view(batch, K, 1, 1).expand(batch, K, K, K)
# bl_scores = bl_scores.view(batch, 1, K, 1).expand(batch, K, K, K)
# br_scores = br_scores.view(batch, 1, 1, K).expand(batch, K, K, K)
# scores = (tl_scores + bl_scores + br_scores) / 3
#
# # reject boxes based on classes
# tl_clses = tl_clses.view(batch, K, 1, 1).expand(batch, K, K, K)
# bl_clses = bl_clses.view(batch, 1, K, 1).expand(batch, K, K, K)
# br_clses = br_clses.view(batch, 1, 1, K).expand(batch, K, K, K)
# cls_inds = (tl_clses != bl_clses) | (bl_clses != br_clses) | (tl_clses != br_clses)
#
# # reject boxes based on distances
# dist_inds = (dists > ae_threshold)
#
# # instead of filtering prediction according to the out-of-bound rotation, do data augmentation to mirror groundtruth
#
# scores[cls_inds] = -1
# scores[dist_inds] = -1
#
# scores = scores.view(batch, -1)
# scores, inds = torch.topk(scores, 100)
# scores = scores.unsqueeze(2)
#
# bboxes = bboxes.view(batch, -1, 6)
# bboxes = _gather_feat(bboxes, inds)
#
# tl_tag = tl_tag.contiguous().view(batch, -1, 1)
# tl_tag = _gather_feat(tl_tag, inds)
# bl_tag = bl_tag.contiguous().view(batch, -1, 1)
# bl_tag = _gather_feat(bl_tag, inds)
# br_tag = br_tag.contiguous().view(batch, -1, 1)
# br_tag = _gather_feat(br_tag, inds)
# avg_tag = avg_tag.contiguous().view(batch, -1, 1)
# avg_tag = _gather_feat(avg_tag, inds)
#
# clses = bl_clses.contiguous().view(batch, -1, 1)
# clses = _gather_feat(clses, inds).float()
#
# tl_scores = tl_scores.contiguous().view(batch, -1, 1)
# tl_scores = _gather_feat(tl_scores, inds).float()
# bl_scores = bl_scores.contiguous().view(batch, -1, 1)
# bl_scores = _gather_feat(bl_scores, inds).float()
# br_scores = br_scores.contiguous().view(batch, -1, 1)
# br_scores = _gather_feat(br_scores, inds).float()
#
# ct_xs = ct_xs[:, 0, :]
# ct_ys = ct_ys[:, 0, :]
#
# centers = torch.cat(
# [ct_xs.unsqueeze(2), ct_ys.unsqueeze(2), ct_clses.float().unsqueeze(2), ct_scores.unsqueeze(2)], dim=2)
# detections = torch.cat([bboxes, scores, tl_scores, bl_scores, br_scores, clses, tl_tag, bl_tag, br_tag, avg_tag], dim=2)
return detections, centers
def post_process(self, detections, centers, num_classes, bbox_size_threshold, ori_threshold):
detections = detections.detach().cpu().numpy()
centers = centers.detach().cpu().numpy()
detections = detections.reshape(1, -1, detections.shape[2])
centers = centers.reshape(1, -1, centers.shape[2])
ret = []
for i in range(detections.shape[0]):
top_preds = {}
detections[i, :, 0:2] *= 4.
detections[i, :, 2:4] *= 4.
detections[i, :, 4:6] *= 4.
centers[i, :, 0:2] *= 4.
      # Drop bboxes whose central region contains no predicted center point
detections = np.concatenate(detections, axis=1)
centers = np.concatenate(centers, axis=1)
# filter by orientation distance between quantized and continuous predicted angle
classes = detections[..., -1]
quant_ori = (5.0 * classes - 85.0) / 180 * np.pi
bl_x = detections[..., 2]
bl_y = detections[..., 3]
br_x = detections[..., 4]
br_y = detections[..., 5]
cont_ori = np.arctan(-(br_y - bl_y) / (br_x - bl_x))
dist_ori = np.fabs(quant_ori - cont_ori)
ori_ind = dist_ori < ori_threshold
valid_detections = detections[ori_ind]
valid_ind = valid_detections[:, 6] > -1
valid_detections = valid_detections[valid_ind]
# valid_ind = detections[:, 6] > -1
# valid_detections = detections[valid_ind]
box_width = np.sqrt(np.power(valid_detections[:, 2] - valid_detections[:, 4], 2) + \
np.power(valid_detections[:, 3] - valid_detections[:, 5], 2))
box_height = np.sqrt(np.power(valid_detections[:, 2] - valid_detections[:, 0], 2) + \
np.power(valid_detections[:, 3] - valid_detections[:, 1], 2))
s_ind = (box_width * box_height <= bbox_size_threshold)
l_ind = (box_width * box_height > bbox_size_threshold)
s_detections = valid_detections[s_ind]
l_detections = valid_detections[l_ind]
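    # rescore detections whose central region contains a center point of the
    # same class; detections without a supporting center keep score -1 and are
    # rejected later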
    # post-process small bounding boxes
s_tl_x = (2 * s_detections[:, 0] + s_detections[:, 4]) / 3
s_br_x = (s_detections[:, 0] + 2 * s_detections[:, 4]) / 3
s_tl_y = (2 * s_detections[:, 1] + s_detections[:, 5]) / 3
s_br_y = (s_detections[:, 1] + 2 * s_detections[:, 5]) / 3
s_temp_score = copy.copy(s_detections[:, 6])
s_detections[:, 6] = -1
center_x = centers[:, 0][:, np.newaxis]
center_y = centers[:, 1][:, np.newaxis]
s_tl_x = s_tl_x[np.newaxis, :]
s_br_x = s_br_x[np.newaxis, :]
s_tl_y = s_tl_y[np.newaxis, :]
s_br_y = s_br_y[np.newaxis, :]
ind_x1 = (center_x > s_tl_x) & (center_x < s_br_x)
ind_x2 = (center_x < s_tl_x) & (center_x > s_br_x)
ind_y1 = (center_y > s_tl_y) & (center_y < s_br_y)
ind_y2 = (center_y < s_tl_y) & (center_y > s_br_y)
ind_cls = (centers[:, 2][:, np.newaxis] - s_detections[:, -1][np.newaxis, :]) == 0
ind_s_new_score = np.max((((ind_x1 + 0) & (ind_y1 + 0) & (ind_cls + 0)) |
((ind_x1 + 0) & (ind_y2 + 0) & (ind_cls + 0)) |
((ind_x2 + 0) & (ind_y2 + 0) & (ind_cls + 0))), axis=0) == 1
index_s_new_score = np.argmax((((ind_x1 + 0) & (ind_y1 + 0) & (ind_cls + 0)) |
((ind_x1 + 0) & (ind_y2 + 0) & (ind_cls + 0)) |
((ind_x2 + 0) & (ind_y2 + 0) & (ind_cls + 0)))[:, ind_s_new_score], axis=0)
s_corner_score = s_temp_score[ind_s_new_score]
s_center_score = centers[index_s_new_score, 3]
s_detections[:, 6][ind_s_new_score] = (s_corner_score * 3 + s_center_score) / 4
    # post-process large bounding boxes
l_tl_x = (2 * l_detections[:, 0] + l_detections[:, 4]) / 3
l_br_x = (l_detections[:, 0] + 2 * l_detections[:, 4]) / 3
l_tl_y = (2 * l_detections[:, 1] + l_detections[:, 5]) / 3
l_br_y = (l_detections[:, 1] + 2 * l_detections[:, 5]) / 3
l_temp_score = copy.copy(l_detections[:, 6])
l_detections[:, 6] = -1
center_x = centers[:, 0][:, np.newaxis]
center_y = centers[:, 1][:, np.newaxis]
l_tl_x = l_tl_x[np.newaxis, :]
l_br_x = l_br_x[np.newaxis, :]
l_tl_y = l_tl_y[np.newaxis, :]
l_br_y = l_br_y[np.newaxis, :]
ind_x1 = (center_x > l_tl_x) & (center_x < l_br_x)
ind_x2 = (center_x < l_tl_x) & (center_x > l_br_x)
ind_y1 = (center_y > l_tl_y) & (center_y < l_br_y)
ind_y2 = (center_y < l_tl_y) & (center_y > l_br_y)
ind_cls = (centers[:, 2][:, np.newaxis] - l_detections[:, -1][np.newaxis, :]) == 0
ind_l_new_score = np.max((((ind_x1 + 0) & (ind_y1 + 0) & (ind_cls + 0)) |
((ind_x1 + 0) & (ind_y2 + 0) & (ind_cls + 0)) |
((ind_x2 + 0) & (ind_y2 + 0) & (ind_cls + 0))), axis=0) == 1
index_l_new_score = np.argmax((((ind_x1 + 0) & (ind_y1 + 0) & (ind_cls + 0)) |
((ind_x1 + 0) & (ind_y2 + 0) & (ind_cls + 0)) |
((ind_x2 + 0) & (ind_y2 + 0) & (ind_cls + 0)))[:, ind_l_new_score], axis=0)
l_corner_score = l_temp_score[ind_l_new_score]
l_center_score = centers[index_l_new_score, 3]
l_detections[:, 6][ind_l_new_score] = (l_corner_score * 3 + l_center_score) / 4
detections = np.concatenate([l_detections, s_detections], axis=0)
detections = detections[np.argsort(-detections[:, 6])]
classes = detections[..., -1]
# reject detections with negative scores
keep_inds = (detections[:, 6] > -1)
detections = detections[keep_inds]
classes = classes[keep_inds]
detections = np.expand_dims(detections, axis=0)
for j in range(num_classes):
inds = (classes == j)
top_preds[j + 1] = detections[i, inds, :].astype(np.float32).tolist()
ret.append(top_preds)
for j in range(1, num_classes + 1):
ret[0][j] = np.array(ret[0][j], dtype=np.float32).reshape(-1, 11)
return ret[0]
def merge_outputs(self, detections):
results = {}
for j in range(1, self.num_classes + 1):
results[j] = np.concatenate(
[detection[j] for detection in detections], axis=0).astype(np.float32)
if len(self.scales) > 1 or self.opt.nms:
soft_nms(results[j], Nt=0.5, method=2)
scores = np.hstack(
[results[j][:, 6] for j in range(1, self.num_classes + 1)])
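    # keep at most max_per_image detections overall by thresholding on the
    # score of the max_per_image-th best detection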
if len(scores) > self.max_per_image:
kth = len(scores) - self.max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, self.num_classes + 1):
keep_inds = (results[j][:, 6] >= thresh)
results[j] = results[j][keep_inds]
return results
|
[
"numpy.sqrt",
"models.decode._topk",
"models.utils._tranpose_and_gather_feat",
"numpy.ascontiguousarray",
"numpy.argsort",
"numpy.array",
"copy.copy",
"math.atan",
"numpy.partition",
"numpy.max",
"math.fabs",
"numpy.concatenate",
"numpy.arctan",
"torch.abs",
"torch.topk",
"numpy.argmax",
"models.utils._gather_feat",
"math.atan2",
"torch.no_grad",
"torch.cat",
"torch.device",
"numpy.fabs",
"numpy.power",
"torch.stack",
"numpy.expand_dims",
"models.decode._nms"
] |
[((626, 641), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (639, 641), False, 'import torch\n'), ((5076, 5096), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (5088, 5096), False, 'import torch\n'), ((5178, 5198), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (5190, 5198), False, 'import torch\n'), ((5280, 5300), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (5292, 5300), False, 'import torch\n'), ((5382, 5402), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (5394, 5402), False, 'import torch\n'), ((5484, 5504), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (5496, 5504), False, 'import torch\n'), ((5586, 5606), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (5598, 5606), False, 'import torch\n'), ((5688, 5708), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (5700, 5708), False, 'import torch\n'), ((5790, 5810), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (5802, 5810), False, 'import torch\n'), ((8815, 8893), 'numpy.sqrt', 'np.sqrt', (['((br_x_pr - p_x) * (br_x_pr - p_x) + (br_y_pr - p_y) * (br_y_pr - p_y))'], {}), '((br_x_pr - p_x) * (br_x_pr - p_x) + (br_y_pr - p_y) * (br_y_pr - p_y))\n', (8822, 8893), True, 'import numpy as np\n'), ((8908, 8986), 'numpy.sqrt', 'np.sqrt', (['((tl_x_pr - p_x) * (tl_x_pr - p_x) + (tl_y_pr - p_y) * (tl_y_pr - p_y))'], {}), '((tl_x_pr - p_x) * (tl_x_pr - p_x) + (tl_y_pr - p_y) * (tl_y_pr - p_y))\n', (8915, 8986), True, 'import numpy as np\n'), ((9106, 9129), 'numpy.array', 'np.array', (['bbox_coord_pr'], {}), '(bbox_coord_pr)\n', (9114, 9129), True, 'import numpy as np\n'), ((10210, 10288), 'numpy.sqrt', 'np.sqrt', (['((br_x_gt - p_x) * (br_x_gt - p_x) + (br_y_gt - p_y) * (br_y_gt - p_y))'], {}), '((br_x_gt - p_x) * (br_x_gt - p_x) + (br_y_gt - p_y) * (br_y_gt - p_y))\n', (10217, 10288), True, 'import numpy as np\n'), ((10303, 10381), 'numpy.sqrt', 'np.sqrt', (['((tl_x_gt - p_x) * (tl_x_gt - p_x) + (tl_y_gt - p_y) * (tl_y_gt - p_y))'], {}), '((tl_x_gt - p_x) * (tl_x_gt - p_x) + (tl_y_gt - p_y) * (tl_y_gt - p_y))\n', (10310, 10381), True, 'import numpy as np\n'), ((10502, 10526), 'numpy.array', 'np.array', (['boxes_coord_gt'], {}), '(boxes_coord_gt)\n', (10510, 10526), True, 'import numpy as np\n'), ((11632, 11647), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11645, 11647), False, 'import torch\n'), ((12197, 12220), 'models.decode._nms', '_nms', (['tl_heat'], {'kernel': '(3)'}), '(tl_heat, kernel=3)\n', (12201, 12220), False, 'from models.decode import _topk_original, _topk, _topk_channel, _nms\n'), ((12241, 12264), 'models.decode._nms', '_nms', (['bl_heat'], {'kernel': '(3)'}), '(bl_heat, kernel=3)\n', (12245, 12264), False, 'from models.decode import _topk_original, _topk, _topk_channel, _nms\n'), ((12285, 12308), 'models.decode._nms', '_nms', (['br_heat'], {'kernel': '(3)'}), '(br_heat, kernel=3)\n', (12289, 12308), False, 'from models.decode import _topk_original, _topk, _topk_channel, _nms\n'), ((12329, 12352), 'models.decode._nms', '_nms', (['ct_heat'], {'kernel': '(3)'}), '(ct_heat, kernel=3)\n', (12333, 12352), False, 'from models.decode import _topk_original, _topk, _topk_channel, _nms\n'), ((12409, 12428), 'models.decode._topk', '_topk', (['tl_heat'], {'K': 'K'}), '(tl_heat, K=K)\n', (12414, 12428), False, 'from models.decode import _topk_original, _topk, _topk_channel, _nms\n'), ((12484, 12503), 'models.decode._topk', '_topk', (['bl_heat'], {'K': 'K'}), 
'(bl_heat, K=K)\n', (12489, 12503), False, 'from models.decode import _topk_original, _topk, _topk_channel, _nms\n'), ((12559, 12578), 'models.decode._topk', '_topk', (['br_heat'], {'K': 'K'}), '(br_heat, K=K)\n', (12564, 12578), False, 'from models.decode import _topk_original, _topk, _topk_channel, _nms\n'), ((12634, 12653), 'models.decode._topk', '_topk', (['ct_heat'], {'K': 'K'}), '(ct_heat, K=K)\n', (12639, 12653), False, 'from models.decode import _topk_original, _topk, _topk_channel, _nms\n'), ((14196, 14258), 'torch.stack', 'torch.stack', (['(tl_xs, tl_ys, bl_xs, bl_ys, br_xs, br_ys)'], {'dim': '(4)'}), '((tl_xs, tl_ys, bl_xs, bl_ys, br_xs, br_ys), dim=4)\n', (14207, 14258), False, 'import torch\n'), ((14279, 14321), 'models.utils._tranpose_and_gather_feat', '_tranpose_and_gather_feat', (['tl_tag', 'tl_inds'], {}), '(tl_tag, tl_inds)\n', (14304, 14321), False, 'from models.utils import _tranpose_and_gather_feat, _gather_feat\n'), ((14388, 14430), 'models.utils._tranpose_and_gather_feat', '_tranpose_and_gather_feat', (['bl_tag', 'bl_inds'], {}), '(bl_tag, bl_inds)\n', (14413, 14430), False, 'from models.utils import _tranpose_and_gather_feat, _gather_feat\n'), ((14497, 14539), 'models.utils._tranpose_and_gather_feat', '_tranpose_and_gather_feat', (['br_tag', 'br_inds'], {}), '(br_tag, br_inds)\n', (14522, 14539), False, 'from models.utils import _tranpose_and_gather_feat, _gather_feat\n'), ((15812, 15840), 'torch.topk', 'torch.topk', (['scores', 'num_dets'], {}), '(scores, num_dets)\n', (15822, 15840), False, 'import torch\n'), ((15945, 15971), 'models.utils._gather_feat', '_gather_feat', (['bboxes', 'inds'], {}), '(bboxes, inds)\n', (15957, 15971), False, 'from models.utils import _tranpose_and_gather_feat, _gather_feat\n'), ((16681, 16755), 'torch.cat', 'torch.cat', (['[bboxes, scores, tl_scores, bl_scores, br_scores, clses]'], {'dim': '(2)'}), '([bboxes, scores, tl_scores, bl_scores, br_scores, clses], dim=2)\n', (16690, 16755), False, 'import torch\n'), ((23561, 23595), 'numpy.concatenate', 'np.concatenate', (['detections'], {'axis': '(1)'}), '(detections, axis=1)\n', (23575, 23595), True, 'import numpy as np\n'), ((23614, 23645), 'numpy.concatenate', 'np.concatenate', (['centers'], {'axis': '(1)'}), '(centers, axis=1)\n', (23628, 23645), True, 'import numpy as np\n'), ((23987, 24028), 'numpy.arctan', 'np.arctan', (['(-(br_y - bl_y) / (br_x - bl_x))'], {}), '(-(br_y - bl_y) / (br_x - bl_x))\n', (23996, 24028), True, 'import numpy as np\n'), ((24048, 24077), 'numpy.fabs', 'np.fabs', (['(quant_ori - cont_ori)'], {}), '(quant_ori - cont_ori)\n', (24055, 24077), True, 'import numpy as np\n'), ((25298, 25327), 'copy.copy', 'copy.copy', (['s_detections[:, 6]'], {}), '(s_detections[:, 6])\n', (25307, 25327), False, 'import copy\n'), ((26228, 26395), 'numpy.argmax', 'np.argmax', (['(ind_x1 + 0 & ind_y1 + 0 & ind_cls + 0 | ind_x1 + 0 & ind_y2 + 0 & ind_cls +\n 0 | ind_x2 + 0 & ind_y2 + 0 & ind_cls + 0)[:, ind_s_new_score]'], {'axis': '(0)'}), '((ind_x1 + 0 & ind_y1 + 0 & ind_cls + 0 | ind_x1 + 0 & ind_y2 + 0 &\n ind_cls + 0 | ind_x2 + 0 & ind_y2 + 0 & ind_cls + 0)[:, ind_s_new_score\n ], axis=0)\n', (26237, 26395), True, 'import numpy as np\n'), ((27025, 27054), 'copy.copy', 'copy.copy', (['l_detections[:, 6]'], {}), '(l_detections[:, 6])\n', (27034, 27054), False, 'import copy\n'), ((27955, 28122), 'numpy.argmax', 'np.argmax', (['(ind_x1 + 0 & ind_y1 + 0 & ind_cls + 0 | ind_x1 + 0 & ind_y2 + 0 & ind_cls +\n 0 | ind_x2 + 0 & ind_y2 + 0 & ind_cls + 0)[:, ind_l_new_score]'], {'axis': 
'(0)'}), '((ind_x1 + 0 & ind_y1 + 0 & ind_cls + 0 | ind_x1 + 0 & ind_y2 + 0 &\n ind_cls + 0 | ind_x2 + 0 & ind_y2 + 0 & ind_cls + 0)[:, ind_l_new_score\n ], axis=0)\n', (27964, 28122), True, 'import numpy as np\n'), ((28436, 28488), 'numpy.concatenate', 'np.concatenate', (['[l_detections, s_detections]'], {'axis': '(0)'}), '([l_detections, s_detections], axis=0)\n', (28450, 28488), True, 'import numpy as np\n'), ((28786, 28820), 'numpy.expand_dims', 'np.expand_dims', (['detections'], {'axis': '(0)'}), '(detections, axis=0)\n', (28800, 28820), True, 'import numpy as np\n'), ((2861, 2881), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2873, 2881), False, 'import torch\n'), ((2954, 2974), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2966, 2974), False, 'import torch\n'), ((3047, 3067), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3059, 3067), False, 'import torch\n'), ((3919, 3939), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3931, 3939), False, 'import torch\n'), ((4012, 4032), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4024, 4032), False, 'import torch\n'), ((4105, 4125), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4117, 4125), False, 'import torch\n'), ((4198, 4218), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (4210, 4218), False, 'import torch\n'), ((9768, 9821), 'math.atan', 'math.atan', (['(-(br_y_gt - bl_y_gt) / (br_x_gt - bl_x_gt))'], {}), '(-(br_y_gt - bl_y_gt) / (br_x_gt - bl_x_gt))\n', (9777, 9821), False, 'import math\n'), ((10623, 10683), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['bbox_coord_pr[:, :4]'], {'dtype': 'np.float32'}), '(bbox_coord_pr[:, :4], dtype=np.float32)\n', (10643, 10683), True, 'import numpy as np\n'), ((10721, 10782), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['boxes_coord_gt[:, :4]'], {'dtype': 'np.float32'}), '(boxes_coord_gt[:, :4], dtype=np.float32)\n', (10741, 10782), True, 'import numpy as np\n'), ((13290, 13332), 'models.utils._tranpose_and_gather_feat', '_tranpose_and_gather_feat', (['tl_reg', 'tl_inds'], {}), '(tl_reg, tl_inds)\n', (13315, 13332), False, 'from models.utils import _tranpose_and_gather_feat, _gather_feat\n'), ((13410, 13452), 'models.utils._tranpose_and_gather_feat', '_tranpose_and_gather_feat', (['bl_reg', 'bl_inds'], {}), '(bl_reg, bl_inds)\n', (13435, 13452), False, 'from models.utils import _tranpose_and_gather_feat, _gather_feat\n'), ((13530, 13572), 'models.utils._tranpose_and_gather_feat', '_tranpose_and_gather_feat', (['br_reg', 'br_inds'], {}), '(br_reg, br_inds)\n', (13555, 13572), False, 'from models.utils import _tranpose_and_gather_feat, _gather_feat\n'), ((13650, 13692), 'models.utils._tranpose_and_gather_feat', '_tranpose_and_gather_feat', (['ct_reg', 'ct_inds'], {}), '(ct_reg, ct_inds)\n', (13675, 13692), False, 'from models.utils import _tranpose_and_gather_feat, _gather_feat\n'), ((25967, 26105), 'numpy.max', 'np.max', (['(ind_x1 + 0 & ind_y1 + 0 & ind_cls + 0 | ind_x1 + 0 & ind_y2 + 0 & ind_cls +\n 0 | ind_x2 + 0 & ind_y2 + 0 & ind_cls + 0)'], {'axis': '(0)'}), '(ind_x1 + 0 & ind_y1 + 0 & ind_cls + 0 | ind_x1 + 0 & ind_y2 + 0 & \n ind_cls + 0 | ind_x2 + 0 & ind_y2 + 0 & ind_cls + 0, axis=0)\n', (25973, 26105), True, 'import numpy as np\n'), ((27694, 27832), 'numpy.max', 'np.max', (['(ind_x1 + 0 & ind_y1 + 0 & ind_cls + 0 | ind_x1 + 0 & ind_y2 + 0 & ind_cls +\n 0 | ind_x2 + 0 & ind_y2 + 0 & ind_cls + 0)'], {'axis': '(0)'}), 
'(ind_x1 + 0 & ind_y1 + 0 & ind_cls + 0 | ind_x1 + 0 & ind_y2 + 0 & \n ind_cls + 0 | ind_x2 + 0 & ind_y2 + 0 & ind_cls + 0, axis=0)\n', (27700, 27832), True, 'import numpy as np\n'), ((28521, 28550), 'numpy.argsort', 'np.argsort', (['(-detections[:, 6])'], {}), '(-detections[:, 6])\n', (28531, 28550), True, 'import numpy as np\n'), ((29678, 29703), 'numpy.partition', 'np.partition', (['scores', 'kth'], {}), '(scores, kth)\n', (29690, 29703), True, 'import numpy as np\n'), ((3371, 3390), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3383, 3390), False, 'import torch\n'), ((3448, 3467), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3460, 3467), False, 'import torch\n'), ((3525, 3544), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3537, 3544), False, 'import torch\n'), ((5869, 5888), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5881, 5888), False, 'import torch\n'), ((5954, 5973), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5966, 5973), False, 'import torch\n'), ((6039, 6058), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6051, 6058), False, 'import torch\n'), ((6124, 6143), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6136, 6143), False, 'import torch\n'), ((6209, 6228), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6221, 6228), False, 'import torch\n'), ((6294, 6313), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6306, 6313), False, 'import torch\n'), ((6379, 6398), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6391, 6398), False, 'import torch\n'), ((6464, 6483), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6476, 6483), False, 'import torch\n'), ((6586, 6605), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6598, 6605), False, 'import torch\n'), ((6707, 6726), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6719, 6726), False, 'import torch\n'), ((6828, 6847), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6840, 6847), False, 'import torch\n'), ((6949, 6968), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6961, 6968), False, 'import torch\n'), ((7070, 7089), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (7082, 7089), False, 'import torch\n'), ((7191, 7210), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (7203, 7210), False, 'import torch\n'), ((7312, 7331), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (7324, 7331), False, 'import torch\n'), ((7433, 7452), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (7445, 7452), False, 'import torch\n'), ((8373, 8424), 'math.atan2', 'math.atan2', (['(-(br_y_pr - bl_y_pr))', '(br_x_pr - bl_x_pr)'], {}), '(-(br_y_pr - bl_y_pr), br_x_pr - bl_x_pr)\n', (8383, 8424), False, 'import math\n'), ((11137, 11192), 'math.fabs', 'math.fabs', (['(bbox_coord_pr[i, -1] - boxes_coord_gt[j, -1])'], {}), '(bbox_coord_pr[i, -1] - boxes_coord_gt[j, -1])\n', (11146, 11192), False, 'import math\n'), ((14717, 14744), 'torch.abs', 'torch.abs', (['(br_tag - avg_tag)'], {}), '(br_tag - avg_tag)\n', (14726, 14744), False, 'import torch\n'), ((16050, 16075), 'models.utils._gather_feat', '_gather_feat', (['clses', 'inds'], {}), '(clses, inds)\n', (16062, 16075), False, 'from models.utils import _tranpose_and_gather_feat, _gather_feat\n'), ((16170, 16199), 'models.utils._gather_feat', 
'_gather_feat', (['tl_scores', 'inds'], {}), '(tl_scores, inds)\n', (16182, 16199), False, 'from models.utils import _tranpose_and_gather_feat, _gather_feat\n'), ((16294, 16323), 'models.utils._gather_feat', '_gather_feat', (['bl_scores', 'inds'], {}), '(bl_scores, inds)\n', (16306, 16323), False, 'from models.utils import _tranpose_and_gather_feat, _gather_feat\n'), ((16418, 16447), 'models.utils._gather_feat', '_gather_feat', (['br_scores', 'inds'], {}), '(br_scores, inds)\n', (16430, 16447), False, 'from models.utils import _tranpose_and_gather_feat, _gather_feat\n'), ((24397, 24457), 'numpy.power', 'np.power', (['(valid_detections[:, 2] - valid_detections[:, 4])', '(2)'], {}), '(valid_detections[:, 2] - valid_detections[:, 4], 2)\n', (24405, 24457), True, 'import numpy as np\n'), ((24490, 24550), 'numpy.power', 'np.power', (['(valid_detections[:, 3] - valid_detections[:, 5])', '(2)'], {}), '(valid_detections[:, 3] - valid_detections[:, 5], 2)\n', (24498, 24550), True, 'import numpy as np\n'), ((24581, 24641), 'numpy.power', 'np.power', (['(valid_detections[:, 2] - valid_detections[:, 0])', '(2)'], {}), '(valid_detections[:, 2] - valid_detections[:, 0], 2)\n', (24589, 24641), True, 'import numpy as np\n'), ((24675, 24735), 'numpy.power', 'np.power', (['(valid_detections[:, 3] - valid_detections[:, 1])', '(2)'], {}), '(valid_detections[:, 3] - valid_detections[:, 1], 2)\n', (24683, 24735), True, 'import numpy as np\n'), ((29064, 29101), 'numpy.array', 'np.array', (['ret[0][j]'], {'dtype': 'np.float32'}), '(ret[0][j], dtype=np.float32)\n', (29072, 29101), True, 'import numpy as np\n'), ((29266, 29332), 'numpy.concatenate', 'np.concatenate', (['[detection[j] for detection in detections]'], {'axis': '(0)'}), '([detection[j] for detection in detections], axis=0)\n', (29280, 29332), True, 'import numpy as np\n'), ((3340, 3367), 'torch.abs', 'torch.abs', (['(avg_tag - tl_tag)'], {}), '(avg_tag - tl_tag)\n', (3349, 3367), False, 'import torch\n'), ((3417, 3444), 'torch.abs', 'torch.abs', (['(bl_tag - avg_tag)'], {}), '(bl_tag - avg_tag)\n', (3426, 3444), False, 'import torch\n'), ((3494, 3521), 'torch.abs', 'torch.abs', (['(avg_tag - br_tag)'], {}), '(avg_tag - br_tag)\n', (3503, 3521), False, 'import torch\n'), ((14657, 14684), 'torch.abs', 'torch.abs', (['(tl_tag - avg_tag)'], {}), '(tl_tag - avg_tag)\n', (14666, 14684), False, 'import torch\n'), ((14687, 14714), 'torch.abs', 'torch.abs', (['(bl_tag - avg_tag)'], {}), '(bl_tag - avg_tag)\n', (14696, 14714), False, 'import torch\n'), ((6549, 6569), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (6561, 6569), False, 'import torch\n'), ((6670, 6690), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (6682, 6690), False, 'import torch\n'), ((6791, 6811), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (6803, 6811), False, 'import torch\n'), ((6912, 6932), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (6924, 6932), False, 'import torch\n'), ((7033, 7053), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (7045, 7053), False, 'import torch\n'), ((7154, 7174), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (7166, 7174), False, 'import torch\n'), ((7275, 7295), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (7287, 7295), False, 'import torch\n'), ((7396, 7416), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (7408, 7416), False, 'import torch\n')]
|
import logbook
import pandas as pd
import zipline as zl
from datetime import datetime, timedelta
import pathlib
import azul
import numpy as np
from typing import List, Tuple
log = logbook.Logger('BasePriceManager')
class BasePriceManager(object):
def __init__(self, calendar_name='NYSE'):
self._calendar = zl.get_calendar(name=calendar_name)
self._cols = ['open', 'high', 'low', 'close', 'volume', 'dividend', 'split']
# The number of days the price manager will keep trying to pull data for a symbol that is not returning data.
self.MISSING_DATE_THRESHOLD = 5
def get_price_data(self, symbols: List[str], output_dir: str, start_date: datetime, end_date: datetime) -> None:
minute_dir_path = pathlib.Path(output_dir, 'minute')
daily_dir_path = pathlib.Path(output_dir, 'daily')
for ticker in symbols:
self._download_and_process_data(
ticker, start_date, end_date, minute_dir_path, daily_dir_path)
def _download_and_process_data(
self,
ticker: str,
start_date: datetime,
end_date: datetime,
minute_dir_path: pathlib.Path,
daily_dir_path: pathlib.Path
) -> None:
df = self._minute_dataframe_for_dates(ticker, start_date, end_date)
if df.empty:
return
df = self._check_sessions(df, ticker, frequency='minute')
minute_dir_path.mkdir(parents=True, exist_ok=True)
filename = pathlib.Path(minute_dir_path, ticker + '.csv')
df.to_csv(filename)
daily_df = self._resample_minute_data_to_daily_data(df)
daily_df = self._check_sessions(daily_df, ticker, frequency='daily')
daily_dir_path.mkdir(parents=True, exist_ok=True)
filename = pathlib.Path(daily_dir_path, ticker + '.csv')
daily_df.to_csv(filename)
log.notice('Retrieved: {}'.format(ticker))
def _resample_minute_data_to_daily_data(self, df):
ohlc_dict = {
'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last',
'volume': 'sum',
'dividend': 'last',
'split': 'last'
}
if df.empty:
daily_df = df
else:
daily_df = df.resample('D', closed='left', label='left').agg(ohlc_dict).dropna(how='any')
# Resample mixes the columns so lets re-arrange them
daily_df = daily_df[self._cols]
return daily_df
def _validated_start_and_end_dates(
self,
start_date: datetime,
end_date: datetime
) -> Tuple[datetime, datetime]:
"""
Creates valid start and end dates. Defaults to a start date of 30 calendar days ago and end date of today.
Args:
start_date (datetime): The start date.
end_date (datetime): The end date.
Returns:
start_date (datetime): The validated start date.
end_date (datetime): The validated end date.
"""
today = datetime.today()
if start_date is None:
start_date = today - timedelta(days=30)
if end_date is None:
end_date = today
if start_date > end_date:
temp_date = start_date
start_date = end_date
end_date = temp_date
return start_date, end_date
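    # Note: if start_date is after end_date, the two values are silently swapped
    # rather than raising an error.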
def _minute_dataframe_for_dates(
self,
ticker: str,
start_date: datetime,
end_date: datetime
) -> pd.DataFrame:
"""
        Returns a DataFrame containing all the minute bars for a stock between the start and end dates.
Args:
ticker (str): Ticker symbol for the stock.
start_date (datetime): Date to start pulling data.
end_date (datetime): Date to stop pulling data.
Returns:
            combined_df (DataFrame): Contains all the minute bars for a stock between the start and end dates.
"""
start_date, end_date = self._validated_start_and_end_dates(start_date, end_date)
combined_df = pd.DataFrame(columns=self._cols)
combined_df.index.name = 'date'
#
# # Get the date the symbol was listed on the exchange.
# list_date = self._list_date(ticker)
#
# if list_date is not None:
# # If the we are asking for data from before the stock was listed, then set the start date to the day
# # the stock was listed.
# if list_date > start_date:
# log.info('The symbol {} was not listed until: {}. Adjusting start time.', ticker, list_date)
# start_date = list_date
# Build a list of the trading days from the dates passed in.
session_dates = self._calendar.sessions_in_range(start_date, end_date)
if session_dates.empty:
log.info('The symbol {} did not trade between {} and {} ', ticker, start_date, end_date)
return combined_df
# Iterate over the trading dates backwards. This means we don't need to know exactly
# when the stock started trading. Note: this won't pull data for stocks that have been delisted.
# TODO: Add code to capture data for delisted stocks.
num_missing_dates = 0
for timestamp in reversed(session_dates):
df = self._minute_dataframe_for_date(ticker, timestamp)
if df.empty:
                # Start counting the number of consecutive trading dates for which we are missing data.
num_missing_dates += 1
log.info('No minute data for {} on {}'.format(ticker, timestamp.date()))
else:
# reset missing date counter
num_missing_dates = 0
log.info('Retrieved minute data for {} on {}'.format(ticker, timestamp.date()))
combined_df = pd.concat([combined_df, df])
if num_missing_dates >= self.MISSING_DATE_THRESHOLD:
log.info('No minute data for {} for {} days. Quitting.'.format(ticker, self.MISSING_DATE_THRESHOLD))
break
# Sort the dataframe oldest first, newest last.
combined_df.sort_index(inplace=True)
return combined_df
# def _list_date(self, ticker: str) -> datetime:
# return None
def _minute_dataframe_for_date(self, ticker: str, start_timestamp: pd.Timestamp) -> pd.DataFrame:
raise NotImplementedError
def _fixna(self, df, symbol):
cols = ['close', 'high', 'low', 'open']
df[cols] = df[cols].replace({0: np.nan})
df[cols] = df[cols].replace({-1.0: np.nan})
if df.isnull().sum().sum() > 0:
# fixna_list.append(symbol)
df['open'] = df['open'].bfill().ffill()
df['close'] = df['close'].bfill().ffill()
df.loc[df['low'].isnull(), 'low'] = df['open']
df.loc[df['high'].isnull(), 'high'] = df['open']
df.loc[df['close'].isnull(), 'close'] = df['open']
return df
def _check_sessions(self, df, ticker, frequency='daily'):
# Remove any data that are outside of the trading sessions for the calendar.
if df.empty:
return df
asset_first_day = df.index[0]
asset_last_day = df.index[-1]
sessions = self._calendar.sessions_in_range(asset_first_day, asset_last_day)
asset_sessions = sessions[sessions.slice_indexer(asset_first_day, asset_last_day)]
if frequency == 'minute':
minutes_passed = len(df)
asset_first_day = self._calendar.minute_to_session_label(asset_first_day, direction='next')
asset_last_day = self._calendar.minute_to_session_label(asset_last_day, direction='previous')
minutes_in_session = self._calendar.minutes_for_sessions_in_range(asset_first_day, asset_last_day)
df = df[df.index.isin(minutes_in_session)]
if (minutes_passed) > len(minutes_in_session):
# print('Removed ' + str((minutes_passed) - len(minutes_in_session)) + ' minutes')
pass
            elif minutes_passed < len(minutes_in_session):
                num_missing_sessions = len(minutes_in_session) - minutes_passed
                log.info('Missing {} minute sessions for {}'.format(num_missing_sessions, ticker))
elif frequency == 'daily' and len(df) != len(asset_sessions):
missing_sessions = asset_sessions.difference(
pd.to_datetime(np.array(df.index), unit='s', utc=True, )).tolist()
extra_sessions = pd.to_datetime(np.array(df.index), unit='s', utc=True, ).difference(
asset_sessions).tolist()
if len(missing_sessions) > 0:
# missing_sessions_list.append(symbol)
# print('Adding ' + str(len(missing_sessions)) + ' sessions for ' + str(ticker))
pass
if len(extra_sessions) > 0:
# extra_sessions_list.append(symbol)
# print('Removing ' + str(len(extra_sessions)) + ' sessions for ' + str(symbol))
pass
for missing_session in missing_sessions:
prev_date = self._calendar.previous_session_label(missing_session)
row_to_copy = df[(df.index == prev_date)]
row_to_copy_val = row_to_copy.values
# from IPython import embed; embed()
df.loc[missing_session] = row_to_copy_val[0]
df.loc[missing_session].volume = 0
# row = row_to_copy
# table.append(row)
for extra_session in extra_sessions:
                # drop sessions that are not in the trading calendar (DataFrame.drop returns a copy)
                df = df.drop(extra_session)
if frequency == 'minute':
log.info('Downloaded and processed {} minute bars for {}', len(df), ticker)
else:
log.info('Downsampled {} daily bars for {}', len(df), ticker)
return df
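# A minimal sketch of a concrete subclass, assuming a hypothetical
# fetch_minute_bars(ticker, date) helper that returns rows of
# [timestamp, open, high, low, close, volume]; only _minute_dataframe_for_date
# needs to be overridden for the rest of the pipeline to work.
class ExamplePriceManager(BasePriceManager):
    def _minute_dataframe_for_date(self, ticker: str, start_timestamp: pd.Timestamp) -> pd.DataFrame:
        rows = fetch_minute_bars(ticker, start_timestamp.date())  # hypothetical data source
        df = pd.DataFrame(rows, columns=['date', 'open', 'high', 'low', 'close', 'volume'])
        df = df.set_index('date')
        df['dividend'] = 0.0
        df['split'] = 1.0
        return df[self._cols]
# Example call (assuming the subclass above and a writable output directory):
#   ExamplePriceManager().get_price_data(['AAPL'], 'output', None, None)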
|
[
"logbook.Logger",
"pandas.DataFrame",
"pathlib.Path",
"numpy.array",
"zipline.get_calendar",
"datetime.datetime.today",
"datetime.timedelta",
"pandas.concat"
] |
[((181, 215), 'logbook.Logger', 'logbook.Logger', (['"""BasePriceManager"""'], {}), "('BasePriceManager')\n", (195, 215), False, 'import logbook\n'), ((322, 357), 'zipline.get_calendar', 'zl.get_calendar', ([], {'name': 'calendar_name'}), '(name=calendar_name)\n', (337, 357), True, 'import zipline as zl\n'), ((746, 780), 'pathlib.Path', 'pathlib.Path', (['output_dir', '"""minute"""'], {}), "(output_dir, 'minute')\n", (758, 780), False, 'import pathlib\n'), ((806, 839), 'pathlib.Path', 'pathlib.Path', (['output_dir', '"""daily"""'], {}), "(output_dir, 'daily')\n", (818, 839), False, 'import pathlib\n'), ((1503, 1549), 'pathlib.Path', 'pathlib.Path', (['minute_dir_path', "(ticker + '.csv')"], {}), "(minute_dir_path, ticker + '.csv')\n", (1515, 1549), False, 'import pathlib\n'), ((1797, 1842), 'pathlib.Path', 'pathlib.Path', (['daily_dir_path', "(ticker + '.csv')"], {}), "(daily_dir_path, ticker + '.csv')\n", (1809, 1842), False, 'import pathlib\n'), ((3071, 3087), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (3085, 3087), False, 'from datetime import datetime, timedelta\n'), ((4143, 4175), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'self._cols'}), '(columns=self._cols)\n', (4155, 4175), True, 'import pandas as pd\n'), ((3152, 3170), 'datetime.timedelta', 'timedelta', ([], {'days': '(30)'}), '(days=30)\n', (3161, 3170), False, 'from datetime import datetime, timedelta\n'), ((5926, 5954), 'pandas.concat', 'pd.concat', (['[combined_df, df]'], {}), '([combined_df, df])\n', (5935, 5954), True, 'import pandas as pd\n'), ((8511, 8529), 'numpy.array', 'np.array', (['df.index'], {}), '(df.index)\n', (8519, 8529), True, 'import numpy as np\n'), ((8607, 8625), 'numpy.array', 'np.array', (['df.index'], {}), '(df.index)\n', (8615, 8625), True, 'import numpy as np\n')]
|
import random
import numpy as np
from gym_multigrid.multigrid import World
from gym_multigrid.multigrid import DIR_TO_VEC
from gym_multigrid.multigrid import Actions
class Agent:
def __init__(self, agent_id, agent_type=0):
self.id = agent_id
self.total_reward = 0
self.action_probabilities = [0.1, 0.2, 0.2, 0.4, 0.1]
self.agent_type = agent_type
self.observation = None
def next_action(self, observation, reward, round_id):
pass
def start_simulation(self, observation, rounds):
pass
def end_simulation(self, observation, reward, round_id):
pass
def random_action(self):
action = random.choices(np.arange(5), weights=self.action_probabilities, k=1)[0]
return action
def get_my_position(self):
width = len(self.observation)
height = len(self.observation[0])
for x in range(width):
for y in range(height):
if self.observation[x][y][0] == World.OBJECT_TO_IDX["agent"] and self.observation[x][y][2] == self.id:
return x, y
return -1, -1
def get_all_ball_positions(self):
width = len(self.observation)
height = len(self.observation[0])
positions_x = []
positions_y = []
for x in range(width):
for y in range(height):
if self.observation[x][y][0] == World.OBJECT_TO_IDX["ball"]:
positions_x.append(x)
positions_y.append(y)
return positions_x, positions_y
"""
actions:
still = 0
left = 1
right = 2
forward = 3
pickup = 4
"""
class RandomAgent(Agent):
def __init__(self, agent_id):
super().__init__(agent_id, agent_type=1)
def start_simulation(self, observation, rounds):
""" Nothing to be done """
def next_action(self, observation, reward, round_id):
#print("random index: ", self.id, " type: ", self.agent_type)
return self.random_action()
def end_simulation(self, observation, reward, round_id):
""" Nothing to be done """
class GreedyAgent(Agent):
def __init__(self, agent_id):
super().__init__(agent_id, agent_type=2)
self.width = 0
self.height = 0
def get_ball_positions(self):
positions = []
for x in range(self.width):
for y in range(self.height):
if self.observation[x][y][0] == World.OBJECT_TO_IDX["ball"]:
positions.append([x, y])
return positions
def greedy_action(self):
pos_x, pos_y = self.get_my_position()
direction = self.observation[pos_x][pos_y][1]
ball_positions = self.get_ball_positions()
target_ball_positions = get_closest_balls(pos_x, pos_y, direction, ball_positions)
target_ball_position = random.choice(target_ball_positions)
return move_towards_ball(pos_x, pos_y, direction, target_ball_position[0], target_ball_position[1])
def start_simulation(self, observation, rounds):
self.width = len(observation)
self.height = len(observation[0])
def next_action(self, observation, reward, round_id):
self.observation = observation
x, y = self.get_my_position()
#print("greedy index: ", self.id, " type: ", x, " ", y)
return self.greedy_action()
def end_simulation(self, observation, reward, round_id):
""" Nothing to be done """
def sign(x):
if x > 0:
return 1
elif x < 0:
return -1
else:
return 0
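# distance_from_ball below is a cheap heuristic rather than an exact path length:
# the Manhattan distance to the ball plus a turn penalty of up to 2 derived from
# the agent's current facing direction.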
def distance_from_ball(pos_x, pos_y, direction, ball_x, ball_y):
dx = ball_x - pos_x
dy = ball_y - pos_y
turns_x = abs(sign(dx) - DIR_TO_VEC[direction][0])
turns_y = abs(sign(dy) - DIR_TO_VEC[direction][1])
return abs(dx) + abs(dy) + max(turns_x, turns_y)
def get_closest_balls(pos_x, pos_y, direction, ball_positions):
if len(ball_positions) == 0:
return [[0, 0]]
best_positions = []
best_distance = -1
for index, [x, y] in enumerate(ball_positions):
current_distance = distance_from_ball(pos_x, pos_y, direction, x, y)
if best_distance == -1 or current_distance < best_distance:
best_distance = current_distance
best_positions = [[x, y]]
elif current_distance == best_distance:
best_positions.append([x, y])
return best_positions
def get_next_state(pos_x, pos_y, direction, action):
if action == Actions.still:
return pos_x, pos_y, direction
if action == Actions.left:
new_direction = (direction + 3) % 4
return pos_x, pos_y, new_direction
if action == Actions.right:
new_direction = (direction + 1) % 4
return pos_x, pos_y, new_direction
if action == Actions.forward:
return pos_x + DIR_TO_VEC[direction][0], pos_y + DIR_TO_VEC[direction][1], direction
def move_towards_ball(pos_x, pos_y, direction, ball_x, ball_y):
distance = distance_from_ball(pos_x, pos_y, direction, ball_x, ball_y)
if distance == 1:
return Actions.pickup
best_action = Actions.still
best_next_distance = distance
for action in [Actions.left, Actions.right, Actions.forward]:
next_x, next_y, next_direction = get_next_state(pos_x, pos_y, direction, action)
current_next_distance = distance_from_ball(next_x, next_y, next_direction, ball_x, ball_y)
if current_next_distance < best_next_distance:
best_next_distance = current_next_distance
best_action = action
return best_action
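# A minimal sanity check of the greedy helpers, assuming the usual minigrid
# convention where DIR_TO_VEC[0] points right (east):
if __name__ == "__main__":
    # Agent at (1, 1) facing direction 0 with a ball at (3, 1): no turning is
    # needed, so the heuristic distance should be 2 and the greedy move "forward".
    print(distance_from_ball(1, 1, 0, 3, 1))
    print(move_towards_ball(1, 1, 0, 3, 1) == Actions.forward)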
|
[
"random.choice",
"numpy.arange"
] |
[((2837, 2873), 'random.choice', 'random.choice', (['target_ball_positions'], {}), '(target_ball_positions)\n', (2850, 2873), False, 'import random\n'), ((693, 705), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (702, 705), True, 'import numpy as np\n')]
|
# -*- coding: UTF-8 -*-
import glob
import numpy as np
import pandas as pd
from PIL import Image
import random
# h,w = 60,50
h, w = (60, 50)
size = h * w
# Receding_Hairline Wearing_Necktie Rosy_Cheeks Eyeglasses Goatee Chubby
# Sideburns Blurry Wearing_Hat Double_Chin Pale_Skin Gray_Hair Mustache Bald
label_cls = 'Eyeglasses'
pngs = sorted(glob.glob('./data/img_align_celeba/*.jpg'))
data = pd.read_table('./data/list_attr_celeba.txt',
delim_whitespace=True, error_bad_lines=False)
eyeglasses = np.array(data[label_cls])
eyeglasses_cls = (eyeglasses + 1)/2
label_glasses = np.zeros((202599, 2))
correct_list = []
correct_list_test = []
false_list = []
false_list_test = []
for i in range(len(label_glasses)):
if eyeglasses_cls[i] == 1:
label_glasses[i][1] = 1
if i < 160000:
correct_list.append(i)
else:
correct_list_test.append(i)
else:
label_glasses[i][0] = 1
if i < 160000:
false_list.append(i)
else:
false_list_test.append(i)
print(len(correct_list_test), len(false_list_test))
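# label_glasses is a one-hot label per image (column 0 = no eyeglasses,
# column 1 = eyeglasses); images with index < 160000 form the training pool
# and the remaining images form the test pool.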
training_set_label = label_glasses[0:160000, :]
test_set_label = label_glasses[160000:, :]
training_set_cls = eyeglasses_cls[0:160000]
test_set_cls = eyeglasses_cls[160000:]
def create_trainbatch(num=10, channel=0):
train_num = random.sample(false_list, num)
if channel == 0:
train_set = np.zeros((num, h, w))
else:
train_set = np.zeros((num, h, w, 3))
train_set_label_ = []
train_set_cls_ = []
for i in range(num):
img = Image.open(pngs[train_num[i]])
img_grey = img.resize((w, h))
if channel == 0:
img_grey = np.array(img_grey.convert('L'))
train_set[i, :, :] = img_grey
else:
img_grey = np.array(img_grey)
train_set[i, :, :, :] = img_grey
train_set_label_.append(training_set_label[train_num[i]])
train_set_cls_.append(training_set_cls[train_num[i]])
# if channel == 0:
# train_set = train_set.reshape(size,num).T
train_set_label_new = np.array(train_set_label_)
train_set_cls_new = np.array(train_set_cls_)
return train_set/255, train_set_label_new, train_set_cls_new
def create_trainbatch_all_correct(num=10, channel=0):
train_num = random.sample(correct_list, num)
if channel == 0:
train_set = np.zeros((num, h, w))
else:
train_set = np.zeros((num, h, w, 3))
train_set_label_ = []
train_set_cls_ = []
n = 0
for i in range(num):
img = Image.open(pngs[train_num[i]])
img_grey = img.resize((w, h))
if channel == 0:
img_grey = np.array(img_grey.convert('L'))
train_set[i, :, :] = img_grey
else:
img_grey = np.array(img_grey)
train_set[i, :, :, :] = img_grey
train_set_label_.append(training_set_label[train_num[i]])
train_set_cls_.append(training_set_cls[train_num[i]])
# if channel == 0:
# train_set = train_set.reshape(size,num).T
train_set_label_new = np.array(train_set_label_)
train_set_cls_new = np.array(train_set_cls_)
return train_set/255, train_set_label_new, train_set_cls_new
def create_trainbatch_(num=10, channel=0):
train_num1 = random.sample(correct_list, int(num/2))
train_num2 = random.sample(false_list, int(num/2))
train_num = train_num1+train_num2
if channel == 0:
train_set = np.zeros((num, h, w))
else:
train_set = np.zeros((num, h, w, 3))
train_set_label_ = []
train_set_cls_ = []
n = 0
for i in range(num):
img = Image.open(pngs[train_num[i]])
img_grey = img.resize((w, h))
if channel == 0:
img_grey = np.array(img_grey.convert('L'))
train_set[i, :, :] = img_grey
else:
img_grey = np.array(img_grey)
train_set[i, :, :, :] = img_grey
train_set_label_.append(training_set_label[train_num[i]])
train_set_cls_.append(training_set_cls[train_num[i]])
# if channel == 0:
# train_set = train_set.reshape(size,num).T
train_set_label_new = np.array(train_set_label_)
train_set_cls_new = np.array(train_set_cls_)
return train_set/255, train_set_label_new, train_set_cls_new
def create_trainbatch_grad(num=200, channel=0):
train_num1 = random.sample(correct_list, int(10))
train_num2 = random.sample(false_list, int(190))
train_num = train_num1+train_num2
if channel == 0:
train_set = np.zeros((num, h, w))
else:
train_set = np.zeros((num, h, w, 3))
train_set_label_ = []
train_set_cls_ = []
n = 0
for i in range(num):
img = Image.open(pngs[train_num[i]])
img_grey = img.resize((w, h))
if channel == 0:
img_grey = np.array(img_grey.convert('L'))
train_set[i, :, :] = img_grey
else:
img_grey = np.array(img_grey)
train_set[i, :, :, :] = img_grey
train_set_label_.append(training_set_label[train_num[i]])
train_set_cls_.append(training_set_cls[train_num[i]])
# if channel == 0:
# train_set = train_set.reshape(size,num).T
train_set_label_new = np.array(train_set_label_)
train_set_cls_new = np.array(train_set_cls_)
return train_set/255, train_set_label_new, train_set_cls_new
def create_testset(num=100, channel=0):
test_num1 = random.sample(correct_list_test, num)
test_num2 = random.sample(false_list_test, num)
test_num = test_num1 + test_num2
if channel == 0:
test_set = np.zeros((num*2, h, w))
else:
test_set = np.zeros((num*2, h, w, 3))
test_set_label_ = []
test_set_cls_ = []
for i in range(num*2):
img = Image.open(pngs[test_num[i]])
img_grey = img.resize((w, h))
if channel == 0:
img_grey = np.array(img_grey.convert('L'))
test_set[i, :, :] = img_grey
else:
img_grey = np.array(img_grey)
test_set[i, :, :, :] = img_grey
test_set_label_.append(label_glasses[test_num[i]])
test_set_cls_.append(eyeglasses_cls[test_num[i]])
# if channel == 0:
# test_set = test_set.reshape(size,num*2).T
test_set_label_new = np.array(test_set_label_)
test_set_cls_new = np.array(test_set_cls_)
return test_set/255, test_set_label_new, test_set_cls_new, test_set_cls_new.mean()*100
def create_testset_all(channel=0):
test_num1 = random.sample(correct_list_test, len(correct_list_test))
test_num2 = random.sample(false_list_test, len(false_list_test))
test_num = test_num1 + test_num2
# test_num =
num = len(test_num)
if channel == 0:
test_set = np.zeros((num, h, w))
else:
test_set = np.zeros((num, h, w, 3))
test_set_label_ = []
test_set_cls_ = []
for i in range(num):
img = Image.open(pngs[test_num[i]])
img_grey = img.resize((w, h))
if channel == 0:
img_grey = np.array(img_grey.convert('L'))
test_set[i, :, :] = img_grey
else:
img_grey = np.array(img_grey)
test_set[i, :, :, :] = img_grey
test_set_label_.append(label_glasses[test_num[i]])
test_set_cls_.append(eyeglasses_cls[test_num[i]])
# if channel == 0:
# test_set = test_set.reshape(size,num).T
test_set_label_new = np.array(test_set_label_)
test_set_cls_new = np.array(test_set_cls_)
return test_set/255, test_set_label_new, test_set_cls_new, test_set_cls_new.mean()*100
def create_testset_unbalanced(channel=0):
test_num1 = random.sample(correct_list_test, 10)
test_num2 = random.sample(false_list_test, 190)
test_num = test_num1 + test_num2
# test_num =
num = len(test_num)
if channel == 0:
test_set = np.zeros((num, h, w))
else:
test_set = np.zeros((num, h, w, 3))
test_set_label_ = []
test_set_cls_ = []
for i in range(num):
img = Image.open(pngs[test_num[i]])
img_grey = img.resize((w, h))
if channel == 0:
img_grey = np.array(img_grey.convert('L'))
test_set[i, :, :] = img_grey
else:
img_grey = np.array(img_grey)
test_set[i, :, :, :] = img_grey
test_set_label_.append(label_glasses[test_num[i]])
test_set_cls_.append(eyeglasses_cls[test_num[i]])
# if channel == 0:
# test_set = test_set.reshape(size,num).T
test_set_label_new = np.array(test_set_label_)
test_set_cls_new = np.array(test_set_cls_)
return test_set/255, test_set_label_new, test_set_cls_new, test_set_cls_new.mean()*100
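# A minimal usage sketch, assuming the CelebA images and attribute file
# referenced above are present on disk:
if __name__ == "__main__":
    imgs, labels, cls = create_trainbatch_(num=4, channel=0)
    print(imgs.shape, labels.shape, cls)  # expect (4, 60, 50), (4, 2) and a 0/1 class vector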
|
[
"random.sample",
"PIL.Image.open",
"numpy.array",
"numpy.zeros",
"pandas.read_table",
"glob.glob"
] |
[((411, 505), 'pandas.read_table', 'pd.read_table', (['"""./data/list_attr_celeba.txt"""'], {'delim_whitespace': '(True)', 'error_bad_lines': '(False)'}), "('./data/list_attr_celeba.txt', delim_whitespace=True,\n error_bad_lines=False)\n", (424, 505), True, 'import pandas as pd\n'), ((537, 562), 'numpy.array', 'np.array', (['data[label_cls]'], {}), '(data[label_cls])\n', (545, 562), True, 'import numpy as np\n'), ((616, 637), 'numpy.zeros', 'np.zeros', (['(202599, 2)'], {}), '((202599, 2))\n', (624, 637), True, 'import numpy as np\n'), ((360, 402), 'glob.glob', 'glob.glob', (['"""./data/img_align_celeba/*.jpg"""'], {}), "('./data/img_align_celeba/*.jpg')\n", (369, 402), False, 'import glob\n'), ((1365, 1395), 'random.sample', 'random.sample', (['false_list', 'num'], {}), '(false_list, num)\n', (1378, 1395), False, 'import random\n'), ((2127, 2153), 'numpy.array', 'np.array', (['train_set_label_'], {}), '(train_set_label_)\n', (2135, 2153), True, 'import numpy as np\n'), ((2178, 2202), 'numpy.array', 'np.array', (['train_set_cls_'], {}), '(train_set_cls_)\n', (2186, 2202), True, 'import numpy as np\n'), ((2342, 2374), 'random.sample', 'random.sample', (['correct_list', 'num'], {}), '(correct_list, num)\n', (2355, 2374), False, 'import random\n'), ((3114, 3140), 'numpy.array', 'np.array', (['train_set_label_'], {}), '(train_set_label_)\n', (3122, 3140), True, 'import numpy as np\n'), ((3165, 3189), 'numpy.array', 'np.array', (['train_set_cls_'], {}), '(train_set_cls_)\n', (3173, 3189), True, 'import numpy as np\n'), ((4192, 4218), 'numpy.array', 'np.array', (['train_set_label_'], {}), '(train_set_label_)\n', (4200, 4218), True, 'import numpy as np\n'), ((4243, 4267), 'numpy.array', 'np.array', (['train_set_cls_'], {}), '(train_set_cls_)\n', (4251, 4267), True, 'import numpy as np\n'), ((5272, 5298), 'numpy.array', 'np.array', (['train_set_label_'], {}), '(train_set_label_)\n', (5280, 5298), True, 'import numpy as np\n'), ((5323, 5347), 'numpy.array', 'np.array', (['train_set_cls_'], {}), '(train_set_cls_)\n', (5331, 5347), True, 'import numpy as np\n'), ((5473, 5510), 'random.sample', 'random.sample', (['correct_list_test', 'num'], {}), '(correct_list_test, num)\n', (5486, 5510), False, 'import random\n'), ((5527, 5562), 'random.sample', 'random.sample', (['false_list_test', 'num'], {}), '(false_list_test, num)\n', (5540, 5562), False, 'import random\n'), ((6318, 6343), 'numpy.array', 'np.array', (['test_set_label_'], {}), '(test_set_label_)\n', (6326, 6343), True, 'import numpy as np\n'), ((6367, 6390), 'numpy.array', 'np.array', (['test_set_cls_'], {}), '(test_set_cls_)\n', (6375, 6390), True, 'import numpy as np\n'), ((7451, 7476), 'numpy.array', 'np.array', (['test_set_label_'], {}), '(test_set_label_)\n', (7459, 7476), True, 'import numpy as np\n'), ((7500, 7523), 'numpy.array', 'np.array', (['test_set_cls_'], {}), '(test_set_cls_)\n', (7508, 7523), True, 'import numpy as np\n'), ((7677, 7713), 'random.sample', 'random.sample', (['correct_list_test', '(10)'], {}), '(correct_list_test, 10)\n', (7690, 7713), False, 'import random\n'), ((7730, 7765), 'random.sample', 'random.sample', (['false_list_test', '(190)'], {}), '(false_list_test, 190)\n', (7743, 7765), False, 'import random\n'), ((8554, 8579), 'numpy.array', 'np.array', (['test_set_label_'], {}), '(test_set_label_)\n', (8562, 8579), True, 'import numpy as np\n'), ((8603, 8626), 'numpy.array', 'np.array', (['test_set_cls_'], {}), '(test_set_cls_)\n', (8611, 8626), True, 'import numpy as np\n'), ((1437, 1458), 'numpy.zeros', 
'np.zeros', (['(num, h, w)'], {}), '((num, h, w))\n', (1445, 1458), True, 'import numpy as np\n'), ((1489, 1513), 'numpy.zeros', 'np.zeros', (['(num, h, w, 3)'], {}), '((num, h, w, 3))\n', (1497, 1513), True, 'import numpy as np\n'), ((1605, 1635), 'PIL.Image.open', 'Image.open', (['pngs[train_num[i]]'], {}), '(pngs[train_num[i]])\n', (1615, 1635), False, 'from PIL import Image\n'), ((2416, 2437), 'numpy.zeros', 'np.zeros', (['(num, h, w)'], {}), '((num, h, w))\n', (2424, 2437), True, 'import numpy as np\n'), ((2468, 2492), 'numpy.zeros', 'np.zeros', (['(num, h, w, 3)'], {}), '((num, h, w, 3))\n', (2476, 2492), True, 'import numpy as np\n'), ((2592, 2622), 'PIL.Image.open', 'Image.open', (['pngs[train_num[i]]'], {}), '(pngs[train_num[i]])\n', (2602, 2622), False, 'from PIL import Image\n'), ((3493, 3514), 'numpy.zeros', 'np.zeros', (['(num, h, w)'], {}), '((num, h, w))\n', (3501, 3514), True, 'import numpy as np\n'), ((3545, 3569), 'numpy.zeros', 'np.zeros', (['(num, h, w, 3)'], {}), '((num, h, w, 3))\n', (3553, 3569), True, 'import numpy as np\n'), ((3669, 3699), 'PIL.Image.open', 'Image.open', (['pngs[train_num[i]]'], {}), '(pngs[train_num[i]])\n', (3679, 3699), False, 'from PIL import Image\n'), ((4572, 4593), 'numpy.zeros', 'np.zeros', (['(num, h, w)'], {}), '((num, h, w))\n', (4580, 4593), True, 'import numpy as np\n'), ((4624, 4648), 'numpy.zeros', 'np.zeros', (['(num, h, w, 3)'], {}), '((num, h, w, 3))\n', (4632, 4648), True, 'import numpy as np\n'), ((4749, 4779), 'PIL.Image.open', 'Image.open', (['pngs[train_num[i]]'], {}), '(pngs[train_num[i]])\n', (4759, 4779), False, 'from PIL import Image\n'), ((5640, 5665), 'numpy.zeros', 'np.zeros', (['(num * 2, h, w)'], {}), '((num * 2, h, w))\n', (5648, 5665), True, 'import numpy as np\n'), ((5693, 5721), 'numpy.zeros', 'np.zeros', (['(num * 2, h, w, 3)'], {}), '((num * 2, h, w, 3))\n', (5701, 5721), True, 'import numpy as np\n'), ((5811, 5840), 'PIL.Image.open', 'Image.open', (['pngs[test_num[i]]'], {}), '(pngs[test_num[i]])\n', (5821, 5840), False, 'from PIL import Image\n'), ((6781, 6802), 'numpy.zeros', 'np.zeros', (['(num, h, w)'], {}), '((num, h, w))\n', (6789, 6802), True, 'import numpy as np\n'), ((6832, 6856), 'numpy.zeros', 'np.zeros', (['(num, h, w, 3)'], {}), '((num, h, w, 3))\n', (6840, 6856), True, 'import numpy as np\n'), ((6946, 6975), 'PIL.Image.open', 'Image.open', (['pngs[test_num[i]]'], {}), '(pngs[test_num[i]])\n', (6956, 6975), False, 'from PIL import Image\n'), ((7884, 7905), 'numpy.zeros', 'np.zeros', (['(num, h, w)'], {}), '((num, h, w))\n', (7892, 7905), True, 'import numpy as np\n'), ((7935, 7959), 'numpy.zeros', 'np.zeros', (['(num, h, w, 3)'], {}), '((num, h, w, 3))\n', (7943, 7959), True, 'import numpy as np\n'), ((8049, 8078), 'PIL.Image.open', 'Image.open', (['pngs[test_num[i]]'], {}), '(pngs[test_num[i]])\n', (8059, 8078), False, 'from PIL import Image\n'), ((1833, 1851), 'numpy.array', 'np.array', (['img_grey'], {}), '(img_grey)\n', (1841, 1851), True, 'import numpy as np\n'), ((2820, 2838), 'numpy.array', 'np.array', (['img_grey'], {}), '(img_grey)\n', (2828, 2838), True, 'import numpy as np\n'), ((3898, 3916), 'numpy.array', 'np.array', (['img_grey'], {}), '(img_grey)\n', (3906, 3916), True, 'import numpy as np\n'), ((4978, 4996), 'numpy.array', 'np.array', (['img_grey'], {}), '(img_grey)\n', (4986, 4996), True, 'import numpy as np\n'), ((6037, 6055), 'numpy.array', 'np.array', (['img_grey'], {}), '(img_grey)\n', (6045, 6055), True, 'import numpy as np\n'), ((7172, 7190), 'numpy.array', 'np.array', 
(['img_grey'], {}), '(img_grey)\n', (7180, 7190), True, 'import numpy as np\n'), ((8275, 8293), 'numpy.array', 'np.array', (['img_grey'], {}), '(img_grey)\n', (8283, 8293), True, 'import numpy as np\n')]
|
"""
Created on Sat Mar 23 00:23:27 2019
@author: nahid
"""
#https://docs.scipy.org/doc/numpy/reference/generated/numpy.absolute.html
import numpy as np
import matplotlib.pyplot as plt
x = np.array([-1.2, 1.2])
x = np.absolute(x)
print(x)
print(np.absolute(1 + 2j))
#Plot the function over [-10, 10]:
x = np.linspace(-10, 10, 101); #start, end, totalElements you want to create
plt.plot(np.absolute(x))
plt.show()
plt.plot(x)
plt.show()
xx = x + 1j * x[:, np.newaxis]
plt.imshow(np.abs(xx), extent=[-10, 10, -10, 10], cmap='gray')
plt.show()
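# For the complex example above, np.absolute(1 + 2j) returns the modulus
# sqrt(1**2 + 2**2) = sqrt(5) ≈ 2.2360679...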
|
[
"numpy.abs",
"numpy.absolute",
"matplotlib.pyplot.plot",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.show"
] |
[((189, 210), 'numpy.array', 'np.array', (['[-1.2, 1.2]'], {}), '([-1.2, 1.2])\n', (197, 210), True, 'import numpy as np\n'), ((215, 229), 'numpy.absolute', 'np.absolute', (['x'], {}), '(x)\n', (226, 229), True, 'import numpy as np\n'), ((306, 331), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)', '(101)'], {}), '(-10, 10, 101)\n', (317, 331), True, 'import numpy as np\n'), ((404, 414), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (412, 414), True, 'import matplotlib.pyplot as plt\n'), ((415, 426), 'matplotlib.pyplot.plot', 'plt.plot', (['x'], {}), '(x)\n', (423, 426), True, 'import matplotlib.pyplot as plt\n'), ((427, 437), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (435, 437), True, 'import matplotlib.pyplot as plt\n'), ((533, 543), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (541, 543), True, 'import matplotlib.pyplot as plt\n'), ((245, 266), 'numpy.absolute', 'np.absolute', (['(1 + 2.0j)'], {}), '(1 + 2.0j)\n', (256, 266), True, 'import numpy as np\n'), ((388, 402), 'numpy.absolute', 'np.absolute', (['x'], {}), '(x)\n', (399, 402), True, 'import numpy as np\n'), ((481, 491), 'numpy.abs', 'np.abs', (['xx'], {}), '(xx)\n', (487, 491), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import sys
import os
import argparse
import time
import serial
import csv
import math
import pickle
from collections import defaultdict
import numpy as np
from sklearn.decomposition import PCA, FastICA
from sklearn.svm import SVC
# Graph
WINDOW_WIDTH = 800
WINDOW_HEIGHT = 800
PLOT_SCROLL = 3 # higher is faster
CHANNELS = 8
FONT_SIZE = 25
# Data
FREQUENCY = 200 # Hz
CSV_HEADER_EMG = ["timestamp", "emg1", "emg2", "emg3", "emg4", "emg5", "emg6", "emg7", "emg8"]
CSV_HEADER_CA = ["timestamp", "ca1", "ca2", "ca3", "ca4", "ca5", "ca6", "ca7", "ca8"]
# Processing
RMS_WINDOW_SIZE = 50
SVM_WINDOW_SIZE = 5 # higher is smoother but more delay
SVM_IDLE_WEIGHT_FACTOR = 100.0 # higher makes "idle" move more important
VERBOSE = False
# Plotting (Pygame) window interface
class Plotter():
def __init__(self, live=False):
if "pygame" not in sys.modules:
print("Error! pygame not loaded! Plotter not available for library use.")
return None
self.screen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
pygame.display.set_caption("Electromyography Processor")
self.font = pygame.font.Font(None, FONT_SIZE)
self.live = live
self.last_values = None
self.last_rms_values = None
self.last_ca_values = None
self.plots = 0
def plot(self, values, rms_values=[], ca_values=[], ca="", gesture="", frequency=None, recording=False):
if self.last_values is None:
self.last_values = values
self.last_rms_values = rms_values
self.last_ca_values = ca_values
self.plots = len(values) + len(ca_values)
return
self.screen.scroll(-PLOT_SCROLL)
self.screen.fill(pygame.Color("black"), (WINDOW_WIDTH - PLOT_SCROLL, 0, WINDOW_WIDTH, WINDOW_HEIGHT))
self.screen.fill(pygame.Color("black"), (0, 0, 60, WINDOW_HEIGHT))
self.clear_info()
# Subplot base
for i in range(self.plots):
base_height = self.subplot_height(i)
pygame.draw.line(self.screen, pygame.Color("darkgrey"),
(WINDOW_WIDTH - PLOT_SCROLL, base_height),
(WINDOW_WIDTH, base_height))
if i < 8 and self.plots >= 8: # Raw / RMS
plot_text = self.font.render(f"RAW {i}", True, pygame.Color("darkgrey"))
rms_offset = 10 if rms_values else 0
if rms_values:
plot_rms = self.font.render(f"RMS {i}", True, pygame.Color("blue"))
self.screen.blit(plot_rms, (0, base_height - rms_offset - FONT_SIZE // 2))
self.screen.blit(plot_text, (0, base_height + rms_offset - FONT_SIZE // 2))
else: # PCA/ICA
plot_text = self.font.render(f" {ca.upper()} {i - len(values)}", True, pygame.Color("green"))
self.screen.blit(plot_text, (0, base_height - FONT_SIZE // 2))
# Raw signal
for i, (u, v) in enumerate(zip(self.last_values, values)):
pygame.draw.line(self.screen, pygame.Color("darkslategrey"),
(WINDOW_WIDTH - PLOT_SCROLL, self.subplot_height(i, u)),
(WINDOW_WIDTH, self.subplot_height(i, v)))
# Processed signals
if rms_values:
for i, (u, v) in enumerate(zip(self.last_rms_values, rms_values)):
pygame.draw.line(self.screen, pygame.Color("blue"),
(WINDOW_WIDTH - PLOT_SCROLL, self.subplot_height(i, u)),
(WINDOW_WIDTH, self.subplot_height(i, v)))
if ca_values:
for i, (u, v) in enumerate(zip(self.last_ca_values, ca_values)):
pygame.draw.line(self.screen, pygame.Color("green"),
(WINDOW_WIDTH - PLOT_SCROLL, self.subplot_height(i + len(rms_values), u)),
(WINDOW_WIDTH, self.subplot_height(i + len(rms_values), v)))
# Information
if frequency:
self.render_frequency(frequency)
self.render_mode()
self.render_controls(recording)
if gesture:
self.render_classification(gesture)
pygame.display.flip()
self.last_values = values
self.last_rms_values = rms_values
self.last_ca_values = ca_values
def subplot_height(self, i, value=0):
scaled_value = value * 1.5
return int(WINDOW_HEIGHT / (self.plots + 1) * (i + 1 - scaled_value))
def clear_info(self):
self.screen.fill(pygame.Color("black"), (0, 0, WINDOW_WIDTH, FONT_SIZE))
self.screen.fill(pygame.Color("black"), (0, WINDOW_HEIGHT - FONT_SIZE, WINDOW_WIDTH, WINDOW_HEIGHT))
def render_mode(self):
mode_text = "LIVE" if self.live else "PLAYBACK"
        mode = self.font.render(mode_text, True, pygame.Color("green"))
self.screen.blit(mode, (WINDOW_WIDTH // 2 - len(mode_text) * FONT_SIZE // 2, 0))
def render_frequency(self, frequency):
framerate = self.font.render(f"{frequency} Hz", True,
pygame.Color("green") if frequency > 180 else pygame.Color("red"))
self.screen.fill(pygame.Color("black"), (0, 0, 75, FONT_SIZE)) # Clear old framerate
self.screen.blit(framerate, (0, 0))
def render_controls(self, recording):
pause = self.font.render("P (pause)", True, pygame.Color("white"))
self.screen.blit(pause, (WINDOW_WIDTH - 250, 0))
if self.live: # Can only record live
record = self.font.render("R (stop rec)" if recording else "R (record)", True,
pygame.Color("red") if recording else pygame.Color("white"))
self.screen.blit(record, (WINDOW_WIDTH - 150, 0))
def render_classification(self, gesture):
plot_gesture = self.font.render(f"Classification: {gesture}", True, pygame.Color("green"))
self.screen.blit(plot_gesture, (WINDOW_WIDTH // 2 - 225, WINDOW_HEIGHT - FONT_SIZE))
def pause(self):
self.clear_info()
pause = self.font.render("P (resume)", True, pygame.Color("red"))
self.screen.blit(pause, (WINDOW_WIDTH - 250, 0))
self.render_mode()
pygame.display.flip()
def end(self):
self.clear_info()
pause = self.font.render("END", True, pygame.Color("red"))
self.screen.blit(pause, (WINDOW_WIDTH - 250, 0))
self.render_mode()
pygame.display.flip()
# Interface for data streaming from either live Myo device or recorded playback
class Stream():
def __init__(self, do_rms=False, pca_train_set=[], ica_train_set=[], ca_components=3, svm_train_set=[]):
self.plotter = None # Late setup (display modes)
self.reset()
# Processing
self.do_rms = do_rms
self.ca_components = ca_components
self.pca = self.init_pca(pca_train_set) if pca_train_set else None
self.ica = self.init_ica(ica_train_set) if ica_train_set else None
self.svm = self.init_svm(svm_train_set) if svm_train_set else None
self.gesture = ""
def create_plot(self, live=False):
self.plotter = Plotter(live=live)
def plot(self, data, ca=False, recording=False):
self.calc_frequency()
# Processing
rms_data, ca_data = [], []
if ca:
ca_data, data = data, []
else:
if self.do_rms or self.pca is not None or self.ica is not None:
rms_data = self.calc_rms(data)
ca_data = []
if self.pca is not None:
ca_data = self.calc_pca(rms_data)
elif self.ica is not None:
ca_data = self.calc_ica(rms_data)
if self.svm is not None:
self.gesture = self.class_svm(ca_data)
if not self.paused and self.plotter is not None:
self.plotter.plot([x / 500. for x in data],
rms_values=[x / 500. for x in rms_data],
ca_values=[x / 500. for x in ca_data],
ca=self.current_model()[1],
gesture=self.gesture,
frequency=self.frequency,
recording=recording)
return rms_data, ca_data, self.gesture
def calc_frequency(self):
self.times.append(time.time())
if len(self.times) >= 100:
self.frequency = int((len(self.times) - 1) / (self.times[-1] - self.times[0]))
self.times.clear()
def pause(self, state=False, toggle=False):
if toggle:
self.paused = not self.paused
else:
self.paused = state
if self.paused and not self.ended:
self.plotter.pause()
def end(self):
self.ended = True
if self.plotter is not None:
self.plotter.end()
def reset(self):
self.paused = False
self.ended = False
# Frequency measuring
self.times = []
self.frequency = 0
# Processing
self.rms_window = []
self.svm_window = []
# Processing
def calc_rms(self, data):
# Gather samples, up to RMS_WINDOW_SIZE
self.rms_window.append(data)
if len(self.rms_window) >= RMS_WINDOW_SIZE:
self.rms_window.pop(0)
# Calculate RMS for each channel
rms_data = [0] * CHANNELS
for channel in range(CHANNELS):
samples = [item[channel] for item in self.rms_window]
total = sum([sample ** 2 for sample in samples])
rms_data[channel] = math.sqrt(1.0 / RMS_WINDOW_SIZE * total)
if VERBOSE:
print(f"rms: {rms_data}")
return rms_data
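    # Note: calc_rms always divides by RMS_WINDOW_SIZE rather than by the number
    # of samples currently in the window, so the RMS is damped until the window
    # has filled up.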
def read_ca_train_set(self, train_set, stype="?"):
emg_data = []
for file in train_set:
print(f"Training {stype.upper()} with '{file}'...")
emg_file = open(file, "r", newline="")
emg_reader = csv.reader(emg_file, csv.unix_dialect, quoting=csv.QUOTE_MINIMAL)
# Read file
header = next(emg_reader)
if header == CSV_HEADER_EMG:
try:
while True:
data = next(emg_reader)
_, emg = data[0], list(map(int, data[1:]))
emg_data.append(self.calc_rms(emg))
except StopIteration:
pass
else:
print("-> Error! Incorrect header! Expected 'RAW'.")
self.rms_window.clear()
emg_file.close()
emg_data = np.array(emg_data)
return emg_data
def read_model(self, model, stype="?"):
print(f"Reading {stype.upper()} model '{model}'...")
with open(model, "rb") as f:
return pickle.load(f)
def init_pca(self, train_set):
if isinstance(train_set, list):
emg_data = self.read_ca_train_set(train_set, "pca")
# Initialize and train
pca = PCA(n_components=self.ca_components)
pca.fit(emg_data)
else:
pca = self.read_model(train_set, "pca")
return pca
def calc_pca(self, rms_data):
emg_data = np.array(rms_data).reshape(1, -1) # Reshape to 1 sample, N features
pca_data = self.pca.transform(emg_data)[0] # Take 1 sample from array of samples (contains only one)
if VERBOSE:
print(f"pca: {pca_data}")
return pca_data
def init_ica(self, train_set):
if isinstance(train_set, list):
emg_data = self.read_ca_train_set(train_set, "ica")
# Initialize and train
ica = FastICA(n_components=self.ca_components, random_state=0)
ica.fit(emg_data)
else:
ica = self.read_model(train_set, "ica")
return ica
def calc_ica(self, rms_data):
emg_data = np.array(rms_data).reshape(1, -1) # Reshape to 1 sample, N features
ica_data = self.ica.transform(emg_data)[0] # Take 1 sample from array of samples (contains only one)
ica_data *= 5000 # Scale up
if VERBOSE:
print(f"ica: {ica_data}")
return ica_data
def read_class_train_set(self, train_set, stype="?"):
emg_data = []
classes = []
for file in train_set:
classification = os.path.basename(file).split("_")[0]
print(f"Training {stype.upper()} '{classification}' with '{file}'...")
emg_file = open(file, "r", newline="")
emg_reader = csv.reader(emg_file, csv.unix_dialect, quoting=csv.QUOTE_MINIMAL)
# Read file
header = next(emg_reader)
if header == CSV_HEADER_CA[:self.ca_components + 1]:
try:
while True:
data = next(emg_reader)
_, emg = data[0], list(map(float, data[1:]))
emg_data.append(emg)
classes.append(classification)
except StopIteration:
pass
else:
print("-> Error! Incorrect header! Expected 'PCA/ICA'.")
emg_file.close()
if "idle" not in classes:
print("Warning! No 'idle' move trained!")
emg_data, classes = np.array(emg_data), np.array(classes)
return emg_data, classes
def init_svm(self, train_set):
if isinstance(train_set, list):
emg_data, classes = self.read_class_train_set(train_set, "svm")
svm = SVC(random_state=0, kernel="rbf", class_weight={"idle": SVM_IDLE_WEIGHT_FACTOR})
svm.fit(emg_data, classes)
else:
svm = self.read_model(train_set, "svm")
return svm
def class_svm(self, ca_data):
# Gather samples, up to SVM_WINDOW_SIZE to smooth classification
self.svm_window.append(ca_data)
if len(self.svm_window) > SVM_WINDOW_SIZE:
self.svm_window.pop(0)
window = np.array(self.svm_window)
svm_classes = self.svm.predict(window) # predict each sample in window
            # Take the classification with the most occurrences in the window
d = defaultdict(int)
for svm_class in svm_classes:
d[svm_class] += 1
svm_class = max(d.items(), key=lambda x: x[1])[0]
if VERBOSE:
print(f"svm: {svm_class}")
return svm_class
return ""
def current_model(self):
if self.svm is not None:
return self.svm, "svm"
elif self.pca is not None:
return self.pca, "pca"
elif self.ica is not None:
return self.ica, "ica"
return None, ""
# Live Myo device interface
class Myo():
def __init__(self, stream, tty, native, mac):
# Instantiate
self.myo = MyoRaw(tty, native, mac)
self.stream = stream
self.recording = False
self.recording_type = self.init_recording()
# Recording
self.emg_file = None
self.emg_writer = None
# Setup
self.setup()
def close(self):
self.myo.disconnect()
self.record(False)
def setup(self):
# Add handles to process EMG and battery level data
self.myo.add_handler(DataCategory.EMG, self.handle_emg)
self.myo.add_handler(DataCategory.BATTERY, self.handle_battery)
# Subscribe to all data services in full RAW mode (200 Hz)
self.myo.subscribe(EMGMode.RAW)
        # Disable sleep to avoid disconnects while retrieving data
        self.myo.set_sleep_mode(1)
        # Vibrate to signal a successful setup
# myo.vibrate(1)
def run(self):
self.myo.run(1)
def disconnect(self):
self.myo.disconnect()
def sleep(self):
self.myo.deep_sleep()
def handle_emg(self, timestamp, emg, moving, characteristic_num):
emg = list(emg)
_, ca_data, _ = self.stream.plot(emg, recording=self.recording)
record_data = ca_data if len(ca_data) > 0 else emg
if self.recording:
csv_data = [timestamp]
csv_data.extend(record_data)
try:
self.emg_writer.writerow(csv_data)
except AttributeError:
print("Error! Unable to write to CSV!")
if VERBOSE:
print(f"[myo] {self.recording_type}: {timestamp}, {record_data}")
def handle_battery(self, timestamp, battery_level):
if battery_level < 5:
self.myo.set_leds([255, 0, 0], [255, 0, 0]) # red logo, red bar
else:
self.myo.set_leds([128, 128, 255], [128, 128, 255]) # purple logo, purple bar
if VERBOSE:
print(f"[myo] battery level: {timestamp}, {battery_level}")
def init_recording(self):
if self.stream.pca is not None:
return "pca"
elif self.stream.ica is not None:
return "ica"
return "raw"
def record(self, state=False, toggle=False):
if toggle:
recording = not self.recording
else:
recording = state
if recording:
filename = f"recordings/{self.recording_type}/{time.strftime('%Y%m%d-%H%M%S')}.csv"
os.makedirs(os.path.dirname(filename), exist_ok=True)
self.emg_file = open(filename, "w", newline="")
self.emg_writer = csv.writer(self.emg_file, csv.unix_dialect, quoting=csv.QUOTE_MINIMAL)
if self.recording_type == "raw":
self.emg_writer.writerow(CSV_HEADER_EMG)
else:
self.emg_writer.writerow(CSV_HEADER_CA[:self.stream.ca_components + 1])
elif self.emg_file is not None:
self.emg_file.close()
self.emg_file = None
self.emg_writer = None
self.recording = recording
# Recorded Myo data playback interface
class Playback():
def __init__(self, stream, filename):
self.stream = stream
self.valid = False
self.type = ""
try:
self.emg_file = open(filename, "r", newline="")
self.emg_reader = csv.reader(self.emg_file, csv.unix_dialect, quoting=csv.QUOTE_MINIMAL)
self.read_header()
except FileNotFoundError:
self.emg_file = None
def close(self):
if self.emg_file:
self.emg_file.close()
def read_header(self):
try:
header = next(self.emg_reader)
if header == CSV_HEADER_EMG:
self.valid = True
self.type = "raw"
if header[:2] == CSV_HEADER_CA[:2]:
self.valid = True
self.type = "ca"
except StopIteration:
pass
def is_valid(self):
return self.valid
    # Plays a frame from the recording, indicating the end of the recording on subsequent calls
def play_frame(self):
if not self.stream.paused:
try:
data = next(self.emg_reader)
if self.type == "raw":
timestamp, emg = data[0], list(map(int, data[1:]))
rms_data, ca_data, gesture = self.stream.plot(emg)
else:
timestamp, emg = data[0], list(map(float, data[1:]))
rms_data, ca_data, gesture = self.stream.plot(emg, ca=True)
if VERBOSE:
print(f"[playback] emg: {timestamp}, {emg}")
return timestamp, rms_data, ca_data, gesture
except StopIteration:
self.stream.end()
return 0, [], [], ""
def main():
# Parse arguments
parser = argparse.ArgumentParser(description="Electromyography Processor")
group1 = parser.add_mutually_exclusive_group()
group1.add_argument("-r", "--recording", default=None, metavar="REC", help="playback recorded Myo data stream")
group1.add_argument("-s", "--sleep", default=False, action="store_true", help="put Myo into deep sleep (turn off)")
parser.add_argument("--rms", default=False, action="store_true", help="process stream using RMS smoothing")
group2 = parser.add_mutually_exclusive_group()
group2.add_argument("--pca", nargs="+", metavar="REC", help="process stream using RAW training set or PCA model")
group2.add_argument("--ica", nargs="+", metavar="REC", help="process stream using RAW training set or ICA model")
parser.add_argument("-c", "--components", default=3, type=int, help="PCA/ICA components to use")
group3 = parser.add_mutually_exclusive_group()
group3.add_argument("--svm", nargs="+", metavar="REC", help="classify using PCA/ICA training set or SVM model")
group4 = parser.add_mutually_exclusive_group()
group4.add_argument("--tty", default=None, help="Myo dongle device (autodetected if omitted)")
group4.add_argument("--native", default=False, action="store_true", help="use a native Bluetooth stack")
parser.add_argument("--mac", default=None, help="Myo MAC address (arbitrarily detected if omitted)")
parser.add_argument("-v", "--verbose", default=False, action="store_true", help="verbose output")
args = parser.parse_args()
if args.svm and not args.pca and not args.ica:
parser.error("the following arguments are required for 'svm': 'pca' or 'ica'")
    # Model was given instead of training set
if args.pca is not None and len(args.pca) == 1 and not args.pca[0].endswith(".csv"):
args.pca = args.pca[0]
if args.ica is not None and len(args.ica) == 1 and not args.ica[0].endswith(".csv"):
args.ica = args.ica[0]
if args.svm is not None and len(args.svm) == 1 and not args.svm[0].endswith(".csv"):
args.svm = args.svm[0]
if args.verbose:
global VERBOSE
VERBOSE = args.verbose
live_myo = args.recording is None
# Setup common stream interface for Myo or Playback
stream = Stream(do_rms=args.rms, pca_train_set=args.pca, ica_train_set=args.ica, svm_train_set=args.svm,
ca_components=args.components)
# Setup Myo or Playback
if live_myo:
try:
print("Connecting to Myo...")
myo = Myo(stream, args.tty, args.native, args.mac)
print("Connected to Myo!")
except (ValueError, KeyboardInterrupt) as e:
print(f"Error! Unable to connect!\n{e}")
return 1
else:
playback = Playback(stream, args.recording)
if not playback.is_valid():
print("Error! Invalid CSV file!")
return 2
# Run main logic
if args.sleep:
if live_myo:
myo.sleep()
else:
pygame.init()
stream.create_plot(live=live_myo)
# Run until terminated by user or recording ended
try:
starttime = time.time()
while True:
if live_myo:
try:
myo.run()
except serial.serialutil.SerialException:
print("Error! Myo exception! Attempting reboot...")
myo.disconnect()
myo = Myo(stream, args.tty, args.native, args.mac)
else:
playback.play_frame()
# Delay by (1 second / FREQUENCY Hz) including execution time
delay = 1 / FREQUENCY
diff = min(time.time() - starttime, 1 / FREQUENCY)
time.sleep(delay - diff)
starttime = time.time()
# Handle Pygame events
for ev in pygame.event.get():
if ev.type == pygame.QUIT:
raise KeyboardInterrupt()
elif ev.type == pygame.KEYDOWN:
if ev.key == pygame.K_q:
raise KeyboardInterrupt()
elif ev.key == pygame.K_p:
stream.pause(toggle=True)
elif ev.key == pygame.K_r:
if live_myo:
myo.record(toggle=True)
except KeyboardInterrupt:
pass
if live_myo:
myo.close()
else:
playback.close()
return 0
# Conditional imports
if __name__ == "__main__" or os.environ.get("EMGPROC_LOAD_GAME", False):
import pygame
if __name__ == "__main__" or os.environ.get("EMGPROC_LOAD_MYO", False):
from myo_raw import MyoRaw, DataCategory, EMGMode
if __name__ == "__main__":
sys.exit(main())
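# Typical invocations, based on the argparse flags defined in main()
# (the script name below is a placeholder):
#   python emgproc.py --rms                        # live Myo stream with RMS smoothing
#   python emgproc.py -r recording.csv --rms       # play back a recorded raw stream
#   python emgproc.py --pca raw1.csv raw2.csv --svm idle_1.csv fist_1.csv
#                                                  # PCA projection plus SVM classification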
|
[
"pygame.init",
"math.sqrt",
"time.sleep",
"numpy.array",
"sklearn.decomposition.FastICA",
"pygame.font.Font",
"argparse.ArgumentParser",
"sklearn.decomposition.PCA",
"pygame.display.set_mode",
"pygame.display.flip",
"csv.reader",
"csv.writer",
"pickle.load",
"os.path.dirname",
"time.time",
"sklearn.svm.SVC",
"pygame.event.get",
"myo_raw.MyoRaw",
"time.strftime",
"os.environ.get",
"collections.defaultdict",
"os.path.basename",
"pygame.display.set_caption",
"pygame.Color"
] |
[((19921, 19986), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Electromyography Processor"""'}), "(description='Electromyography Processor')\n", (19944, 19986), False, 'import argparse\n'), ((24584, 24626), 'os.environ.get', 'os.environ.get', (['"""EMGPROC_LOAD_GAME"""', '(False)'], {}), "('EMGPROC_LOAD_GAME', False)\n", (24598, 24626), False, 'import os\n'), ((24675, 24716), 'os.environ.get', 'os.environ.get', (['"""EMGPROC_LOAD_MYO"""', '(False)'], {}), "('EMGPROC_LOAD_MYO', False)\n", (24689, 24716), False, 'import os\n'), ((1025, 1079), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(WINDOW_WIDTH, WINDOW_HEIGHT)'], {}), '((WINDOW_WIDTH, WINDOW_HEIGHT))\n', (1048, 1079), False, 'import pygame\n'), ((1088, 1144), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Electromyography Processor"""'], {}), "('Electromyography Processor')\n", (1114, 1144), False, 'import pygame\n'), ((1165, 1198), 'pygame.font.Font', 'pygame.font.Font', (['None', 'FONT_SIZE'], {}), '(None, FONT_SIZE)\n', (1181, 1198), False, 'import pygame\n'), ((4272, 4293), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (4291, 4293), False, 'import pygame\n'), ((6363, 6384), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (6382, 6384), False, 'import pygame\n'), ((6591, 6612), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (6610, 6612), False, 'import pygame\n'), ((10782, 10800), 'numpy.array', 'np.array', (['emg_data'], {}), '(emg_data)\n', (10790, 10800), True, 'import numpy as np\n'), ((15081, 15105), 'myo_raw.MyoRaw', 'MyoRaw', (['tty', 'native', 'mac'], {}), '(tty, native, mac)\n', (15087, 15105), False, 'from myo_raw import MyoRaw, DataCategory, EMGMode\n'), ((22923, 22936), 'pygame.init', 'pygame.init', ([], {}), '()\n', (22934, 22936), False, 'import pygame\n'), ((1767, 1788), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (1779, 1788), False, 'import pygame\n'), ((1877, 1898), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (1889, 1898), False, 'import pygame\n'), ((4619, 4640), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (4631, 4640), False, 'import pygame\n'), ((4700, 4721), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (4712, 4721), False, 'import pygame\n'), ((4975, 4996), 'pygame.Color', 'pygame.Color', (['"""green"""'], {}), "('green')\n", (4987, 4996), False, 'import pygame\n'), ((5322, 5343), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (5334, 5343), False, 'import pygame\n'), ((5530, 5551), 'pygame.Color', 'pygame.Color', (['"""white"""'], {}), "('white')\n", (5542, 5551), False, 'import pygame\n'), ((6032, 6053), 'pygame.Color', 'pygame.Color', (['"""green"""'], {}), "('green')\n", (6044, 6053), False, 'import pygame\n'), ((6249, 6268), 'pygame.Color', 'pygame.Color', (['"""red"""'], {}), "('red')\n", (6261, 6268), False, 'import pygame\n'), ((6477, 6496), 'pygame.Color', 'pygame.Color', (['"""red"""'], {}), "('red')\n", (6489, 6496), False, 'import pygame\n'), ((8520, 8531), 'time.time', 'time.time', ([], {}), '()\n', (8529, 8531), False, 'import time\n'), ((9771, 9811), 'math.sqrt', 'math.sqrt', (['(1.0 / RMS_WINDOW_SIZE * total)'], {}), '(1.0 / RMS_WINDOW_SIZE * total)\n', (9780, 9811), False, 'import math\n'), ((10147, 10212), 'csv.reader', 'csv.reader', (['emg_file', 'csv.unix_dialect'], {'quoting': 'csv.QUOTE_MINIMAL'}), '(emg_file, csv.unix_dialect, 
quoting=csv.QUOTE_MINIMAL)\n', (10157, 10212), False, 'import csv\n'), ((10987, 11001), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10998, 11001), False, 'import pickle\n'), ((11196, 11232), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'self.ca_components'}), '(n_components=self.ca_components)\n', (11199, 11232), False, 'from sklearn.decomposition import PCA, FastICA\n'), ((11860, 11916), 'sklearn.decomposition.FastICA', 'FastICA', ([], {'n_components': 'self.ca_components', 'random_state': '(0)'}), '(n_components=self.ca_components, random_state=0)\n', (11867, 11916), False, 'from sklearn.decomposition import PCA, FastICA\n'), ((12747, 12812), 'csv.reader', 'csv.reader', (['emg_file', 'csv.unix_dialect'], {'quoting': 'csv.QUOTE_MINIMAL'}), '(emg_file, csv.unix_dialect, quoting=csv.QUOTE_MINIMAL)\n', (12757, 12812), False, 'import csv\n'), ((13514, 13532), 'numpy.array', 'np.array', (['emg_data'], {}), '(emg_data)\n', (13522, 13532), True, 'import numpy as np\n'), ((13534, 13551), 'numpy.array', 'np.array', (['classes'], {}), '(classes)\n', (13542, 13551), True, 'import numpy as np\n'), ((13756, 13841), 'sklearn.svm.SVC', 'SVC', ([], {'random_state': '(0)', 'kernel': '"""rbf"""', 'class_weight': "{'idle': SVM_IDLE_WEIGHT_FACTOR}"}), "(random_state=0, kernel='rbf', class_weight={'idle': SVM_IDLE_WEIGHT_FACTOR}\n )\n", (13759, 13841), False, 'from sklearn.svm import SVC\n'), ((14218, 14243), 'numpy.array', 'np.array', (['self.svm_window'], {}), '(self.svm_window)\n', (14226, 14243), True, 'import numpy as np\n'), ((14414, 14430), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (14425, 14430), False, 'from collections import defaultdict\n'), ((17642, 17712), 'csv.writer', 'csv.writer', (['self.emg_file', 'csv.unix_dialect'], {'quoting': 'csv.QUOTE_MINIMAL'}), '(self.emg_file, csv.unix_dialect, quoting=csv.QUOTE_MINIMAL)\n', (17652, 17712), False, 'import csv\n'), ((18383, 18453), 'csv.reader', 'csv.reader', (['self.emg_file', 'csv.unix_dialect'], {'quoting': 'csv.QUOTE_MINIMAL'}), '(self.emg_file, csv.unix_dialect, quoting=csv.QUOTE_MINIMAL)\n', (18393, 18453), False, 'import csv\n'), ((23075, 23086), 'time.time', 'time.time', ([], {}), '()\n', (23084, 23086), False, 'import time\n'), ((2104, 2128), 'pygame.Color', 'pygame.Color', (['"""darkgrey"""'], {}), "('darkgrey')\n", (2116, 2128), False, 'import pygame\n'), ((3113, 3142), 'pygame.Color', 'pygame.Color', (['"""darkslategrey"""'], {}), "('darkslategrey')\n", (3125, 3142), False, 'import pygame\n'), ((5230, 5251), 'pygame.Color', 'pygame.Color', (['"""green"""'], {}), "('green')\n", (5242, 5251), False, 'import pygame\n'), ((5276, 5295), 'pygame.Color', 'pygame.Color', (['"""red"""'], {}), "('red')\n", (5288, 5295), False, 'import pygame\n'), ((11403, 11421), 'numpy.array', 'np.array', (['rms_data'], {}), '(rms_data)\n', (11411, 11421), True, 'import numpy as np\n'), ((12087, 12105), 'numpy.array', 'np.array', (['rms_data'], {}), '(rms_data)\n', (12095, 12105), True, 'import numpy as np\n'), ((17510, 17535), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (17525, 17535), False, 'import os\n'), ((23868, 23886), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (23884, 23886), False, 'import pygame\n'), ((2379, 2403), 'pygame.Color', 'pygame.Color', (['"""darkgrey"""'], {}), "('darkgrey')\n", (2391, 2403), False, 'import pygame\n'), ((2880, 2901), 'pygame.Color', 'pygame.Color', (['"""green"""'], {}), "('green')\n", (2892, 2901), False, 'import pygame\n'), ((3479, 
3499), 'pygame.Color', 'pygame.Color', (['"""blue"""'], {}), "('blue')\n", (3491, 3499), False, 'import pygame\n'), ((3813, 3834), 'pygame.Color', 'pygame.Color', (['"""green"""'], {}), "('green')\n", (3825, 3834), False, 'import pygame\n'), ((5786, 5805), 'pygame.Color', 'pygame.Color', (['"""red"""'], {}), "('red')\n", (5798, 5805), False, 'import pygame\n'), ((5824, 5845), 'pygame.Color', 'pygame.Color', (['"""white"""'], {}), "('white')\n", (5836, 5845), False, 'import pygame\n'), ((17449, 17479), 'time.strftime', 'time.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (17462, 17479), False, 'import time\n'), ((23733, 23757), 'time.sleep', 'time.sleep', (['(delay - diff)'], {}), '(delay - diff)\n', (23743, 23757), False, 'import time\n'), ((23790, 23801), 'time.time', 'time.time', ([], {}), '()\n', (23799, 23801), False, 'import time\n'), ((2555, 2575), 'pygame.Color', 'pygame.Color', (['"""blue"""'], {}), "('blue')\n", (2567, 2575), False, 'import pygame\n'), ((12550, 12572), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (12566, 12572), False, 'import os\n'), ((23673, 23684), 'time.time', 'time.time', ([], {}), '()\n', (23682, 23684), False, 'import time\n')]
|
import numpy as np
from preprocess import Vectorizer
from flask import render_template, make_response
from google.oauth2.id_token import verify_oauth2_token
from google.auth.transport.requests import Request
from google.cloud import firestore
from os.path import join, abspath, dirname
from random import randint
from pickle import load
from scipy.sparse import load_npz
database = firestore.Client()
hedgy_path = dirname(abspath(__file__))
with open(join(hedgy_path, 'chapters.p'), 'rb') as chapters_f, open(join(hedgy_path, 'vectorizer.p'), 'rb') as vectorizer_f:
chapters = load(chapters_f)
vectorizer = load(vectorizer_f)
tfidf_matrix = load_npz(join(hedgy_path, 'tfidf.npz'))
similarity_matrix = np.load(join(hedgy_path, 'similarity.npy'))
def hedgy(request):
ranking, sliced, max_request, seed, token = [], False, 50, None, None
credential, clicks = None, None
if request.method == 'POST':
credential = request.form['credential'] if 'credential' in request.form else None
elif request.cookies.get('__session'):
credential, clicks = request.cookies.get('__session').split('#')
if credential:
try:
token = verify_oauth2_token(credential, Request(), '1080182836213-psdjtgo2u10a1fb6e4sbdfpdlmco5i63.apps.googleusercontent.com')
except:
pass
if token:
user_doc = database.collection('users').document(token['sub'])
if not user_doc.get().exists:
user_doc.set({'email': token['email'], 'given_name': token['given_name'], 'family_name': token['family_name'], 'picture': token['picture'], 'clicks': []})
if clicks:
user_doc.update({'clicks': firestore.ArrayUnion(clicks[:-1].split(','))})
if 'max' in request.args:
max_request = int(request.args.get('max'))
if 'query' in request.args or 'similar' in request.args:
if 'query' in request.args:
query_vector = vectorizer.transform([request.args.get('query')])
similarity_vector = (tfidf_matrix @ query_vector.T).toarray().squeeze()
else:
similarity_vector = similarity_matrix[int(request.args.get('similar'))]
if np.any(similarity_vector):
max_chapters = np.count_nonzero(similarity_vector)
if max_request < max_chapters:
max_chapters = max_request
sliced = True
ranking = np.argsort(similarity_vector)[::-1][:max_chapters].tolist()
elif 'seed' in request.args:
seed = int(request.args.get('seed'))
else:
seed = randint(1, 1000000)
if seed:
np.random.seed(seed)
ranking = np.random.permutation(len(chapters))[:max_request].tolist()
sliced = True
response = make_response(render_template('hedgy.html', chapters=chapters, ranking=ranking, sliced=sliced, max_request=max_request, seed=seed, token=token, args=request.args))
if token:
response.set_cookie('__session', credential + '#', secure=True)
response.headers['Cache-Control'] = 'private'
elif '__session' in request.cookies:
response.set_cookie('__session', '', expires=0)
response.headers['Cache-Control'] = 'private'
return response
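# Hedged sketch (added for illustration; not part of the deployed function): the
# 'query' branch above reduces to a sparse matrix-vector product followed by an
# argsort. The toy arrays below are hypothetical stand-ins for the pickled
# tfidf_matrix and vectorizer.
def _ranking_sketch():
    from scipy.sparse import csr_matrix
    toy_tfidf = csr_matrix(np.array([[0.0, 0.7, 0.7],
                                [0.9, 0.0, 0.4],
                                [0.5, 0.5, 0.0]]))  # 3 chapters x 3 terms
    toy_query = csr_matrix(np.array([[0.0, 1.0, 0.0]]))  # query matches term 1 only
    scores = (toy_tfidf @ toy_query.T).toarray().squeeze()
    # keep only chapters with a non-zero score, best match first -> [0, 2]
    return np.argsort(scores)[::-1][:np.count_nonzero(scores)].tolist()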
|
[
"flask.render_template",
"google.cloud.firestore.Client",
"google.auth.transport.requests.Request",
"pickle.load",
"os.path.join",
"numpy.any",
"numpy.count_nonzero",
"numpy.argsort",
"numpy.random.seed",
"os.path.abspath",
"random.randint"
] |
[((383, 401), 'google.cloud.firestore.Client', 'firestore.Client', ([], {}), '()\n', (399, 401), False, 'from google.cloud import firestore\n'), ((424, 441), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (431, 441), False, 'from os.path import join, abspath, dirname\n'), ((583, 599), 'pickle.load', 'load', (['chapters_f'], {}), '(chapters_f)\n', (587, 599), False, 'from pickle import load\n'), ((617, 635), 'pickle.load', 'load', (['vectorizer_f'], {}), '(vectorizer_f)\n', (621, 635), False, 'from pickle import load\n'), ((660, 689), 'os.path.join', 'join', (['hedgy_path', '"""tfidf.npz"""'], {}), "(hedgy_path, 'tfidf.npz')\n", (664, 689), False, 'from os.path import join, abspath, dirname\n'), ((719, 753), 'os.path.join', 'join', (['hedgy_path', '"""similarity.npy"""'], {}), "(hedgy_path, 'similarity.npy')\n", (723, 753), False, 'from os.path import join, abspath, dirname\n'), ((453, 483), 'os.path.join', 'join', (['hedgy_path', '"""chapters.p"""'], {}), "(hedgy_path, 'chapters.p')\n", (457, 483), False, 'from os.path import join, abspath, dirname\n'), ((511, 543), 'os.path.join', 'join', (['hedgy_path', '"""vectorizer.p"""'], {}), "(hedgy_path, 'vectorizer.p')\n", (515, 543), False, 'from os.path import join, abspath, dirname\n'), ((2621, 2640), 'random.randint', 'randint', (['(1)', '(1000000)'], {}), '(1, 1000000)\n', (2628, 2640), False, 'from random import randint\n'), ((2662, 2682), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2676, 2682), True, 'import numpy as np\n'), ((2812, 2965), 'flask.render_template', 'render_template', (['"""hedgy.html"""'], {'chapters': 'chapters', 'ranking': 'ranking', 'sliced': 'sliced', 'max_request': 'max_request', 'seed': 'seed', 'token': 'token', 'args': 'request.args'}), "('hedgy.html', chapters=chapters, ranking=ranking, sliced=\n sliced, max_request=max_request, seed=seed, token=token, args=request.args)\n", (2827, 2965), False, 'from flask import render_template, make_response\n'), ((2202, 2227), 'numpy.any', 'np.any', (['similarity_vector'], {}), '(similarity_vector)\n', (2208, 2227), True, 'import numpy as np\n'), ((1210, 1219), 'google.auth.transport.requests.Request', 'Request', ([], {}), '()\n', (1217, 1219), False, 'from google.auth.transport.requests import Request\n'), ((2260, 2295), 'numpy.count_nonzero', 'np.count_nonzero', (['similarity_vector'], {}), '(similarity_vector)\n', (2276, 2295), True, 'import numpy as np\n'), ((2450, 2479), 'numpy.argsort', 'np.argsort', (['similarity_vector'], {}), '(similarity_vector)\n', (2460, 2479), True, 'import numpy as np\n')]
|
'''
Filename: predict.py
Python Version: 3.6.5
Project: Neutrophil Identifier
Author: <NAME>
Created date: Sep 5, 2018 4:13 PM
-----
Last Modified: Oct 9, 2018 3:48 PM
Modified By: <NAME>
-----
License: MIT
http://www.opensource.org/licenses/MIT
'''
import os
import sys
import logging
from math import ceil
from keras.models import load_model
import numpy as np
import tables as tb
from paths import Paths
def read_hdf5(hdf5_file, dataset="pred", batch_size=32):
"""
"""
data_img = dataset + "_img"
m_data = hdf5_file.root.__getitem__(data_img).shape[0]
batch_list = list(range(int(ceil(m_data / batch_size))))
while True:
for num in batch_list:
n_start = num * batch_size
n_end = min((num + 1) * batch_size, m_data)
inputs = hdf5_file.root.__getitem__(data_img)[n_start:n_end, ...]
yield inputs
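# Hedged illustration (not in the original file): the same start/end arithmetic used
# by read_hdf5 above, applied to a plain length so the slicing can be checked without
# an HDF5 file. The helper name is hypothetical.
def _batch_slices(m_data, batch_size):
    for num in range(int(ceil(m_data / batch_size))):
        yield num * batch_size, min((num + 1) * batch_size, m_data)
# e.g. list(_batch_slices(10, 4)) -> [(0, 4), (4, 8), (8, 10)]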
def predict(model_path, hdf5_file_path=None):
"""
"""
try:
BATCH_SIZE = 32
model = load_model(model_path)
default_path = Paths.tiles_80
if hdf5_file_path:
hdf5_file = tb.open_file(hdf5_file_path, mode='r')
else:
hdf5_file = tb.open_file(default_path, mode='r')
m_samples = hdf5_file.root.__getitem__("pred_img").shape[0]
logging.debug(f'# of samples: {m_samples}')
steps = int(ceil(m_samples / BATCH_SIZE))
generator = read_hdf5(hdf5_file, dataset="pred", batch_size=BATCH_SIZE)
preds = model.predict_generator(generator, steps=steps, verbose=1)
logging.info(preds[0:100])
base_name = os.path.basename(model_path).split('.')[0]
save_path = os.path.join(
Paths.data_test, base_name + "_tiles_80_preds.csv")
np.savetxt(save_path, preds, delimiter=',')
except Exception as e:
hdf5_file.close()
        logging.debug(e, exc_info=True)
finally:
hdf5_file.close()
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
predict(sys.argv[1])
|
[
"logging.basicConfig",
"keras.models.load_model",
"logging.debug",
"math.ceil",
"os.path.join",
"tables.open_file",
"os.path.basename",
"numpy.savetxt",
"logging.info"
] |
[((1965, 2005), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (1984, 2005), False, 'import logging\n'), ((996, 1018), 'keras.models.load_model', 'load_model', (['model_path'], {}), '(model_path)\n', (1006, 1018), False, 'from keras.models import load_model\n'), ((1300, 1343), 'logging.debug', 'logging.debug', (['f"""# of samples: {m_samples}"""'], {}), "(f'# of samples: {m_samples}')\n", (1313, 1343), False, 'import logging\n'), ((1557, 1583), 'logging.info', 'logging.info', (['preds[0:100]'], {}), '(preds[0:100])\n', (1569, 1583), False, 'import logging\n'), ((1668, 1732), 'os.path.join', 'os.path.join', (['Paths.data_test', "(base_name + '_tiles_80_preds.csv')"], {}), "(Paths.data_test, base_name + '_tiles_80_preds.csv')\n", (1680, 1732), False, 'import os\n'), ((1754, 1797), 'numpy.savetxt', 'np.savetxt', (['save_path', 'preds'], {'delimiter': '""","""'}), "(save_path, preds, delimiter=',')\n", (1764, 1797), True, 'import numpy as np\n'), ((1108, 1146), 'tables.open_file', 'tb.open_file', (['hdf5_file_path'], {'mode': '"""r"""'}), "(hdf5_file_path, mode='r')\n", (1120, 1146), True, 'import tables as tb\n'), ((1186, 1222), 'tables.open_file', 'tb.open_file', (['default_path'], {'mode': '"""r"""'}), "(default_path, mode='r')\n", (1198, 1222), True, 'import tables as tb\n'), ((1364, 1392), 'math.ceil', 'ceil', (['(m_samples / BATCH_SIZE)'], {}), '(m_samples / BATCH_SIZE)\n', (1368, 1392), False, 'from math import ceil\n'), ((608, 633), 'math.ceil', 'ceil', (['(m_data / batch_size)'], {}), '(m_data / batch_size)\n', (612, 633), False, 'from math import ceil\n'), ((1605, 1633), 'os.path.basename', 'os.path.basename', (['model_path'], {}), '(model_path)\n', (1621, 1633), False, 'import os\n')]
|
import numpy as np
import os
import tensorflow as tf
EPS = 1e-8
def placeholder(dim=None):
return tf.placeholder(dtype=tf.float32, shape=(None,dim) if dim else (None,))
def placeholders(*args):
return [placeholder(dim) for dim in args]
def mlp(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None):
init_fn = tf.keras.initializers.Orthogonal(1.0)
for h in hidden_sizes[:-1]:
x = tf.layers.dense(x, units=h, activation=activation, kernel_initializer=init_fn)
return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation, kernel_initializer=init_fn)
def get_vars(scope):
return [x for x in tf.global_variables() if scope in x.name]
def count_vars(scope):
v = get_vars(scope)
return sum([np.prod(var.shape.as_list()) for var in v])
def gaussian_likelihood(x, mu, log_std):
pre_sum = -0.5 * (((x-mu)/(tf.exp(log_std)+EPS))**2 + 2*log_std + np.log(2*np.pi))
return tf.reduce_sum(pre_sum, axis=1)
def clip_but_pass_gradient(x, l=-1., u=1.):
clip_up = tf.cast(x > u, tf.float32)
clip_low = tf.cast(x < l, tf.float32)
return x + tf.stop_gradient((u - x)*clip_up + (l - x)*clip_low)
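# Hedged check (added for clarity; not part of the original module): the forward
# value of clip_but_pass_gradient is an ordinary clip, while the tf.stop_gradient
# term only changes the backward pass, keeping d(output)/dx equal to 1 outside [l, u].
def _clip_forward_check(l=-1., u=1.):
    x = np.array([-2.0, 0.3, 1.5])
    clip_up = (x > u).astype(np.float64)
    clip_low = (x < l).astype(np.float64)
    forward = x + ((u - x) * clip_up + (l - x) * clip_low)
    return np.allclose(forward, np.clip(x, l, u))  # True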
"""
Policies
"""
def gumbel_policy(x, act_dim, hidden_sizes, activation):
# policy network outputs
net = mlp(x, list(hidden_sizes), activation, activation)
logits = tf.layers.dense(net, act_dim, activation='linear')
    # action and log action probabilities (log_softmax covers numerical problems)
action_probs = tf.nn.softmax([logits], axis=-1)
log_action_probs = tf.nn.log_softmax([logits], axis=-1)
# policy with no noise
mu = tf.argmax(logits, axis=-1)
# add gumbel noise to action distributions
temperature = tf.convert_to_tensor(1.0) # 0 --> argmax, inf --> uniform
uniform_noise = tf.random_uniform(shape=tf.shape(logits),
minval=np.finfo(np.float32).tiny, # (0,1) range
maxval=1.)
gumbel_noise = -tf.log(-tf.log(uniform_noise))
noisy_logits = logits + gumbel_noise
pi_dist = tf.nn.softmax(noisy_logits / temperature[..., tf.newaxis])
    # don't use tf.dist.relaxedCategorical for log_prob, seems to give wrong results
logp_pi = -tf.reduce_sum(-pi_dist * tf.nn.log_softmax(logits, axis=-1), axis=1)
return mu, pi_dist, logp_pi
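# Hedged numpy sketch (illustration only; not part of the original module): the
# Gumbel-softmax trick used above. Adding -log(-log(U)) noise to the logits and
# applying a temperature-scaled softmax approximates sampling from the categorical
# distribution defined by the logits.
def _gumbel_softmax_sample_np(logits, temperature=1.0, rng=np.random):
    uniform = rng.uniform(low=np.finfo(np.float32).tiny, high=1.0, size=np.shape(logits))
    noisy = (np.asarray(logits) - np.log(-np.log(uniform))) / temperature
    shifted = np.exp(noisy - np.max(noisy))  # numerically stable softmax
    return shifted / np.sum(shifted)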
"""
Actor-Critics
"""
def a_out_mlp_actor_critic(x, a, hidden_sizes=[400,300], activation=tf.nn.relu, policy=gumbel_policy):
act_dim = a.shape.as_list()[-1]
with tf.variable_scope('pi'):
mu, pi_dist, logp_pi = policy(x, act_dim, hidden_sizes, activation)
# vfs
with tf.variable_scope('q1'):
q1 = mlp(x, list(hidden_sizes)+[act_dim], activation, None)
q1_a = tf.reduce_sum(tf.multiply(q1, a), axis=1)
with tf.variable_scope('q2'):
q2 = mlp(x, list(hidden_sizes)+[act_dim], activation, None)
q2_a = tf.reduce_sum(tf.multiply(q2, a), axis=1)
return mu, pi_dist, logp_pi, q1_a, q2_a
def a_in_mlp_actor_critic(x, a, hidden_sizes=[400,300], activation=tf.nn.relu, policy=gumbel_policy):
act_dim = a.shape.as_list()[-1]
with tf.variable_scope('pi'):
mu, pi_dist, logp_pi = policy(x, act_dim, hidden_sizes, activation)
# vfs
with tf.variable_scope('q1'):
q1_a = tf.squeeze(mlp(tf.concat([x,a], axis=-1), list(hidden_sizes)+[1], activation, None), axis=1)
with tf.variable_scope('q2'):
q2_a = tf.squeeze(mlp(tf.concat([x,a], axis=-1), list(hidden_sizes)+[1], activation, None), axis=1)
return mu, pi_dist, logp_pi, q1_a, q2_a
|
[
"tensorflow.shape",
"tensorflow.reduce_sum",
"numpy.log",
"tensorflow.multiply",
"tensorflow.nn.softmax",
"tensorflow.keras.initializers.Orthogonal",
"tensorflow.cast",
"tensorflow.log",
"tensorflow.placeholder",
"tensorflow.concat",
"tensorflow.convert_to_tensor",
"tensorflow.variable_scope",
"tensorflow.global_variables",
"tensorflow.nn.log_softmax",
"numpy.finfo",
"tensorflow.layers.dense",
"tensorflow.argmax",
"tensorflow.stop_gradient",
"tensorflow.exp"
] |
[((104, 175), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '((None, dim) if dim else (None,))'}), '(dtype=tf.float32, shape=(None, dim) if dim else (None,))\n', (118, 175), True, 'import tensorflow as tf\n'), ((338, 375), 'tensorflow.keras.initializers.Orthogonal', 'tf.keras.initializers.Orthogonal', (['(1.0)'], {}), '(1.0)\n', (370, 375), True, 'import tensorflow as tf\n'), ((510, 614), 'tensorflow.layers.dense', 'tf.layers.dense', (['x'], {'units': 'hidden_sizes[-1]', 'activation': 'output_activation', 'kernel_initializer': 'init_fn'}), '(x, units=hidden_sizes[-1], activation=output_activation,\n kernel_initializer=init_fn)\n', (525, 614), True, 'import tensorflow as tf\n'), ((946, 976), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['pre_sum'], {'axis': '(1)'}), '(pre_sum, axis=1)\n', (959, 976), True, 'import tensorflow as tf\n'), ((1036, 1062), 'tensorflow.cast', 'tf.cast', (['(x > u)', 'tf.float32'], {}), '(x > u, tf.float32)\n', (1043, 1062), True, 'import tensorflow as tf\n'), ((1078, 1104), 'tensorflow.cast', 'tf.cast', (['(x < l)', 'tf.float32'], {}), '(x < l, tf.float32)\n', (1085, 1104), True, 'import tensorflow as tf\n'), ((1353, 1403), 'tensorflow.layers.dense', 'tf.layers.dense', (['net', 'act_dim'], {'activation': '"""linear"""'}), "(net, act_dim, activation='linear')\n", (1368, 1403), True, 'import tensorflow as tf\n'), ((1505, 1537), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['[logits]'], {'axis': '(-1)'}), '([logits], axis=-1)\n', (1518, 1537), True, 'import tensorflow as tf\n'), ((1561, 1597), 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['[logits]'], {'axis': '(-1)'}), '([logits], axis=-1)\n', (1578, 1597), True, 'import tensorflow as tf\n'), ((1635, 1661), 'tensorflow.argmax', 'tf.argmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (1644, 1661), True, 'import tensorflow as tf\n'), ((1728, 1753), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['(1.0)'], {}), '(1.0)\n', (1748, 1753), True, 'import tensorflow as tf\n'), ((2091, 2149), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['(noisy_logits / temperature[..., tf.newaxis])'], {}), '(noisy_logits / temperature[..., tf.newaxis])\n', (2104, 2149), True, 'import tensorflow as tf\n'), ((420, 498), 'tensorflow.layers.dense', 'tf.layers.dense', (['x'], {'units': 'h', 'activation': 'activation', 'kernel_initializer': 'init_fn'}), '(x, units=h, activation=activation, kernel_initializer=init_fn)\n', (435, 498), True, 'import tensorflow as tf\n'), ((1120, 1176), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['((u - x) * clip_up + (l - x) * clip_low)'], {}), '((u - x) * clip_up + (l - x) * clip_low)\n', (1136, 1176), True, 'import tensorflow as tf\n'), ((2525, 2548), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""pi"""'], {}), "('pi')\n", (2542, 2548), True, 'import tensorflow as tf\n'), ((2646, 2669), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""q1"""'], {}), "('q1')\n", (2663, 2669), True, 'import tensorflow as tf\n'), ((2809, 2832), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""q2"""'], {}), "('q2')\n", (2826, 2832), True, 'import tensorflow as tf\n'), ((3159, 3182), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""pi"""'], {}), "('pi')\n", (3176, 3182), True, 'import tensorflow as tf\n'), ((3280, 3303), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""q1"""'], {}), "('q1')\n", (3297, 3303), True, 'import tensorflow as tf\n'), ((3424, 3447), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""q2"""'], {}), 
"('q2')\n", (3441, 3447), True, 'import tensorflow as tf\n'), ((656, 677), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (675, 677), True, 'import tensorflow as tf\n'), ((918, 935), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (924, 935), True, 'import numpy as np\n'), ((1830, 1846), 'tensorflow.shape', 'tf.shape', (['logits'], {}), '(logits)\n', (1838, 1846), True, 'import tensorflow as tf\n'), ((2772, 2790), 'tensorflow.multiply', 'tf.multiply', (['q1', 'a'], {}), '(q1, a)\n', (2783, 2790), True, 'import tensorflow as tf\n'), ((2935, 2953), 'tensorflow.multiply', 'tf.multiply', (['q2', 'a'], {}), '(q2, a)\n', (2946, 2953), True, 'import tensorflow as tf\n'), ((1893, 1913), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (1901, 1913), True, 'import numpy as np\n'), ((2012, 2033), 'tensorflow.log', 'tf.log', (['uniform_noise'], {}), '(uniform_noise)\n', (2018, 2033), True, 'import tensorflow as tf\n'), ((2275, 2309), 'tensorflow.nn.log_softmax', 'tf.nn.log_softmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (2292, 2309), True, 'import tensorflow as tf\n'), ((3336, 3362), 'tensorflow.concat', 'tf.concat', (['[x, a]'], {'axis': '(-1)'}), '([x, a], axis=-1)\n', (3345, 3362), True, 'import tensorflow as tf\n'), ((3480, 3506), 'tensorflow.concat', 'tf.concat', (['[x, a]'], {'axis': '(-1)'}), '([x, a], axis=-1)\n', (3489, 3506), True, 'import tensorflow as tf\n'), ((879, 894), 'tensorflow.exp', 'tf.exp', (['log_std'], {}), '(log_std)\n', (885, 894), True, 'import tensorflow as tf\n')]
|
# -*- coding: utf-8 -*-
"""Polynomial techniques for fitting baselines to experimental data.
Created on Feb. 27, 2021
@author: <NAME>
The function penalized_poly was adapted from MATLAB code from
https://www.mathworks.com/matlabcentral/fileexchange/27429-background-correction
(accessed March 18, 2021), which was licensed under the BSD-2-clause below.
License: 2-clause BSD
Copyright (c) 2012, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The function loess was adapted from code from https://gist.github.com/agramfort/850437
(accessed March 25, 2021), which was licensed under the BSD-3-clause below.
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
Copyright (c) 2015, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from math import ceil
import warnings
import numpy as np
from . import _weighting
from ._algorithm_setup import _get_vander, _setup_polynomial
from ._compat import jit, prange
from .utils import (
_MIN_FLOAT, ParameterWarning, _convert_coef, _interp_inplace, _inverted_sort,
relative_difference
)
def poly(data, x_data=None, poly_order=2, weights=None, return_coef=False):
"""
Computes a polynomial that fits the baseline of the data.
Parameters
----------
data : array-like, shape (N,)
The y-values of the measured data, with N data points.
x_data : array-like, shape (N,), optional
The x-values of the measured data. Default is None, which will create an
array from -1 to 1 with N points.
poly_order : int, optional
The polynomial order for fitting the baseline. Default is 2.
weights : array-like, shape (N,), optional
The weighting array. If None (default), then will be an array with
size equal to N and all values set to 1.
return_coef : bool, optional
If True, will convert the polynomial coefficients for the fit baseline to
a form that fits the input x_data and return them in the params dictionary.
Default is False, since the conversion takes time.
Returns
-------
baseline : numpy.ndarray, shape (N,)
The calculated baseline.
params : dict
A dictionary with the following items:
* 'weights': numpy.ndarray, shape (N,)
The weight array used for fitting the data.
* 'coef': numpy.ndarray, shape (poly_order,)
Only if `return_coef` is True. The array of polynomial parameters
for the baseline, in increasing order. Can be used to create a
polynomial using numpy.polynomial.polynomial.Polynomial().
Notes
-----
To only fit regions without peaks, supply a weight array with zero values
at the indices where peaks are located.
"""
y, x, weight_array, original_domain = _setup_polynomial(data, x_data, weights)
fit_polynomial = np.polynomial.Polynomial.fit(x, y, poly_order, w=np.sqrt(weight_array))
baseline = fit_polynomial(x)
params = {'weights': weight_array}
if return_coef:
params['coef'] = fit_polynomial.convert(window=original_domain).coef
return baseline, params
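# Hedged usage sketch (added for illustration; not part of the original module): a
# minimal call to the poly function above on an arbitrary synthetic signal.
def _example_poly_usage():
    x = np.linspace(0, 100, 500)
    signal = 0.01 * (x - 40) ** 2 + 5 + np.exp(-(x - 60) ** 2 / 4)  # slow background plus one peak
    baseline, params = poly(signal, x, poly_order=2)
    return signal - baseline, params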
def modpoly(data, x_data=None, poly_order=2, tol=1e-3, max_iter=250, weights=None,
use_original=False, mask_initial_peaks=False, return_coef=False):
"""
The modified polynomial (ModPoly) baseline algorithm.
Parameters
----------
data : array-like, shape (N,)
The y-values of the measured data, with N data points.
x_data : array-like, shape (N,), optional
The x-values of the measured data. Default is None, which will create an
array from -1 to 1 with N points.
poly_order : int, optional
The polynomial order for fitting the baseline. Default is 2.
tol : float, optional
The exit criteria. Default is 1e-3.
max_iter : int, optional
The maximum number of iterations. Default is 250.
weights : array-like, shape (N,), optional
The weighting array. If None (default), then will be an array with
size equal to N and all values set to 1.
use_original : bool, optional
If False (default), will compare the baseline of each iteration with
the y-values of that iteration [1]_ when choosing minimum values. If True,
will compare the baseline with the original y-values given by `data` [2]_.
mask_initial_peaks : bool, optional
If True, will mask any data where the initial baseline fit + the standard
deviation of the residual is less than measured data [3]_. Default is False.
return_coef : bool, optional
If True, will convert the polynomial coefficients for the fit baseline to
a form that fits the input x_data and return them in the params dictionary.
Default is False, since the conversion takes time.
Returns
-------
baseline : numpy.ndarray, shape (N,)
The calculated baseline.
params : dict
A dictionary with the following items:
* 'weights': numpy.ndarray, shape (N,)
The weight array used for fitting the data.
* 'tol_history': numpy.ndarray
An array containing the calculated tolerance values for
each iteration. The length of the array is the number of iterations
completed. If the last value in the array is greater than the input
`tol` value, then the function did not converge.
* 'coef': numpy.ndarray, shape (poly_order + 1,)
Only if `return_coef` is True. The array of polynomial parameters
for the baseline, in increasing order. Can be used to create a
polynomial using numpy.polynomial.polynomial.Polynomial().
Notes
-----
Algorithm originally developed in [2]_ and then slightly modified in [1]_.
References
----------
.. [1] <NAME>., et al. Baseline correction by improved iterative polynomial
fitting with automatic threshold. Chemometrics and Intelligent
Laboratory Systems, 2006, 82, 59-65.
.. [2] <NAME>., et al. Automated method for subtraction of fluorescence
from biological raman spectra. Applied Spectroscopy, 2003, 57(11),
1363-1367.
.. [3] <NAME>., et al. Automated Autofluorescence Background Subtraction
Algorithm for Biomedical Raman Spectroscopy, Applied Spectroscopy,
2007, 61(11), 1225-1232.
"""
y, x, weight_array, original_domain, vander, pseudo_inverse = _setup_polynomial(
data, x_data, weights, poly_order, True, True, True
)
sqrt_w = np.sqrt(weight_array)
if use_original:
y0 = y
coef = np.dot(pseudo_inverse, sqrt_w * y)
baseline = np.dot(vander, coef)
if mask_initial_peaks:
# use baseline + deviation since without deviation, half of y should be above baseline
weight_array[baseline + np.std(y - baseline) < y] = 0
sqrt_w = np.sqrt(weight_array)
vander, pseudo_inverse = _get_vander(x, poly_order, sqrt_w)
tol_history = np.empty(max_iter)
for i in range(max_iter):
baseline_old = baseline
y = np.minimum(y0 if use_original else y, baseline)
coef = np.dot(pseudo_inverse, sqrt_w * y)
baseline = np.dot(vander, coef)
calc_difference = relative_difference(baseline_old, baseline)
tol_history[i] = calc_difference
if calc_difference < tol:
break
params = {'weights': weight_array, 'tol_history': tol_history[:i + 1]}
if return_coef:
params['coef'] = _convert_coef(coef, original_domain)
return baseline, params
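# Hedged sketch (illustration only; not part of the original module): stripped of
# weighting and coefficient conversion, the ModPoly loop above amounts to "fit a
# polynomial, clip the data to the fit, repeat until the fit stops changing".
def _modpoly_sketch(y, x, poly_order=2, tol=1e-3, max_iter=250):
    baseline = np.polynomial.Polynomial.fit(x, y, poly_order)(x)
    for _ in range(max_iter):
        y = np.minimum(y, baseline)
        new_baseline = np.polynomial.Polynomial.fit(x, y, poly_order)(x)
        converged = relative_difference(baseline, new_baseline) < tol
        baseline = new_baseline
        if converged:
            break
    return baseline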
def imodpoly(data, x_data=None, poly_order=2, tol=1e-3, max_iter=250, weights=None,
use_original=False, mask_initial_peaks=True, return_coef=False, num_std=1):
"""
    The improved modified polynomial (IModPoly) baseline algorithm.
Parameters
----------
data : array-like, shape (N,)
The y-values of the measured data, with N data points.
x_data : array-like, shape (N,), optional
The x-values of the measured data. Default is None, which will create an
array from -1 to 1 with N points.
poly_order : int, optional
The polynomial order for fitting the baseline. Default is 2.
tol : float, optional
The exit criteria. Default is 1e-3.
max_iter : int, optional
The maximum number of iterations. Default is 250.
weights : array-like, shape (N,), optional
The weighting array. If None (default), then will be an array with
size equal to N and all values set to 1.
use_original : bool, optional
If False (default), will compare the baseline of each iteration with
the y-values of that iteration [4]_ when choosing minimum values. If True,
will compare the baseline with the original y-values given by `data` [5]_.
mask_initial_peaks : bool, optional
If True (default), will mask any data where the initial baseline fit +
the standard deviation of the residual is less than measured data [6]_.
return_coef : bool, optional
If True, will convert the polynomial coefficients for the fit baseline to
a form that fits the input x_data and return them in the params dictionary.
Default is False, since the conversion takes time.
num_std : float, optional
The number of standard deviations to include when thresholding. Default
is 1.
Returns
-------
baseline : numpy.ndarray, shape (N,)
The calculated baseline.
params : dict
A dictionary with the following items:
* 'weights': numpy.ndarray, shape (N,)
The weight array used for fitting the data.
* 'tol_history': numpy.ndarray
An array containing the calculated tolerance values for
each iteration. The length of the array is the number of iterations
completed. If the last value in the array is greater than the input
`tol` value, then the function did not converge.
* 'coef': numpy.ndarray, shape (poly_order + 1,)
Only if `return_coef` is True. The array of polynomial parameters
for the baseline, in increasing order. Can be used to create a
polynomial using numpy.polynomial.polynomial.Polynomial().
Notes
-----
Algorithm originally developed in [6]_.
References
----------
.. [4] <NAME>., et al. Baseline correction by improved iterative polynomial
fitting with automatic threshold. Chemometrics and Intelligent
Laboratory Systems, 2006, 82, 59-65.
.. [5] <NAME>., et al. Automated method for subtraction of fluorescence
from biological raman spectra. Applied Spectroscopy, 2003, 57(11),
1363-1367.
.. [6] <NAME>., et al. Automated Autofluorescence Background Subtraction
Algorithm for Biomedical Raman Spectroscopy, Applied Spectroscopy,
2007, 61(11), 1225-1232.
"""
y, x, weight_array, original_domain, vander, pseudo_inverse = _setup_polynomial(
data, x_data, weights, poly_order, True, True, True
)
sqrt_w = np.sqrt(weight_array)
if use_original:
y0 = y
coef = np.dot(pseudo_inverse, sqrt_w * y)
baseline = np.dot(vander, coef)
deviation = np.std(y - baseline)
if mask_initial_peaks:
weight_array[baseline + deviation < y] = 0
sqrt_w = np.sqrt(weight_array)
vander, pseudo_inverse = _get_vander(x, poly_order, sqrt_w)
tol_history = np.empty(max_iter)
for i in range(max_iter):
y = np.minimum(y0 if use_original else y, baseline + num_std * deviation)
coef = np.dot(pseudo_inverse, sqrt_w * y)
baseline = np.dot(vander, coef)
new_deviation = np.std(y - baseline)
# use new_deviation as dividing term in relative difference
calc_difference = relative_difference(new_deviation, deviation)
tol_history[i] = calc_difference
if calc_difference < tol:
break
deviation = new_deviation
params = {'weights': weight_array, 'tol_history': tol_history[:i + 1]}
if return_coef:
params['coef'] = _convert_coef(coef, original_domain)
return baseline, params
# adapted from (https://www.mathworks.com/matlabcentral/fileexchange/27429-background-correction);
# see license above
def _huber_loss(residual, threshold=1.0, alpha_factor=0.99, symmetric=True):
"""
The Huber non-quadratic cost function.
Parameters
----------
residual : numpy.ndarray, shape (N,)
The residual array.
threshold : float, optional
Any residual values below the threshold are given quadratic loss.
Default is 1.0.
alpha_factor : float, optional
The scale between 0 and 1 to multiply the cost function's alpha_max
value (see Notes below). Default is 0.99.
symmetric : bool, optional
If True (default), the cost function is symmetric and applies the same
weighting for positive and negative values. If False, will apply weights
asymmetrically so that only positive weights are given the non-quadratic
        weighting and negative weights have normal, quadratic weighting.
Returns
-------
weights : numpy.ndarray, shape (N,)
The weight array.
Notes
-----
The returned result is
-residual + alpha_factor * alpha_max * phi'(residual)
where phi'(x) is the derivative of the huber loss function, phi(x).
References
----------
<NAME>., et al. Background removal from spectra by designing and
minimising a non-quadratic cost function. Chemometrics and Intelligent
Laboratory Systems, 2005, 76(2), 121–133.
"""
alpha = alpha_factor * 0.5 # alpha_max for huber is 0.5
if symmetric:
mask = (np.abs(residual) < threshold)
weights = (
mask * residual * (2 * alpha - 1)
            + (~mask) * (2 * alpha * threshold * np.sign(residual) - residual)
)
else:
mask = (residual < threshold)
weights = (
mask * residual * (2 * alpha - 1)
+ (~mask) * (2 * alpha * threshold - residual)
)
return weights
# adapted from (https://www.mathworks.com/matlabcentral/fileexchange/27429-background-correction);
# see license above
def _truncated_quadratic_loss(residual, threshold=1.0, alpha_factor=0.99, symmetric=True):
"""
The Truncated-Quadratic non-quadratic cost function.
Parameters
----------
residual : numpy.ndarray, shape (N,)
The residual array.
threshold : float, optional
Any residual values below the threshold are given quadratic loss.
Default is 1.0.
alpha_factor : float, optional
The scale between 0 and 1 to multiply the cost function's alpha_max
value (see Notes below). Default is 0.99.
symmetric : bool, optional
If True (default), the cost function is symmetric and applies the same
weighting for positive and negative values. If False, will apply weights
asymmetrically so that only positive weights are given the non-quadratic
        weighting and negative weights have normal, quadratic weighting.
Returns
-------
weights : numpy.ndarray, shape (N,)
The weight array.
Notes
-----
The returned result is
-residual + alpha_factor * alpha_max * phi'(residual)
where phi'(x) is the derivative of the truncated quadratic function, phi(x).
References
----------
Mazet, V., et al. Background removal from spectra by designing and
minimising a non-quadratic cost function. Chemometrics and Intelligent
Laboratory Systems, 2005, 76(2), 121–133.
"""
alpha = alpha_factor * 0.5 # alpha_max for truncated quadratic is 0.5
if symmetric:
mask = (np.abs(residual) < threshold)
else:
mask = (residual < threshold)
return mask * residual * (2 * alpha - 1) - (~mask) * residual
def _indec_loss(residual, threshold=1.0, alpha_factor=0.99, symmetric=True):
"""
The Indec non-quadratic cost function.
Parameters
----------
residual : numpy.ndarray, shape (N,)
The residual array.
threshold : float, optional
Any residual values below the threshold are given quadratic loss.
Default is 1.0.
alpha_factor : float, optional
The scale between 0 and 1 to multiply the cost function's alpha_max
value (see Notes below). Default is 0.99.
symmetric : bool, optional
If True (default), the cost function is symmetric and applies the same
weighting for positive and negative values. If False, will apply weights
asymmetrically so that only positive weights are given the non-quadratic
        weighting and negative weights have normal, quadratic weighting.
Returns
-------
weights : numpy.ndarray, shape (N,)
The weight array.
Notes
-----
The returned result is
-residual + alpha_factor * alpha_max * phi'(residual)
where phi'(x) is the derivative of the Indec function, phi(x).
References
----------
<NAME>., et al. Goldindec: A Novel Algorithm for Raman Spectrum Baseline
Correction. Applied Spectroscopy, 2015, 69(7), 834-842.
<NAME>., et al. Background removal from spectra by designing and
minimising a non-quadratic cost function. Chemometrics and Intelligent
Laboratory Systems, 2005, 76(2), 121–133.
"""
alpha = alpha_factor * 0.5 # alpha_max for indec is 0.5
if symmetric:
mask = (np.abs(residual) < threshold)
multiple = np.sign(residual)
else:
mask = (residual < threshold)
# multiple=1 is same as sign(residual) since residual is always > 0
# for asymmetric case, but this allows not doing the sign calculation
multiple = 1
weights = (
mask * residual * (2 * alpha - 1)
- (~mask) * (
residual + alpha * multiple * threshold**3 / np.maximum(2 * residual**2, _MIN_FLOAT)
)
)
return weights
def _identify_loss_method(loss_method):
"""
Identifies the symmetry for the given loss method.
Parameters
----------
loss_method : str
The loss method to use. Should have the symmetry identifier as
the prefix.
Returns
-------
symmetric : bool
True if `loss_method` had 's_' or 'symmetric_' as the prefix, else False.
str
The input `loss_method` value without the first section that indicated
the symmetry.
Raises
------
ValueError
Raised if the loss method does not have the correct form.
"""
prefix, *split_method = loss_method.lower().split('_')
if prefix not in ('a', 's', 'asymmetric', 'symmetric') or not split_method:
raise ValueError('must specify loss function symmetry by appending "a_" or "s_"')
if prefix in ('a', 'asymmetric'):
symmetric = False
else:
symmetric = True
return symmetric, '_'.join(split_method)
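# Hedged examples (added for clarity): how _identify_loss_method splits the
# cost_function strings accepted by penalized_poly below.
#   _identify_loss_method('asymmetric_truncated_quadratic')  # -> (False, 'truncated_quadratic')
#   _identify_loss_method('s_huber')                          # -> (True, 'huber')
#   _identify_loss_method('huber')                            # raises ValueError (no symmetry prefix)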
# adapted from (https://www.mathworks.com/matlabcentral/fileexchange/27429-background-correction);
# see license above
def penalized_poly(data, x_data=None, poly_order=2, tol=1e-3, max_iter=250,
weights=None, cost_function='asymmetric_truncated_quadratic',
threshold=None, alpha_factor=0.99, return_coef=False):
"""
Fits a polynomial baseline using a non-quadratic cost function.
The non-quadratic cost functions penalize residuals with larger values,
giving a more robust fit compared to normal least-squares.
Parameters
----------
data : array-like, shape (N,)
The y-values of the measured data, with N data points.
x_data : array-like, shape (N,), optional
The x-values of the measured data. Default is None, which will create an
array from -1 to 1 with N points.
poly_order : int, optional
The polynomial order for fitting the baseline. Default is 2.
tol : float, optional
The exit criteria. Default is 1e-3.
max_iter : int, optional
The maximum number of iterations. Default is 250.
weights : array-like, shape (N,), optional
The weighting array. If None (default), then will be an array with
size equal to N and all values set to 1.
cost_function : str, optional
The non-quadratic cost function to minimize. Must indicate symmetry of the
method by appending 'a' or 'asymmetric' for asymmetric loss, and 's' or
'symmetric' for symmetric loss. Default is 'asymmetric_truncated_quadratic'.
Available methods, and their associated reference, are:
* 'asymmetric_truncated_quadratic'[7]_
* 'symmetric_truncated_quadratic'[7]_
* 'asymmetric_huber'[7]_
* 'symmetric_huber'[7]_
* 'asymmetric_indec'[8]_
* 'symmetric_indec'[8]_
threshold : float, optional
The threshold value for the loss method, where the function goes from
quadratic loss (such as used for least squares) to non-quadratic. For
symmetric loss methods, residual values with absolute value less than
threshold will have quadratic loss. For asymmetric loss methods, residual
values less than the threshold will have quadratic loss. Default is None,
which sets `threshold` to one-tenth of the standard deviation of the input
data.
alpha_factor : float, optional
A value between 0 and 1 that controls the value of the penalty. Default is
0.99. Typically should not need to change this value.
return_coef : bool, optional
If True, will convert the polynomial coefficients for the fit baseline to
a form that fits the input x_data and return them in the params dictionary.
Default is False, since the conversion takes time.
Returns
-------
baseline : numpy.ndarray, shape (N,)
The calculated baseline.
params : dict
A dictionary with the following items:
* 'weights': numpy.ndarray, shape (N,)
The weight array used for fitting the data.
* 'tol_history': numpy.ndarray
An array containing the calculated tolerance values for
each iteration. The length of the array is the number of iterations
completed. If the last value in the array is greater than the input
`tol` value, then the function did not converge.
* 'coef': numpy.ndarray, shape (poly_order + 1,)
Only if `return_coef` is True. The array of polynomial parameters
for the baseline, in increasing order. Can be used to create a
polynomial using numpy.polynomial.polynomial.Polynomial().
Raises
------
ValueError
Raised if `alpha_factor` is not between 0 and 1.
Notes
-----
In baseline literature, this procedure is sometimes called "backcor".
References
----------
.. [7] <NAME>., et al. Background removal from spectra by designing and
minimising a non-quadratic cost function. Chemometrics and Intelligent
Laboratory Systems, 2005, 76(2), 121–133.
.. [8] <NAME>., et al. Goldindec: A Novel Algorithm for Raman Spectrum Baseline
Correction. Applied Spectroscopy, 2015, 69(7), 834-842.
"""
if not 0 < alpha_factor <= 1:
raise ValueError('alpha_factor must be between 0 and 1')
symmetric_loss, method = _identify_loss_method(cost_function)
loss_function = {
'huber': _huber_loss,
'truncated_quadratic': _truncated_quadratic_loss,
'indec': _indec_loss
}[method]
y, x, weight_array, original_domain, vander, pseudo_inverse = _setup_polynomial(
data, x_data, weights, poly_order, return_vander=True, return_pinv=True
)
if threshold is None:
threshold = np.std(y) / 10
loss_kwargs = {
'threshold': threshold, 'alpha_factor': alpha_factor, 'symmetric': symmetric_loss
}
sqrt_w = np.sqrt(weight_array)
y = sqrt_w * y
coef = np.dot(pseudo_inverse, y)
baseline = np.dot(vander, coef)
tol_history = np.empty(max_iter)
for i in range(max_iter):
baseline_old = baseline
coef = np.dot(pseudo_inverse, y + loss_function(y - sqrt_w * baseline, **loss_kwargs))
baseline = np.dot(vander, coef)
calc_difference = relative_difference(baseline_old, baseline)
tol_history[i] = calc_difference
if calc_difference < tol:
break
params = {'weights': weight_array, 'tol_history': tol_history[:i + 1]}
if return_coef:
params['coef'] = _convert_coef(coef, original_domain)
return baseline, params
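# Hedged usage sketch (added for illustration; not part of the original module):
# penalized_poly with the default asymmetric truncated-quadratic cost, which keeps
# the baseline from being pulled up by the peak.
def _example_penalized_poly_usage():
    x = np.linspace(0, 100, 500)
    signal = 0.02 * x + 2 + np.exp(-(x - 30) ** 2 / 8)  # linear background plus one peak
    baseline, params = penalized_poly(signal, x, poly_order=1)
    return signal - baseline, params['tol_history']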
def _tukey_square(residual, scale=3, symmetric=False):
"""
The square root of Tukey's bisquare function.
Parameters
----------
residual : numpy.ndarray, shape (N,)
The residual array of the fit.
scale : float, optional
A scale factor applied to the weighted residuals to control the
robustness of the fit. Default is 3.0.
symmetric : bool, optional
If False (default), will apply weighting asymmetrically, with residuals
< 0 having full weight. If True, will apply weighting the same for both
positive and negative residuals, which is regular LOESS.
Returns
-------
weights : numpy.ndarray, shape (N,)
The weighting array.
Notes
-----
The function is technically sqrt(Tukey's bisquare) since the outer
power of 2 is not performed. This is intentional, so that the square
root for weighting in least squares does not need to be done, speeding
up the calculation.
References
----------
<NAME>., et al., Baseline subtraction using robust local regression
estimation. J. Quantitative Spectroscopy and Radiative Transfer, 2001, 68,
179-193.
"""
if symmetric:
inner = residual / scale
weights = np.maximum(0, 1 - inner * inner)
else:
weights = np.ones_like(residual)
mask = residual > 0
inner = residual[mask] / scale
weights[mask] = np.maximum(0, 1 - inner * inner)
return weights
def _median_absolute_value(values):
"""
Computes the median absolute value (MAV) of an array.
Parameters
----------
values : array-like
The array of values to use for the calculation.
Returns
-------
float
The scaled median absolute value for the input array.
Notes
-----
The 1/0.6744897501960817 scale factor is to make the result comparable to the
standard deviation of a Gaussian distribution. The divisor is obtained by
calculating the value at which the cumulative distribution function of a Gaussian
distribution is 0.75 (see https://en.wikipedia.org/wiki/Median_absolute_deviation),
which can be obtained by::
from scipy.special import ndtri
ndtri(0.75) # equals 0.6744897501960817
To calculate the median absolute difference (MAD) using this function, simply do::
_median_absolute_value(values - np.median(values))
References
----------
<NAME>., et al., Baseline subtraction using robust local regression
estimation. J. Quantitative Spectroscopy and Radiative Transfer, 2001, 68,
179-193.
https://en.wikipedia.org/wiki/Median_absolute_deviation.
"""
return np.median(np.abs(values)) / 0.6744897501960817
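# Hedged numeric examples (added for clarity):
#   _median_absolute_value(np.array([1.0, -2.0, 3.0]))  # 2 / 0.6744897501960817 ~= 2.97
# and for a large standard normal sample the result approaches 1, matching the
# standard deviation of the underlying Gaussian distribution.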
@jit(nopython=True, cache=True)
def _loess_solver(AT, b):
"""
Solves the equation `A x = b` given `A.T` and `b`.
Parameters
----------
AT : numpy.ndarray, shape (M, N)
The transposed `A` matrix.
b : numpy.ndarray, shape (N,)
The `b` array.
Returns
-------
numpy.ndarray, shape (N,)
The solution to the normal equation.
Notes
-----
Uses np.linalg.solve (which uses LU decomposition) rather than np.linalg.lstsq
(which uses SVD) since solve is ~30-60% faster. np.linalg.solve requires ``A.T * A``,
which squares the condition number of ``A``, but on tested datasets the relative
difference when using solve vs lstsq (using np.allclose) is ~1e-10 to 1e-13 for
poly_orders of 1 or 2, which seems fine; the relative differences increase to
~1e-6 to 1e-9 for a poly_order of 3, and ~1e-4 to 1e-6 for a poly_order of 4, but
loess should use a poly_order <= 2, so that should not be a problem.
"""
return np.linalg.solve(AT.dot(AT.T), AT.dot(b))
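# Hedged check (illustration only; not part of the original module): the
# normal-equation solve above agrees with an SVD-based least-squares solve for the
# low polynomial orders used by loess.
def _loess_solver_check():
    rng = np.random.default_rng(0)
    AT = rng.random((3, 50))  # a poly_order of 2 gives 3 columns in the design matrix
    b = rng.random(50)
    from_solve = _loess_solver(AT, b)
    from_lstsq = np.linalg.lstsq(AT.T, b, rcond=None)[0]
    return np.allclose(from_solve, from_lstsq)  # True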
@jit(nopython=True, cache=True, parallel=True)
def _fill_skips(x, baseline, skips):
"""
Fills in the skipped baseline points using linear interpolation.
Parameters
----------
x : numpy.ndarray
The array of x-values.
baseline : numpy.ndarray
The array of baseline values with all fit points allocated. All skipped points
will be filled in using interpolation.
skips : numpy.ndarray, shape (G, 2)
The array of left and right indices that define the windows for interpolation,
with length G being the number of interpolation segments. Indices are set such
that `baseline[skips[i][0]:skips[i][1]]` will have fitted values at the first
and last indices and all other values (the slice [1:-1]) will be calculated by
interpolation.
Notes
-----
All changes to `baseline` are done inplace.
"""
for i in prange(skips.shape[0]):
window = skips[i]
left = window[0]
right = window[1]
_interp_inplace(x[left:right], baseline[left:right], baseline[left], baseline[right - 1])
# adapted from (https://gist.github.com/agramfort/850437); see license above
@jit(nopython=True, cache=True, parallel=True)
def _loess_low_memory(x, y, weights, coefs, vander, num_x, windows, fits):
"""
A version of loess that uses near constant memory.
The distance-weighted kernel for each x-value is computed each loop, rather
than cached, so memory usage is low but the calculation is slightly slower.
Parameters
----------
x : numpy.ndarray, shape (N,)
The x-values of the measured data, with N data points.
y : numpy.ndarray, shape (N,)
The y-values of the measured data, with N points.
weights : numpy.ndarray, shape (N,)
The array of weights.
coefs : numpy.ndarray, shape (N, poly_order + 1)
The array of polynomial coefficients (with polynomial order poly_order),
for each value in `x`.
vander : numpy.ndarray, shape (N, poly_order + 1)
The Vandermonde matrix for the `x` array.
num_x : int
The number of data points in `x`, also known as N.
windows : numpy.ndarray, shape (F, 2)
An array of left and right indices that define the fitting window for each fit
x-value. The length is F, which is the total number of fit points. If `fit_dx`
is <= 0, F is equal to N, the total number of x-values.
fits : numpy.ndarray, shape (F,)
The array of indices indicating which x-values to fit.
Notes
-----
The coefficient array, `coefs`, is modified inplace.
"""
baseline = np.empty(num_x)
y_fit = y * weights
vander_fit = vander.T * weights
for idx in prange(fits.shape[0]):
i = fits[idx]
window = windows[idx]
left = window[0]
right = window[1]
difference = np.abs(x[left:right] - x[i])
difference = difference / max(difference[0], difference[-1])
difference = difference * difference * difference
difference = 1 - difference
kernel = np.sqrt(difference * difference * difference)
coef = _loess_solver(
kernel * vander_fit[:, left:right], kernel * y_fit[left:right]
)
baseline[i] = vander[i].dot(coef)
coefs[i] = coef
return baseline
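# Hedged note (added for clarity): the kernel built in the loop above is the square
# root of the tricube weight (1 - |d|**3)**3, with |d| the distance to the fit point
# normalised by the largest distance in the window; the square root is taken because
# the kernel multiplies both sides of the weighted least-squares system.
#   d = np.abs(np.linspace(-1, 1, 5))
#   np.sqrt((1 - d**3) ** 3)  # -> [0.0, ~0.82, 1.0, ~0.82, 0.0]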
# adapted from (https://gist.github.com/agramfort/850437); see license above
@jit(nopython=True, cache=True, parallel=True)
def _loess_first_loop(x, y, weights, coefs, vander, total_points, num_x, windows, fits):
"""
The initial fit for loess that also caches the window values for each x-value.
Parameters
----------
x : numpy.ndarray, shape (N,)
The x-values of the measured data, with N data points.
y : numpy.ndarray, shape (N,)
The y-values of the measured data, with N points.
weights : numpy.ndarray, shape (N,)
The array of weights.
coefs : numpy.ndarray, shape (N, poly_order + 1)
The array of polynomial coefficients (with polynomial order poly_order),
for each value in `x`.
vander : numpy.ndarray, shape (N, poly_order + 1)
The Vandermonde matrix for the `x` array.
total_points : int
The number of points to include when fitting each x-value.
num_x : int
The number of data points in `x`, also known as N.
windows : numpy.ndarray, shape (F, 2)
An array of left and right indices that define the fitting window for each fit
x-value. The length is F, which is the total number of fit points. If `fit_dx`
is <= 0, F is equal to N, the total number of x-values.
fits : numpy.ndarray, shape (F,)
The array of indices indicating which x-values to fit.
Returns
-------
kernels : numpy.ndarray, shape (num_x, total_points)
The array containing the distance-weighted kernel for each x-value.
Notes
-----
The coefficient array, `coefs`, is modified inplace.
"""
kernels = np.empty((num_x, total_points))
baseline = np.empty(num_x)
y_fit = y * weights
vander_fit = vander.T * weights
for idx in prange(fits.shape[0]):
i = fits[idx]
window = windows[idx]
left = window[0]
right = window[1]
difference = np.abs(x[left:right] - x[i])
difference = difference / max(difference[0], difference[-1])
difference = difference * difference * difference
difference = 1 - difference
kernel = np.sqrt(difference * difference * difference)
kernels[i] = kernel
coef = _loess_solver(
kernel * vander_fit[:, left:right], kernel * y_fit[left:right]
)
baseline[i] = vander[i].dot(coef)
coefs[i] = coef
return kernels, baseline
@jit(nopython=True, cache=True, parallel=True)
def _loess_nonfirst_loops(y, weights, coefs, vander, kernels, windows, num_x, fits):
"""
The loess fit to use after the first loop that uses the cached window values.
Parameters
----------
y : numpy.ndarray, shape (N,)
The y-values of the measured data, with N points.
weights : numpy.ndarray, shape (N,)
The array of weights.
coefs : numpy.ndarray, shape (N, poly_order + 1)
The array of polynomial coefficients (with polynomial order poly_order),
for each value in `x`.
vander : numpy.ndarray, shape (N, poly_order + 1)
The Vandermonde matrix for the `x` array.
kernels : numpy.ndarray, shape (N, total_points)
The array containing the distance-weighted kernel for each x-value. Each
kernel has a length of total_points.
windows : numpy.ndarray, shape (F, 2)
An array of left and right indices that define the fitting window for each fit
x-value. The length is F, which is the total number of fit points. If `fit_dx`
is <= 0, F is equal to N, the total number of x-values.
num_x : int
The total number of values, N.
fits : numpy.ndarray, shape (F,)
The array of indices indicating which x-values to fit.
Notes
-----
The coefficient array, `coefs`, is modified inplace.
"""
baseline = np.empty(num_x)
y_fit = y * weights
vander_fit = vander.T * weights
for idx in prange(fits.shape[0]):
i = fits[idx]
window = windows[idx]
left = window[0]
right = window[1]
kernel = kernels[i]
coef = _loess_solver(
kernel * vander_fit[:, left:right], kernel * y_fit[left:right]
)
baseline[i] = vander[i].dot(coef)
coefs[i] = coef
return baseline
@jit(nopython=True, cache=True)
def _determine_fits(x, num_x, total_points, delta):
"""
Determines the x-values to fit and the left and right indices for each fit x-value.
The windows are set before fitting so that fitting can be done in parallel
when numba is installed, since the left and right indices would otherwise
need to be determined in order. Similarly, determining which x-values to fit would
not be able to be done in parallel since it requires knowledge of the last x-value
fit.
Parameters
----------
x : numpy.ndarray, shape (N,)
The array of x-values.
num_x : int
The total number of x-values, N.
total_points : int
The number of values to include in each fitting window.
delta : float
If `delta` is > 0, will skip all but the last x-value in the range x_last + `delta`,
where x_last is the last x-value to be fit. Fits all x-values if `delta` is <= 0.
Returns
-------
windows : numpy.ndarray, shape (F, 2)
An array of left and right indices that define the fitting window for each fit
        x-value. The length is F, which is the total number of fit points. If `delta`
        is <= 0, F is equal to N, the total number of x-values. Indices are set such
        that the number of values in `x[windows[i][0]:windows[i][1]]` is equal to
`total_points`.
fits : numpy.ndarray, shape (F,)
The array of indices indicating which x-values to fit.
skips : numpy.ndarray, shape (G, 2)
The array of left and right indices that define the windows for interpolation,
        with length G being the number of interpolation segments. G is 0 if `delta` is
<= 0. Indices are set such that `baseline[skips[i][0]:skips[i][1]]` will have
fitted values at the first and last indices and all other values (the slice [1:-1])
will be calculated by interpolation.
Notes
-----
The dtype `np.intp` is used for `fits`, `skips`, and `windows` to be consistent with
numpy since numpy internally uses that type when referring to indices.
"""
# faster to allocate array and return only filled in sections
    # rather than constantly appending to a list
if delta > 0:
check_fits = True
fits = np.empty(num_x, dtype=np.intp)
fits[0] = 0 # always fit first item
skips = np.empty((num_x, 2), dtype=np.intp)
else:
# TODO maybe use another function when fitting all points in order
# to skip the if check_fits check for every x-value; does it affect
# calculation time that much?
check_fits = False
# TODO once numba minimum version is >= 0.47, can use dtype kwarg in np.arange
fits = np.arange(num_x).astype(np.intp)
# numba cannot compile in nopython mode when directly creating
# np.array([], dtype=np.intp), so work-around by creating np.array([[0, 0]])
# and then index with [:total_skips], which becomes np.array([])
# since total_skips is 0 when delta is <= 0.
skips = np.array([[0, 0]], dtype=np.intp)
windows = np.empty((num_x, 2), dtype=np.intp)
windows[0] = (0, total_points)
total_fits = 1
total_skips = 0
skip_start = 0
skip_range = x[0] + delta
left = 0
right = total_points
for i in range(1, num_x - 1):
x_val = x[i]
if check_fits:
# use x[i+1] rather than x[i] since it ensures that the last value within
            # the range x_last_fit + delta is used; x[i+1] is also guaranteed to be >= x[i]
if x[i + 1] < skip_range:
if not skip_start:
skip_start = i
continue
else:
skip_range = x_val + delta
fits[total_fits] = i
if skip_start:
skips[total_skips] = (skip_start - 1, i + 1)
total_skips += 1
skip_start = 0
while right < num_x and x_val - x[left] > x[right] - x_val:
left += 1
right += 1
window = windows[total_fits]
window[0] = left
window[1] = right
total_fits += 1
if skip_start: # fit second to last x-value
fits[total_fits] = num_x - 2
if x[-1] - x[-2] < x[-2] - x[num_x - total_points]:
windows[total_fits] = (num_x - total_points, num_x)
else:
windows[total_fits] = (num_x - total_points - 1, num_x - 1)
total_fits += 1
skips[total_skips] = (skip_start - 1, num_x - 1)
total_skips += 1
# always fit last item
fits[total_fits] = num_x - 1
windows[total_fits] = (num_x - total_points, num_x)
total_fits += 1
return windows[:total_fits], fits[:total_fits], skips[:total_skips]
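# Illustrative sketch (not part of the original module): how one `skips` window from
# `_determine_fits` could be filled by linear interpolation, mirroring the behaviour
# described in the docstring above (the real work is done by `_fill_skips`, defined
# elsewhere in this module). `_example_fill_skip_segment` is a hypothetical helper
# added purely for explanation.
def _example_fill_skip_segment(x, baseline, skip):
    left, right = skip
    # baseline[left] and baseline[right - 1] are fitted values; everything between
    # them is interpolated in x.
    baseline[left + 1:right - 1] = np.interp(
        x[left + 1:right - 1], (x[left], x[right - 1]), (baseline[left], baseline[right - 1])
    )
    return baseline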
def loess(data, x_data=None, fraction=0.2, total_points=None, poly_order=1, scale=3.0,
tol=1e-3, max_iter=10, symmetric_weights=False, use_threshold=False, num_std=1,
use_original=False, weights=None, return_coef=False, conserve_memory=True, delta=0.0):
"""
Locally estimated scatterplot smoothing (LOESS).
Performs polynomial regression at each data point using the nearest points.
Parameters
----------
data : array-like, shape (N,)
The y-values of the measured data, with N data points.
x_data : array-like, shape (N,), optional
The x-values of the measured data. Default is None, which will create an
array from -1 to 1 with N points.
fraction : float, optional
The fraction of N data points to include for the fitting on each point.
Default is 0.2. Not used if `total_points` is not None.
total_points : int, optional
The total number of points to include for the fitting on each point. Default
is None, which will use `fraction` * N to determine the number of points.
    poly_order : int, optional
        The polynomial order for fitting the baseline. Default is 1.
    scale : float, optional
        A scale factor applied to the weighted residuals to control the robustness
        of the fit. Default is 3.0, as used in [9]_. Note that the original loess
        procedure in [10]_ used a `scale` of ~4.05.
tol : float, optional
The exit criteria. Default is 1e-3.
max_iter : int, optional
The maximum number of iterations. Default is 10.
symmetric_weights : bool, optional
If False (default), will apply weighting asymmetrically, with residuals
< 0 having a weight of 1, according to [9]_. If True, will apply weighting
the same for both positive and negative residuals, which is regular LOESS.
If `use_threshold` is True, this parameter is ignored.
use_threshold : bool, optional
If False (default), will compute weights each iteration to perform the
robust fitting, which is regular LOESS. If True, will apply a threshold
on the data being fit each iteration, based on the maximum values of the
data and the fit baseline, as proposed by [11]_, similar to the modpoly
and imodpoly techniques.
num_std : float, optional
The number of standard deviations to include when thresholding. Default
is 1, which is the value used for the imodpoly technique. Only used if
`use_threshold` is True.
use_original : bool, optional
If False (default), will compare the baseline of each iteration with
the y-values of that iteration [12]_ when choosing minimum values for
thresholding. If True, will compare the baseline with the original
y-values given by `data` [13]_. Only used if `use_threshold` is True.
weights : array-like, shape (N,), optional
The weighting array. If None (default), then will be an array with
size equal to N and all values set to 1.
return_coef : bool, optional
If True, will convert the polynomial coefficients for the fit baseline to
a form that fits the input x_data and return them in the params dictionary.
Default is False, since the conversion takes time.
conserve_memory : bool, optional
If False, will cache the distance-weighted kernels for each value
in `x_data` on the first iteration and reuse them on subsequent iterations to
save time. The shape of the array of kernels is (len(`x_data`), `total_points`).
If True (default), will recalculate the kernels each iteration, which uses very
        little memory, but is slower. Can usually set to False unless `x_data` and `total_points`
        are quite large and the function causes memory issues when caching the kernels. If
numba is installed, there is no significant time difference since the calculations are
sped up.
delta : float, optional
If `delta` is > 0, will skip all but the last x-value in the range x_last + `delta`,
where x_last is the last x-value to be fit using weighted least squares, and instead
use linear interpolation to calculate the fit for those x-values (same behavior as in
statsmodels [14]_ and Cleveland's original Fortran lowess implementation [15]_).
Fits all x-values if `delta` is <= 0. Default is 0.0. Note that `x_data` is scaled to
fit in the range [-1, 1], so `delta` should likewise be scaled. For example, if the
desired `delta` value was ``0.01 * (max(x_data) - min(x_data))``, then the
correctly scaled `delta` would be 0.02 (ie. ``0.01 * (1 - (-1))``).
Returns
-------
baseline : numpy.ndarray, shape (N,)
The calculated baseline.
params : dict
A dictionary with the following items:
* 'weights': numpy.ndarray, shape (N,)
The weight array used for fitting the data. Does NOT contain the
individual distance-weighted kernels for each x-value.
* 'tol_history': numpy.ndarray
An array containing the calculated tolerance values for
each iteration. The length of the array is the number of iterations
completed. If the last value in the array is greater than the input
`tol` value, then the function did not converge.
* 'coef': numpy.ndarray, shape (N, poly_order + 1)
Only if `return_coef` is True. The array of polynomial parameters
for the baseline, in increasing order. Can be used to create a polynomial
using numpy.polynomial.polynomial.Polynomial(). If `delta` is > 0, the
coefficients for any skipped x-value will all be 0.
Raises
------
ValueError
Raised if the number of points per window for the fitting is less than
`poly_order` + 1 or greater than the total number of points.
Notes
-----
The iterative, robust, aspect of the fitting can be achieved either through
reweighting based on the residuals (the typical usage), or thresholding the
fit data based on the residuals, as proposed by [11]_, similar to the modpoly
and imodpoly techniques.
In baseline literature, this procedure is sometimes called "rbe", meaning
"robust baseline estimate".
References
----------
.. [9] <NAME>., et al. Baseline subtraction using robust local
regression estimation. J. Quantitative Spectroscopy and Radiative
Transfer, 2001, 68, 179-193.
.. [10] <NAME>. Robust locally weighted regression and smoothing
scatterplots. Journal of the American Statistical Association,
1979, 74(368), 829-836.
.. [11] <NAME>. Comparison of Several Methods of Chromatographic
Baseline Removal with a New Approach Based on Quantile Regression.
Chromatographia, 2011, 73, 721-731.
.. [12] <NAME>., et al. Baseline correction by improved iterative polynomial
fitting with automatic threshold. Chemometrics and Intelligent
Laboratory Systems, 2006, 82, 59-65.
.. [13] <NAME>., et al. Automated method for subtraction of fluorescence
from biological raman spectra. Applied Spectroscopy, 2003, 57(11),
1363-1367.
.. [14] https://github.com/statsmodels/statsmodels.
.. [15] https://www.netlib.org/go (lowess.f is the file).
"""
y, x, weight_array, original_domain = _setup_polynomial(data, x_data, weights, poly_order)
num_x = x.shape[0]
if total_points is None:
total_points = ceil(fraction * num_x)
if total_points < poly_order + 1:
        raise ValueError('total points must be at least polynomial order + 1')
elif total_points > num_x:
raise ValueError((
'points per window is higher than total number of points; lower either '
'"fraction" or "total_points"'
))
elif poly_order > 2:
warnings.warn(
('polynomial orders greater than 2 can have numerical issues;'
' consider using a polynomial order of 1 or 2 instead'), ParameterWarning
)
sort_x = x_data is not None
if sort_x:
sort_order = np.argsort(x, kind='mergesort') # to ensure x is increasing
x = x[sort_order]
y = y[sort_order]
weight_array = weight_array[sort_order]
if use_original:
y0 = y
# find the indices for fitting beforehand so that the fitting can be done
# in parallel; cast delta as float so numba does not have to compile for
# both int and float
windows, fits, skips = _determine_fits(x, num_x, total_points, float(delta))
# np.polynomial.polynomial.polyvander returns a Fortran-ordered array, which
# when matrix multiplied with the C-ordered coefficient array gives a warning
# when using numba, so convert Vandermonde matrix to C-ordering.
vander = np.ascontiguousarray(_get_vander(x, poly_order, calc_pinv=False))
baseline = y
coefs = np.zeros((num_x, poly_order + 1))
tol_history = np.empty(max_iter + 1)
sqrt_w = np.sqrt(weight_array)
# do max_iter + 1 since a max_iter of 0 would return y as baseline otherwise
for i in range(max_iter + 1):
baseline_old = baseline
if conserve_memory:
baseline = _loess_low_memory(
x, y, sqrt_w, coefs, vander, num_x, windows, fits
)
elif i == 0:
kernels, baseline = _loess_first_loop(
x, y, sqrt_w, coefs, vander, total_points, num_x, windows, fits
)
else:
baseline = _loess_nonfirst_loops(
y, sqrt_w, coefs, vander, kernels, windows, num_x, fits
)
_fill_skips(x, baseline, skips)
calc_difference = relative_difference(baseline_old, baseline)
tol_history[i] = calc_difference
if calc_difference < tol:
break
if use_threshold:
y = np.minimum(
y0 if use_original else y, baseline + num_std * np.std(y - baseline)
)
else:
residual = y - baseline
# TODO median_absolute_value can be 0 if more than half of residuals are
# 0 (perfect fit); can that ever really happen? if so, should prevent dividing by 0
sqrt_w = _tukey_square(
residual / _median_absolute_value(residual), scale, symmetric_weights
)
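            # _tukey_square presumably returns the square root of Tukey bisquare-style
            # weights computed from the scaled residuals, matching the sqrt-weight
            # convention (sqrt_w) used throughout this fit.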
params = {'weights': sqrt_w**2, 'tol_history': tol_history[:i + 1]}
if return_coef:
# TODO maybe leave out the coefficients from the rest of the calculations
# since they are otherwise unused, and just fit x vs baseline here; would
# save a little memory; is providing coefficients for loess even useful?
params['coef'] = np.array([_convert_coef(coef, original_domain) for coef in coefs])
if sort_x:
inverted_order = _inverted_sort(sort_order)
baseline = baseline[inverted_order]
params['weights'] = params['weights'][inverted_order]
if return_coef:
params['coef'] = params['coef'][inverted_order]
return baseline, params
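# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal call to loess() on synthetic data, showing how convergence can be checked
# from the returned parameters. The synthetic signal below is an assumption made purely
# for demonstration.
def _example_loess_usage(seed=0):
    np.random.seed(seed)
    x = np.linspace(-1, 1, 500)
    peak = np.exp(-(x / 0.1) ** 2)                    # a peak sitting on the baseline
    true_baseline = 0.5 + 0.3 * x                     # slowly varying background
    y = peak + true_baseline + np.random.normal(0, 0.01, x.size)
    fit_baseline, params = loess(y, x_data=x, fraction=0.3, max_iter=10)
    converged = params['tol_history'][-1] < 1e-3
    return fit_baseline, converged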
def quant_reg(data, x_data=None, poly_order=2, quantile=0.05, tol=1e-6, max_iter=250,
weights=None, eps=None, return_coef=False):
"""
Approximates the baseline of the data using quantile regression.
Parameters
----------
data : array-like, shape (N,)
The y-values of the measured data, with N data points.
x_data : array-like, shape (N,), optional
The x-values of the measured data. Default is None, which will create an
array from -1 to 1 with N points.
poly_order : int, optional
The polynomial order for fitting the baseline. Default is 2.
quantile : float, optional
The quantile at which to fit the baseline. Default is 0.05.
tol : float, optional
The exit criteria. Default is 1e-6. For extreme quantiles (`quantile` < 0.01
or `quantile` > 0.99), may need to use a lower value to get a good fit.
max_iter : int, optional
The maximum number of iterations. Default is 250. For extreme quantiles
(`quantile` < 0.01 or `quantile` > 0.99), may need to use a higher value to
ensure convergence.
weights : array-like, shape (N,), optional
The weighting array. If None (default), then will be an array with
size equal to N and all values set to 1.
eps : float, optional
A small value added to the square of the residual to prevent dividing by 0.
Default is None, which uses the square of the maximum-absolute-value of the
fit each iteration multiplied by 1e-6.
return_coef : bool, optional
If True, will convert the polynomial coefficients for the fit baseline to
a form that fits the input `x_data` and return them in the params dictionary.
Default is False, since the conversion takes time.
Returns
-------
baseline : numpy.ndarray, shape (N,)
The calculated baseline.
params : dict
A dictionary with the following items:
* 'weights': numpy.ndarray, shape (N,)
The weight array used for fitting the data.
* 'tol_history': numpy.ndarray
An array containing the calculated tolerance values for
each iteration. The length of the array is the number of iterations
completed. If the last value in the array is greater than the input
`tol` value, then the function did not converge.
* 'coef': numpy.ndarray, shape (poly_order + 1,)
Only if `return_coef` is True. The array of polynomial parameters
for the baseline, in increasing order. Can be used to create a
polynomial using numpy.polynomial.polynomial.Polynomial().
Raises
------
ValueError
Raised if `quantile` is not between 0 and 1.
Notes
-----
Application of quantile regression for baseline fitting as described in [16]_.
Performs quantile regression using iteratively reweighted least squares (IRLS)
as described in [17]_.
References
----------
.. [16] Komsta, Ł. Comparison of Several Methods of Chromatographic
Baseline Removal with a New Approach Based on Quantile Regression.
Chromatographia, 2011, 73, 721-731.
.. [17] <NAME>., et al. Simultaneous estimation of quantile curves using
quantile sheets. AStA Advances in Statistical Analysis, 2013, 97, 77-87.
"""
# TODO provide a way to estimate best poly_order based on AIC like in Komsta? could be
# useful for all polynomial methods; maybe could be an optimizer function
if not 0 < quantile < 1:
raise ValueError('quantile must be between 0 and 1.')
y, x, weight_array, original_domain, vander = _setup_polynomial(
data, x_data, weights, poly_order, return_vander=True
)
# estimate first iteration using least squares
coef = np.linalg.lstsq(vander * weight_array[:, None], y * weight_array, None)[0]
baseline = vander @ coef
tol_history = np.empty(max_iter)
for i in range(max_iter):
baseline_old = baseline
weight_array = np.sqrt(_weighting._quantile(y, baseline, quantile, eps))
coef = np.linalg.lstsq(vander * weight_array[:, None], y * weight_array, None)[0]
baseline = vander @ coef
# relative_difference(baseline_old, baseline, 1) gives nearly same result and
# the l2 norm is faster to calculate, so use that instead of l1 norm
calc_difference = relative_difference(baseline_old, baseline)
tol_history[i] = calc_difference
if calc_difference < tol:
break
params = {'weights': weight_array**2, 'tol_history': tol_history[:i + 1]}
if return_coef:
params['coef'] = _convert_coef(coef, original_domain)
return baseline, params
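# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Fits a low quantile of synthetic data as the baseline; the data construction is an
# assumption made only for demonstration.
def _example_quant_reg_usage(seed=0):
    np.random.seed(seed)
    x = np.linspace(-1, 1, 300)
    y = (2.0 + x) + np.exp(-((x - 0.2) / 0.05) ** 2) + np.random.normal(0, 0.01, x.size)
    baseline, params = quant_reg(y, x_data=x, poly_order=2, quantile=0.05)
    return baseline, params['tol_history'].size  # number of iterations actually run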
def goldindec(data, x_data=None, poly_order=2, tol=1e-3, max_iter=250, weights=None,
cost_function='asymmetric_indec', peak_ratio=0.5, alpha_factor=0.99,
tol_2=1e-3, tol_3=1e-6, max_iter_2=100, return_coef=False):
"""
Fits a polynomial baseline using a non-quadratic cost function.
The non-quadratic cost functions penalize residuals with larger values,
giving a more robust fit compared to normal least-squares.
Parameters
----------
data : array-like, shape (N,)
The y-values of the measured data, with N data points.
x_data : array-like, shape (N,), optional
The x-values of the measured data. Default is None, which will create an
array from -1 to 1 with N points.
poly_order : int, optional
The polynomial order for fitting the baseline. Default is 2.
tol : float, optional
The exit criteria for the fitting with a given threshold value. Default is 1e-3.
max_iter : int, optional
The maximum number of iterations for fitting a threshold value. Default is 250.
weights : array-like, shape (N,), optional
The weighting array. If None (default), then will be an array with
size equal to N and all values set to 1.
cost_function : str, optional
The non-quadratic cost function to minimize. Unlike :func:`.penalized_poly`,
this function only works with asymmetric cost functions, so the symmetry prefix
        ('a' or 'asymmetric') is optional (e.g. 'indec' and 'a_indec' are the same). Default
        is 'asymmetric_indec'. Available methods, and their associated reference, are:
            * 'asymmetric_indec' [18]_
            * 'asymmetric_truncated_quadratic' [19]_
            * 'asymmetric_huber' [19]_
peak_ratio : float, optional
A value between 0 and 1 that designates how many points in the data belong
to peaks. Values are valid within ~10% of the actual peak ratio. Default is 0.5.
alpha_factor : float, optional
A value between 0 and 1 that controls the value of the penalty. Default is
0.99. Typically should not need to change this value.
tol_2 : float, optional
The exit criteria for the difference between the optimal up-down ratio (number of
points above 0 in the residual compared to number of points below 0) and the up-down
ratio for a given threshold value. Default is 1e-3.
tol_3 : float, optional
The exit criteria for the relative change in the threshold value. Default is 1e-6.
    max_iter_2 : int, optional
        The maximum number of iterations for iterating between different threshold values.
Default is 100.
return_coef : bool, optional
If True, will convert the polynomial coefficients for the fit baseline to
a form that fits the input x_data and return them in the params dictionary.
Default is False, since the conversion takes time.
Returns
-------
baseline : numpy.ndarray, shape (N,)
The calculated baseline.
params : dict
A dictionary with the following items:
* 'weights': numpy.ndarray, shape (N,)
The weight array used for fitting the data.
* 'tol_history': numpy.ndarray, shape (J, K)
An array containing the calculated tolerance values for each iteration
            of both threshold values and fit values. Index 0 holds the tolerance values
            for the difference in up-down ratios, index 1 holds the tolerance values for
            the relative change in the threshold, and indices >= 2 are the tolerance values
for each fit. All values that were not used in fitting have values of 0. Shape J
is 2 plus the number of iterations for the threshold to converge (related to
`max_iter_2`, `tol_2`, `tol_3`), and shape K is the maximum of the number of
iterations for the threshold and the maximum number of iterations for all of
the fits of the various threshold values (related to `max_iter` and `tol`).
* 'threshold' : float
The optimal threshold value. Could be used in :func:`.penalized_poly`
for fitting other similar data.
* 'coef': numpy.ndarray, shape (poly_order + 1,)
Only if `return_coef` is True. The array of polynomial parameters
for the baseline, in increasing order. Can be used to create a
polynomial using numpy.polynomial.polynomial.Polynomial().
Raises
------
ValueError
Raised if `alpha_factor` or `peak_ratio` are not between 0 and 1, or if the
specified cost function is symmetric.
References
----------
.. [18] <NAME>., et al. Goldindec: A Novel Algorithm for Raman Spectrum Baseline
Correction. Applied Spectroscopy, 2015, 69(7), 834-842.
.. [19] <NAME>., et al. Background removal from spectra by designing and
minimising a non-quadratic cost function. Chemometrics and Intelligent
Laboratory Systems, 2005, 76(2), 121–133.
"""
if not 0 < alpha_factor <= 1:
raise ValueError('alpha_factor must be between 0 and 1')
elif not 0 < peak_ratio < 1:
raise ValueError('peak_ratio must be between 0 and 1')
try:
symmetric_loss, method = _identify_loss_method(cost_function)
except ValueError: # do not require a prefix since cost must be asymmetric
symmetric_loss, method = _identify_loss_method('a_' + cost_function)
if symmetric_loss:
# symmetric cost functions don't work due to how the up-down ratio vs
# peak_ratio function was created in the reference; in theory, could simulate
# spectra with both positive and negative peaks following the reference
# and build another empirical function, but would likely need to also
# add other parameters detailing the percent of positive vs negative peaks,
# etc., so it's not worth the effort
raise ValueError('goldindec only works for asymmetric cost functions')
loss_function = {
'huber': _huber_loss,
'truncated_quadratic': _truncated_quadratic_loss,
'indec': _indec_loss
}[method]
y, x, weight_array, original_domain, vander, pseudo_inverse = _setup_polynomial(
data, x_data, weights, poly_order, return_vander=True, return_pinv=True
)
num_y = y.shape[0]
up_down_ratio_goal = (
0.7679 + 11.2358 * peak_ratio - 39.7064 * peak_ratio**2 + 92.3583 * peak_ratio**3
)
# TODO reference states threshold must be <= 2 for half-quadratic minimization to
# be valid for indec cost function, and normalized y so that threshold is always <= 2;
# however, it seems to work fine without normalization; just be aware in case errors
# occur, may have to normalize y in both this function and penalized_poly
sqrt_w = np.sqrt(weight_array)
y_fit = sqrt_w * y
coef = np.dot(pseudo_inverse, y_fit)
initial_baseline = np.dot(vander, coef)
a = 0
# reference used b=1, but normalized y before fitting; instead, set b as max of
# initial residual
b = abs((y - initial_baseline).max())
threshold = a + 0.618 * (b - a)
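    # 0.618 is approximately the golden-ratio conjugate; the outer loop below narrows
    # the interval [a, b] in a golden-section-style search for the optimal threshold.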
loss_kwargs = {
'threshold': threshold, 'alpha_factor': alpha_factor,
'symmetric': symmetric_loss
}
# have to use zeros rather than empty for tol_history since each inner fit may
# have a different number of iterations
tol_history = np.zeros((max_iter_2 + 2, max(max_iter, max_iter_2)))
j_max = 0
for i in range(max_iter_2):
baseline = initial_baseline
for j in range(max_iter):
baseline_old = baseline
coef = np.dot(
pseudo_inverse, y_fit + loss_function(y_fit - sqrt_w * baseline, **loss_kwargs)
)
baseline = np.dot(vander, coef)
calc_difference = relative_difference(baseline_old, baseline)
tol_history[i + 2, j] = calc_difference
if calc_difference < tol:
break
if j > j_max:
j_max = j
up_count = (y > baseline).sum()
up_down_ratio = up_count / max(1, num_y - up_count)
calc_difference = up_down_ratio - up_down_ratio_goal
tol_history[0, i] = calc_difference
if calc_difference > tol_2:
a = threshold
elif calc_difference < -tol_2:
b = threshold
else:
break
threshold = a + 0.618 * (b - a)
# this exit criteria was not stated in the reference, but the change in threshold
# becomes zero fairly quickly, so need to also exit rather than needlessly
# continuing to calculate with the same threshold value
calc_difference = relative_difference(loss_kwargs['threshold'], threshold)
tol_history[1, i] = calc_difference
if calc_difference < tol_3:
break
loss_kwargs['threshold'] = threshold
params = {
'weights': weight_array, 'tol_history': tol_history[:i + 3, :max(i, j_max) + 1],
'threshold': loss_kwargs['threshold']
}
if return_coef:
params['coef'] = _convert_coef(coef, original_domain)
return baseline, params
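# --- Illustrative follow-on sketch (added for clarity; not part of the original module) ---
# Reuses the optimal threshold found by goldindec with penalized_poly, as suggested in
# the docstring above. This assumes penalized_poly (defined elsewhere in this module)
# accepts `cost_function` and `threshold` keywords; the data arguments are placeholders.
def _example_reuse_goldindec_threshold(y_reference, y_new, x=None):
    _, params = goldindec(y_reference, x_data=x, cost_function='a_indec', peak_ratio=0.5)
    baseline_new, _ = penalized_poly(
        y_new, x_data=x, cost_function='a_indec', threshold=params['threshold']
    )
    return baseline_new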
|
[
"numpy.ones_like",
"numpy.abs",
"numpy.sqrt",
"numpy.minimum",
"math.ceil",
"numpy.argsort",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.empty",
"numpy.sign",
"numpy.linalg.lstsq",
"numpy.std",
"warnings.warn",
"numpy.maximum",
"numpy.arange"
] |
[((9204, 9225), 'numpy.sqrt', 'np.sqrt', (['weight_array'], {}), '(weight_array)\n', (9211, 9225), True, 'import numpy as np\n'), ((9274, 9308), 'numpy.dot', 'np.dot', (['pseudo_inverse', '(sqrt_w * y)'], {}), '(pseudo_inverse, sqrt_w * y)\n', (9280, 9308), True, 'import numpy as np\n'), ((9324, 9344), 'numpy.dot', 'np.dot', (['vander', 'coef'], {}), '(vander, coef)\n', (9330, 9344), True, 'import numpy as np\n'), ((9655, 9673), 'numpy.empty', 'np.empty', (['max_iter'], {}), '(max_iter)\n', (9663, 9673), True, 'import numpy as np\n'), ((13785, 13806), 'numpy.sqrt', 'np.sqrt', (['weight_array'], {}), '(weight_array)\n', (13792, 13806), True, 'import numpy as np\n'), ((13855, 13889), 'numpy.dot', 'np.dot', (['pseudo_inverse', '(sqrt_w * y)'], {}), '(pseudo_inverse, sqrt_w * y)\n', (13861, 13889), True, 'import numpy as np\n'), ((13905, 13925), 'numpy.dot', 'np.dot', (['vander', 'coef'], {}), '(vander, coef)\n', (13911, 13925), True, 'import numpy as np\n'), ((13942, 13962), 'numpy.std', 'np.std', (['(y - baseline)'], {}), '(y - baseline)\n', (13948, 13962), True, 'import numpy as np\n'), ((14167, 14185), 'numpy.empty', 'np.empty', (['max_iter'], {}), '(max_iter)\n', (14175, 14185), True, 'import numpy as np\n'), ((26700, 26721), 'numpy.sqrt', 'np.sqrt', (['weight_array'], {}), '(weight_array)\n', (26707, 26721), True, 'import numpy as np\n'), ((26753, 26778), 'numpy.dot', 'np.dot', (['pseudo_inverse', 'y'], {}), '(pseudo_inverse, y)\n', (26759, 26778), True, 'import numpy as np\n'), ((26794, 26814), 'numpy.dot', 'np.dot', (['vander', 'coef'], {}), '(vander, coef)\n', (26800, 26814), True, 'import numpy as np\n'), ((26833, 26851), 'numpy.empty', 'np.empty', (['max_iter'], {}), '(max_iter)\n', (26841, 26851), True, 'import numpy as np\n'), ((33845, 33860), 'numpy.empty', 'np.empty', (['num_x'], {}), '(num_x)\n', (33853, 33860), True, 'import numpy as np\n'), ((36211, 36242), 'numpy.empty', 'np.empty', (['(num_x, total_points)'], {}), '((num_x, total_points))\n', (36219, 36242), True, 'import numpy as np\n'), ((36258, 36273), 'numpy.empty', 'np.empty', (['num_x'], {}), '(num_x)\n', (36266, 36273), True, 'import numpy as np\n'), ((38394, 38409), 'numpy.empty', 'np.empty', (['num_x'], {}), '(num_x)\n', (38402, 38409), True, 'import numpy as np\n'), ((41977, 42012), 'numpy.empty', 'np.empty', (['(num_x, 2)'], {'dtype': 'np.intp'}), '((num_x, 2), dtype=np.intp)\n', (41985, 42012), True, 'import numpy as np\n'), ((52749, 52782), 'numpy.zeros', 'np.zeros', (['(num_x, poly_order + 1)'], {}), '((num_x, poly_order + 1))\n', (52757, 52782), True, 'import numpy as np\n'), ((52801, 52823), 'numpy.empty', 'np.empty', (['(max_iter + 1)'], {}), '(max_iter + 1)\n', (52809, 52823), True, 'import numpy as np\n'), ((52837, 52858), 'numpy.sqrt', 'np.sqrt', (['weight_array'], {}), '(weight_array)\n', (52844, 52858), True, 'import numpy as np\n'), ((58879, 58897), 'numpy.empty', 'np.empty', (['max_iter'], {}), '(max_iter)\n', (58887, 58897), True, 'import numpy as np\n'), ((66570, 66591), 'numpy.sqrt', 'np.sqrt', (['weight_array'], {}), '(weight_array)\n', (66577, 66591), True, 'import numpy as np\n'), ((66627, 66656), 'numpy.dot', 'np.dot', (['pseudo_inverse', 'y_fit'], {}), '(pseudo_inverse, y_fit)\n', (66633, 66656), True, 'import numpy as np\n'), ((66680, 66700), 'numpy.dot', 'np.dot', (['vander', 'coef'], {}), '(vander, coef)\n', (66686, 66700), True, 'import numpy as np\n'), ((9546, 9567), 'numpy.sqrt', 'np.sqrt', (['weight_array'], {}), '(weight_array)\n', (9553, 9567), True, 'import numpy as np\n'), ((9748, 
9795), 'numpy.minimum', 'np.minimum', (['(y0 if use_original else y)', 'baseline'], {}), '(y0 if use_original else y, baseline)\n', (9758, 9795), True, 'import numpy as np\n'), ((9811, 9845), 'numpy.dot', 'np.dot', (['pseudo_inverse', '(sqrt_w * y)'], {}), '(pseudo_inverse, sqrt_w * y)\n', (9817, 9845), True, 'import numpy as np\n'), ((9865, 9885), 'numpy.dot', 'np.dot', (['vander', 'coef'], {}), '(vander, coef)\n', (9871, 9885), True, 'import numpy as np\n'), ((14058, 14079), 'numpy.sqrt', 'np.sqrt', (['weight_array'], {}), '(weight_array)\n', (14065, 14079), True, 'import numpy as np\n'), ((14228, 14297), 'numpy.minimum', 'np.minimum', (['(y0 if use_original else y)', '(baseline + num_std * deviation)'], {}), '(y0 if use_original else y, baseline + num_std * deviation)\n', (14238, 14297), True, 'import numpy as np\n'), ((14313, 14347), 'numpy.dot', 'np.dot', (['pseudo_inverse', '(sqrt_w * y)'], {}), '(pseudo_inverse, sqrt_w * y)\n', (14319, 14347), True, 'import numpy as np\n'), ((14367, 14387), 'numpy.dot', 'np.dot', (['vander', 'coef'], {}), '(vander, coef)\n', (14373, 14387), True, 'import numpy as np\n'), ((14412, 14432), 'numpy.std', 'np.std', (['(y - baseline)'], {}), '(y - baseline)\n', (14418, 14432), True, 'import numpy as np\n'), ((20273, 20290), 'numpy.sign', 'np.sign', (['residual'], {}), '(residual)\n', (20280, 20290), True, 'import numpy as np\n'), ((27028, 27048), 'numpy.dot', 'np.dot', (['vander', 'coef'], {}), '(vander, coef)\n', (27034, 27048), True, 'import numpy as np\n'), ((28659, 28691), 'numpy.maximum', 'np.maximum', (['(0)', '(1 - inner * inner)'], {}), '(0, 1 - inner * inner)\n', (28669, 28691), True, 'import numpy as np\n'), ((28720, 28742), 'numpy.ones_like', 'np.ones_like', (['residual'], {}), '(residual)\n', (28732, 28742), True, 'import numpy as np\n'), ((28834, 28866), 'numpy.maximum', 'np.maximum', (['(0)', '(1 - inner * inner)'], {}), '(0, 1 - inner * inner)\n', (28844, 28866), True, 'import numpy as np\n'), ((34084, 34112), 'numpy.abs', 'np.abs', (['(x[left:right] - x[i])'], {}), '(x[left:right] - x[i])\n', (34090, 34112), True, 'import numpy as np\n'), ((34293, 34338), 'numpy.sqrt', 'np.sqrt', (['(difference * difference * difference)'], {}), '(difference * difference * difference)\n', (34300, 34338), True, 'import numpy as np\n'), ((36497, 36525), 'numpy.abs', 'np.abs', (['(x[left:right] - x[i])'], {}), '(x[left:right] - x[i])\n', (36503, 36525), True, 'import numpy as np\n'), ((36706, 36751), 'numpy.sqrt', 'np.sqrt', (['(difference * difference * difference)'], {}), '(difference * difference * difference)\n', (36713, 36751), True, 'import numpy as np\n'), ((41141, 41171), 'numpy.empty', 'np.empty', (['num_x'], {'dtype': 'np.intp'}), '(num_x, dtype=np.intp)\n', (41149, 41171), True, 'import numpy as np\n'), ((41233, 41268), 'numpy.empty', 'np.empty', (['(num_x, 2)'], {'dtype': 'np.intp'}), '((num_x, 2), dtype=np.intp)\n', (41241, 41268), True, 'import numpy as np\n'), ((41928, 41961), 'numpy.array', 'np.array', (['[[0, 0]]'], {'dtype': 'np.intp'}), '([[0, 0]], dtype=np.intp)\n', (41936, 41961), True, 'import numpy as np\n'), ((51319, 51341), 'math.ceil', 'ceil', (['(fraction * num_x)'], {}), '(fraction * num_x)\n', (51323, 51341), False, 'from math import ceil\n'), ((51948, 51979), 'numpy.argsort', 'np.argsort', (['x'], {'kind': '"""mergesort"""'}), "(x, kind='mergesort')\n", (51958, 51979), True, 'import numpy as np\n'), ((58757, 58828), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['(vander * weight_array[:, None])', '(y * weight_array)', 'None'], {}), 
'(vander * weight_array[:, None], y * weight_array, None)\n', (58772, 58828), True, 'import numpy as np\n'), ((5537, 5558), 'numpy.sqrt', 'np.sqrt', (['weight_array'], {}), '(weight_array)\n', (5544, 5558), True, 'import numpy as np\n'), ((16471, 16487), 'numpy.abs', 'np.abs', (['residual'], {}), '(residual)\n', (16477, 16487), True, 'import numpy as np\n'), ((18482, 18498), 'numpy.abs', 'np.abs', (['residual'], {}), '(residual)\n', (18488, 18498), True, 'import numpy as np\n'), ((20224, 20240), 'numpy.abs', 'np.abs', (['residual'], {}), '(residual)\n', (20230, 20240), True, 'import numpy as np\n'), ((26555, 26564), 'numpy.std', 'np.std', (['y'], {}), '(y)\n', (26561, 26564), True, 'import numpy as np\n'), ((30109, 30123), 'numpy.abs', 'np.abs', (['values'], {}), '(values)\n', (30115, 30123), True, 'import numpy as np\n'), ((59056, 59127), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['(vander * weight_array[:, None])', '(y * weight_array)', 'None'], {}), '(vander * weight_array[:, None], y * weight_array, None)\n', (59071, 59127), True, 'import numpy as np\n'), ((67532, 67552), 'numpy.dot', 'np.dot', (['vander', 'coef'], {}), '(vander, coef)\n', (67538, 67552), True, 'import numpy as np\n'), ((16615, 16632), 'numpy.sign', 'np.sign', (['residual'], {}), '(residual)\n', (16622, 16632), True, 'import numpy as np\n'), ((41597, 41613), 'numpy.arange', 'np.arange', (['num_x'], {}), '(num_x)\n', (41606, 41613), True, 'import numpy as np\n'), ((51693, 51849), 'warnings.warn', 'warnings.warn', (['"""polynomial orders greater than 2 can have numerical issues; consider using a polynomial order of 1 or 2 instead"""', 'ParameterWarning'], {}), "(\n 'polynomial orders greater than 2 can have numerical issues; consider using a polynomial order of 1 or 2 instead'\n , ParameterWarning)\n", (51706, 51849), False, 'import warnings\n'), ((9499, 9519), 'numpy.std', 'np.std', (['(y - baseline)'], {}), '(y - baseline)\n', (9505, 9519), True, 'import numpy as np\n'), ((20651, 20692), 'numpy.maximum', 'np.maximum', (['(2 * residual ** 2)', '_MIN_FLOAT'], {}), '(2 * residual ** 2, _MIN_FLOAT)\n', (20661, 20692), True, 'import numpy as np\n'), ((53792, 53812), 'numpy.std', 'np.std', (['(y - baseline)'], {}), '(y - baseline)\n', (53798, 53812), True, 'import numpy as np\n')]
|
"""
Running operational space control with the PyGame display, using an exponential
additive signal to push away from joint limits.
The target location can be moved by clicking on the background.
"""
import numpy as np
from abr_control.arms import threejoint as arm
# from abr_control.arms import twojoint as arm
from abr_control.interfaces import PyGame
from abr_control.controllers import OSC, AvoidJointLimits, Damping
print('\nClick to move the target.\n')
# initialize our robot config
robot_config = arm.Config(use_cython=True)
# create our arm simulation
arm_sim = arm.ArmSim(robot_config)
avoid = AvoidJointLimits(
robot_config,
min_joint_angles=[np.pi/5.0]*robot_config.N_JOINTS,
max_joint_angles=[np.pi/2.0]*robot_config.N_JOINTS,
max_torque=[100.0]*robot_config.N_JOINTS)
# damp the movements of the arm
damping = Damping(robot_config, kv=10)
# create an operational space controller
ctrlr = OSC(robot_config, kp=100, null_controllers=[avoid, damping],
# control (x, y) out of [x, y, z, alpha, beta, gamma]
ctrlr_dof=[True, True, False, False, False, False])
def on_click(self, mouse_x, mouse_y):
self.target[0] = self.mouse_x
self.target[1] = self.mouse_y
# create our interface
interface = PyGame(robot_config, arm_sim,
dt=.001, on_click=on_click,
q_init=[np.pi/4, np.pi/2, np.pi/2])
interface.connect()
# create a target [x, y, z]
target_xyz = [0, 2, 0]
# create a target orientation [alpha, beta, gamma]
target_angles = [0, 0, 0]
interface.set_target(target_xyz)
try:
print('\nSimulation starting...\n')
count = 0
while 1:
# get arm feedback
feedback = interface.get_feedback()
hand_xyz = robot_config.Tx('EE', feedback['q'])
target = np.hstack([target_xyz, target_angles])
# generate an operational space control signal
u = ctrlr.generate(
q=feedback['q'],
dq=feedback['dq'],
target=target,
)
new_target_xy = interface.get_mousexy()
if new_target_xy is not None:
target_xyz[:2] = new_target_xy
interface.set_target(target_xyz)
# apply the control signal, step the sim forward
interface.send_forces(
u, update_display=True if count % 20 == 0 else False)
count += 1
finally:
# stop and reset the simulation
interface.disconnect()
print('Simulation terminated...')
|
[
"numpy.hstack",
"abr_control.controllers.OSC",
"abr_control.interfaces.PyGame",
"abr_control.controllers.AvoidJointLimits",
"abr_control.arms.threejoint.Config",
"abr_control.controllers.Damping",
"abr_control.arms.threejoint.ArmSim"
] |
[((509, 536), 'abr_control.arms.threejoint.Config', 'arm.Config', ([], {'use_cython': '(True)'}), '(use_cython=True)\n', (519, 536), True, 'from abr_control.arms import threejoint as arm\n'), ((575, 599), 'abr_control.arms.threejoint.ArmSim', 'arm.ArmSim', (['robot_config'], {}), '(robot_config)\n', (585, 599), True, 'from abr_control.arms import threejoint as arm\n'), ((609, 804), 'abr_control.controllers.AvoidJointLimits', 'AvoidJointLimits', (['robot_config'], {'min_joint_angles': '([np.pi / 5.0] * robot_config.N_JOINTS)', 'max_joint_angles': '([np.pi / 2.0] * robot_config.N_JOINTS)', 'max_torque': '([100.0] * robot_config.N_JOINTS)'}), '(robot_config, min_joint_angles=[np.pi / 5.0] *\n robot_config.N_JOINTS, max_joint_angles=[np.pi / 2.0] * robot_config.\n N_JOINTS, max_torque=[100.0] * robot_config.N_JOINTS)\n', (625, 804), False, 'from abr_control.controllers import OSC, AvoidJointLimits, Damping\n'), ((845, 873), 'abr_control.controllers.Damping', 'Damping', (['robot_config'], {'kv': '(10)'}), '(robot_config, kv=10)\n', (852, 873), False, 'from abr_control.controllers import OSC, AvoidJointLimits, Damping\n'), ((923, 1040), 'abr_control.controllers.OSC', 'OSC', (['robot_config'], {'kp': '(100)', 'null_controllers': '[avoid, damping]', 'ctrlr_dof': '[True, True, False, False, False, False]'}), '(robot_config, kp=100, null_controllers=[avoid, damping], ctrlr_dof=[\n True, True, False, False, False, False])\n', (926, 1040), False, 'from abr_control.controllers import OSC, AvoidJointLimits, Damping\n'), ((1258, 1363), 'abr_control.interfaces.PyGame', 'PyGame', (['robot_config', 'arm_sim'], {'dt': '(0.001)', 'on_click': 'on_click', 'q_init': '[np.pi / 4, np.pi / 2, np.pi / 2]'}), '(robot_config, arm_sim, dt=0.001, on_click=on_click, q_init=[np.pi / \n 4, np.pi / 2, np.pi / 2])\n', (1264, 1363), False, 'from abr_control.interfaces import PyGame\n'), ((1793, 1831), 'numpy.hstack', 'np.hstack', (['[target_xyz, target_angles]'], {}), '([target_xyz, target_angles])\n', (1802, 1831), True, 'import numpy as np\n')]
|
import itertools
from unittest import TestCase
import numpy as np
from utils.data import ArrayInfo, image_array_to_rgb
from utils.data.mappers import *
class ImageUtilsTestCase(TestCase):
def test_image_array_to_rgb(self):
np.random.seed(1234)
def f(batch_size, n_channels, channel_last, the_channel_last,
use_info, bit_depth, dequantize, scale_to):
shape = [31, 32]
if n_channels is not None:
if the_channel_last:
shape = shape + [n_channels]
else:
shape = [n_channels] + shape
the_info = ArrayInfo(
shape=shape, min_val=0, max_val=255, is_discrete=True,
n_discrete_vals=256, bit_depth=8)
x = np.random.randint(0, 256, size=batch_size + shape)
mappers = []
ans_mappers = None
if bit_depth not in (None, 8):
mappers.append(ReduceToBitDepth(bit_depth))
ans_mappers = ReduceToBitDepth(bit_depth)
if dequantize:
mappers.append(Dequantize(epsilon=1e-5))
if scale_to:
mappers.append(ScaleToRange(*scale_to))
if mappers:
m = ArrayMapperList(mappers)
y_the_info = m.fit(the_info)
y = m.transform(x)
else:
y_the_info = the_info
y = x
ans = x
if ans_mappers is not None:
ans_mappers.fit(the_info)
ans = ans_mappers.transform(ans)
if n_channels is None:
ans = np.reshape(ans, ans.shape + (1,))
elif not the_channel_last:
ans = np.transpose(
ans,
list(range(len(ans.shape) - 3)) + [-2, -1, -3]
)
info = y_the_info if use_info else None
out = image_array_to_rgb(y, info, channel_last)
self.assertEqual(out.dtype, np.uint8)
np.testing.assert_equal(out, ans)
for (batch_size, n_channels, channel_last, the_channel_last,
use_info, bit_depth, dequantize, scale_to) in itertools.product(
([], [7], [3, 4]),
(None, 1, 3),
(None, True, False),
(True, False),
(True, False),
(8, 5),
(True, False),
(None, (0, 1), (-1, 1),),
):
# skip inconsistent parameter combination
if (n_channels is None and channel_last is not None) or \
(channel_last is not None and channel_last != the_channel_last):
continue
if n_channels is None and batch_size:
continue
# use_info = False is not supported along with dequantize or bit-depth
if not use_info and (dequantize or bit_depth != 8):
continue
f(batch_size, n_channels, channel_last, the_channel_last,
use_info, bit_depth, dequantize, scale_to)
|
[
"numpy.reshape",
"numpy.testing.assert_equal",
"itertools.product",
"utils.data.ArrayInfo",
"utils.data.image_array_to_rgb",
"numpy.random.randint",
"numpy.random.seed"
] |
[((240, 260), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (254, 260), True, 'import numpy as np\n'), ((2216, 2375), 'itertools.product', 'itertools.product', (['([], [7], [3, 4])', '(None, 1, 3)', '(None, True, False)', '(True, False)', '(True, False)', '(8, 5)', '(True, False)', '(None, (0, 1), (-1, 1))'], {}), '(([], [7], [3, 4]), (None, 1, 3), (None, True, False), (\n True, False), (True, False), (8, 5), (True, False), (None, (0, 1), (-1, 1))\n )\n', (2233, 2375), False, 'import itertools\n'), ((639, 741), 'utils.data.ArrayInfo', 'ArrayInfo', ([], {'shape': 'shape', 'min_val': '(0)', 'max_val': '(255)', 'is_discrete': '(True)', 'n_discrete_vals': '(256)', 'bit_depth': '(8)'}), '(shape=shape, min_val=0, max_val=255, is_discrete=True,\n n_discrete_vals=256, bit_depth=8)\n', (648, 741), False, 'from utils.data import ArrayInfo, image_array_to_rgb\n'), ((787, 837), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)'], {'size': '(batch_size + shape)'}), '(0, 256, size=batch_size + shape)\n', (804, 837), True, 'import numpy as np\n'), ((1949, 1990), 'utils.data.image_array_to_rgb', 'image_array_to_rgb', (['y', 'info', 'channel_last'], {}), '(y, info, channel_last)\n', (1967, 1990), False, 'from utils.data import ArrayInfo, image_array_to_rgb\n'), ((2053, 2086), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['out', 'ans'], {}), '(out, ans)\n', (2076, 2086), True, 'import numpy as np\n'), ((1659, 1692), 'numpy.reshape', 'np.reshape', (['ans', '(ans.shape + (1,))'], {}), '(ans, ans.shape + (1,))\n', (1669, 1692), True, 'import numpy as np\n')]
|
## @ingroup Methods-Aerodynamics-Airfoil_Panel_Method
# panel_geometry.py
# Created: Mar 2021, <NAME>
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import SUAVE
from SUAVE.Core import Units
import numpy as np
# ----------------------------------------------------------------------
# panel_geometry.py
# ----------------------------------------------------------------------
## @ingroup Methods-Aerodynamics-Airfoil_Panel_Method
def panel_geometry(x,y,npanel,nalpha,nRe):
"""Computes airfoil surface panelization parameters for later use in
the computation of the matrix of influence coefficients.
Assumptions:
None
Source:
None
Inputs:
x - Vector of x coordinates of the surface nodes [unitless]
y - Vector of y coordinates of the surface nodes [unitless]
       npanel  - Number of panels on the airfoil                      [unitless]
       nalpha  - Number of angles of attack                           [unitless]
       nRe     - Number of Reynolds numbers                           [unitless]
Outputs:
l - Panel lengths [unitless]
       st      - np.sin(theta) for each panel                         [unitless]
       ct      - np.cos(theta) for each panel                         [unitless]
xbar - x-coordinate of the midpoint of each panel [unitless]
       ybar    - y-coordinate of the midpoint of each panel           [unitless]
       norm    - Unit normal vector components of each panel          [unitless]
Properties Used:
N/A
"""
# compute various geometrical quantities
l = np.sqrt((x[1:] -x[:-1])**2 +(y[1:] -y[:-1])**2)
st = (y[1:] -y[:-1])/l
ct = (x[1:] -x[:-1])/l
xbar = (x[1:] +x[:-1])/2
ybar = (y[1:] +y[:-1])/2
norm = np.zeros((npanel,2,nalpha,nRe))
norm[:,0,:,:] = -st
norm[:,1,:,:] = ct
return l,st,ct,xbar,ybar,norm
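# ----------------------------------------------------------------------
#   Illustrative usage (added for clarity; not part of the original source)
# ----------------------------------------------------------------------
def _example_panel_geometry_usage():
    """Hedged usage sketch: panelize a unit circle for one angle of attack and one
    Reynolds number. x and y are shaped (npanel+1, nalpha, nRe) so that the
    normal-vector assignment above broadcasts; the geometry is illustrative only."""
    npanel, nalpha, nRe = 8, 1, 1
    theta = np.linspace(0.0, 2.0 * np.pi, npanel + 1)
    x = np.cos(theta)[:, None, None] * np.ones((1, nalpha, nRe))
    y = np.sin(theta)[:, None, None] * np.ones((1, nalpha, nRe))
    return panel_geometry(x, y, npanel, nalpha, nRe)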
|
[
"numpy.zeros",
"numpy.sqrt"
] |
[((1778, 1832), 'numpy.sqrt', 'np.sqrt', (['((x[1:] - x[:-1]) ** 2 + (y[1:] - y[:-1]) ** 2)'], {}), '((x[1:] - x[:-1]) ** 2 + (y[1:] - y[:-1]) ** 2)\n', (1785, 1832), True, 'import numpy as np\n'), ((1962, 1996), 'numpy.zeros', 'np.zeros', (['(npanel, 2, nalpha, nRe)'], {}), '((npanel, 2, nalpha, nRe))\n', (1970, 1996), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import csv
import logging as logmodule
import math
import os
import sys
import tempfile
from collections import OrderedDict
# On OS X, the default backend will fail if you are not using a Framework build of Python,
# e.g. in a virtualenv. To avoid having to set MPLBACKEND each time we use Studio,
# automatically set the backend.
if sys.platform.startswith("darwin"):
import matplotlib
if matplotlib.get_backend().lower() == "macosx":
matplotlib.use('PS')
import matplotlib.pyplot as plt
import numpy as np
import pdfkit
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.files.storage import default_storage
from django.template.loader import get_template
from django.utils.translation import ngettext
from django.utils.translation import ugettext as _
from le_utils.constants import content_kinds
from pptx import Presentation
from pptx.dml.color import RGBColor
from pptx.enum.shapes import MSO_SHAPE
from pptx.enum.text import MSO_AUTO_SIZE
from pptx.enum.text import PP_ALIGN
from pptx.text.fonts import FontFiles
from pptx.util import Inches
from pptx.util import Pt
from pressurecooker.encodings import encode_file_to_base64
from pressurecooker.encodings import write_base64_to_file
from wordcloud import WordCloud
from contentcuration.models import Channel
from contentcuration.models import ContentKind
from contentcuration.utils.files import generate_thumbnail_from_channel
from contentcuration.utils.format import format_size
AUDIO_COLOR = "#F06292"
DOCUMENT_COLOR = "#FF3D00"
EXERCISE_COLOR = "#4DB6AC"
HTML_COLOR = "#FF8F00"
VIDEO_COLOR = "#283593"
plt.switch_backend('agg') # Avoid using tkinter as it causes server to stall (https://discuss.erpnext.com/t/wkhtmltopdf-error-erpnext-v7/14673/10)
os.environ['QT_QPA_PLATFORM'] = 'offscreen' # Must be set for tests to run (https://github.com/ipython/ipython/issues/10627)
logmodule.basicConfig()
logging = logmodule.getLogger(__name__)
def _monkeypatch_font_directories():
# python-pptx automatically fails on linux systems, so patch it
# https://github.com/scanny/python-pptx/blob/master/pptx/text/fonts.py#L57
def _can_i_haz_linux(cls):
if sys.platform.startswith("linux"):
return {
# python-pptx fails if Calibri isn't found, so reroute it to a local font file
('Calibri', False, False): '/usr/share/fonts/truetype/freefont/FreeSans.ttf',
}
else:
return FontFiles._old_installed_fonts()
FontFiles._old_installed_fonts = FontFiles._installed_fonts
FontFiles._installed_fonts = classmethod(_can_i_haz_linux)
_monkeypatch_font_directories()
class PDFMixin(object):
def write_pdf(self, template, context, filepath, extra_options=None):
template = get_template(template)
html = template.render(context)
options = {
"encoding": "utf-8-sig",
"quiet": "",
'page-size': 'Letter',
'margin-top': '0.5in',
'margin-right': '0.5in',
'margin-bottom': '0.5in',
'margin-left': '0.5in',
}
if extra_options:
options.update(extra_options)
pdfkit.from_string(html, filepath, options=options)
return filepath
class PPTMixin(object):
slide = None
width = float(10)
height = float(5.6)
def get_next_slide(self):
self.ppt.slide_width = Inches(self.width)
self.ppt.slide_height = Inches(self.height)
slide_layout = self.ppt.slide_layouts[6] # Get a blank slide
self.slide = self.ppt.slides.add_slide(slide_layout)
def get_rgb_from_hex(self, hexval):
hexval = hexval.upper()
mapping = {"0": 0, "1": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8, "9": 9, "A": 10, "B": 11, "C": 12, "D": 13, "E": 14, "F": 15}
red = mapping[hexval[1]] * 16 + mapping[hexval[2]]
green = mapping[hexval[3]] * 16 + mapping[hexval[4]]
blue = mapping[hexval[5]] * 16 + mapping[hexval[6]]
return RGBColor(red, green, blue)
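    # Worked example (illustrative, not in the original source):
    # get_rgb_from_hex("#F06292") -> RGBColor(240, 98, 146), since 0xF0 = 240,
    # 0x62 = 98 and 0x92 = 146.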
def generate_textbox(self, left=0, top=0, width=12, height=1, word_wrap=True):
left = Inches(left)
top = Inches(top)
width = Inches(width)
height = Inches(height)
textbox = self.slide.shapes.add_textbox(left, top, width, height)
textframe = textbox.text_frame
textframe.word_wrap = word_wrap
return textframe
def add_line(self, textframe, text, fontsize=12, bold=False, color=None, italic=False, space_after=0.5, space_before=0, append=True):
p = textframe.add_paragraph() if append else textframe.paragraphs[0]
p.space_after = Pt(space_after)
p.space_before = Pt(space_before)
self.add_run(p, text, fontsize=fontsize, bold=bold, color=color, italic=italic)
return p
def add_run(self, paragraph, text, fontsize=12, bold=False, color=None, italic=False):
run = paragraph.add_run()
run.font.size = Pt(fontsize)
run.font.name = 'Calibri'
run.font.bold = bold
run.font.italic = italic
run.font.color.rgb = color or RGBColor(0, 0, 0)
run.text = text
return run
def get_thumbnail_from_encoding(self, encoding):
filepath = self.get_write_to_path(ext="png")
write_base64_to_file(encoding, filepath)
return filepath
def add_picture(self, encoding, left=0, top=0, width=2, height=2):
filepath = self.get_write_to_path(ext="png")
write_base64_to_file(encoding, filepath)
return self.slide.shapes.add_picture(filepath, Inches(left), Inches(top), width=Inches(width), height=Inches(height))
def add_shape(self, shape=MSO_SHAPE.RECTANGLE, left=0, top=0, width=1, height=1, color=None):
shape = self.slide.shapes.add_shape(MSO_SHAPE.RECTANGLE, Inches(left), Inches(top), Inches(width), Inches(height))
shape.fill.solid()
shape.fill.fore_color.rgb = color or RGBColor(0, 0, 0)
shape.line.color.rgb = color or RGBColor(0, 0, 0)
shape.shadow.inherit = False
return shape
class CSVMixin(object):
def write_csv(self, filepath, rows, header=None):
with open(filepath, 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
if header:
writer.writerow(header)
for row in rows:
writer.writerow(row)
return filepath
class ExportWriter(object):
tempfiles = None
ext = None
messages = {
content_kinds.TOPIC: _("Topic"),
content_kinds.VIDEO: _("Video"),
content_kinds.AUDIO: _("Audio"),
content_kinds.EXERCISE: _("Exercise"),
content_kinds.DOCUMENT: _("Document"),
content_kinds.HTML5: _("Html App"),
content_kinds.TOPIC + "_plural": _("Topics"),
content_kinds.VIDEO + "_plural": _("Videos"),
content_kinds.AUDIO + "_plural": _("Audios"),
content_kinds.EXERCISE + "_plural": _("Exercises"),
content_kinds.DOCUMENT + "_plural": _("Documents"),
content_kinds.HTML5 + "_plural": _("Html Apps"),
"resource": _("Total Resource"),
"resource_plural": _("Total Resources")
}
def __init__(self, *args, **kwargs):
self.tempfiles = []
def pluralize_constant(self, count, constant, sep=' '):
return ngettext(
'%(count)d%(sep)s%(singular)s',
'%(count)d%(sep)s%(plural)s',
count
) % {
'count': count,
'singular': self.messages.get(constant),
'plural': self.messages.get(constant + "_plural"),
'sep': sep
}
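    # Worked example (illustrative): with the message strings above and English active,
    # pluralize_constant(3, content_kinds.VIDEO) renders as "3 Videos" and
    # pluralize_constant(1, content_kinds.VIDEO) as "1 Video".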
def get_write_to_path(self, ext=None):
ext = ext or self.ext
tempf = tempfile.NamedTemporaryFile(suffix=".{}".format(ext), delete=False)
self.tempfiles.append(tempf.name)
return tempf.name
def write(self, *args, **kwargs):
raise NotImplementedError("Must implement a write method for this class")
def delete_tempfiles(self):
for tempf in self.tempfiles:
os.unlink(tempf)
self.tempfiles = []
class ChannelDetailsWriter(ExportWriter):
color_selection = [AUDIO_COLOR, DOCUMENT_COLOR, EXERCISE_COLOR, HTML_COLOR, VIDEO_COLOR]
condensed_tag_limit = 10
size_divisor = 100000000
scale_text = [_("Very Small")] * 2 + [_("Small")] * 2 + [_("Average")] * 3 + [_("Large")] * 2 + [_("Very Large")] * 2
tagcloud_width = 600
tagcloud_height = None
def __init__(self, channel_ids, site=None, condensed=False, filename=None):
super(ChannelDetailsWriter, self).__init__()
self.channels = Channel.objects.filter(pk__in=channel_ids) # Implementing as a list so we can easily make this apply to bundles
self.filename = filename
if self.channels.count() == 1 and not filename:
self.filename = self.channels[0].pk
elif not filename:
raise ValueError("Must specify a filename if channel count is greater than 1")
self.site = site or Site.objects.get(id=1)
self.condensed = condensed
def write(self, *args, **kwargs):
try:
filepath = self.get_write_to_path()
self._write_details(filepath)
saved_filename = "{}.{}".format(self.filename, self.ext)
save_to_path = os.path.sep.join([settings.EXPORT_ROOT, saved_filename])
# Write file to default storage
with open(filepath, 'rb') as fobj:
default_storage.save(save_to_path, fobj)
return save_to_path
finally:
self.delete_tempfiles()
def _write_details(self, *args, **kwargs):
raise NotImplementedError("Must implement a write_export_file method for ChannelDetailsWriter subclasses")
def get_channel_data(self, channel):
data = channel.main_tree.get_details()
primarytoken = channel.secret_tokens.filter(is_primary=True).first()
data.update({
"channel": channel,
"site": 'https://' + self.site.domain,
"thumbnail": generate_thumbnail_from_channel(channel, dimension=300) or self.get_default_thumbnail_encoding(),
"tokens": [str(t) for t in channel.secret_tokens.exclude(token=channel.pk).filter(is_primary=True)],
"primarytoken": primarytoken and str(primarytoken),
"storage": self.get_storage_bar(data['resource_size']),
"size": self.get_size_bar(data['resource_count']),
"piechart": self.get_pie_chart(data['kind_count'], small_layout=self.condensed),
"tagcloud": data['tags'] and self.get_tagcloud(data['tags'], tag_limit=self.condensed and self.condensed_tag_limit),
})
return data
def get_default_thumbnail_encoding(self):
try:
filepath = os.path.join(settings.STATIC_ROOT, 'img', 'kolibri_placeholder.png')
return encode_file_to_base64(filepath, "data:image/png;base64,")
except IOError:
logging.warning("Could not find {}".format(filepath))
def get_storage_bar(self, size):
try:
size_index = int(max(1, min(math.ceil(math.log(size/self.size_divisor, 2)), 10)))
except ValueError:
size_index = 1
return {
"filled": range(size_index),
"text": self.scale_text[size_index],
"storage": "{} {}".format(*format_size(size)),
}
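    # Worked example (illustrative): with size_divisor = 1e8, a channel of ~800 MB gives
    # ceil(log2(8e8 / 1e8)) = 3, so `filled` is range(3) and the label is scale_text[3]
    # ("Small"); anything at or below ~200 MB clamps to index 1 ("Very Small").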
def get_size_bar(self, count):
try:
size_index = int(max(1, min(math.floor(math.log(count, 2.8)), 10)))
except ValueError:
size_index = 1
return {
"filled": size_index,
"scale": range(len(self.scale_text)),
"text": self.scale_text[size_index]
}
def get_pie_chart(self, counts, small_layout=False):
# Put kind counts in a usable format
kinds = list(ContentKind.objects.exclude(kind=content_kinds.TOPIC)
.order_by('kind')
.values_list('kind', flat=True))
kind_vals = {k: next((c['count'] for c in counts if c['kind_id'] == k), 0) for k in kinds}
kind_vals = OrderedDict(sorted(kind_vals.items()))
sizes = [v for k, v in kind_vals.items()]
total = max(sum(sizes), 1)
labels = [{
"text": ' {text} \n{p:.1f}%'.format(
text=self.pluralize_constant(v, k),
p=float(v)/total * 100.0
),
"count": v
} for k, v in kind_vals.items()]
# Create pie chart
fig, ax = plt.subplots(subplot_kw=dict(aspect="equal"))
wedgeprops = {"edgecolor": "white", 'linewidth': 1, 'linestyle': 'solid', 'antialiased': True}
wedges, texts = ax.pie(sizes, colors=self.color_selection, wedgeprops=wedgeprops)
bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=0.72)
kw = dict(xycoords='data', textcoords='data', arrowprops=dict(arrowstyle="-"),
bbox=bbox_props, zorder=0, va="center")
# Add popout labels for the larger layout
if not small_layout:
for i, p in enumerate(wedges):
if not labels[i]['count']:
continue
ang = (p.theta2 - p.theta1)/2. + p.theta1
y = np.sin(np.deg2rad(ang))
x = np.cos(np.deg2rad(ang))
connectionstyle = "angle,angleA=0,angleB={}".format(ang)
kw["arrowprops"].update({"connectionstyle": connectionstyle, "facecolor": "gray"})
ax.annotate(labels[i]['text'], xy=(x, y), xytext=(1.35*np.sign(x), 1.4*y),
ha="center", fontsize=10, **kw)
# Add legend for the smaller layout
else:
plt.legend(
loc='center left',
labels=[l['text'].split('\n')[0] for l in labels],
prop={'size': 20},
bbox_to_anchor=(0.7, 0.5),
bbox_transform=plt.gcf().transFigure
)
# Set up size variables for center circle
center_text_size = 25 if small_layout else 20 # Renders smaller, so text needs to be bigger
center_text_ratio = 0.75 if small_layout else 0.6
# Add center circle
circle = plt.Circle((0, 0), center_text_ratio, fc='white')
centertext = self.pluralize_constant(sum(sizes), "resource", sep='\n').split('\n')
plt.annotate(centertext[0], xy=(0, 0.1), fontsize=center_text_size, ha="center")
plt.annotate(centertext[1], xy=(0, -0.15), fontsize=center_text_size - 5, ha="center")
fig = plt.gcf()
fig.gca().add_artist(circle)
plt.tight_layout()
# Write chart to image and get encoding
filepath = self.get_write_to_path(ext="png")
plt.savefig(filepath, bbox_inches='tight')
plt.clf()
plt.close()
return encode_file_to_base64(filepath, "data:image/png;base64,")
def get_tagcloud(self, tags, tag_limit=None):
tag_limit = tag_limit or len(tags)
tags = sorted(tags, key=lambda kv: -kv['count'])[:tag_limit] # Get top X tags
tag_dict = {t['tag_name']: t['count'] for t in tags}
# Generate a word cloud image
wordcloud = WordCloud(
background_color='white',
min_font_size=10,
max_font_size=60,
width=self.tagcloud_width,
height=self.tagcloud_height or 30 * len(tags) / 2 + 10,
font_path=os.path.sep.join([settings.STATIC_ROOT, 'fonts', 'OpenSans-Regular.ttf'])
).generate_from_frequencies(tag_dict)
tag_counts = [t['count'] for t in tags]
step = (float(max(tag_counts))) / len(self.color_selection)
thresholds = list(reversed([int(round(i * step)) for i in range(len(self.color_selection))]))
def get_color(word, font_size, position, orientation, random_state=None, **kwargs):
index = next((i for i, t in enumerate(thresholds) if tag_dict[word] >= t), 0)
return self.color_selection[index]
wordcloud.recolor(color_func=get_color)
image = wordcloud.to_image()
filepath = self.get_write_to_path(ext="png")
image.save(filepath)
return encode_file_to_base64(filepath, "data:image/png;base64,")
class ChannelDetailsPDFWriter(ChannelDetailsWriter, PDFMixin):
ext = "pdf"
def __init__(self, channel_ids, condensed=False, **kwargs):
super(ChannelDetailsPDFWriter, self).__init__(channel_ids, condensed=condensed, **kwargs)
self.filename = "{} (condensed)".format(self.filename) if condensed else self.filename
self.template = "export/channel_detail_pdf_condensed.html" if condensed else "export/channel_detail_pdf.html"
def _write_details(self, filepath):
if self.channels.count() == 1:
footer_text = _("Page %(page)s of %(pagecount)s - %(channel)s can be found on Kolibri Studio, a product of Learning Equality") \
% {"page": "[page]", "pagecount": "[topage]", "channel": self.channels[0].name[:40]}
else:
footer_text = _("Page %(page)s of %(pagecount)s - These channels can be found on Kolibri Studio, a product of Learning Equality") \
% {"page": "[page]", "pagecount": "[topage]"}
data = {
"channels": [self.get_channel_data(channel) for channel in self.channels],
"colors": {
"audio": AUDIO_COLOR,
"document": DOCUMENT_COLOR,
"exercise": EXERCISE_COLOR,
"html": HTML_COLOR,
"video": VIDEO_COLOR,
}
}
try:
self.write_pdf(self.template, data, filepath, extra_options={"footer-center": footer_text, "footer-font-size": "9"})
except IOError as e:
logging.error("Unable to generate PDF, attempting without footer: {}".format(str(e)))
self.write_pdf(self.template, data, filepath)
class ChannelDetailsPPTWriter(ChannelDetailsWriter, PPTMixin):
ext = "pptx"
tagcloud_width = 430
tagcloud_height = 210
condensed_tag_limit = 20
gray = RGBColor(170, 170, 170)
def __init__(self, channel_ids, **kwargs):
super(ChannelDetailsPPTWriter, self).__init__(channel_ids, condensed=True, **kwargs)
def _write_details(self, filepath):
self.ppt = Presentation()
for channel in self.channels:
self.write_slide(channel)
# Save the file
self.ppt.save(filepath)
def write_slide(self, channel):
self.get_next_slide()
data = self.get_channel_data(channel)
next_line = 0.1 # Keeps track of last line (useful for setting the top location of shapes)
# Add thumbnail
padding = 0.2
thumbnail_width = 1.1
if data['thumbnail']:
thumbnail = self.add_picture(data['thumbnail'], padding, padding, thumbnail_width, thumbnail_width)
thumbnail.line.color.rgb = self.gray
thumbnail.line.width = Inches(0.01)
# Add title/description
title_left = thumbnail_width + padding * 2
title_height = 0.5
title_tf = self.generate_textbox(title_left, next_line, self.width - title_left, title_height)
title_tf.auto_size = MSO_AUTO_SIZE.TEXT_TO_FIT_SHAPE
self.add_line(title_tf, channel.name, fontsize=24, bold=True, append=False)
next_line += title_height
# Variables for section under title
includes_width = 2
size_height = 0.5
description_height = 1.25
size_width = self.width - title_left - includes_width - padding * 2
# Add language information
icon_width = 0.2
language_left = size_width + title_left + padding
language_icon_path = os.path.join(settings.STATIC_ROOT, 'img', 'export', 'language.png')
encoding = encode_file_to_base64(language_icon_path, 'data:image/png;base64,')
self.add_picture(encoding, language_left, next_line + 0.04, icon_width, icon_width)
includes_tf = self.generate_textbox(language_left + icon_width - 0.08, next_line, includes_width, size_height + description_height)
language = channel.language.native_name if channel.language else _("No language set")
self.add_line(includes_tf, " {}".format(language), append=False, bold=True)
if data['accessible_languages']:
self.add_line(includes_tf, _(" * Subtitles included"), fontsize=10, space_before=2)
# Add For Educators: Coach Content
if data['includes'].get('coach_content'):
coach_content = self.add_line(includes_tf, "✔", bold=True, color=self.get_rgb_from_hex(EXERCISE_COLOR), space_before=4)
self.add_run(coach_content, _(" Coach Content"))
# Add For Educators: Assessments
if data['includes'].get('exercises'):
assessments = self.add_line(includes_tf, "✔", bold=True, color=self.get_rgb_from_hex(EXERCISE_COLOR), space_before=4)
self.add_run(assessments, _(" Assessments"))
# Add size information
size_tf = self.generate_textbox(title_left, next_line, size_width, size_height)
size_bar = self.add_line(size_tf, "", append=False)
for i in data['size']['scale']:
self.add_run(size_bar, "▮", color=self.get_rgb_from_hex(EXERCISE_COLOR) if i < data['size']['filled'] else self.gray, fontsize=14)
self.add_line(size_tf, _("Channel size: %(size)s") % {"size": data['size']['text'].lower()}, fontsize=8, italic=True, color=self.gray)
next_line += size_height
# Add description
description_tf = self.generate_textbox(title_left, next_line, size_width, description_height)
self.add_line(description_tf, channel.description, color=self.gray, append=False)
description_tf.fit_text()
next_line += description_height + 0.1
# Add separator with headers
separator_height = 0.3
self.add_shape(left=0, top=next_line, width=self.width/2, height=separator_height, color=self.get_rgb_from_hex(EXERCISE_COLOR))
resource_header = self.generate_textbox(padding, next_line, self.width / 2 - padding, separator_height)
self.add_line(resource_header, _("Resource Breakdown"), bold=True, color=self.get_rgb_from_hex("#FFFFFF"), append=False)
self.add_shape(left=self.width/2, top=next_line, width=self.width/2, height=separator_height, color=self.get_rgb_from_hex("#595959"))
tag_header = self.generate_textbox(padding + self.width / 2 - padding, next_line, self.width / 2 - padding, separator_height)
self.add_line(tag_header, _("Most Common Tags"), bold=True, color=self.get_rgb_from_hex("#FFFFFF"), append=False)
next_line += separator_height + 0.05
# Add piechart
chart_height = 2.3
if data['resource_count']:
self.add_picture(data['piechart'], 0, next_line, self.width / 2 - 1, height=chart_height)
else:
empty_tf = self.generate_textbox(0, next_line, self.width / 2, chart_height)
empty_line = self.add_line(empty_tf, _("No Resources Found"), color=self.gray, fontsize=14, italic=True)
empty_line.alignment = PP_ALIGN.CENTER
# Add tagcloud
if data['tags']:
self.add_picture(data['tagcloud'], self.width/2 + padding, next_line + 0.1, self.width / 2 - 1, chart_height - padding * 2)
else:
empty_tf = self.generate_textbox(self.width / 2, next_line, self.width / 2, chart_height)
empty_line = self.add_line(empty_tf, _("No Tags Found"), color=self.gray, fontsize=14, italic=True)
empty_line.alignment = PP_ALIGN.CENTER
next_line += chart_height + 0.01
# Add logo
logo_width = 0.9
logo_height = 0.25
logo_left = Inches(self.width / 2 - logo_width / 2)
try:
logo_url = os.path.join(settings.STATIC_ROOT, 'img', 'le_login.png')
self.slide.shapes.add_picture(logo_url, logo_left, Inches(next_line), width=Inches(logo_width), height=Inches(logo_height))
except IOError:
logging.warning("Unable to add LE logo")
next_line += logo_height
# Add disclaimer
disclaimer_tf = self.generate_textbox(0, next_line, self.width, 0.2)
disclaimer_line = self.add_line(disclaimer_tf, _("This slide was automatically generated by Kolibri Studio, a product of Learning Equality"),
fontsize=7, color=self.gray, append=False)
disclaimer_line.alignment = PP_ALIGN.CENTER
class ChannelDetailsCSVWriter(ChannelDetailsWriter, CSVMixin):
ext = "csv"
def _write_details(self, filepath):
header = [_("Name"), _("Description"), _("Language"), _("Token"), _("Size"), _("Storage"), _("Resources"),
_("Languages"), _("Subtitles"), _("Coach Content?"), _("Assessments?"), _("Tags"), _("Authors"),
_("Providers"), _("Aggregators"), _("Licenses"), _("Copyright Holders")]
rows = []
for channel in self.channels:
data = self.get_channel_data(channel)
language = channel.language.native_name if channel.language else _("No language set")
token = data['primarytoken'] if data['primarytoken'] else _("Publish channel to get token")
size = "{} - {}".format(self.pluralize_constant(data['resource_count'], "resource"), data['size']['text'])
storage = "{} - {}".format(data['storage']['storage'], data['storage']['text'])
resources = " | ".join([self.pluralize_constant(k['count'], k['kind_id']) for k in data['kind_count']])
languages = " | ".join(data['languages'])
subtitles = " | ".join(data['accessible_languages'])
coach_content = _("Yes") if data['includes']['coach_content'] else _("No")
assessments = _("Yes") if data['includes']['exercises'] else _("No")
tags = " | ".join([t['tag_name'] for t in data['tags']])
authors = " | ".join(data['authors'])
providers = " | ".join(data['providers'])
aggregators = " | ".join(data['aggregators'])
licenses = " | ".join(data['licenses']).encode('utf-8')
copyright_holders = " | ".join(data['copyright_holders'])
rows.append([channel.name, channel.description, language, token, size, storage, resources,
languages, subtitles, coach_content, assessments, tags, authors, providers,
aggregators, licenses, copyright_holders])
return self.write_csv(filepath, rows, header=header)
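# --- Hypothetical usage sketch (added for illustration; not part of the original module) ---
# The writers above depend on the surrounding Django project (settings, storage, models),
# so this is only an outline. The channel ids are placeholders, and the filename keyword is
# assumed from the attribute handling in the base constructor.
#
#   pdf_writer = ChannelDetailsPDFWriter(["<channel_id>"], condensed=False)
#   saved_path = pdf_writer.write()   # renders the PDF and saves it under EXPORT_ROOT
#
#   csv_writer = ChannelDetailsCSVWriter(["<channel_id_1>", "<channel_id_2>"], filename="bundle_report")
#   csv_writer.write()                # writes one CSV row per channel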
|
[
"logging.getLogger",
"contentcuration.utils.format.format_size",
"pdfkit.from_string",
"sys.platform.startswith",
"pptx.Presentation",
"math.log",
"matplotlib.pyplot.annotate",
"os.path.sep.join",
"matplotlib.pyplot.switch_backend",
"pressurecooker.encodings.encode_file_to_base64",
"pptx.dml.color.RGBColor",
"pptx.text.fonts.FontFiles._old_installed_fonts",
"django.core.files.storage.default_storage.save",
"contentcuration.models.ContentKind.objects.exclude",
"contentcuration.utils.files.generate_thumbnail_from_channel",
"matplotlib.get_backend",
"matplotlib.pyplot.close",
"os.unlink",
"matplotlib.pyplot.Circle",
"matplotlib.pyplot.savefig",
"matplotlib.use",
"matplotlib.pyplot.gcf",
"csv.writer",
"numpy.deg2rad",
"numpy.sign",
"django.utils.translation.ugettext",
"pptx.util.Inches",
"pressurecooker.encodings.write_base64_to_file",
"logging.basicConfig",
"matplotlib.pyplot.clf",
"os.path.join",
"contentcuration.models.Channel.objects.filter",
"django.contrib.sites.models.Site.objects.get",
"matplotlib.pyplot.tight_layout",
"django.utils.translation.ngettext",
"django.template.loader.get_template",
"pptx.util.Pt"
] |
[((359, 392), 'sys.platform.startswith', 'sys.platform.startswith', (['"""darwin"""'], {}), "('darwin')\n", (382, 392), False, 'import sys\n'), ((1653, 1678), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""agg"""'], {}), "('agg')\n", (1671, 1678), True, 'import matplotlib.pyplot as plt\n'), ((1927, 1950), 'logging.basicConfig', 'logmodule.basicConfig', ([], {}), '()\n', (1948, 1950), True, 'import logging as logmodule\n'), ((1961, 1990), 'logging.getLogger', 'logmodule.getLogger', (['__name__'], {}), '(__name__)\n', (1980, 1990), True, 'import logging as logmodule\n'), ((18342, 18365), 'pptx.dml.color.RGBColor', 'RGBColor', (['(170)', '(170)', '(170)'], {}), '(170, 170, 170)\n', (18350, 18365), False, 'from pptx.dml.color import RGBColor\n'), ((477, 497), 'matplotlib.use', 'matplotlib.use', (['"""PS"""'], {}), "('PS')\n", (491, 497), False, 'import matplotlib\n'), ((2220, 2252), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (2243, 2252), False, 'import sys\n'), ((2824, 2846), 'django.template.loader.get_template', 'get_template', (['template'], {}), '(template)\n', (2836, 2846), False, 'from django.template.loader import get_template\n'), ((3236, 3287), 'pdfkit.from_string', 'pdfkit.from_string', (['html', 'filepath'], {'options': 'options'}), '(html, filepath, options=options)\n', (3254, 3287), False, 'import pdfkit\n'), ((3463, 3481), 'pptx.util.Inches', 'Inches', (['self.width'], {}), '(self.width)\n', (3469, 3481), False, 'from pptx.util import Inches\n'), ((3514, 3533), 'pptx.util.Inches', 'Inches', (['self.height'], {}), '(self.height)\n', (3520, 3533), False, 'from pptx.util import Inches\n'), ((4086, 4112), 'pptx.dml.color.RGBColor', 'RGBColor', (['red', 'green', 'blue'], {}), '(red, green, blue)\n', (4094, 4112), False, 'from pptx.dml.color import RGBColor\n'), ((4212, 4224), 'pptx.util.Inches', 'Inches', (['left'], {}), '(left)\n', (4218, 4224), False, 'from pptx.util import Inches\n'), ((4239, 4250), 'pptx.util.Inches', 'Inches', (['top'], {}), '(top)\n', (4245, 4250), False, 'from pptx.util import Inches\n'), ((4267, 4280), 'pptx.util.Inches', 'Inches', (['width'], {}), '(width)\n', (4273, 4280), False, 'from pptx.util import Inches\n'), ((4298, 4312), 'pptx.util.Inches', 'Inches', (['height'], {}), '(height)\n', (4304, 4312), False, 'from pptx.util import Inches\n'), ((4731, 4746), 'pptx.util.Pt', 'Pt', (['space_after'], {}), '(space_after)\n', (4733, 4746), False, 'from pptx.util import Pt\n'), ((4772, 4788), 'pptx.util.Pt', 'Pt', (['space_before'], {}), '(space_before)\n', (4774, 4788), False, 'from pptx.util import Pt\n'), ((5044, 5056), 'pptx.util.Pt', 'Pt', (['fontsize'], {}), '(fontsize)\n', (5046, 5056), False, 'from pptx.util import Pt\n'), ((5367, 5407), 'pressurecooker.encodings.write_base64_to_file', 'write_base64_to_file', (['encoding', 'filepath'], {}), '(encoding, filepath)\n', (5387, 5407), False, 'from pressurecooker.encodings import write_base64_to_file\n'), ((5565, 5605), 'pressurecooker.encodings.write_base64_to_file', 'write_base64_to_file', (['encoding', 'filepath'], {}), '(encoding, filepath)\n', (5585, 5605), False, 'from pressurecooker.encodings import write_base64_to_file\n'), ((6635, 6645), 'django.utils.translation.ugettext', '_', (['"""Topic"""'], {}), "('Topic')\n", (6636, 6645), True, 'from django.utils.translation import ugettext as _\n'), ((6676, 6686), 'django.utils.translation.ugettext', '_', (['"""Video"""'], {}), "('Video')\n", (6677, 6686), True, 'from django.utils.translation 
import ugettext as _\n'), ((6717, 6727), 'django.utils.translation.ugettext', '_', (['"""Audio"""'], {}), "('Audio')\n", (6718, 6727), True, 'from django.utils.translation import ugettext as _\n'), ((6761, 6774), 'django.utils.translation.ugettext', '_', (['"""Exercise"""'], {}), "('Exercise')\n", (6762, 6774), True, 'from django.utils.translation import ugettext as _\n'), ((6808, 6821), 'django.utils.translation.ugettext', '_', (['"""Document"""'], {}), "('Document')\n", (6809, 6821), True, 'from django.utils.translation import ugettext as _\n'), ((6852, 6865), 'django.utils.translation.ugettext', '_', (['"""Html App"""'], {}), "('Html App')\n", (6853, 6865), True, 'from django.utils.translation import ugettext as _\n'), ((6908, 6919), 'django.utils.translation.ugettext', '_', (['"""Topics"""'], {}), "('Topics')\n", (6909, 6919), True, 'from django.utils.translation import ugettext as _\n'), ((6962, 6973), 'django.utils.translation.ugettext', '_', (['"""Videos"""'], {}), "('Videos')\n", (6963, 6973), True, 'from django.utils.translation import ugettext as _\n'), ((7016, 7027), 'django.utils.translation.ugettext', '_', (['"""Audios"""'], {}), "('Audios')\n", (7017, 7027), True, 'from django.utils.translation import ugettext as _\n'), ((7073, 7087), 'django.utils.translation.ugettext', '_', (['"""Exercises"""'], {}), "('Exercises')\n", (7074, 7087), True, 'from django.utils.translation import ugettext as _\n'), ((7133, 7147), 'django.utils.translation.ugettext', '_', (['"""Documents"""'], {}), "('Documents')\n", (7134, 7147), True, 'from django.utils.translation import ugettext as _\n'), ((7190, 7204), 'django.utils.translation.ugettext', '_', (['"""Html Apps"""'], {}), "('Html Apps')\n", (7191, 7204), True, 'from django.utils.translation import ugettext as _\n'), ((7226, 7245), 'django.utils.translation.ugettext', '_', (['"""Total Resource"""'], {}), "('Total Resource')\n", (7227, 7245), True, 'from django.utils.translation import ugettext as _\n'), ((7274, 7294), 'django.utils.translation.ugettext', '_', (['"""Total Resources"""'], {}), "('Total Resources')\n", (7275, 7294), True, 'from django.utils.translation import ugettext as _\n'), ((8753, 8795), 'contentcuration.models.Channel.objects.filter', 'Channel.objects.filter', ([], {'pk__in': 'channel_ids'}), '(pk__in=channel_ids)\n', (8775, 8795), False, 'from contentcuration.models import Channel\n'), ((14440, 14489), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(0, 0)', 'center_text_ratio'], {'fc': '"""white"""'}), "((0, 0), center_text_ratio, fc='white')\n", (14450, 14489), True, 'import matplotlib.pyplot as plt\n'), ((14589, 14674), 'matplotlib.pyplot.annotate', 'plt.annotate', (['centertext[0]'], {'xy': '(0, 0.1)', 'fontsize': 'center_text_size', 'ha': '"""center"""'}), "(centertext[0], xy=(0, 0.1), fontsize=center_text_size, ha='center'\n )\n", (14601, 14674), True, 'import matplotlib.pyplot as plt\n'), ((14678, 14768), 'matplotlib.pyplot.annotate', 'plt.annotate', (['centertext[1]'], {'xy': '(0, -0.15)', 'fontsize': '(center_text_size - 5)', 'ha': '"""center"""'}), "(centertext[1], xy=(0, -0.15), fontsize=center_text_size - 5,\n ha='center')\n", (14690, 14768), True, 'import matplotlib.pyplot as plt\n'), ((14779, 14788), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (14786, 14788), True, 'import matplotlib.pyplot as plt\n'), ((14834, 14852), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14850, 14852), True, 'import matplotlib.pyplot as plt\n'), ((14963, 15005), 'matplotlib.pyplot.savefig', 
'plt.savefig', (['filepath'], {'bbox_inches': '"""tight"""'}), "(filepath, bbox_inches='tight')\n", (14974, 15005), True, 'import matplotlib.pyplot as plt\n'), ((15014, 15023), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (15021, 15023), True, 'import matplotlib.pyplot as plt\n'), ((15032, 15043), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15041, 15043), True, 'import matplotlib.pyplot as plt\n'), ((15059, 15116), 'pressurecooker.encodings.encode_file_to_base64', 'encode_file_to_base64', (['filepath', '"""data:image/png;base64,"""'], {}), "(filepath, 'data:image/png;base64,')\n", (15080, 15116), False, 'from pressurecooker.encodings import encode_file_to_base64\n'), ((16408, 16465), 'pressurecooker.encodings.encode_file_to_base64', 'encode_file_to_base64', (['filepath', '"""data:image/png;base64,"""'], {}), "(filepath, 'data:image/png;base64,')\n", (16429, 16465), False, 'from pressurecooker.encodings import encode_file_to_base64\n'), ((18567, 18581), 'pptx.Presentation', 'Presentation', ([], {}), '()\n', (18579, 18581), False, 'from pptx import Presentation\n'), ((19997, 20064), 'os.path.join', 'os.path.join', (['settings.STATIC_ROOT', '"""img"""', '"""export"""', '"""language.png"""'], {}), "(settings.STATIC_ROOT, 'img', 'export', 'language.png')\n", (20009, 20064), False, 'import os\n'), ((20084, 20151), 'pressurecooker.encodings.encode_file_to_base64', 'encode_file_to_base64', (['language_icon_path', '"""data:image/png;base64,"""'], {}), "(language_icon_path, 'data:image/png;base64,')\n", (20105, 20151), False, 'from pressurecooker.encodings import encode_file_to_base64\n'), ((24056, 24095), 'pptx.util.Inches', 'Inches', (['(self.width / 2 - logo_width / 2)'], {}), '(self.width / 2 - logo_width / 2)\n', (24062, 24095), False, 'from pptx.util import Inches\n'), ((2511, 2543), 'pptx.text.fonts.FontFiles._old_installed_fonts', 'FontFiles._old_installed_fonts', ([], {}), '()\n', (2541, 2543), False, 'from pptx.text.fonts import FontFiles\n'), ((5191, 5208), 'pptx.dml.color.RGBColor', 'RGBColor', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (5199, 5208), False, 'from pptx.dml.color import RGBColor\n'), ((5661, 5673), 'pptx.util.Inches', 'Inches', (['left'], {}), '(left)\n', (5667, 5673), False, 'from pptx.util import Inches\n'), ((5675, 5686), 'pptx.util.Inches', 'Inches', (['top'], {}), '(top)\n', (5681, 5686), False, 'from pptx.util import Inches\n'), ((5896, 5908), 'pptx.util.Inches', 'Inches', (['left'], {}), '(left)\n', (5902, 5908), False, 'from pptx.util import Inches\n'), ((5910, 5921), 'pptx.util.Inches', 'Inches', (['top'], {}), '(top)\n', (5916, 5921), False, 'from pptx.util import Inches\n'), ((5923, 5936), 'pptx.util.Inches', 'Inches', (['width'], {}), '(width)\n', (5929, 5936), False, 'from pptx.util import Inches\n'), ((5938, 5952), 'pptx.util.Inches', 'Inches', (['height'], {}), '(height)\n', (5944, 5952), False, 'from pptx.util import Inches\n'), ((6026, 6043), 'pptx.dml.color.RGBColor', 'RGBColor', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (6034, 6043), False, 'from pptx.dml.color import RGBColor\n'), ((6084, 6101), 'pptx.dml.color.RGBColor', 'RGBColor', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (6092, 6101), False, 'from pptx.dml.color import RGBColor\n'), ((6307, 6368), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""', 'quoting': 'csv.QUOTE_MINIMAL'}), "(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n", (6317, 6368), False, 'import csv\n'), ((7447, 7524), 'django.utils.translation.ngettext', 'ngettext', 
(['"""%(count)d%(sep)s%(singular)s"""', '"""%(count)d%(sep)s%(plural)s"""', 'count'], {}), "('%(count)d%(sep)s%(singular)s', '%(count)d%(sep)s%(plural)s', count)\n", (7455, 7524), False, 'from django.utils.translation import ngettext\n'), ((8181, 8197), 'os.unlink', 'os.unlink', (['tempf'], {}), '(tempf)\n', (8190, 8197), False, 'import os\n'), ((9149, 9171), 'django.contrib.sites.models.Site.objects.get', 'Site.objects.get', ([], {'id': '(1)'}), '(id=1)\n', (9165, 9171), False, 'from django.contrib.sites.models import Site\n'), ((9446, 9502), 'os.path.sep.join', 'os.path.sep.join', (['[settings.EXPORT_ROOT, saved_filename]'], {}), '([settings.EXPORT_ROOT, saved_filename])\n', (9462, 9502), False, 'import os\n'), ((10940, 11008), 'os.path.join', 'os.path.join', (['settings.STATIC_ROOT', '"""img"""', '"""kolibri_placeholder.png"""'], {}), "(settings.STATIC_ROOT, 'img', 'kolibri_placeholder.png')\n", (10952, 11008), False, 'import os\n'), ((11028, 11085), 'pressurecooker.encodings.encode_file_to_base64', 'encode_file_to_base64', (['filepath', '"""data:image/png;base64,"""'], {}), "(filepath, 'data:image/png;base64,')\n", (11049, 11085), False, 'from pressurecooker.encodings import encode_file_to_base64\n'), ((19232, 19244), 'pptx.util.Inches', 'Inches', (['(0.01)'], {}), '(0.01)\n', (19238, 19244), False, 'from pptx.util import Inches\n'), ((20460, 20480), 'django.utils.translation.ugettext', '_', (['"""No language set"""'], {}), "('No language set')\n", (20461, 20480), True, 'from django.utils.translation import ugettext as _\n'), ((22464, 22487), 'django.utils.translation.ugettext', '_', (['"""Resource Breakdown"""'], {}), "('Resource Breakdown')\n", (22465, 22487), True, 'from django.utils.translation import ugettext as _\n'), ((22865, 22886), 'django.utils.translation.ugettext', '_', (['"""Most Common Tags"""'], {}), "('Most Common Tags')\n", (22866, 22886), True, 'from django.utils.translation import ugettext as _\n'), ((24132, 24189), 'os.path.join', 'os.path.join', (['settings.STATIC_ROOT', '"""img"""', '"""le_login.png"""'], {}), "(settings.STATIC_ROOT, 'img', 'le_login.png')\n", (24144, 24189), False, 'import os\n'), ((24595, 24693), 'django.utils.translation.ugettext', '_', (['"""This slide was automatically generated by Kolibri Studio, a product of Learning Equality"""'], {}), "('This slide was automatically generated by Kolibri Studio, a product of Learning Equality'\n )\n", (24596, 24693), True, 'from django.utils.translation import ugettext as _\n'), ((24965, 24974), 'django.utils.translation.ugettext', '_', (['"""Name"""'], {}), "('Name')\n", (24966, 24974), True, 'from django.utils.translation import ugettext as _\n'), ((24976, 24992), 'django.utils.translation.ugettext', '_', (['"""Description"""'], {}), "('Description')\n", (24977, 24992), True, 'from django.utils.translation import ugettext as _\n'), ((24994, 25007), 'django.utils.translation.ugettext', '_', (['"""Language"""'], {}), "('Language')\n", (24995, 25007), True, 'from django.utils.translation import ugettext as _\n'), ((25009, 25019), 'django.utils.translation.ugettext', '_', (['"""Token"""'], {}), "('Token')\n", (25010, 25019), True, 'from django.utils.translation import ugettext as _\n'), ((25021, 25030), 'django.utils.translation.ugettext', '_', (['"""Size"""'], {}), "('Size')\n", (25022, 25030), True, 'from django.utils.translation import ugettext as _\n'), ((25032, 25044), 'django.utils.translation.ugettext', '_', (['"""Storage"""'], {}), "('Storage')\n", (25033, 25044), True, 'from django.utils.translation 
import ugettext as _\n'), ((25046, 25060), 'django.utils.translation.ugettext', '_', (['"""Resources"""'], {}), "('Resources')\n", (25047, 25060), True, 'from django.utils.translation import ugettext as _\n'), ((25080, 25094), 'django.utils.translation.ugettext', '_', (['"""Languages"""'], {}), "('Languages')\n", (25081, 25094), True, 'from django.utils.translation import ugettext as _\n'), ((25096, 25110), 'django.utils.translation.ugettext', '_', (['"""Subtitles"""'], {}), "('Subtitles')\n", (25097, 25110), True, 'from django.utils.translation import ugettext as _\n'), ((25112, 25131), 'django.utils.translation.ugettext', '_', (['"""Coach Content?"""'], {}), "('Coach Content?')\n", (25113, 25131), True, 'from django.utils.translation import ugettext as _\n'), ((25133, 25150), 'django.utils.translation.ugettext', '_', (['"""Assessments?"""'], {}), "('Assessments?')\n", (25134, 25150), True, 'from django.utils.translation import ugettext as _\n'), ((25152, 25161), 'django.utils.translation.ugettext', '_', (['"""Tags"""'], {}), "('Tags')\n", (25153, 25161), True, 'from django.utils.translation import ugettext as _\n'), ((25163, 25175), 'django.utils.translation.ugettext', '_', (['"""Authors"""'], {}), "('Authors')\n", (25164, 25175), True, 'from django.utils.translation import ugettext as _\n'), ((25195, 25209), 'django.utils.translation.ugettext', '_', (['"""Providers"""'], {}), "('Providers')\n", (25196, 25209), True, 'from django.utils.translation import ugettext as _\n'), ((25211, 25227), 'django.utils.translation.ugettext', '_', (['"""Aggregators"""'], {}), "('Aggregators')\n", (25212, 25227), True, 'from django.utils.translation import ugettext as _\n'), ((25229, 25242), 'django.utils.translation.ugettext', '_', (['"""Licenses"""'], {}), "('Licenses')\n", (25230, 25242), True, 'from django.utils.translation import ugettext as _\n'), ((25244, 25266), 'django.utils.translation.ugettext', '_', (['"""Copyright Holders"""'], {}), "('Copyright Holders')\n", (25245, 25266), True, 'from django.utils.translation import ugettext as _\n'), ((423, 447), 'matplotlib.get_backend', 'matplotlib.get_backend', ([], {}), '()\n', (445, 447), False, 'import matplotlib\n'), ((5694, 5707), 'pptx.util.Inches', 'Inches', (['width'], {}), '(width)\n', (5700, 5707), False, 'from pptx.util import Inches\n'), ((5716, 5730), 'pptx.util.Inches', 'Inches', (['height'], {}), '(height)\n', (5722, 5730), False, 'from pptx.util import Inches\n'), ((8522, 8537), 'django.utils.translation.ugettext', '_', (['"""Very Large"""'], {}), "('Very Large')\n", (8523, 8537), True, 'from django.utils.translation import ugettext as _\n'), ((9611, 9651), 'django.core.files.storage.default_storage.save', 'default_storage.save', (['save_to_path', 'fobj'], {}), '(save_to_path, fobj)\n', (9631, 9651), False, 'from django.core.files.storage import default_storage\n'), ((17029, 17146), 'django.utils.translation.ugettext', '_', (['"""Page %(page)s of %(pagecount)s - %(channel)s can be found on Kolibri Studio, a product of Learning Equality"""'], {}), "('Page %(page)s of %(pagecount)s - %(channel)s can be found on Kolibri Studio, a product of Learning Equality'\n )\n", (17030, 17146), True, 'from django.utils.translation import ugettext as _\n'), ((17297, 17417), 'django.utils.translation.ugettext', '_', (['"""Page %(page)s of %(pagecount)s - These channels can be found on Kolibri Studio, a product of Learning Equality"""'], {}), "('Page %(page)s of %(pagecount)s - These channels can be found on Kolibri Studio, a product of Learning 
Equality'\n )\n", (17298, 17417), True, 'from django.utils.translation import ugettext as _\n'), ((20645, 20674), 'django.utils.translation.ugettext', '_', (['""" * Subtitles included"""'], {}), "(' * Subtitles included')\n", (20646, 20674), True, 'from django.utils.translation import ugettext as _\n'), ((20971, 20990), 'django.utils.translation.ugettext', '_', (['""" Coach Content"""'], {}), "(' Coach Content')\n", (20972, 20990), True, 'from django.utils.translation import ugettext as _\n'), ((21248, 21265), 'django.utils.translation.ugettext', '_', (['""" Assessments"""'], {}), "(' Assessments')\n", (21249, 21265), True, 'from django.utils.translation import ugettext as _\n'), ((21663, 21690), 'django.utils.translation.ugettext', '_', (['"""Channel size: %(size)s"""'], {}), "('Channel size: %(size)s')\n", (21664, 21690), True, 'from django.utils.translation import ugettext as _\n'), ((23339, 23362), 'django.utils.translation.ugettext', '_', (['"""No Resources Found"""'], {}), "('No Resources Found')\n", (23340, 23362), True, 'from django.utils.translation import ugettext as _\n'), ((23809, 23827), 'django.utils.translation.ugettext', '_', (['"""No Tags Found"""'], {}), "('No Tags Found')\n", (23810, 23827), True, 'from django.utils.translation import ugettext as _\n'), ((24253, 24270), 'pptx.util.Inches', 'Inches', (['next_line'], {}), '(next_line)\n', (24259, 24270), False, 'from pptx.util import Inches\n'), ((25452, 25472), 'django.utils.translation.ugettext', '_', (['"""No language set"""'], {}), "('No language set')\n", (25453, 25472), True, 'from django.utils.translation import ugettext as _\n'), ((25543, 25576), 'django.utils.translation.ugettext', '_', (['"""Publish channel to get token"""'], {}), "('Publish channel to get token')\n", (25544, 25576), True, 'from django.utils.translation import ugettext as _\n'), ((26051, 26059), 'django.utils.translation.ugettext', '_', (['"""Yes"""'], {}), "('Yes')\n", (26052, 26059), True, 'from django.utils.translation import ugettext as _\n'), ((26102, 26109), 'django.utils.translation.ugettext', '_', (['"""No"""'], {}), "('No')\n", (26103, 26109), True, 'from django.utils.translation import ugettext as _\n'), ((26136, 26144), 'django.utils.translation.ugettext', '_', (['"""Yes"""'], {}), "('Yes')\n", (26137, 26144), True, 'from django.utils.translation import ugettext as _\n'), ((26183, 26190), 'django.utils.translation.ugettext', '_', (['"""No"""'], {}), "('No')\n", (26184, 26190), True, 'from django.utils.translation import ugettext as _\n'), ((8503, 8513), 'django.utils.translation.ugettext', '_', (['"""Large"""'], {}), "('Large')\n", (8504, 8513), True, 'from django.utils.translation import ugettext as _\n'), ((10198, 10253), 'contentcuration.utils.files.generate_thumbnail_from_channel', 'generate_thumbnail_from_channel', (['channel'], {'dimension': '(300)'}), '(channel, dimension=300)\n', (10229, 10253), False, 'from contentcuration.utils.files import generate_thumbnail_from_channel\n'), ((11521, 11538), 'contentcuration.utils.format.format_size', 'format_size', (['size'], {}), '(size)\n', (11532, 11538), False, 'from contentcuration.utils.format import format_size\n'), ((13470, 13485), 'numpy.deg2rad', 'np.deg2rad', (['ang'], {}), '(ang)\n', (13480, 13485), True, 'import numpy as np\n'), ((13514, 13529), 'numpy.deg2rad', 'np.deg2rad', (['ang'], {}), '(ang)\n', (13524, 13529), True, 'import numpy as np\n'), ((24278, 24296), 'pptx.util.Inches', 'Inches', (['logo_width'], {}), '(logo_width)\n', (24284, 24296), False, 'from pptx.util 
import Inches\n'), ((24305, 24324), 'pptx.util.Inches', 'Inches', (['logo_height'], {}), '(logo_height)\n', (24311, 24324), False, 'from pptx.util import Inches\n'), ((8482, 8494), 'django.utils.translation.ugettext', '_', (['"""Average"""'], {}), "('Average')\n", (8483, 8494), True, 'from django.utils.translation import ugettext as _\n'), ((14148, 14157), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (14155, 14157), True, 'import matplotlib.pyplot as plt\n'), ((15656, 15729), 'os.path.sep.join', 'os.path.sep.join', (["[settings.STATIC_ROOT, 'fonts', 'OpenSans-Regular.ttf']"], {}), "([settings.STATIC_ROOT, 'fonts', 'OpenSans-Regular.ttf'])\n", (15672, 15729), False, 'import os\n'), ((8439, 8454), 'django.utils.translation.ugettext', '_', (['"""Very Small"""'], {}), "('Very Small')\n", (8440, 8454), True, 'from django.utils.translation import ugettext as _\n'), ((8463, 8473), 'django.utils.translation.ugettext', '_', (['"""Small"""'], {}), "('Small')\n", (8464, 8473), True, 'from django.utils.translation import ugettext as _\n'), ((11277, 11314), 'math.log', 'math.log', (['(size / self.size_divisor)', '(2)'], {}), '(size / self.size_divisor, 2)\n', (11285, 11314), False, 'import math\n'), ((11651, 11671), 'math.log', 'math.log', (['count', '(2.8)'], {}), '(count, 2.8)\n', (11659, 11671), False, 'import math\n'), ((12018, 12071), 'contentcuration.models.ContentKind.objects.exclude', 'ContentKind.objects.exclude', ([], {'kind': 'content_kinds.TOPIC'}), '(kind=content_kinds.TOPIC)\n', (12045, 12071), False, 'from contentcuration.models import ContentKind\n'), ((13774, 13784), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (13781, 13784), True, 'import numpy as np\n')]
|
import os
import numpy
from pydub import AudioSegment
from scipy.fftpack import fft
class AudioSignal(object):
def __init__(self, sample_rate, signal=None, filename=None):
# Set sample rate
self._sample_rate = sample_rate
if signal is None:
# Get file name and file extension
file, file_extension = os.path.splitext(filename)
            # Check if the file extension is an audio format
if file_extension in ['.mp3', '.wav']:
# Read audio file
self._signal = self.read_audio_file(filename)
            # Check if the file extension is a video format
            elif file_extension in ['.mp4', '.mkv', '.avi']:
# Extract audio from video
new_filename = self.extract_audio_from_video(filename)
# read audio file from extracted audio file
self._signal = self.read_audio_file(new_filename)
# Case file extension is not supported
else:
print("Error: file not found or file extension not supported.")
elif filename is None:
# Cast signal to array
self._signal = signal
else:
print("Error : argument missing in AudioSignal() constructor.")
'''
Function to extract audio from a video
'''
def extract_audio_from_video(self, filename):
# Get video file name and extension
file, file_extension = os.path.splitext(filename)
# Extract audio (.wav) from video
os.system('ffmpeg -i ' + file + file_extension + ' ' + '-ar ' + str(self._sample_rate) + ' ' + file + '.wav')
print("Sucessfully converted {} into audio!".format(filename))
# Return audio file name created
return file + '.wav'
'''
Function to read audio file and to return audio samples of a specified WAV file
'''
def read_audio_file(self, filename):
# Get audio signal
audio_file = AudioSegment.from_file(filename)
# Resample audio signal
audio_file = audio_file.set_frame_rate(self._sample_rate)
# Cast to integer
if audio_file.sample_width == 2:
data = numpy.fromstring(audio_file._data, numpy.int16)
elif audio_file.sample_width == 4:
data = numpy.fromstring(audio_file._data, numpy.int32)
# Merge audio channels
audio_signal = []
for chn in list(range(audio_file.channels)):
audio_signal.append(data[chn::audio_file.channels])
audio_signal = numpy.array(audio_signal).T
# Flat signals
if audio_signal.ndim == 2:
if audio_signal.shape[1] == 1:
audio_signal = audio_signal.flatten()
# Convert stereo to mono
audio_signal = self.stereo_to_mono(audio_signal)
# Return sample rate and audio signal
return audio_signal
'''
Function to convert an input signal from stereo to mono
'''
@staticmethod
def stereo_to_mono(audio_signal):
# Check if signal is stereo and convert to mono
if isinstance(audio_signal, int):
return -1
if audio_signal.ndim == 1:
return audio_signal
elif audio_signal.ndim == 2:
if audio_signal.shape[1] == 1:
return audio_signal.flatten()
else:
if audio_signal.shape[1] == 2:
return (audio_signal[:, 1] / 2) + (audio_signal[:, 0] / 2)
else:
return -1
'''
Function to split the input signal into windows of same size
'''
def framing(self, size, step, hamming=False):
# Rescale windows step and size
win_size = int(size * self._sample_rate)
win_step = int(step * self._sample_rate)
# Number of frames
nb_frames = 1 + int((len(self._signal) - win_size) / win_step)
# Build Hamming function
if hamming is True:
ham = numpy.hamming(win_size)
else:
ham = numpy.ones(win_size)
# Split signals (and multiply each windows signals by Hamming functions)
frames = []
for t in range(nb_frames):
sub_signal = AudioSignal(self._sample_rate, signal=self._signal[(t * win_step): (t * win_step + win_size)] * ham)
frames.append(sub_signal)
return frames
'''
Function to compute the magnitude of the Discrete Fourier Transform coefficient
'''
def dft(self, norm=False):
        # Compute the magnitude of the spectrum (and normalize by the number of samples)
if norm is True:
dft = abs(fft(self._signal)) / len(self._signal)
else:
dft = abs(fft(self._signal))
return dft
'''
Function to apply pre-emphasis filter on signal
'''
    def pre_emphasis(self, alpha=0.97):
# Emphasized signal
emphasized_signal = numpy.append(self._signal[0], self._signal[1:] - alpha * self._signal[:-1])
return emphasized_signal
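# --- Minimal usage sketch (added for illustration; not part of the original module) ---
# Builds an AudioSignal from a synthetic sine wave instead of an audio file, so it runs
# without ffmpeg or any media asset. Sample rate and window sizes are arbitrary choices.
if __name__ == '__main__':
    sample_rate = 16000
    t = numpy.arange(sample_rate) / float(sample_rate)         # one second of samples
    tone = numpy.sin(2 * numpy.pi * 440.0 * t)                 # 440 Hz sine wave
    sig = AudioSignal(sample_rate, signal=tone)
    frames = sig.framing(size=0.025, step=0.010, hamming=True)  # 25 ms windows, 10 ms hop
    spectrum = frames[0].dft(norm=True)                         # normalized DFT magnitude of the first frame
    emphasized = sig.pre_emphasis(alpha=0.97)
    print(len(frames), spectrum.shape, emphasized.shape)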
|
[
"numpy.ones",
"os.path.splitext",
"numpy.hamming",
"numpy.append",
"numpy.array",
"pydub.AudioSegment.from_file",
"scipy.fftpack.fft",
"numpy.fromstring"
] |
[((1474, 1500), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1490, 1500), False, 'import os\n'), ((1995, 2027), 'pydub.AudioSegment.from_file', 'AudioSegment.from_file', (['filename'], {}), '(filename)\n', (2017, 2027), False, 'from pydub import AudioSegment\n'), ((4962, 5037), 'numpy.append', 'numpy.append', (['self._signal[0]', '(self._signal[1:] - alpha * self._signal[:-1])'], {}), '(self._signal[0], self._signal[1:] - alpha * self._signal[:-1])\n', (4974, 5037), False, 'import numpy\n'), ((357, 383), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (373, 383), False, 'import os\n'), ((2214, 2261), 'numpy.fromstring', 'numpy.fromstring', (['audio_file._data', 'numpy.int16'], {}), '(audio_file._data, numpy.int16)\n', (2230, 2261), False, 'import numpy\n'), ((2570, 2595), 'numpy.array', 'numpy.array', (['audio_signal'], {}), '(audio_signal)\n', (2581, 2595), False, 'import numpy\n'), ((4013, 4036), 'numpy.hamming', 'numpy.hamming', (['win_size'], {}), '(win_size)\n', (4026, 4036), False, 'import numpy\n'), ((4069, 4089), 'numpy.ones', 'numpy.ones', (['win_size'], {}), '(win_size)\n', (4079, 4089), False, 'import numpy\n'), ((2324, 2371), 'numpy.fromstring', 'numpy.fromstring', (['audio_file._data', 'numpy.int32'], {}), '(audio_file._data, numpy.int32)\n', (2340, 2371), False, 'import numpy\n'), ((4757, 4774), 'scipy.fftpack.fft', 'fft', (['self._signal'], {}), '(self._signal)\n', (4760, 4774), False, 'from scipy.fftpack import fft\n'), ((4682, 4699), 'scipy.fftpack.fft', 'fft', (['self._signal'], {}), '(self._signal)\n', (4685, 4699), False, 'from scipy.fftpack import fft\n')]
|
from tensorflow import keras
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
model = keras.models.load_model("Saved models/2layerNet.h5")
x = np.load("data/preprocessedInputs.npy")
y = np.load("data/outputs.npy")
oosx = np.load("data/testx.npy")
oosy = np.load("data/testy.npy")
def evaluate(x, y):
c = 0
tp = 0
tn = 0
fp = 0
fn = 0
for pred in model.predict(x):
yHat = round(float(pred))
gtLabel = int(y[c])
if yHat == gtLabel and yHat == 1:
tp += 1
elif yHat == gtLabel and yHat == 0:
tn += 1
elif yHat == 1 and gtLabel == 0:
fp += 1
else:
fn += 1
c += 1
confMatrix = [[tp, fn], [fp, tn]]
sens = float(tp) / (tp + fn)
spec = float(tn) / (tn + fp)
perc = float(tp) / (tp + fp)
npv = float(tn) / (tn + fn)
    acc = float(tp + tn) / (fn + fp + tn + tp)
f1 = 2 * ((perc * sens) / (perc + sens))
return [[sens, spec, perc, npv, acc, f1], confMatrix]
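# --- Worked example of the metrics above (comment only; the numbers are illustrative) ---
# With tp=40, fn=10, fp=20, tn=30:
#   sensitivity = 40 / (40 + 10) = 0.8
#   specificity = 30 / (30 + 20) = 0.6
#   precision   = 40 / (40 + 20) ≈ 0.667
#   NPV         = 30 / (30 + 10) = 0.75
#   accuracy    = (40 + 30) / 100 = 0.7
#   F1          = 2 * (0.667 * 0.8) / (0.667 + 0.8) ≈ 0.727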
print("------------Insample------------")
results = evaluate(x, y)
sens, spec, perc, npv, acc, f1 = results[0]
confMatrix = results[1]
print(f"Confusion matrix: {confMatrix}")
print(
f"sensitivity: {sens}\nspecificity: {spec}\nprecision: {perc}\nNegative Predictive Value: {npv}\nAccuracy: {acc}\nF1 Score: {f1}")
print("------------Out of Sample------------")
results2 = evaluate(oosx, oosy)
sens, spec, perc, npv, acc, f1 = results2[0]
confMatrix = results2[1]
print(f"Confusion matrix: {confMatrix}")
print(
f"sensitivity: {sens}\nspecificity: {spec}\nprecision: {perc}\nNegative Predictive Value: {npv}\nAccuracy: {acc}\nF1 Score: {f1}")
|
[
"numpy.load",
"tensorflow.keras.models.load_model"
] |
[((115, 167), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['"""Saved models/2layerNet.h5"""'], {}), "('Saved models/2layerNet.h5')\n", (138, 167), False, 'from tensorflow import keras\n'), ((173, 211), 'numpy.load', 'np.load', (['"""data/preprocessedInputs.npy"""'], {}), "('data/preprocessedInputs.npy')\n", (180, 211), True, 'import numpy as np\n'), ((217, 244), 'numpy.load', 'np.load', (['"""data/outputs.npy"""'], {}), "('data/outputs.npy')\n", (224, 244), True, 'import numpy as np\n'), ((255, 280), 'numpy.load', 'np.load', (['"""data/testx.npy"""'], {}), "('data/testx.npy')\n", (262, 280), True, 'import numpy as np\n'), ((289, 314), 'numpy.load', 'np.load', (['"""data/testy.npy"""'], {}), "('data/testy.npy')\n", (296, 314), True, 'import numpy as np\n')]
|
import numpy as np
import scipy as sp
from scipy.sparse.linalg import LinearOperator, lgmres, gmres
import tensornetwork as tn
import jax_vumps.numpy_backend.contractions as ct
# import jax_vumps.numpy_backend.mps_linalg as mps_linalg
def LH_linear_operator(A_L, lR):
"""
Return, as a LinearOperator, the LHS of the equation found by
summing the geometric series for
the left environment Hamiltonian.
"""
chi = A_L.shape[1]
Id = np.eye(chi, dtype=A_L.dtype)
def matvec(v):
v = v.reshape((chi, chi))
Th_v = ct.XopL(A_L, X=v)
vR = ct.proj(v, lR)*Id
v = v - Th_v + vR
v = v.flatten()
return v
op = LinearOperator((chi**2, chi**2), matvec=matvec, dtype=A_L.dtype)
return op
def call_solver(op, hI, params, x0, tol):
"""
Code used by both solve_for_RH and solve_for_LH to call the
sparse solver.
"""
if x0 is not None:
x0 = x0.flatten()
if params["solver"] == "gmres":
x, info = gmres(op,
hI.flatten(),
tol=tol,
restart=params["n_krylov"],
maxiter=params["max_restarts"],
x0=x0)
elif params["solver"] == "lgmres":
x, info = lgmres(op,
hI.flatten(),
tol=tol,
maxiter=params["maxiter"],
inner_m=params["inner_m"],
outer_k=params["outer_k"],
x0=x0)
new_hI = x.reshape(hI.shape)
return (new_hI, info)
def outermat(A, B):
chi = A.shape[0]
contract = [A, B]
idxs = [[-2, -1], [-3, -4]]
return tn.ncon(contract, idxs, backend="numpy").reshape((chi**2, chi**2))
def dense_LH_op(A_L, lR):
chi = A_L.shape[1]
eye = np.eye(chi, dtype=A_L.dtype)
term1 = outermat(eye, eye)
term2 = ct.tmdense(A_L).reshape((chi**2, chi**2))
term3 = outermat(eye, lR)
mat = term1-term2+term3
mat = mat.T
return mat
def prepare_for_LH_solve(A_L, H, lR):
hL_bare = ct.compute_hL(A_L, H)
hL_div = ct.proj(hL_bare, lR)*np.eye(hL_bare.shape[0])
hL = hL_bare - hL_div
return hL
def solve_for_LH(A_L, H, lR, params, delta, oldLH=None,
dense=False):
"""
Find the renormalized left environment Hamiltonian using a sparse
solver.
"""
hL = prepare_for_LH_solve(A_L, H, lR)
chi = hL.shape[0]
tol = params["tol_coef"]*delta
if dense:
mat = dense_LH_op(A_L, lR)
op = LH_linear_operator(A_L, lR)
LH = sp.linalg.solve(mat.T, hL.reshape((chi**2)))
LH = LH.reshape((chi, chi))
else:
op = LH_linear_operator(A_L, lR)
        LH, info = call_solver(op, hL, params, oldLH, tol)
        if info != 0:
            print("Warning: Hleft solution failed with code: "+str(info))
return LH
def RH_linear_operator(A_R, rL):
    """
    Return, as a LinearOperator, the LHS of the equation found by
    summing the geometric series for
    the right environment Hamiltonian.
    """
    chi = A_R.shape[1]
Id = np.eye(chi, dtype=A_R.dtype)
def matvec(v):
v = v.reshape((chi, chi))
Th_v = ct.XopR(A_R, X=v)
Lv = ct.proj(rL, v)*Id
v = v - Th_v + Lv
v = v.flatten()
return v
op = LinearOperator((chi**2, chi**2), matvec=matvec, dtype=A_R.dtype)
return op
def solve_for_RH(A_R, H, rL, params, delta,
oldRH=None):
"""
Find the renormalized right environment Hamiltonian using a sparse
solver.
"""
hR_bare = ct.compute_hR(A_R, H)
hR_div = ct.proj(rL, hR_bare)*np.eye(hR_bare.shape[0])
hR = hR_bare - hR_div
op = RH_linear_operator(A_R, rL)
tol = params["tol_coef"]*delta
RH, info = call_solver(op, hR, params, oldRH, tol)
if info != 0:
print("Warning: RH solution failed with code: "+str(info))
# RHL = np.abs(ct.proj(rL, RH))
# if RHL > 1E-6:
# print("Warning: large <L|RH> = ", str(RHL))
return RH
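# --- Illustrative solver parameters (added for clarity; the values are arbitrary guesses) ---
# solve_for_LH / solve_for_RH only read the keys referenced above; with solver='lgmres'
# the gmres-specific keys are ignored, and vice versa.
#
#   params = {
#       "solver": "lgmres",     # or "gmres"
#       "tol_coef": 0.01,       # solver tolerance is tol_coef * delta
#       "n_krylov": 40,         # gmres: restart length
#       "max_restarts": 20,     # gmres: maxiter
#       "maxiter": 100,         # lgmres: maximum outer iterations
#       "inner_m": 30,          # lgmres: inner Krylov dimension
#       "outer_k": 3,           # lgmres: vectors carried over between restarts
#   }
#   LH = solve_for_LH(A_L, H, lR, params, delta)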
|
[
"scipy.sparse.linalg.LinearOperator",
"numpy.eye",
"jax_vumps.numpy_backend.contractions.proj",
"jax_vumps.numpy_backend.contractions.tmdense",
"tensornetwork.ncon",
"jax_vumps.numpy_backend.contractions.compute_hR",
"jax_vumps.numpy_backend.contractions.XopR",
"jax_vumps.numpy_backend.contractions.compute_hL",
"jax_vumps.numpy_backend.contractions.XopL"
] |
[((462, 490), 'numpy.eye', 'np.eye', (['chi'], {'dtype': 'A_L.dtype'}), '(chi, dtype=A_L.dtype)\n', (468, 490), True, 'import numpy as np\n'), ((686, 754), 'scipy.sparse.linalg.LinearOperator', 'LinearOperator', (['(chi ** 2, chi ** 2)'], {'matvec': 'matvec', 'dtype': 'A_L.dtype'}), '((chi ** 2, chi ** 2), matvec=matvec, dtype=A_L.dtype)\n', (700, 754), False, 'from scipy.sparse.linalg import LinearOperator, lgmres, gmres\n'), ((1855, 1883), 'numpy.eye', 'np.eye', (['chi'], {'dtype': 'A_L.dtype'}), '(chi, dtype=A_L.dtype)\n', (1861, 1883), True, 'import numpy as np\n'), ((2112, 2133), 'jax_vumps.numpy_backend.contractions.compute_hL', 'ct.compute_hL', (['A_L', 'H'], {}), '(A_L, H)\n', (2125, 2133), True, 'import jax_vumps.numpy_backend.contractions as ct\n'), ((3151, 3179), 'numpy.eye', 'np.eye', (['chi'], {'dtype': 'A_R.dtype'}), '(chi, dtype=A_R.dtype)\n', (3157, 3179), True, 'import numpy as np\n'), ((3374, 3442), 'scipy.sparse.linalg.LinearOperator', 'LinearOperator', (['(chi ** 2, chi ** 2)'], {'matvec': 'matvec', 'dtype': 'A_R.dtype'}), '((chi ** 2, chi ** 2), matvec=matvec, dtype=A_R.dtype)\n', (3388, 3442), False, 'from scipy.sparse.linalg import LinearOperator, lgmres, gmres\n'), ((3642, 3663), 'jax_vumps.numpy_backend.contractions.compute_hR', 'ct.compute_hR', (['A_R', 'H'], {}), '(A_R, H)\n', (3655, 3663), True, 'import jax_vumps.numpy_backend.contractions as ct\n'), ((560, 577), 'jax_vumps.numpy_backend.contractions.XopL', 'ct.XopL', (['A_L'], {'X': 'v'}), '(A_L, X=v)\n', (567, 577), True, 'import jax_vumps.numpy_backend.contractions as ct\n'), ((2147, 2167), 'jax_vumps.numpy_backend.contractions.proj', 'ct.proj', (['hL_bare', 'lR'], {}), '(hL_bare, lR)\n', (2154, 2167), True, 'import jax_vumps.numpy_backend.contractions as ct\n'), ((2168, 2192), 'numpy.eye', 'np.eye', (['hL_bare.shape[0]'], {}), '(hL_bare.shape[0])\n', (2174, 2192), True, 'import numpy as np\n'), ((3249, 3266), 'jax_vumps.numpy_backend.contractions.XopR', 'ct.XopR', (['A_R'], {'X': 'v'}), '(A_R, X=v)\n', (3256, 3266), True, 'import jax_vumps.numpy_backend.contractions as ct\n'), ((3677, 3697), 'jax_vumps.numpy_backend.contractions.proj', 'ct.proj', (['rL', 'hR_bare'], {}), '(rL, hR_bare)\n', (3684, 3697), True, 'import jax_vumps.numpy_backend.contractions as ct\n'), ((3698, 3722), 'numpy.eye', 'np.eye', (['hR_bare.shape[0]'], {}), '(hR_bare.shape[0])\n', (3704, 3722), True, 'import numpy as np\n'), ((591, 605), 'jax_vumps.numpy_backend.contractions.proj', 'ct.proj', (['v', 'lR'], {}), '(v, lR)\n', (598, 605), True, 'import jax_vumps.numpy_backend.contractions as ct\n'), ((1727, 1767), 'tensornetwork.ncon', 'tn.ncon', (['contract', 'idxs'], {'backend': '"""numpy"""'}), "(contract, idxs, backend='numpy')\n", (1734, 1767), True, 'import tensornetwork as tn\n'), ((1927, 1942), 'jax_vumps.numpy_backend.contractions.tmdense', 'ct.tmdense', (['A_L'], {}), '(A_L)\n', (1937, 1942), True, 'import jax_vumps.numpy_backend.contractions as ct\n'), ((3280, 3294), 'jax_vumps.numpy_backend.contractions.proj', 'ct.proj', (['rL', 'v'], {}), '(rL, v)\n', (3287, 3294), True, 'import jax_vumps.numpy_backend.contractions as ct\n')]
|
# -*- coding: utf-8 -*-
"""
File Name: utils
Description :
Author : mick.yi
date: 2019/1/4
"""
import numpy as np
def enqueue(np_array, elem):
"""
    Enqueue: put the new element at the head of the queue and drop the tail element.
    :param np_array: original queue
    :param elem: element to add
:return:
"""
np_array[1:] = np_array[:-1]
np_array[0] = elem
return np_array
def random_select(ids):
"""
    Randomly select one id.
    :param ids: list of ids, shape (N,)
:return:
"""
idx = np.random.choice(len(ids))
return ids[idx]
# def to_train_label(train_label):
def update_weights(h5_file, h5_dataset, weights, labels):
"""
    Update the prototype weights stored in hdf5.
    :param h5_file: hdf5 file holding the prototype weights
    :param h5_dataset: dataset within the hdf5 file holding the prototype weights
    :param weights: weights to be updated, numpy array (Batch, Dim)
    :param labels: class labels corresponding to the weights to be updated
    :return:
    Note: TypeError: Indexing elements must be in increasing order; idx must be sorted.
          TypeError: PointSelection __getitem__ only works with bool arrays; use list(labels[idx]) instead of labels[idx].
"""
# for idx, label in enumerate(labels):
# h5_dataset[label] = weights[idx]
idx = np.argsort(labels)
h5_dataset[list(labels[idx])] = weights[idx]
h5_file.flush()
def get_weights(h5_dataset, labels):
weights = [h5_dataset[label] for label in labels]
return np.asarray(weights)
def update_queue(dominant_queue, candidate_queue, predict, current_labels):
"""
    Update the dominant queue.
    :param dominant_queue: dominant queue
    :param candidate_queue: candidate queue
    :param predict: predicted classes, numpy array (Batch, train_num_class)
    :param current_labels: actual current classes, (Batch,)
:return:
"""
predict_label = np.argmax(predict, axis=-1)
for i in range(len(predict_label)):
d_label_queue = dominant_queue[current_labels[i]]
c_label_queue = candidate_queue[current_labels[i]]
real_predict_label = current_labels[predict_label[i]]
        # The prediction is not the correct label, is not in the dominant queue of the
        # correct label, but is in its candidate queue: update the dominant queue.
if predict_label[i] != i and \
real_predict_label not in d_label_queue and \
real_predict_label in c_label_queue:
dominant_queue[current_labels[i]] = enqueue(d_label_queue, real_predict_label)
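# --- Worked example (added for illustration; not part of the original module) ---
# enqueue() shifts every element one slot towards the tail and drops the last one,
# so [3, 2, 1] with new element 5 becomes [5, 3, 2].
if __name__ == '__main__':
    queue = np.array([3, 2, 1])
    print(enqueue(queue, 5))                     # -> [5 3 2]
    print(random_select(np.array([7, 8, 9])))   # one of 7, 8 or 9, chosen uniformly at random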
|
[
"numpy.argsort",
"numpy.asarray",
"numpy.argmax"
] |
[((1123, 1141), 'numpy.argsort', 'np.argsort', (['labels'], {}), '(labels)\n', (1133, 1141), True, 'import numpy as np\n'), ((1322, 1341), 'numpy.asarray', 'np.asarray', (['weights'], {}), '(weights)\n', (1332, 1341), True, 'import numpy as np\n'), ((1658, 1685), 'numpy.argmax', 'np.argmax', (['predict'], {'axis': '(-1)'}), '(predict, axis=-1)\n', (1667, 1685), True, 'import numpy as np\n')]
|
"""
Module with reading functionalities of color and magnitude data from photometric and
spectral libraries.
"""
import os
import configparser
from typing import Optional, Tuple
import h5py
import numpy as np
from typeguard import typechecked
from species.core import box
from species.read import read_spectrum
from species.util import phot_util
class ReadColorMagnitude:
"""
Class for reading color-magnitude data from the database.
"""
@typechecked
def __init__(self,
library: str,
filters_color: Tuple[str, str],
filter_mag: str) -> None:
"""
Parameters
----------
library : str
Photometric ('vlm-plx' or 'leggett') or spectral ('irtf' or 'spex') library.
filters_color : tuple(str, str)
Filter names for the color. For a photometric library, these have to be present in
the database (typically in the MKO, 2MASS, or WISE system). For a spectral library,
any filter names can be provided as long as they overlap with the wavelength range
of the spectra.
filter_mag : str
Filter name for the absolute magnitudes (see also description of ``filters_color``).
Returns
-------
NoneType
None
"""
self.library = library
self.filters_color = filters_color
self.filter_mag = filter_mag
config_file = os.path.join(os.getcwd(), 'species_config.ini')
config = configparser.ConfigParser()
config.read_file(open(config_file))
self.database = config['species']['database']
with h5py.File(self.database, 'r') as hdf_file:
if 'photometry' in hdf_file and self.library in hdf_file['photometry']:
self.lib_type = 'phot_lib'
elif 'spectra' in hdf_file and self.library in hdf_file['spectra']:
self.lib_type = 'spec_lib'
else:
raise ValueError(f'The \'{self.library}\' library is not present in the database.')
@typechecked
def get_color_magnitude(self,
object_type: Optional[str] = None) -> box.ColorMagBox:
"""
Function for extracting color-magnitude data from the selected library.
Parameters
----------
object_type : str, None
Object type for which the colors and magnitudes are extracted. Either field dwarfs
('field') or young/low-gravity objects ('young'). All objects are selected if set
to ``None``.
Returns
-------
species.core.box.ColorMagBox
Box with the colors and magnitudes.
"""
if self.lib_type == 'phot_lib':
with h5py.File(self.database, 'r') as h5_file:
sptype = np.asarray(h5_file[f'photometry/{self.library}/sptype'])
dist = np.asarray(h5_file[f'photometry/{self.library}/distance'])
dist_error = np.asarray(h5_file[f'photometry/{self.library}/distance_error'])
flag = np.asarray(h5_file[f'photometry/{self.library}/flag'])
obj_names = np.asarray(h5_file[f'photometry/{self.library}/name'])
if object_type is None:
indices = np.arange(0, np.size(sptype), 1)
elif object_type == 'field':
indices = np.where(flag == 'null')[0]
elif object_type == 'young':
indices = []
for j, object_flag in enumerate(flag):
if 'young' in object_flag:
indices.append(j)
elif 'lowg' in object_flag:
indices.append(j)
indices = np.array(indices)
if indices.size > 0:
with h5py.File(self.database, 'r') as h5_file:
mag1 = np.asarray(h5_file[f'photometry/{self.library}/{self.filters_color[0]}'])
mag2 = np.asarray(h5_file[f'photometry/{self.library}/{self.filters_color[1]}'])
else:
raise ValueError(f'There is not data available from \'{self.library}\' for '
f'\'{object_type}\' type objects with the chosen filters.')
color = mag1 - mag2
if self.filter_mag == self.filters_color[0]:
mag, _ = phot_util.apparent_to_absolute((mag1, None), (dist, dist_error))
elif self.filter_mag == self.filters_color[1]:
mag, _ = phot_util.apparent_to_absolute((mag2, None), (dist, dist_error))
color = color[indices]
mag = mag[indices]
sptype = sptype[indices]
obj_names = obj_names[indices]
indices = []
for i in range(color.size):
if not np.isnan(color[i]) and not np.isnan(mag[i]):
indices.append(i)
colormag_box = box.create_box(boxtype='colormag',
library=self.library,
object_type=object_type,
filters_color=self.filters_color,
filter_mag=self.filter_mag,
color=color[indices],
magnitude=mag[indices],
sptype=sptype[indices],
names=obj_names[indices])
elif self.lib_type == 'spec_lib':
read_spec_0 = read_spectrum.ReadSpectrum(spec_library=self.library,
filter_name=self.filters_color[0])
read_spec_1 = read_spectrum.ReadSpectrum(spec_library=self.library,
filter_name=self.filters_color[1])
read_spec_2 = read_spectrum.ReadSpectrum(spec_library=self.library,
filter_name=self.filter_mag)
phot_box_0 = read_spec_0.get_magnitude(sptypes=None)
phot_box_1 = read_spec_1.get_magnitude(sptypes=None)
phot_box_2 = read_spec_2.get_magnitude(sptypes=None)
colormag_box = box.create_box(boxtype='colormag',
library=self.library,
object_type=object_type,
filters_color=self.filters_color,
filter_mag=self.filter_mag,
color=phot_box_0.app_mag[:, 0]-phot_box_1.app_mag[:, 0],
magnitude=phot_box_2.abs_mag[:, 0],
sptype=phot_box_0.sptype,
names=None)
return colormag_box
class ReadColorColor:
"""
Class for reading color-color data from the database.
"""
@typechecked
def __init__(self,
library: str,
filters_colors: Tuple[Tuple[str, str], Tuple[str, str]]) -> None:
"""
Parameters
----------
library : str
Photometric ('vlm-plx' or 'leggett') or spectral ('irtf' or 'spex') library.
filters_colors : tuple(tuple(str, str), tuple(str, str))
Filter names for the colors. For a photometric library, these have to be present in
the database (typically in the MKO, 2MASS, or WISE system). For a spectral library,
any filter names can be provided as long as they overlap with the wavelength range
of the spectra.
Returns
-------
NoneType
None
"""
self.library = library
self.filters_colors = filters_colors
config_file = os.path.join(os.getcwd(), 'species_config.ini')
config = configparser.ConfigParser()
config.read_file(open(config_file))
self.database = config['species']['database']
with h5py.File(self.database, 'r') as hdf_file:
if 'photometry' in hdf_file and self.library in hdf_file['photometry']:
self.lib_type = 'phot_lib'
elif 'spectra' in hdf_file and self.library in hdf_file['spectra']:
self.lib_type = 'spec_lib'
else:
raise ValueError(f'The \'{self.library}\' library is not present in the database.')
@typechecked
def get_color_color(self,
object_type: Optional[str] = None) -> box.ColorColorBox:
"""
Function for extracting color-color data from the selected library.
Parameters
----------
object_type : str, None
Object type for which the colors and magnitudes are extracted. Either field dwarfs
('field') or young/low-gravity objects ('young'). All objects are selected if set
to ``None``.
Returns
-------
species.core.box.ColorColorBox
Box with the colors.
"""
if self.lib_type == 'phot_lib':
h5_file = h5py.File(self.database, 'r')
sptype = np.asarray(h5_file[f'photometry/{self.library}/sptype'])
flag = np.asarray(h5_file[f'photometry/{self.library}/flag'])
obj_names = np.asarray(h5_file[f'photometry/{self.library}/name'])
if object_type is None:
indices = np.arange(0, np.size(sptype), 1)
elif object_type == 'field':
indices = np.where(flag == 'null')[0]
elif object_type == 'young':
indices = []
for j, object_flag in enumerate(flag):
if 'young' in object_flag:
indices.append(j)
elif 'lowg' in object_flag:
indices.append(j)
indices = np.array(indices)
mag1 = np.asarray(h5_file[f'photometry/{self.library}/{self.filters_colors[0][0]}'])
mag2 = np.asarray(h5_file[f'photometry/{self.library}/{self.filters_colors[0][1]}'])
mag3 = np.asarray(h5_file[f'photometry/{self.library}/{self.filters_colors[1][0]}'])
mag4 = np.asarray(h5_file[f'photometry/{self.library}/{self.filters_colors[1][1]}'])
color1 = mag1 - mag2
color2 = mag3 - mag4
color1 = color1[indices]
color2 = color2[indices]
sptype = sptype[indices]
obj_names = obj_names[indices]
indices = []
for i in range(color1.size):
if not np.isnan(color1[i]) and not np.isnan(color2[i]):
indices.append(i)
colorbox = box.create_box(boxtype='colorcolor',
library=self.library,
object_type=object_type,
filters=self.filters_colors,
color1=color1[indices],
color2=color2[indices],
sptype=sptype[indices],
names=obj_names[indices])
h5_file.close()
elif self.lib_type == 'spec_lib':
read_spec_0 = read_spectrum.ReadSpectrum(spec_library=self.library,
filter_name=self.filters_colors[0][0])
read_spec_1 = read_spectrum.ReadSpectrum(spec_library=self.library,
filter_name=self.filters_colors[0][1])
read_spec_2 = read_spectrum.ReadSpectrum(spec_library=self.library,
filter_name=self.filters_colors[1][0])
read_spec_3 = read_spectrum.ReadSpectrum(spec_library=self.library,
filter_name=self.filters_colors[1][1])
phot_box_0 = read_spec_0.get_magnitude(sptypes=None)
phot_box_1 = read_spec_1.get_magnitude(sptypes=None)
phot_box_2 = read_spec_2.get_magnitude(sptypes=None)
phot_box_3 = read_spec_3.get_magnitude(sptypes=None)
colorbox = box.create_box(boxtype='colorcolor',
library=self.library,
object_type=object_type,
filters=self.filters_colors,
color1=phot_box_0.app_mag[:, 0]-phot_box_1.app_mag[:, 0],
color2=phot_box_2.app_mag[:, 0]-phot_box_3.app_mag[:, 0],
sptype=phot_box_0.sptype,
names=None)
return colorbox
|
[
"configparser.ConfigParser",
"species.util.phot_util.apparent_to_absolute",
"numpy.where",
"numpy.size",
"numpy.asarray",
"species.core.box.create_box",
"h5py.File",
"os.getcwd",
"numpy.array",
"numpy.isnan",
"species.read.read_spectrum.ReadSpectrum"
] |
[((1538, 1565), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (1563, 1565), False, 'import configparser\n'), ((8001, 8028), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (8026, 8028), False, 'import configparser\n'), ((1485, 1496), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1494, 1496), False, 'import os\n'), ((1679, 1708), 'h5py.File', 'h5py.File', (['self.database', '"""r"""'], {}), "(self.database, 'r')\n", (1688, 1708), False, 'import h5py\n'), ((4979, 5233), 'species.core.box.create_box', 'box.create_box', ([], {'boxtype': '"""colormag"""', 'library': 'self.library', 'object_type': 'object_type', 'filters_color': 'self.filters_color', 'filter_mag': 'self.filter_mag', 'color': 'color[indices]', 'magnitude': 'mag[indices]', 'sptype': 'sptype[indices]', 'names': 'obj_names[indices]'}), "(boxtype='colormag', library=self.library, object_type=\n object_type, filters_color=self.filters_color, filter_mag=self.\n filter_mag, color=color[indices], magnitude=mag[indices], sptype=sptype\n [indices], names=obj_names[indices])\n", (4993, 5233), False, 'from species.core import box\n'), ((7948, 7959), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7957, 7959), False, 'import os\n'), ((8142, 8171), 'h5py.File', 'h5py.File', (['self.database', '"""r"""'], {}), "(self.database, 'r')\n", (8151, 8171), False, 'import h5py\n'), ((9237, 9266), 'h5py.File', 'h5py.File', (['self.database', '"""r"""'], {}), "(self.database, 'r')\n", (9246, 9266), False, 'import h5py\n'), ((9289, 9345), 'numpy.asarray', 'np.asarray', (["h5_file[f'photometry/{self.library}/sptype']"], {}), "(h5_file[f'photometry/{self.library}/sptype'])\n", (9299, 9345), True, 'import numpy as np\n'), ((9365, 9419), 'numpy.asarray', 'np.asarray', (["h5_file[f'photometry/{self.library}/flag']"], {}), "(h5_file[f'photometry/{self.library}/flag'])\n", (9375, 9419), True, 'import numpy as np\n'), ((9444, 9498), 'numpy.asarray', 'np.asarray', (["h5_file[f'photometry/{self.library}/name']"], {}), "(h5_file[f'photometry/{self.library}/name'])\n", (9454, 9498), True, 'import numpy as np\n'), ((10063, 10140), 'numpy.asarray', 'np.asarray', (["h5_file[f'photometry/{self.library}/{self.filters_colors[0][0]}']"], {}), "(h5_file[f'photometry/{self.library}/{self.filters_colors[0][0]}'])\n", (10073, 10140), True, 'import numpy as np\n'), ((10160, 10237), 'numpy.asarray', 'np.asarray', (["h5_file[f'photometry/{self.library}/{self.filters_colors[0][1]}']"], {}), "(h5_file[f'photometry/{self.library}/{self.filters_colors[0][1]}'])\n", (10170, 10237), True, 'import numpy as np\n'), ((10257, 10334), 'numpy.asarray', 'np.asarray', (["h5_file[f'photometry/{self.library}/{self.filters_colors[1][0]}']"], {}), "(h5_file[f'photometry/{self.library}/{self.filters_colors[1][0]}'])\n", (10267, 10334), True, 'import numpy as np\n'), ((10354, 10431), 'numpy.asarray', 'np.asarray', (["h5_file[f'photometry/{self.library}/{self.filters_colors[1][1]}']"], {}), "(h5_file[f'photometry/{self.library}/{self.filters_colors[1][1]}'])\n", (10364, 10431), True, 'import numpy as np\n'), ((10855, 11074), 'species.core.box.create_box', 'box.create_box', ([], {'boxtype': '"""colorcolor"""', 'library': 'self.library', 'object_type': 'object_type', 'filters': 'self.filters_colors', 'color1': 'color1[indices]', 'color2': 'color2[indices]', 'sptype': 'sptype[indices]', 'names': 'obj_names[indices]'}), "(boxtype='colorcolor', library=self.library, object_type=\n object_type, filters=self.filters_colors, color1=color1[indices],\n 
color2=color2[indices], sptype=sptype[indices], names=obj_names[indices])\n", (10869, 11074), False, 'from species.core import box\n'), ((2792, 2821), 'h5py.File', 'h5py.File', (['self.database', '"""r"""'], {}), "(self.database, 'r')\n", (2801, 2821), False, 'import h5py\n'), ((2859, 2915), 'numpy.asarray', 'np.asarray', (["h5_file[f'photometry/{self.library}/sptype']"], {}), "(h5_file[f'photometry/{self.library}/sptype'])\n", (2869, 2915), True, 'import numpy as np\n'), ((2939, 2997), 'numpy.asarray', 'np.asarray', (["h5_file[f'photometry/{self.library}/distance']"], {}), "(h5_file[f'photometry/{self.library}/distance'])\n", (2949, 2997), True, 'import numpy as np\n'), ((3027, 3091), 'numpy.asarray', 'np.asarray', (["h5_file[f'photometry/{self.library}/distance_error']"], {}), "(h5_file[f'photometry/{self.library}/distance_error'])\n", (3037, 3091), True, 'import numpy as np\n'), ((3115, 3169), 'numpy.asarray', 'np.asarray', (["h5_file[f'photometry/{self.library}/flag']"], {}), "(h5_file[f'photometry/{self.library}/flag'])\n", (3125, 3169), True, 'import numpy as np\n'), ((3198, 3252), 'numpy.asarray', 'np.asarray', (["h5_file[f'photometry/{self.library}/name']"], {}), "(h5_file[f'photometry/{self.library}/name'])\n", (3208, 3252), True, 'import numpy as np\n'), ((4417, 4481), 'species.util.phot_util.apparent_to_absolute', 'phot_util.apparent_to_absolute', (['(mag1, None)', '(dist, dist_error)'], {}), '((mag1, None), (dist, dist_error))\n', (4447, 4481), False, 'from species.util import phot_util\n'), ((5624, 5717), 'species.read.read_spectrum.ReadSpectrum', 'read_spectrum.ReadSpectrum', ([], {'spec_library': 'self.library', 'filter_name': 'self.filters_color[0]'}), '(spec_library=self.library, filter_name=self.\n filters_color[0])\n', (5650, 5717), False, 'from species.read import read_spectrum\n'), ((5793, 5886), 'species.read.read_spectrum.ReadSpectrum', 'read_spectrum.ReadSpectrum', ([], {'spec_library': 'self.library', 'filter_name': 'self.filters_color[1]'}), '(spec_library=self.library, filter_name=self.\n filters_color[1])\n', (5819, 5886), False, 'from species.read import read_spectrum\n'), ((5962, 6049), 'species.read.read_spectrum.ReadSpectrum', 'read_spectrum.ReadSpectrum', ([], {'spec_library': 'self.library', 'filter_name': 'self.filter_mag'}), '(spec_library=self.library, filter_name=self.\n filter_mag)\n', (5988, 6049), False, 'from species.read import read_spectrum\n'), ((6322, 6612), 'species.core.box.create_box', 'box.create_box', ([], {'boxtype': '"""colormag"""', 'library': 'self.library', 'object_type': 'object_type', 'filters_color': 'self.filters_color', 'filter_mag': 'self.filter_mag', 'color': '(phot_box_0.app_mag[:, 0] - phot_box_1.app_mag[:, 0])', 'magnitude': 'phot_box_2.abs_mag[:, 0]', 'sptype': 'phot_box_0.sptype', 'names': 'None'}), "(boxtype='colormag', library=self.library, object_type=\n object_type, filters_color=self.filters_color, filter_mag=self.\n filter_mag, color=phot_box_0.app_mag[:, 0] - phot_box_1.app_mag[:, 0],\n magnitude=phot_box_2.abs_mag[:, 0], sptype=phot_box_0.sptype, names=None)\n", (6336, 6612), False, 'from species.core import box\n'), ((11430, 11527), 'species.read.read_spectrum.ReadSpectrum', 'read_spectrum.ReadSpectrum', ([], {'spec_library': 'self.library', 'filter_name': 'self.filters_colors[0][0]'}), '(spec_library=self.library, filter_name=self.\n filters_colors[0][0])\n', (11456, 11527), False, 'from species.read import read_spectrum\n'), ((11603, 11700), 'species.read.read_spectrum.ReadSpectrum', 
'read_spectrum.ReadSpectrum', ([], {'spec_library': 'self.library', 'filter_name': 'self.filters_colors[0][1]'}), '(spec_library=self.library, filter_name=self.\n filters_colors[0][1])\n', (11629, 11700), False, 'from species.read import read_spectrum\n'), ((11776, 11873), 'species.read.read_spectrum.ReadSpectrum', 'read_spectrum.ReadSpectrum', ([], {'spec_library': 'self.library', 'filter_name': 'self.filters_colors[1][0]'}), '(spec_library=self.library, filter_name=self.\n filters_colors[1][0])\n', (11802, 11873), False, 'from species.read import read_spectrum\n'), ((11949, 12046), 'species.read.read_spectrum.ReadSpectrum', 'read_spectrum.ReadSpectrum', ([], {'spec_library': 'self.library', 'filter_name': 'self.filters_colors[1][1]'}), '(spec_library=self.library, filter_name=self.\n filters_colors[1][1])\n', (11975, 12046), False, 'from species.read import read_spectrum\n'), ((12380, 12664), 'species.core.box.create_box', 'box.create_box', ([], {'boxtype': '"""colorcolor"""', 'library': 'self.library', 'object_type': 'object_type', 'filters': 'self.filters_colors', 'color1': '(phot_box_0.app_mag[:, 0] - phot_box_1.app_mag[:, 0])', 'color2': '(phot_box_2.app_mag[:, 0] - phot_box_3.app_mag[:, 0])', 'sptype': 'phot_box_0.sptype', 'names': 'None'}), "(boxtype='colorcolor', library=self.library, object_type=\n object_type, filters=self.filters_colors, color1=phot_box_0.app_mag[:, \n 0] - phot_box_1.app_mag[:, 0], color2=phot_box_2.app_mag[:, 0] -\n phot_box_3.app_mag[:, 0], sptype=phot_box_0.sptype, names=None)\n", (12394, 12664), False, 'from species.core import box\n'), ((3329, 3344), 'numpy.size', 'np.size', (['sptype'], {}), '(sptype)\n', (3336, 3344), True, 'import numpy as np\n'), ((3852, 3881), 'h5py.File', 'h5py.File', (['self.database', '"""r"""'], {}), "(self.database, 'r')\n", (3861, 3881), False, 'import h5py\n'), ((3921, 3994), 'numpy.asarray', 'np.asarray', (["h5_file[f'photometry/{self.library}/{self.filters_color[0]}']"], {}), "(h5_file[f'photometry/{self.library}/{self.filters_color[0]}'])\n", (3931, 3994), True, 'import numpy as np\n'), ((4022, 4095), 'numpy.asarray', 'np.asarray', (["h5_file[f'photometry/{self.library}/{self.filters_color[1]}']"], {}), "(h5_file[f'photometry/{self.library}/{self.filters_color[1]}'])\n", (4032, 4095), True, 'import numpy as np\n'), ((4567, 4631), 'species.util.phot_util.apparent_to_absolute', 'phot_util.apparent_to_absolute', (['(mag2, None)', '(dist, dist_error)'], {}), '((mag2, None), (dist, dist_error))\n', (4597, 4631), False, 'from species.util import phot_util\n'), ((9575, 9590), 'numpy.size', 'np.size', (['sptype'], {}), '(sptype)\n', (9582, 9590), True, 'import numpy as np\n'), ((3417, 3441), 'numpy.where', 'np.where', (["(flag == 'null')"], {}), "(flag == 'null')\n", (3425, 3441), True, 'import numpy as np\n'), ((3779, 3796), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (3787, 3796), True, 'import numpy as np\n'), ((4868, 4886), 'numpy.isnan', 'np.isnan', (['color[i]'], {}), '(color[i])\n', (4876, 4886), True, 'import numpy as np\n'), ((4895, 4911), 'numpy.isnan', 'np.isnan', (['mag[i]'], {}), '(mag[i])\n', (4903, 4911), True, 'import numpy as np\n'), ((9663, 9687), 'numpy.where', 'np.where', (["(flag == 'null')"], {}), "(flag == 'null')\n", (9671, 9687), True, 'import numpy as np\n'), ((10025, 10042), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (10033, 10042), True, 'import numpy as np\n'), ((10744, 10763), 'numpy.isnan', 'np.isnan', (['color1[i]'], {}), '(color1[i])\n', (10752, 10763), True, 
'import numpy as np\n'), ((10772, 10791), 'numpy.isnan', 'np.isnan', (['color2[i]'], {}), '(color2[i])\n', (10780, 10791), True, 'import numpy as np\n')]
|
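The two reader classes in the record above share one pattern: the constructor resolves the HDF5 database path from species_config.ini and decides whether the requested library is photometric or spectral, and the get_* method packs the selected data into a box object. A minimal usage sketch for ReadColorColor, meant to run in a session where the class above is defined, assuming a species database has already been built in the working directory and contains the 'vlm-plx' photometric library; the MKO filter identifiers are placeholders, and the box attribute names simply mirror the box.create_box call above:

# Hypothetical usage of the ReadColorColor class defined above.
# The filter identifiers must match entries present in the database.
read_colors = ReadColorColor(library='vlm-plx',
                             filters_colors=(('MKO/NSFCam.J', 'MKO/NSFCam.H'),
                                             ('MKO/NSFCam.H', 'MKO/NSFCam.K')))
colorcolor_box = read_colors.get_color_color(object_type='field')
# The returned ColorColorBox carries the fields passed to box.create_box.
print(colorcolor_box.color1, colorcolor_box.color2, colorcolor_box.sptype)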
# ===========================================================================
# imgcv.py ----------------------------------------------------------------
# ===========================================================================
# import ------------------------------------------------------------------
# ---------------------------------------------------------------------------
import rsvis.utils.imgtools as imgtools
import rsvis.utils.logger
import logging
import numpy as np
from PIL import Image, ImageTk
from tkinter import Canvas, NW
# class -------------------------------------------------------------------
# ---------------------------------------------------------------------------
class ImgCanvas(Canvas):
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def __init__(
self,
parent,
shift=[4,4],
sensitivity = 4,
logger=None,
**kwargs
):
super(ImgCanvas, self).__init__(parent)
self.bind("<Configure>", self.resize_image)
self._mask = [None]
self._mask_alpha = [150]
self._mask_color = [[0,0,0]]
self._mask_invert = [False]
self._shift = shift
self._scale = [1.0, 1.0]
self.set_size([self.winfo_reqwidth(), self.winfo_reqheight()])
self._parent = parent
self._logger = rsvis.utils.logger.Logger(logger=logger)
# key bindings ----------------------------------------------------
        self._mouse_sensitivity = sensitivity
self._mouse_box = [0, 0, 0, 0]
self._mouse_point = [0, 0]
self._mouse_event = [0, 0]
self._mouse_img = [0, 0]
self._keys = dict()
self.bind("<Button-1>", self.mouse_button_1_pressed)
self.bind("<ButtonRelease-1>", self.mouse_button_1_released)
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def clear(self, **kwargs):
pass
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def get_keys(self, **kwargs):
return self._keys
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def get_logger(self):
return self._logger
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def resize_image(self, event):
# determine the ratio of old width/height to new width/height
event_size = [event.width, event.height] #####################
self._scale = [float(e)/s for e, s in zip(event_size, self._size)]
self.set_size(event_size)
# resize the canvas
self.config(width=self._size[0], height=self._size[1]) #################
# rescale all the objects tagged with the "all" tag
self.scale("all", 0, 0, self._scale[0], self._scale[1]) ################
self.create_image()
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def resize_boxes(self, boxes, inversion=False):
scale = [float(s)/i for s, i in zip(self.get_size(), self._img_size)]
if inversion:
scale = [1/s for s in scale]
boxes = boxes if isinstance(boxes[0], list) and len(boxes[0]) !=2 else [boxes]
return [self.resize_bbox(box, scale) for box in boxes]
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def resize_bbox(self, box, scale):
if len(box)==4:
return [
int(box[0]*scale[1]), int(box[1]*scale[1]),
int(box[2]*scale[0]), int(box[3]*scale[0])
]
else:
return [[int(n[0] *scale[0]), int(n[1]*scale[1])] for n in box ]
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def resize_points(self, points, inversion=False):
scale = [float(s)/i for s, i in zip(self.get_size(), self._img_size)]
if inversion:
scale = [1/s for s in scale]
points = points if isinstance(points[0], list) else [points]
return [self.resize_point(point, scale) for point in points]
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def resize_point(self, point, scale):
return [int(point[0]*scale[1]), int(point[1]*scale[0])]
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def resize_event(self, event):
ev = [event.y, event.x]
ev[0] = ev[0] if ev[0] >= 0 else 0
ev[0] = ev[0] if ev[0] < self._img_draw.size[1] else self._img_draw.size[1]-1
ev[1] = ev[1] if ev[1] >= 0 else 0
ev[1] = ev[1] if ev[1] < self._img_draw.size[0] else self._img_draw.size[0]-1
return ev
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def get_event_box(self, event):
return [
min([self._mouse_point[0], self._mouse_event[0]]),
max([self._mouse_point[0], self._mouse_event[0]]),
min([self._mouse_point[1], self._mouse_event[1]]),
max([self._mouse_point[1], self._mouse_event[1]])
]
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def set_img(self, img, clear_mask=True):
if not isinstance(img, np.ndarray):
return
self._img_size = [img.shape[1], img.shape[0]]
self._data_img = imgtools.expand_image_dim(img)
        if img.dtype != np.uint8:
img = imgtools.project_and_stack(img, dtype=np.uint8, factor=255)
self._img = Image.fromarray(img)
if clear_mask:
self.set_mask(show=False)
self.create_image()
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def set_mask(self, mask=None, show=True, alpha=150, color=[0,0,0], invert=False):
self._mask = mask if isinstance(mask, list) else [mask]
self._mask_alpha = alpha if isinstance(alpha, list) else [alpha]
self._mask_color = color if isinstance(color[0], list) else [color]
self._mask_invert= invert if isinstance(invert, list) else [invert]
if show:
self.create_image()
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def get_mask(self, index=None, resize=False):
if index is None:
mask = self._mask[0]
for idx in range(1, len(self._mask)):
if isinstance(self._mask[idx], np.ndarray):
mask = np.where(np.logical_and(mask, self._mask[idx]), 1, 0).astype(np.uint8)
return mask
else:
if isinstance(self._mask[index], np.ndarray):
return np.asarray(Image.fromarray(self._mask[index]).resize(self.get_size())) if resize else self._mask[index]
else:
return self._mask[index]
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def is_mouse_event(self, bbox):
if not (bbox[1]-bbox[0] > self._mouse_sensitivity and bbox[3]-bbox[2] > self._mouse_sensitivity):
return False
return True
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def get_img(self, show=False):
if show:
return np.asarray(self._img).copy()
return self._data_img.copy()
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def set_size(self, size):
self._size = [s - sh for s, sh in zip(size, self._shift)]
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def get_size(self):
return [s + sh for s, sh in zip(self._size, self._shift)]
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def get_shape(self):
size = self.get_size()
return (size[1], size[0], 3)
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def get_intial_draw_image(self):
return np.zeros(self.get_shape(), dtype=np.int16) - 1
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def create_image(self, **kwargs):
self._img_draw = self._img.resize(self.get_size())
if isinstance(self._mask[0], np.ndarray):
for idx, (mask, color, alpha, invert) in enumerate(zip(self._mask, self._mask_color, self._mask_alpha, self._mask_invert)):
mask = self.get_mask(index=idx, resize=True)
mask = mask if not invert else imgtools.invert_bool_img(mask)
mask = Image.fromarray(
imgtools.get_transparent_image(
imgtools.bool_to_img(mask, value=-1, dtype=np.int16, color=color, factor=255),
value=alpha
)
)
self._img_draw.paste(mask, (0, 0), mask)
image = Image.fromarray(
imgtools.get_transparent_image(self.draw_image(), value=200))
self._img_draw.paste(image, (0, 0), image)
self._img_canvas = ImageTk.PhotoImage(image=self._img_draw)
self._img_on_canvas = super(ImgCanvas, self).create_image(0, 0, image=self._img_canvas, anchor=NW)
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def draw_image(self, **kwargs):
img_assembly = self.get_intial_draw_image()
return img_assembly
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def mouse_button_1_pressed(self, event):
self.focus_set()
self._mouse_event = self.resize_event(event)
self._mouse_point = [self._mouse_event[0], self._mouse_event[1]]
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def get_roi(self):
if sum(self._mouse_box):
roi_xy = self.resize_boxes(self._mouse_box, inversion=True)[0]
roi = [roi_xy[2], roi_xy[0], roi_xy[3]-roi_xy[2], roi_xy[1]-roi_xy[0]]
else:
roi = [0, 0, self._data_img.shape[1]-1, self._data_img.shape[0]-1]
return roi
# method --------------------------------------------------------------
# -----------------------------------------------------------------------
def mouse_button_1_released(self, event):
self.focus_set()
self._mouse_event = self.resize_event(event)
self._mouse_box = self.get_event_box(event)
self._mouse_img = self.resize_points(self._mouse_event, inversion=True)[0]
self._logger("[MOUSE] Pixel: {}, Value: {}".format(self._mouse_img,
self._data_img[self._mouse_img[0], self._mouse_img[1], :]
)
)
|
[
"rsvis.utils.imgtools.expand_image_dim",
"PIL.Image.fromarray",
"numpy.logical_and",
"rsvis.utils.imgtools.project_and_stack",
"numpy.asarray",
"rsvis.utils.imgtools.invert_bool_img",
"rsvis.utils.imgtools.bool_to_img",
"PIL.ImageTk.PhotoImage"
] |
[((6408, 6438), 'rsvis.utils.imgtools.expand_image_dim', 'imgtools.expand_image_dim', (['img'], {}), '(img)\n', (6433, 6438), True, 'import rsvis.utils.imgtools as imgtools\n'), ((6594, 6614), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (6609, 6614), False, 'from PIL import Image, ImageTk\n'), ((10803, 10843), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'image': 'self._img_draw'}), '(image=self._img_draw)\n', (10821, 10843), False, 'from PIL import Image, ImageTk\n'), ((6505, 6564), 'rsvis.utils.imgtools.project_and_stack', 'imgtools.project_and_stack', (['img'], {'dtype': 'np.uint8', 'factor': '(255)'}), '(img, dtype=np.uint8, factor=255)\n', (6531, 6564), True, 'import rsvis.utils.imgtools as imgtools\n'), ((8628, 8649), 'numpy.asarray', 'np.asarray', (['self._img'], {}), '(self._img)\n', (8638, 8649), True, 'import numpy as np\n'), ((10249, 10279), 'rsvis.utils.imgtools.invert_bool_img', 'imgtools.invert_bool_img', (['mask'], {}), '(mask)\n', (10273, 10279), True, 'import rsvis.utils.imgtools as imgtools\n'), ((10396, 10473), 'rsvis.utils.imgtools.bool_to_img', 'imgtools.bool_to_img', (['mask'], {'value': '(-1)', 'dtype': 'np.int16', 'color': 'color', 'factor': '(255)'}), '(mask, value=-1, dtype=np.int16, color=color, factor=255)\n', (10416, 10473), True, 'import rsvis.utils.imgtools as imgtools\n'), ((7712, 7749), 'numpy.logical_and', 'np.logical_and', (['mask', 'self._mask[idx]'], {}), '(mask, self._mask[idx])\n', (7726, 7749), True, 'import numpy as np\n'), ((7904, 7938), 'PIL.Image.fromarray', 'Image.fromarray', (['self._mask[index]'], {}), '(self._mask[index])\n', (7919, 7938), False, 'from PIL import Image, ImageTk\n')]
|
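ImgCanvas above is a plain tkinter Canvas subclass, so it can be packed into any Tk layout; set_img() expects a numpy array and set_mask() overlays a semi-transparent mask on top of it. A minimal embedding sketch, assuming rsvis (with the imgtools and logger helpers imported at the top of imgcv.py) is installed and running in a session where ImgCanvas is defined; the window size, dummy image, and mask threshold are illustrative:

import numpy as np
import tkinter as tk

root = tk.Tk()
root.geometry('660x500')
canvas = ImgCanvas(root, shift=[4, 4])                 # class defined above
canvas.pack(fill=tk.BOTH, expand=True)
img = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)  # dummy RGB image
canvas.set_img(img)                                          # display it
mask = (img[:, :, 0] > 128).astype(np.uint8)                 # simple binary mask
canvas.set_mask(mask=mask, alpha=120, color=[255, 0, 0])     # red overlay
root.mainloop()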
#! /usr/bin/env python3
#
"""
Plot time series data in an interactive plot viewer.
Usage
=====
$ python3 plot_time_series.py --loglevel=20 --stderr
Plots add new data every second, and update on screen every 5 sec.
24 hours of data is kept. Each plot starts out in "autoaxis X PAN"
and "autoaxis Y VIS".
Things you can do in a plot:
1) Scroll the mouse wheel to zoom the X axis
2) Hold CTRL and scroll the mouse wheel to zoom the Y axis.
3) Press 'y' to autozoom the Y axis to the full range of the Y data.
(The message "Autoaxis Y ON" should appear briefly).
Press 'y' again to toggle off this behavior.
4) Press 'v' to autozoom the Y axis to the range of the *visible* data shown
(The message "Autoaxis Y VIS" should appear briefly).
Press 'v' again to toggle off this behavior.
5) Press 'x' to autozoom the X axis to the full range of the X data
(The message "Autoaxis X ON" should appear briefly).
Press 'x' again to toggle off this behavior.
6) Press 'p' to autopan the X axis to always show the latest data on
the right (the message "Autoaxis X PAN" should appear briefly).
Press 'p' again to toggle off this behavior.
"""
import sys
import time
import threading
import numpy as np
import ginga.toolkit as ginga_toolkit
from ginga.misc import log, Bunch
from ginga.plot.plotaide import PlotAide
import ginga.plot.data_source as dsp
win_wd, win_ht = 800, 280
class FakeData:
"""Generate fake time-series data."""
def __init__(self, name, t_start, y_range, num_pts):
self.name = name
self.t_start = t_start
self.y_range = y_range
self.num_pts = num_pts
self.data_src = None
self.tv = 0.0
self.tv_dir = 'up'
self.tv_dct = {0: 'up', 1: 'down'}
self.tv_delta = 0.1
self.tv_dmin = 1.0
self.tv_dmax = 30.0
self.tv_deadline = time.time()
def rand_rng(self, a, b):
return (b - a) * np.random.random_sample() + a
def generate_point(self, t):
x, y = t, self.tv
if self.tv_dir == 'up':
y = self.rand_rng(y, min(y + self.tv_delta, self.y_range[1]))
else:
y = self.rand_rng(max(y - self.tv_delta, self.y_range[0]), y)
self.tv = y
if t >= self.tv_deadline:
v = np.random.randint(0, 2)
self.tv_dir = self.tv_dct[v]
self.tv_deadline = t + self.rand_rng(self.tv_dmin, self.tv_dmax)
## p = np.random.randint(0, 100)
## if p >= 98:
## y = np.nan
return (x, y)
def init_points(self, data_src, start=None):
N, t = self.num_pts, self.t_start
if start is None:
start = (self.y_range[0] + self.y_range[1]) * 0.5
self.tv = start
self.tv_deadline = t - N + self.rand_rng(self.tv_dmin, self.tv_dmax)
points = np.array([self.generate_point(ti)
for ti in np.arange(t - N, t, 1.0)])
data_src.set_points(points)
self.data_src = data_src
def add_point(self, t):
pt = self.generate_point(t)
self.data_src.add(pt)
return pt
def timer1_cb(timer, fdg_l, interval):
t = time.time()
timer.set(interval)
for fdg in fdg_l:
fdg.add_point(t)
dsp.update_plot_from_source(fdg.data_src, fdg.data_src.plot,
update_limits=True)
def timer2_cb(timer, app, aides, fdg_l, interval):
timer.set(interval)
for a in aides:
# keep plots responsive
app.process_events()
a.aide.update_plots()
def make_plot(logger, dims, sources, y_rng, y_acc=np.mean,
title='', warn_y=None, alert_y=None,
show_x_axis=True, show_y_axis=True):
from ginga.gw import Viewers
from ginga.canvas.types import plots as gplots
import ginga.plot.time_series as tsp
win_wd, win_ht = dims[:2]
viewer = Viewers.CanvasView(logger, render='widget')
viewer.set_desired_size(win_wd, win_ht)
viewer.set_zoom_algorithm('rate')
viewer.set_zoomrate(1.41)
viewer.enable_autozoom('off')
viewer.set_background('white')
viewer.set_foreground('black')
viewer.set_enter_focus(True)
# our plot
aide = PlotAide(viewer)
aide.settings.set(autoaxis_x='pan', autoaxis_y='vis')
bg = tsp.TimePlotBG(warn_y=warn_y, alert_y=alert_y, linewidth=2)
aide.add_plot_decor(bg)
title = tsp.TimePlotTitle(title=title)
aide.add_plot_decor(title)
x_axis = tsp.XTimeAxis(num_labels=4)
aide.add_plot_decor(x_axis)
y_axis = gplots.YAxis(num_labels=4)
aide.add_plot_decor(y_axis)
colors = ['purple', 'palegreen4', 'red', 'brown', 'blue']
for i, src in enumerate(sources):
psrc = gplots.XYPlot(name=src.name, color=colors[i % len(colors)],
x_acc=np.mean, y_acc=y_acc,
linewidth=2.0, coord='data')
buf = np.zeros((src.num_pts, 2), dtype=np.float)
dsrc = dsp.XYDataSource(buf, none_for_empty=True, overwrite=True)
dsrc.plot = psrc
src.init_points(dsrc)
aide.add_plot(psrc)
dsp.update_plot_from_source(dsrc, psrc, update_limits=True)
# initially, show last 4 hours worth of data.
t, _ = dsrc.get_latest()
aide.zoom_limit_x(t - 4 * 3600, t)
# add scrollbar interface around this viewer
si = Viewers.GingaScrolledViewerWidget(viewer=viewer, width=win_wd,
height=win_ht)
aide.configure_scrollbars(si)
res = Bunch.Bunch(viewer=viewer, aide=aide, widget=si)
return res
def make_data(t, N, names, y_range):
srcs = []
for i, name in enumerate(names):
fdg = FakeData(name, t, y_range, N)
srcs.append(fdg)
return srcs
def cross_connect_plots(plot_info):
# cross connect the plots so that zooming or panning in X in one
# does the same to all the others
m_settings = plot_info[0].aide.settings
for res_a in plot_info:
for res_b in set(plot_info) - set([res_a]):
res_a.aide.add_callback('plot-zoom-x', res_b.aide.plot_zoom_x_cb)
if res_a.aide.settings is not m_settings:
m_settings.share_settings(res_a.aide.settings, keylist=['autoaxis_x'])
def main(options, args):
logger = log.get_logger("example1", options=options)
if options.toolkit is None:
logger.error("Please choose a GUI toolkit with -t option")
# decide our toolkit, then import
ginga_toolkit.use(options.toolkit)
# now we can import
from ginga.gw import Widgets
def quit(self, *args):
logger.info("Top window closed.")
sys.exit()
ev_quit = threading.Event()
app = Widgets.Application(logger=logger)
app.add_callback('shutdown', quit)
w = app.make_window("EnvMon")
w.add_callback('close', quit)
vbox = Widgets.VBox()
vbox.set_spacing(1)
dims = (win_wd, win_ht)
# default: data every second for 24 hours
N = options.numvalues
M = options.numplots
t = time.time()
plots = []
fdgs = []
# make a plot of outside and dome wind speed
y_rng = (0.0, 50.0)
srcs = make_data(t, N, ["Outside", "Dome"], y_rng)
fdgs.extend(srcs)
res = make_plot(logger, dims, srcs, y_rng,
y_acc=np.mean, title="Wind Speed (m/s)")
vbox.add_widget(res.widget, stretch=1)
plots.append(res)
# make a plot of outside and dome temperature
y_rng = (-30.0, 50.0)
srcs = make_data(t, N, ["Outside", "Dome"], y_rng)
fdgs.extend(srcs)
res = make_plot(logger, dims, srcs, y_rng,
y_acc=np.mean, title="Temperature (C)")
vbox.add_widget(res.widget, stretch=1)
plots.append(res)
# make a plot of outside and dome humidity
y_rng = (0.0, 100.0)
srcs = make_data(t, N, ["Outside", "Dome"], y_rng)
fdgs.extend(srcs)
res = make_plot(logger, dims, srcs, y_rng,
y_acc=np.mean, title="Humidity (%)",
warn_y=70, alert_y=80)
vbox.add_widget(res.widget, stretch=1)
plots.append(res)
# make a plot of outside and dome dew point
y_rng = (-30.0, 50.0)
srcs = make_data(t, N, ["Outside", "Dome"], y_rng)
fdgs.extend(srcs)
res = make_plot(logger, dims, srcs, y_rng,
y_acc=np.mean, title="M1 & Dew (C)")
vbox.add_widget(res.widget, stretch=1)
plots.append(res)
# make a plot of front and rear top-ring wind speed
y_rng = (0.0, 50.0)
srcs = make_data(t, N, ["Front", "Rear"], y_rng)
fdgs.extend(srcs)
res = make_plot(logger, dims, srcs, y_rng,
y_acc=np.mean, title="Top Ring Wind (m/s)")
vbox.add_widget(res.widget, stretch=1)
plots.append(res)
# cross connect plots so zooming/panning in X affects all plots
cross_connect_plots(plots)
hbox = Widgets.HBox()
hbox.set_margins(4, 2, 4, 2)
wquit = Widgets.Button("Quit")
wquit.add_callback('activated', quit)
hbox.add_widget(Widgets.Label(''), stretch=1)
hbox.add_widget(wquit)
vbox.add_widget(hbox, stretch=0)
w.set_widget(vbox)
# timer to add a point every second
t1 = app.make_timer()
t1.add_callback('expired', timer1_cb, fdgs, 1.0)
t1.set(1.0)
# timer to update the plot every interval seconds
t2 = app.make_timer()
t2.add_callback('expired', timer2_cb, app, plots, fdgs,
options.update_interval)
t2.set(options.update_interval)
w.resize(win_wd, win_ht * len(plots) + 50)
w.show()
app.mainloop()
if __name__ == "__main__":
# Parse command line options
from argparse import ArgumentParser
argprs = ArgumentParser("test ginga plot")
argprs.add_argument("--debug", dest="debug", default=False,
action="store_true",
help="Enter the pdb debugger on main()")
argprs.add_argument("-n", "--numvalues", dest="numvalues", default=86400,
type=int,
help="Number of items to show per plot")
argprs.add_argument("-m", "--numplots", dest="numplots", default=2,
type=int,
help="Number of plots to show per graph")
argprs.add_argument("--profile", dest="profile", action="store_true",
default=False,
help="Run the profiler on main()")
argprs.add_argument("-t", "--toolkit", dest="toolkit", metavar="NAME",
default='qt',
help="Choose GUI toolkit (gtk|qt)")
argprs.add_argument("--update", dest="update_interval", default=5.0,
type=float,
help="Number of seconds between plot updates")
log.addlogopts(argprs)
(options, args) = argprs.parse_known_args(sys.argv[1:])
# Are we debugging this?
if options.debug:
import pdb
pdb.run('main(options, args)')
# Are we profiling this?
elif options.profile:
import profile
print(("%s profile:" % sys.argv[0]))
profile.run('main(options, args)')
else:
main(options, args)
|
[
"ginga.plot.data_source.XYDataSource",
"ginga.gw.Widgets.Button",
"ginga.toolkit.use",
"ginga.plot.data_source.update_plot_from_source",
"ginga.plot.time_series.TimePlotTitle",
"ginga.gw.Widgets.HBox",
"ginga.gw.Viewers.CanvasView",
"sys.exit",
"numpy.arange",
"ginga.plot.time_series.TimePlotBG",
"ginga.plot.plotaide.PlotAide",
"ginga.misc.log.addlogopts",
"argparse.ArgumentParser",
"profile.run",
"ginga.gw.Widgets.Application",
"ginga.misc.Bunch.Bunch",
"ginga.canvas.types.plots.YAxis",
"ginga.gw.Widgets.VBox",
"numpy.random.random_sample",
"ginga.gw.Widgets.Label",
"time.time",
"ginga.plot.time_series.XTimeAxis",
"pdb.run",
"threading.Event",
"numpy.zeros",
"numpy.random.randint",
"ginga.misc.log.get_logger",
"ginga.gw.Viewers.GingaScrolledViewerWidget"
] |
[((3175, 3186), 'time.time', 'time.time', ([], {}), '()\n', (3184, 3186), False, 'import time\n'), ((3906, 3949), 'ginga.gw.Viewers.CanvasView', 'Viewers.CanvasView', (['logger'], {'render': '"""widget"""'}), "(logger, render='widget')\n", (3924, 3949), False, 'from ginga.gw import Viewers\n'), ((4227, 4243), 'ginga.plot.plotaide.PlotAide', 'PlotAide', (['viewer'], {}), '(viewer)\n', (4235, 4243), False, 'from ginga.plot.plotaide import PlotAide\n'), ((4312, 4371), 'ginga.plot.time_series.TimePlotBG', 'tsp.TimePlotBG', ([], {'warn_y': 'warn_y', 'alert_y': 'alert_y', 'linewidth': '(2)'}), '(warn_y=warn_y, alert_y=alert_y, linewidth=2)\n', (4326, 4371), True, 'import ginga.plot.time_series as tsp\n'), ((4413, 4443), 'ginga.plot.time_series.TimePlotTitle', 'tsp.TimePlotTitle', ([], {'title': 'title'}), '(title=title)\n', (4430, 4443), True, 'import ginga.plot.time_series as tsp\n'), ((4489, 4516), 'ginga.plot.time_series.XTimeAxis', 'tsp.XTimeAxis', ([], {'num_labels': '(4)'}), '(num_labels=4)\n', (4502, 4516), True, 'import ginga.plot.time_series as tsp\n'), ((4563, 4589), 'ginga.canvas.types.plots.YAxis', 'gplots.YAxis', ([], {'num_labels': '(4)'}), '(num_labels=4)\n', (4575, 4589), True, 'from ginga.canvas.types import plots as gplots\n'), ((5375, 5452), 'ginga.gw.Viewers.GingaScrolledViewerWidget', 'Viewers.GingaScrolledViewerWidget', ([], {'viewer': 'viewer', 'width': 'win_wd', 'height': 'win_ht'}), '(viewer=viewer, width=win_wd, height=win_ht)\n', (5408, 5452), False, 'from ginga.gw import Viewers\n'), ((5541, 5589), 'ginga.misc.Bunch.Bunch', 'Bunch.Bunch', ([], {'viewer': 'viewer', 'aide': 'aide', 'widget': 'si'}), '(viewer=viewer, aide=aide, widget=si)\n', (5552, 5589), False, 'from ginga.misc import log, Bunch\n'), ((6303, 6346), 'ginga.misc.log.get_logger', 'log.get_logger', (['"""example1"""'], {'options': 'options'}), "('example1', options=options)\n", (6317, 6346), False, 'from ginga.misc import log, Bunch\n'), ((6490, 6524), 'ginga.toolkit.use', 'ginga_toolkit.use', (['options.toolkit'], {}), '(options.toolkit)\n', (6507, 6524), True, 'import ginga.toolkit as ginga_toolkit\n'), ((6687, 6704), 'threading.Event', 'threading.Event', ([], {}), '()\n', (6702, 6704), False, 'import threading\n'), ((6716, 6750), 'ginga.gw.Widgets.Application', 'Widgets.Application', ([], {'logger': 'logger'}), '(logger=logger)\n', (6735, 6750), False, 'from ginga.gw import Widgets\n'), ((6871, 6885), 'ginga.gw.Widgets.VBox', 'Widgets.VBox', ([], {}), '()\n', (6883, 6885), False, 'from ginga.gw import Widgets\n'), ((7045, 7056), 'time.time', 'time.time', ([], {}), '()\n', (7054, 7056), False, 'import time\n'), ((8863, 8877), 'ginga.gw.Widgets.HBox', 'Widgets.HBox', ([], {}), '()\n', (8875, 8877), False, 'from ginga.gw import Widgets\n'), ((8924, 8946), 'ginga.gw.Widgets.Button', 'Widgets.Button', (['"""Quit"""'], {}), "('Quit')\n", (8938, 8946), False, 'from ginga.gw import Widgets\n'), ((9684, 9717), 'argparse.ArgumentParser', 'ArgumentParser', (['"""test ginga plot"""'], {}), "('test ginga plot')\n", (9698, 9717), False, 'from argparse import ArgumentParser\n'), ((10771, 10793), 'ginga.misc.log.addlogopts', 'log.addlogopts', (['argprs'], {}), '(argprs)\n', (10785, 10793), False, 'from ginga.misc import log, Bunch\n'), ((1870, 1881), 'time.time', 'time.time', ([], {}), '()\n', (1879, 1881), False, 'import time\n'), ((3267, 3352), 'ginga.plot.data_source.update_plot_from_source', 'dsp.update_plot_from_source', (['fdg.data_src', 'fdg.data_src.plot'], {'update_limits': '(True)'}), '(fdg.data_src, 
fdg.data_src.plot, update_limits=True\n )\n', (3294, 3352), True, 'import ginga.plot.data_source as dsp\n'), ((4928, 4970), 'numpy.zeros', 'np.zeros', (['(src.num_pts, 2)'], {'dtype': 'np.float'}), '((src.num_pts, 2), dtype=np.float)\n', (4936, 4970), True, 'import numpy as np\n'), ((4986, 5044), 'ginga.plot.data_source.XYDataSource', 'dsp.XYDataSource', (['buf'], {'none_for_empty': '(True)', 'overwrite': '(True)'}), '(buf, none_for_empty=True, overwrite=True)\n', (5002, 5044), True, 'import ginga.plot.data_source as dsp\n'), ((5137, 5196), 'ginga.plot.data_source.update_plot_from_source', 'dsp.update_plot_from_source', (['dsrc', 'psrc'], {'update_limits': '(True)'}), '(dsrc, psrc, update_limits=True)\n', (5164, 5196), True, 'import ginga.plot.data_source as dsp\n'), ((6661, 6671), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6669, 6671), False, 'import sys\n'), ((9010, 9027), 'ginga.gw.Widgets.Label', 'Widgets.Label', (['""""""'], {}), "('')\n", (9023, 9027), False, 'from ginga.gw import Widgets\n'), ((10935, 10965), 'pdb.run', 'pdb.run', (['"""main(options, args)"""'], {}), "('main(options, args)')\n", (10942, 10965), False, 'import pdb\n'), ((2293, 2316), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (2310, 2316), True, 'import numpy as np\n'), ((11099, 11133), 'profile.run', 'profile.run', (['"""main(options, args)"""'], {}), "('main(options, args)')\n", (11110, 11133), False, 'import profile\n'), ((1938, 1963), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (1961, 1963), True, 'import numpy as np\n'), ((2917, 2941), 'numpy.arange', 'np.arange', (['(t - N)', 't', '(1.0)'], {}), '(t - N, t, 1.0)\n', (2926, 2941), True, 'import numpy as np\n')]
|
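FakeData above produces its series with a bounded random walk: each call to generate_point() moves the value up or down by at most tv_delta, clamps it to y_range, and the direction flips whenever a randomly scheduled deadline passes. A standalone numpy sketch of the same idea (illustrative only; a fixed flip probability stands in for the timed deadlines used by the class):

import numpy as np

def bounded_walk(n, y_range=(0.0, 50.0), delta=0.1, flip_p=0.05, seed=None):
    rng = np.random.default_rng(seed)
    lo, hi = y_range
    y = 0.5 * (lo + hi)          # start mid-range, as init_points() does
    going_up = True
    out = np.empty(n)
    for i in range(n):
        if going_up:
            y = rng.uniform(y, min(y + delta, hi))
        else:
            y = rng.uniform(max(y - delta, lo), y)
        if rng.random() < flip_p:  # FakeData flips at random time deadlines instead
            going_up = not going_up
        out[i] = y
    return out

print(bounded_walk(10, seed=1))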
#-*- coding:utf-8 -*-
from __future__ import print_function
import os,sys,sip,time
from datetime import datetime,timedelta
from qtpy.QtWidgets import QTreeWidgetItem,QMenu,QApplication,QAction,QMainWindow
from qtpy import QtGui,QtWidgets
from qtpy.QtCore import Qt,QUrl,QDate
from Graph import graphpage
from layout import Ui_MainWindow
from pandas import DataFrame as df
import pandas as pd
import tushare as ts
import pickle
import numpy as np
list1 = []
class MyUi(QMainWindow):
def __init__(self):
super(MyUi, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
cwd = os.getcwd()
cwd = str(cwd)
if os.path.isfile(cwd+"/time"):
with open("time","rb") as outfile:#reads current time
history = pickle.load(outfile)
if (datetime.now()-history).total_seconds()<43200: #measures if time elapse>12 hours
print("Less than 12 hours. Loading previously saved Pickle...")
else:
print("More than 12 hours. Updating Pickle...")
data = ts.get_industry_classified()
with open("class","wb+") as outfile:
pickle.dump(data,outfile)
now = datetime.now()
with open("time", "wb+") as outfile: #update time
pickle.dump(now, outfile)
else:
print("No Pickle found!") #If this is first time using tuchart in this directory
data = df()
data = ts.get_industry_classified()
with open('class', 'wb+') as outfile: #records pickle
pickle.dump(data, outfile)
now = datetime.now()
with open("time", "wb+") as outfile:
pickle.dump(now,outfile)
with open("class", "rb") as infile: # reads current time
series = pickle.load(infile)
#series = pd.read_json(cwd + "\\class.json")
#series = ts.get_industry_classified()
series = pd.DataFrame(series)
curdate = time.strftime("%Y/%m/%d") # gets current time to put into dateedit
curdateQ = QDate.fromString(curdate,"yyyy/MM/dd")
dateobj = datetime.strptime(curdate, "%Y/%m/%d")#converts to datetime object
past = dateobj - timedelta(days = 7) #minus a week to start date
pasttime = datetime.strftime(past, "%Y/%m/%d")
pastQ = QDate.fromString(pasttime,"yyyy/MM/dd") #convert to qtime so that widget accepts the values
pastL = dateobj - timedelta(days=30) # minus a month to start date
pasttimeL = datetime.strftime(pastL, "%Y/%m/%d")
pastQL = QDate.fromString(pasttimeL, "yyyy/MM/dd")
np_indexes = np.array([['sh', '上证指数', '大盘指数'],
['sz', '深证成指', '大盘指数'],
['hs300', '沪深300指数', '大盘指数'],
['sz50', '上证50', '大盘指数'],
['zxb', '中小板', '大盘指数'],
['cyb', '创业板', '大盘指数']])
indexes = df(data=np_indexes,
index=range(5000, 5006),
columns=["code", "name", "c_name"])
series = indexes.append(series)
list1_bfr = series["c_name"].tolist() #Get industry categories. Filters out redundant ones
list1 = list(set(list1_bfr))
list1.sort(key=list1_bfr.index)
#w = database()
#zsparent = QTreeWidgetItem(self.ui.treeWidget)
#zsparent.setText(0,"股票指数")
#zsnames =["上证指数-sh","深圳成指-sz","沪深300指数-hs300","上证50-"]
self.init_treeWidget(list1,series)
self.ui.treeWidget.setContextMenuPolicy(Qt.CustomContextMenu)
self.ui.treeWidget.customContextMenuRequested.connect(self.openMenu)
#self.ui.webView.setGeometry(QtCore.QRect(0, 30,1550, 861))
file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "render.html")) #path to read html file
local_url = QUrl.fromLocalFile(file_path)
self.ui.webView.load(local_url)
#self.ui.commandLinkButton.setFixedSize(50, 50)
self.ui.search_btn.clicked.connect(lambda: self.search_comp(series))
self.ui.init_code_btn.clicked.connect(lambda: self.code_sort_tree(series))
self.ui.init_category_btn.clicked.connect(lambda: self.init_treeWidget(list1, series))
self.ui.commandLinkButton.clicked.connect(self.classify) #when the arrow button is clicked, trigger events
#self.ui.commandLinkButton.clicked.connect(lambda action: self.classify(action, self.ui.treewidget))
# QSizePolicy
try:
retain_size = self.ui.dateEdit_2.sizePolicy()
retain_size.setRetainSizeWhenHidden(True)
self.ui.dateEdit_2.setSizePolicy(retain_size)
retain_size = self.ui.comboBox.sizePolicy()
retain_size.setRetainSizeWhenHidden(True)
self.ui.comboBox.setSizePolicy(retain_size)
retain_size = self.ui.label_2.sizePolicy()
retain_size.setRetainSizeWhenHidden(True)
self.ui.label_2.setSizePolicy(retain_size)
except AttributeError:
print("No PYQT5 Binding! Widgets might be deformed")
self.ui.dateEdit.setDate(pastQL)
self.ui.dateEdit_2.setDate(curdateQ)#populate widgets
self.ui.dateEdit.setCalendarPopup(True)
self.ui.dateEdit_2.setCalendarPopup(True)
self.ui.comboBox.addItems(["D", "W", "M", "5", "15", "30", "60"])
self.ui.treeWidget_2.setDragDropMode(self.ui.treeWidget_2.InternalMove)
self.ui.treeWidget_2.setContextMenuPolicy(Qt.CustomContextMenu)
self.ui.treeWidget_2.customContextMenuRequested.connect(self.openWidgetMenu)
#self.ui.toolbutton.clicked.connect(lambda action: self.graphmerge(action, CombineKeyword))
self.ui.combobox.currentIndexChanged.connect(lambda: self.modifycombo(pastQL,pastQ))
def init_treeWidget(self, list1, series):
self.ui.treeWidget.clear()
for j in list1:
parent = QTreeWidgetItem(self.ui.treeWidget) #populate treewidget with names
parent.setText(0,j)
var = series.loc[series["c_name"] == j]
list2 = var["code"].tolist()
name = var["name"].tolist()
#var = showcollection(i) #Display database items
for idx,val in enumerate(list2):
child = QTreeWidgetItem(parent)
child.setText(0, name[idx]+"-"+val)
#for i in Drag:
#grandson = QTreeWidgetItem(child) #Commented out because increases program response time
#grandson.setText(0, i)
#self.ui.treeWidget.itemDoubleClicked.connect(self.onClickItem) #Display Collection items
def code_sort_tree(self, companies):
self.ui.treeWidget.clear()
sorted_comps = companies.sort_values(["code"])
code_list = sorted_comps["code"].tolist()
name_list = sorted_comps["name"].tolist()
shares_parent = QTreeWidgetItem(self.ui.treeWidget)
shares_parent.setText(0, "个股行情")
for idx, val in enumerate(code_list):
child = QTreeWidgetItem(shares_parent)
child.setText(0, name_list[idx] + "-" + str(val))
self.ui.treeWidget.expandToDepth(0)
def search_comp(self, companies):
self.ui.treeWidget.clear()
text = self.ui.search_lineEdit.text()
filtered_codes = companies[companies['code'].str.contains(text)]
filtered_names = companies[companies['name'].str.contains(text)]
filtered_comps = filtered_codes.append(filtered_names)
code_list = filtered_comps["code"].tolist()
name_list = filtered_comps["name"].tolist()
parent = QTreeWidgetItem(self.ui.treeWidget)
parent.setText(0, "搜索结果")
for idx, val in enumerate(code_list):
child = QTreeWidgetItem(parent)
child.setText(0, name_list[idx] + "-" + str(val))
self.ui.treeWidget.expandToDepth(0)
def modifycombo(self,pastQL,pastQ):
if self.ui.combobox.currentText()=="复权": #if 复权 is selected, clear all existing queries to avoid value conflict
self.ui.label_2.show()
self.ui.dateEdit_2.show()
self.ui.dateEdit.setDate(pastQL)
self.ui.interval_label.show()
self.ui.comboBox.show()
self.ui.comboBox.clear()
self.ui.comboBox.addItems(["hfq", "qfq"])
self.ui.treeWidget_2.clear()
if self.ui.combobox.currentText()=="K线":
self.ui.label_2.show()
self.ui.dateEdit_2.show()
self.ui.dateEdit.setDate(pastQL)
self.ui.interval_label.show()
self.ui.comboBox.show()
self.ui.comboBox.clear()
self.ui.comboBox.addItems(["D", "W", "M", "5", "15", "30", "60"])#same as above
self.ui.treeWidget_2.clear()
if self.ui.combobox.currentText()=="分笔数据":
self.ui.interval_label.hide()
self.ui.comboBox.hide()
self.ui.label_2.hide()
self.ui.dateEdit_2.hide()
self.ui.dateEdit.setDate(pastQ)
self.ui.treeWidget_2.clear()
if self.ui.combobox.currentText()=="历史分钟":
self.ui.interval_label.hide()
self.ui.comboBox.show()
self.ui.comboBox.clear()
self.ui.comboBox.addItems(["1min","5min","15min","30min","60min"])
self.ui.label_2.hide()
self.ui.dateEdit_2.hide()
self.ui.dateEdit.setDate(pastQ)
self.ui.treeWidget_2.clear()
if self.ui.combobox.currentText()==u"十大股东":
self.ui.interval_label.hide()
self.ui.comboBox.hide()
self.ui.label_2.hide()
self.ui.dateEdit_2.hide()
self.ui.treeWidget_2.clear()
def openMenu(self,position):
indexes = self.ui.treeWidget.selectedIndexes()
item = self.ui.treeWidget.itemAt(position)
db_origin = ""
#if item.parent():
# db_origin = item.parent().text(0)
collec = item.text(0)
if len(indexes) > 0:
level = 0
index = indexes[0]
while index.parent().isValid():
index = index.parent()
level = level + 1
menu = QMenu()
#print((collec, db_origin))
if level ==0:
pass
else:
#keyarray = GetKeys(collec, db_origin)
#if "Open" in keyarray:
if self.ui.combobox.currentText()==u"K线":
menu.addAction(QAction("Kline", menu, checkable=True))
menu.addAction(QAction("Open", menu, checkable=True))
menu.addAction(QAction("Close", menu, checkable=True))#open up different menu with different kind of graphs
menu.addAction(QAction("High", menu, checkable=True))
menu.addAction(QAction("Low", menu, checkable=True))
menu.addAction(QAction("Volume", menu, checkable=True))
#menu.addAction(QAction("P_change", menu, checkable=True))
#menu.addAction(QAction("Turnover",menu,checkable=True))
if self.ui.combobox.currentText()==u"复权":
menu.addAction(QAction("Kline", menu, checkable=True))
menu.addAction(QAction("Open", menu, checkable=True))
menu.addAction(QAction("Close", menu, checkable=True))
menu.addAction(QAction("High", menu, checkable=True))
menu.addAction(QAction("Low", menu, checkable=True))
menu.addAction(QAction("Volume", menu, checkable=True))
menu.addAction(QAction("Amount", menu, checkable=True))
if self.ui.combobox.currentText()==u"分笔数据":
menu.addAction(QAction("分笔", menu, checkable=True))
if self.ui.combobox.currentText()==u"历史分钟":
menu.addAction(QAction("Kline", menu, checkable=True))
menu.addAction(QAction("Open", menu, checkable=True))
menu.addAction(QAction("Close", menu, checkable=True))
menu.addAction(QAction("High", menu, checkable=True))
menu.addAction(QAction("Low", menu, checkable=True))
menu.addAction(QAction("Volume", menu, checkable=True))
menu.addAction(QAction("Amount", menu, checkable=True))
if self.ui.combobox.currentText()==u"十大股东":
menu.addAction(QAction("季度饼图", menu, checkable=True))
#menu.addAction(QAction("持股比例", menu, checkable=True))
#for g in keyarray:
#menu.addAction(QAction(g, menu, checkable=True))
menu.triggered.connect(lambda action: self.methodSelected(action, collec))
menu.exec_(self.ui.treeWidget.viewport().mapToGlobal(position))
def methodSelected(self, action, collec):
# print(action.text()) #Choice
# if (self.ui.treewidget.count() == 5):
# self.ui.label.setText("Maximum number of queries")
# return
# self.ui.label.setText("")
Choice = action.text()
Stock = collec
# print(collec) #Stock Name
# print(db_origin) #DataBase name
# list1 = [self.tr(Stock+"-"+Choice+"-"+db_origin)]
# self.ui.treewidget.addItems(list1)
parent = QTreeWidgetItem(self.ui.treeWidget_2)
parent.setText(0, Stock+ "-" + Choice)
def openWidgetMenu(self,position):
indexes = self.ui.treeWidget_2.selectedIndexes()
item = self.ui.treeWidget_2.itemAt(position)
if item == None:
return
#item = self.ui.listWidget.itemAt(position)
if len(indexes) > 0:
menu = QMenu()
menu.addAction(QAction("Delete", menu,checkable = True))#This function is perhaps useless
#menu.triggered.connect(self.eraseItem)
item = self.ui.treeWidget_2.itemAt(position)
#collec = str(item.text())
menu.triggered.connect(lambda action: self.ListMethodSelected(action, item))
menu.exec_(self.ui.treeWidget_2.viewport().mapToGlobal(position))
def ListMethodSelected(self, action, item):
if action.text() == "Delete":
self.eraseItem()
if action.text() == "Combine":
global CombineKeyword
collec = str(item.text())
CombineKeyword.append(collec)#Useless function(maybe?)
list1 = [self.tr(collec)]
self.ui.listwidget.addItems(list1)
self.eraseItem()
def eraseItem(self):
        for x in self.ui.treeWidget_2.selectedItems(): #delete via the right-click menu
#item = self.ui.treewidget.takeItem(self.ui.treewidget.currentRow())
sip.delete(x)
#item.delete
def classify(self, folder):
startdate = self.ui.dateEdit.date()
startdate = startdate.toPyDate()
startdate = startdate.strftime("%Y/%m/%d")#converts date from dateedit to tushare readable date
enddate = self.ui.dateEdit_2.date()
enddate = enddate.toPyDate()
enddate = enddate.strftime("%Y/%m/%d")
option = self.ui.comboBox.currentText()
option = str(option)
#if (self.ui.treewidget) == 0:
#self.ui.label.setText("Need to select at least one query")
#return
root = self.ui.treeWidget_2.invisibleRootItem()# This is for iterating child items
child_count = root.childCount()
texts = []
if child_count==0:
return
for i in range(child_count):
item = root.child(i)
            text = item.text(0) #three parts: 'stock_name'+'-'+'code'+'-'+action
texts.append(text)
labels = [k for k in texts]
#items = ([x.encode("utf-8") for x in labels])
width = self.ui.webView.width()#give width and height of user's screen so that graphs can be generated with dynamic size
height = self.ui.webView.height()
mode_combo = self.ui.combobox.currentText()
        graphpage(labels, mode_combo, startdate, enddate, option, width, height) #labels: 复权 (adjusted), K线 (candlestick) or 分笔 (tick); option: hfq/qfq or 15, 30, D, etc
self.ui.webView.reload()#refreshes webengine
self.ui.webView.repaint()
self.ui.webView.update()
def graphmerge(self, combineKeyword):
sth = ""
for i in combineKeyword:
if sth == "":
sth = sth + i
else :
sth = sth + "\n" + "&"+ "-"+i
list1 = sth
return sth
global CombineKeyword
CombineKeyword = []
self.ui.listwidget.clear() #combine stuff so that different graphs can be drawn together
app = QApplication(sys.argv)
w = MyUi()
w.show()
sys.exit(app.exec_())
|
[
"qtpy.QtCore.QUrl.fromLocalFile",
"qtpy.QtCore.QDate.fromString",
"Graph.graphpage",
"numpy.array",
"qtpy.QtWidgets.QAction",
"tushare.get_industry_classified",
"datetime.timedelta",
"qtpy.QtWidgets.QTreeWidgetItem",
"pandas.DataFrame",
"layout.Ui_MainWindow",
"qtpy.QtWidgets.QMenu",
"pickle.load",
"os.path.isfile",
"os.path.dirname",
"qtpy.QtWidgets.QApplication",
"pickle.dump",
"datetime.datetime.strptime",
"time.strftime",
"sip.delete",
"os.getcwd",
"datetime.datetime.now",
"datetime.datetime.strftime"
] |
[((16921, 16943), 'qtpy.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (16933, 16943), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((564, 579), 'layout.Ui_MainWindow', 'Ui_MainWindow', ([], {}), '()\n', (577, 579), False, 'from layout import Ui_MainWindow\n'), ((624, 635), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (633, 635), False, 'import os, sys, sip, time\n'), ((671, 700), 'os.path.isfile', 'os.path.isfile', (["(cwd + '/time')"], {}), "(cwd + '/time')\n", (685, 700), False, 'import os, sys, sip, time\n'), ((2024, 2044), 'pandas.DataFrame', 'pd.DataFrame', (['series'], {}), '(series)\n', (2036, 2044), True, 'import pandas as pd\n'), ((2064, 2089), 'time.strftime', 'time.strftime', (['"""%Y/%m/%d"""'], {}), "('%Y/%m/%d')\n", (2077, 2089), False, 'import os, sys, sip, time\n'), ((2151, 2190), 'qtpy.QtCore.QDate.fromString', 'QDate.fromString', (['curdate', '"""yyyy/MM/dd"""'], {}), "(curdate, 'yyyy/MM/dd')\n", (2167, 2190), False, 'from qtpy.QtCore import Qt, QUrl, QDate\n'), ((2209, 2247), 'datetime.datetime.strptime', 'datetime.strptime', (['curdate', '"""%Y/%m/%d"""'], {}), "(curdate, '%Y/%m/%d')\n", (2226, 2247), False, 'from datetime import datetime, timedelta\n'), ((2370, 2405), 'datetime.datetime.strftime', 'datetime.strftime', (['past', '"""%Y/%m/%d"""'], {}), "(past, '%Y/%m/%d')\n", (2387, 2405), False, 'from datetime import datetime, timedelta\n'), ((2422, 2462), 'qtpy.QtCore.QDate.fromString', 'QDate.fromString', (['pasttime', '"""yyyy/MM/dd"""'], {}), "(pasttime, 'yyyy/MM/dd')\n", (2438, 2462), False, 'from qtpy.QtCore import Qt, QUrl, QDate\n'), ((2614, 2650), 'datetime.datetime.strftime', 'datetime.strftime', (['pastL', '"""%Y/%m/%d"""'], {}), "(pastL, '%Y/%m/%d')\n", (2631, 2650), False, 'from datetime import datetime, timedelta\n'), ((2668, 2709), 'qtpy.QtCore.QDate.fromString', 'QDate.fromString', (['pasttimeL', '"""yyyy/MM/dd"""'], {}), "(pasttimeL, 'yyyy/MM/dd')\n", (2684, 2709), False, 'from qtpy.QtCore import Qt, QUrl, QDate\n'), ((2733, 2904), 'numpy.array', 'np.array', (["[['sh', '上证指数', '大盘指数'], ['sz', '深证成指', '大盘指数'], ['hs300', '沪深300指数',\n '大盘指数'], ['sz50', '上证50', '大盘指数'], ['zxb', '中小板', '大盘指数'], ['cyb',\n '创业板', '大盘指数']]"], {}), "([['sh', '上证指数', '大盘指数'], ['sz', '深证成指', '大盘指数'], ['hs300',\n '沪深300指数', '大盘指数'], ['sz50', '上证50', '大盘指数'], ['zxb', '中小板', '大盘指数'], [\n 'cyb', '创业板', '大盘指数']])\n", (2741, 2904), True, 'import numpy as np\n'), ((3986, 4015), 'qtpy.QtCore.QUrl.fromLocalFile', 'QUrl.fromLocalFile', (['file_path'], {}), '(file_path)\n', (4004, 4015), False, 'from qtpy.QtCore import Qt, QUrl, QDate\n'), ((7045, 7080), 'qtpy.QtWidgets.QTreeWidgetItem', 'QTreeWidgetItem', (['self.ui.treeWidget'], {}), '(self.ui.treeWidget)\n', (7060, 7080), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((7775, 7810), 'qtpy.QtWidgets.QTreeWidgetItem', 'QTreeWidgetItem', (['self.ui.treeWidget'], {}), '(self.ui.treeWidget)\n', (7790, 7810), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((13557, 13594), 'qtpy.QtWidgets.QTreeWidgetItem', 'QTreeWidgetItem', (['self.ui.treeWidget_2'], {}), '(self.ui.treeWidget_2)\n', (13572, 13594), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((16264, 16336), 'Graph.graphpage', 'graphpage', (['labels', 'mode_combo', 'startdate', 'enddate', 'option', 'width', 'height'], {}), '(labels, mode_combo, startdate, 
enddate, option, width, height)\n', (16273, 16336), False, 'from Graph import graphpage\n'), ((1500, 1504), 'pandas.DataFrame', 'df', ([], {}), '()\n', (1502, 1504), True, 'from pandas import DataFrame as df\n'), ((1524, 1552), 'tushare.get_industry_classified', 'ts.get_industry_classified', ([], {}), '()\n', (1550, 1552), True, 'import tushare as ts\n'), ((1680, 1694), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1692, 1694), False, 'from datetime import datetime, timedelta\n'), ((1873, 1892), 'pickle.load', 'pickle.load', (['infile'], {}), '(infile)\n', (1884, 1892), False, 'import pickle\n'), ((2302, 2319), 'datetime.timedelta', 'timedelta', ([], {'days': '(7)'}), '(days=7)\n', (2311, 2319), False, 'from datetime import datetime, timedelta\n'), ((2544, 2562), 'datetime.timedelta', 'timedelta', ([], {'days': '(30)'}), '(days=30)\n', (2553, 2562), False, 'from datetime import datetime, timedelta\n'), ((6061, 6096), 'qtpy.QtWidgets.QTreeWidgetItem', 'QTreeWidgetItem', (['self.ui.treeWidget'], {}), '(self.ui.treeWidget)\n', (6076, 6096), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((7188, 7218), 'qtpy.QtWidgets.QTreeWidgetItem', 'QTreeWidgetItem', (['shares_parent'], {}), '(shares_parent)\n', (7203, 7218), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((7911, 7934), 'qtpy.QtWidgets.QTreeWidgetItem', 'QTreeWidgetItem', (['parent'], {}), '(parent)\n', (7926, 7934), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((10365, 10372), 'qtpy.QtWidgets.QMenu', 'QMenu', ([], {}), '()\n', (10370, 10372), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((13937, 13944), 'qtpy.QtWidgets.QMenu', 'QMenu', ([], {}), '()\n', (13942, 13944), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((14969, 14982), 'sip.delete', 'sip.delete', (['x'], {}), '(x)\n', (14979, 14982), False, 'import os, sys, sip, time\n'), ((792, 812), 'pickle.load', 'pickle.load', (['outfile'], {}), '(outfile)\n', (803, 812), False, 'import pickle\n'), ((1096, 1124), 'tushare.get_industry_classified', 'ts.get_industry_classified', ([], {}), '()\n', (1122, 1124), True, 'import tushare as ts\n'), ((1246, 1260), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1258, 1260), False, 'from datetime import datetime, timedelta\n'), ((1635, 1661), 'pickle.dump', 'pickle.dump', (['data', 'outfile'], {}), '(data, outfile)\n', (1646, 1661), False, 'import pickle\n'), ((1760, 1785), 'pickle.dump', 'pickle.dump', (['now', 'outfile'], {}), '(now, outfile)\n', (1771, 1785), False, 'import pickle\n'), ((3899, 3924), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3914, 3924), False, 'import os, sys, sip, time\n'), ((6425, 6448), 'qtpy.QtWidgets.QTreeWidgetItem', 'QTreeWidgetItem', (['parent'], {}), '(parent)\n', (6440, 6448), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((13972, 14011), 'qtpy.QtWidgets.QAction', 'QAction', (['"""Delete"""', 'menu'], {'checkable': '(True)'}), "('Delete', menu, checkable=True)\n", (13979, 14011), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((1198, 1224), 'pickle.dump', 'pickle.dump', (['data', 'outfile'], {}), '(data, outfile)\n', (1209, 1224), False, 'import pickle\n'), ((1347, 1372), 'pickle.dump', 
'pickle.dump', (['now', 'outfile'], {}), '(now, outfile)\n', (1358, 1372), False, 'import pickle\n'), ((10666, 10704), 'qtpy.QtWidgets.QAction', 'QAction', (['"""Kline"""', 'menu'], {'checkable': '(True)'}), "('Kline', menu, checkable=True)\n", (10673, 10704), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((10741, 10778), 'qtpy.QtWidgets.QAction', 'QAction', (['"""Open"""', 'menu'], {'checkable': '(True)'}), "('Open', menu, checkable=True)\n", (10748, 10778), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((10815, 10853), 'qtpy.QtWidgets.QAction', 'QAction', (['"""Close"""', 'menu'], {'checkable': '(True)'}), "('Close', menu, checkable=True)\n", (10822, 10853), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((10943, 10980), 'qtpy.QtWidgets.QAction', 'QAction', (['"""High"""', 'menu'], {'checkable': '(True)'}), "('High', menu, checkable=True)\n", (10950, 10980), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((11017, 11053), 'qtpy.QtWidgets.QAction', 'QAction', (['"""Low"""', 'menu'], {'checkable': '(True)'}), "('Low', menu, checkable=True)\n", (11024, 11053), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((11090, 11129), 'qtpy.QtWidgets.QAction', 'QAction', (['"""Volume"""', 'menu'], {'checkable': '(True)'}), "('Volume', menu, checkable=True)\n", (11097, 11129), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((11380, 11418), 'qtpy.QtWidgets.QAction', 'QAction', (['"""Kline"""', 'menu'], {'checkable': '(True)'}), "('Kline', menu, checkable=True)\n", (11387, 11418), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((11455, 11492), 'qtpy.QtWidgets.QAction', 'QAction', (['"""Open"""', 'menu'], {'checkable': '(True)'}), "('Open', menu, checkable=True)\n", (11462, 11492), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((11529, 11567), 'qtpy.QtWidgets.QAction', 'QAction', (['"""Close"""', 'menu'], {'checkable': '(True)'}), "('Close', menu, checkable=True)\n", (11536, 11567), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((11604, 11641), 'qtpy.QtWidgets.QAction', 'QAction', (['"""High"""', 'menu'], {'checkable': '(True)'}), "('High', menu, checkable=True)\n", (11611, 11641), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((11678, 11714), 'qtpy.QtWidgets.QAction', 'QAction', (['"""Low"""', 'menu'], {'checkable': '(True)'}), "('Low', menu, checkable=True)\n", (11685, 11714), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((11751, 11790), 'qtpy.QtWidgets.QAction', 'QAction', (['"""Volume"""', 'menu'], {'checkable': '(True)'}), "('Volume', menu, checkable=True)\n", (11758, 11790), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((11827, 11866), 'qtpy.QtWidgets.QAction', 'QAction', (['"""Amount"""', 'menu'], {'checkable': '(True)'}), "('Amount', menu, checkable=True)\n", (11834, 11866), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((11963, 11998), 'qtpy.QtWidgets.QAction', 'QAction', (['"""分笔"""', 'menu'], {'checkable': '(True)'}), 
"('分笔', menu, checkable=True)\n", (11970, 11998), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((12095, 12133), 'qtpy.QtWidgets.QAction', 'QAction', (['"""Kline"""', 'menu'], {'checkable': '(True)'}), "('Kline', menu, checkable=True)\n", (12102, 12133), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((12170, 12207), 'qtpy.QtWidgets.QAction', 'QAction', (['"""Open"""', 'menu'], {'checkable': '(True)'}), "('Open', menu, checkable=True)\n", (12177, 12207), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((12244, 12282), 'qtpy.QtWidgets.QAction', 'QAction', (['"""Close"""', 'menu'], {'checkable': '(True)'}), "('Close', menu, checkable=True)\n", (12251, 12282), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((12319, 12356), 'qtpy.QtWidgets.QAction', 'QAction', (['"""High"""', 'menu'], {'checkable': '(True)'}), "('High', menu, checkable=True)\n", (12326, 12356), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((12393, 12429), 'qtpy.QtWidgets.QAction', 'QAction', (['"""Low"""', 'menu'], {'checkable': '(True)'}), "('Low', menu, checkable=True)\n", (12400, 12429), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((12466, 12505), 'qtpy.QtWidgets.QAction', 'QAction', (['"""Volume"""', 'menu'], {'checkable': '(True)'}), "('Volume', menu, checkable=True)\n", (12473, 12505), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((12542, 12581), 'qtpy.QtWidgets.QAction', 'QAction', (['"""Amount"""', 'menu'], {'checkable': '(True)'}), "('Amount', menu, checkable=True)\n", (12549, 12581), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((12678, 12715), 'qtpy.QtWidgets.QAction', 'QAction', (['"""季度饼图"""', 'menu'], {'checkable': '(True)'}), "('季度饼图', menu, checkable=True)\n", (12685, 12715), False, 'from qtpy.QtWidgets import QTreeWidgetItem, QMenu, QApplication, QAction, QMainWindow\n'), ((829, 843), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (841, 843), False, 'from datetime import datetime, timedelta\n')]
|
#Function that computes the resulting matrix "C" after applying the convolution operation A*B
# EXERCISE - OCTOBER 28
# <NAME> A01377098
import numpy as np
def convolucion (A, B):
contaFil = 0
contaCol = 0
limiteFil = len(A)
limiteCol = len(A)
longitudB = len(B)
for x in range (len(C)):
for y in range (len(C)):
for n in range (contaFil, len(B)+contaFil):
if len(B)+contaFil > limiteFil:
break
for o in range (contaCol, len(B)+contaCol):
if len(B)+contaCol> limiteCol:
break
C[x][y] += A[n][o] * B[n-contaFil][o-contaCol]
if contaCol+longitudB<limiteCol:
contaCol += 1
elif contaCol+longitudB== limiteCol:
contaCol = 0
if contaFil+longitudB< limiteFil:
contaFil += 1
elif contaFil+longitudB == limiteFil:
return
Matriz = [[6, 9, 0, 3], [8, 4, 9, 1], [4, 1, 3, 12], [3, 2, 1, 100]]
Filtro = [[1, 0, 2], [5, 0, 9], [6, 2, 1]]
A = np.array(Matriz)
B = np.array(Filtro)
C = np.zeros((2,2))
convolucion(A,B)
print (C)
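# Optional sanity check (added as an illustration, not part of the original exercise):
# the loops above slide the filter over A without flipping it, i.e. they compute a "valid"
# cross-correlation. Assuming SciPy is available, the same 2x2 result can be reproduced with:
from scipy.signal import correlate2d
print(correlate2d(A, B, mode='valid'))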
|
[
"numpy.array",
"numpy.zeros"
] |
[((1153, 1169), 'numpy.array', 'np.array', (['Matriz'], {}), '(Matriz)\n', (1161, 1169), True, 'import numpy as np\n'), ((1174, 1190), 'numpy.array', 'np.array', (['Filtro'], {}), '(Filtro)\n', (1182, 1190), True, 'import numpy as np\n'), ((1196, 1212), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (1204, 1212), True, 'import numpy as np\n')]
|
"""
This module is used to generate correlation (R) and regression (b)
coefficients for relationships between the 2015 Census,
2018 Yale Climate Opinion Maps (YCOM) and land area datasets,
as well as p values for these relationships.
"""
import numpy as np
import pandas as pd
from scipy.stats import linregress
def calculate_stats_outputs(n_ycom, n_census, ycom_county, census):
"""
Function to estimate regression coefficients correlation between YCOM data variables and US
Census variables.
Inputs: n_ycom, a full list of names for ycom variables,
n_census, a full list of names for census variables
Outputs: a matrix of correlation values between each variable each dataset
"""
stats_outputs = np.zeros((len(n_ycom), len(n_census), 5))
for yind, yvar in enumerate(n_ycom):
for cind, cvar in enumerate(n_census):
ycom_notnull = ycom_county[yvar][census[cvar].notnull()]
census_notnull = census[cvar][census[cvar].notnull()]
stats_outputs[yind, cind, 0:5] = linregress(ycom_notnull, census_notnull)
return stats_outputs
def calculate_stats_outputs_standard(n_ycom, n_census, ycom_county, census):
"""
Function to estimate regression coefficients between YCOM data variables and US
Census variables on standardized variables
standardized_column = (column - mean(column)) / std(column)
Inputs: n_ycom, a full list of names for ycom variables,
n_census, a full list of names for census variables
Outputs: a matrix of correlation values between each variable each dataset
"""
stats_outputs_standard = np.zeros((len(n_ycom), len(n_census), 5))
for yind, yvar in enumerate(n_ycom):
for cind, cvar in enumerate(n_census):
ycom_notnull = ycom_county[yvar][census[cvar].notnull()]
census_notnull = census[cvar][census[cvar].notnull()]
#also doing calculations on standardized variables
census_standard = (census_notnull - np.mean(census_notnull)) / np.std(census_notnull)
stats_outputs_standard[yind, cind, 0:5] = linregress(ycom_notnull, census_standard)
return stats_outputs_standard
def get_regs_df(stats_outputs_standard, n_census, n_ycom):
"""
making dataframe of regression coefficients
these are kinda standardized -they show what % change in an opinion is given
a 1 standard deviation change in a census variable
"""
regs = pd.DataFrame(stats_outputs_standard[:, :, 0], columns=n_census, index=n_ycom)
return regs
def get_cors_df(stats_outputs, n_census, n_ycom):
"""
making dataframe of correlation coefficients
"""
cors = pd.DataFrame(stats_outputs[:, :, 2], columns=n_census, index=n_ycom)
return cors
def get_pvalues_df(stats_outputs, n_census, n_ycom):
"""
making dataframes of pvalues
"""
pval = pd.DataFrame(stats_outputs[:, :, 3], columns=n_census, index=n_ycom)
return pval
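# A minimal usage sketch (illustrative only: the variable names and random data below are
# made-up stand-ins, not part of the module) showing how the helpers fit together.
if __name__ == '__main__':
    example_ycom = ['opinion_a', 'opinion_b']
    example_census = ['census_x', 'census_y']
    ycom_df = pd.DataFrame(np.random.rand(50, 2), columns=example_ycom)
    census_df = pd.DataFrame(np.random.rand(50, 2), columns=example_census)
    raw = calculate_stats_outputs(example_ycom, example_census, ycom_df, census_df)
    std = calculate_stats_outputs_standard(example_ycom, example_census, ycom_df, census_df)
    print(get_regs_df(std, example_census, example_ycom))
    print(get_cors_df(raw, example_census, example_ycom))
    print(get_pvalues_df(raw, example_census, example_ycom))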
|
[
"pandas.DataFrame",
"scipy.stats.linregress",
"numpy.mean",
"numpy.std"
] |
[((2469, 2546), 'pandas.DataFrame', 'pd.DataFrame', (['stats_outputs_standard[:, :, 0]'], {'columns': 'n_census', 'index': 'n_ycom'}), '(stats_outputs_standard[:, :, 0], columns=n_census, index=n_ycom)\n', (2481, 2546), True, 'import pandas as pd\n'), ((2691, 2759), 'pandas.DataFrame', 'pd.DataFrame', (['stats_outputs[:, :, 2]'], {'columns': 'n_census', 'index': 'n_ycom'}), '(stats_outputs[:, :, 2], columns=n_census, index=n_ycom)\n', (2703, 2759), True, 'import pandas as pd\n'), ((2891, 2959), 'pandas.DataFrame', 'pd.DataFrame', (['stats_outputs[:, :, 3]'], {'columns': 'n_census', 'index': 'n_ycom'}), '(stats_outputs[:, :, 3], columns=n_census, index=n_ycom)\n', (2903, 2959), True, 'import pandas as pd\n'), ((1051, 1091), 'scipy.stats.linregress', 'linregress', (['ycom_notnull', 'census_notnull'], {}), '(ycom_notnull, census_notnull)\n', (1061, 1091), False, 'from scipy.stats import linregress\n'), ((2121, 2162), 'scipy.stats.linregress', 'linregress', (['ycom_notnull', 'census_standard'], {}), '(ycom_notnull, census_standard)\n', (2131, 2162), False, 'from scipy.stats import linregress\n'), ((2044, 2066), 'numpy.std', 'np.std', (['census_notnull'], {}), '(census_notnull)\n', (2050, 2066), True, 'import numpy as np\n'), ((2017, 2040), 'numpy.mean', 'np.mean', (['census_notnull'], {}), '(census_notnull)\n', (2024, 2040), True, 'import numpy as np\n')]
|
import numpy as np
from mindspore import context
import mindspore as ms
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore import Tensor
from mindspore.common.api import _executor
from tests.ut.python.ops.test_math_ops import VirtualLoss
from mindspore.parallel import set_algo_parameters
from mindspore.parallel._utils import _reset_op_id as reset_op_id
import re
class NetWithLoss(nn.Cell):
def __init__(self, network):
super(NetWithLoss, self).__init__()
self.loss = VirtualLoss()
self.network = network
def construct(self, x):
predict = self.network(x)
return self.loss(predict)
class Blockcell(nn.Cell):
def __init__(self):
super(Blockcell, self).__init__()
self.bn = nn.BatchNorm2d(64, momentum=0.9)
def construct(self, x):
out = self.bn(x)
return out
def getBlock():
return Blockcell()
def test_two_bn():
class Net(nn.Cell):
def __init__(self):
super().__init__()
self.block1 = getBlock()
self.block2 = getBlock()
self.relu = P.ReLU()
self.add = P.TensorAdd()
self.bias = Tensor(np.ones([64, 64]), dtype=ms.float32)
def construct(self, x):
out = self.block1(x)
out = self.relu(out)
out = self.add(out, self.bias)
out = self.block2(out)
return out
net = NetWithLoss(Net())
x = Tensor(np.ones([64, 64]), dtype=ms.float32)
context.set_context(save_graphs=True)
context.set_auto_parallel_context(device_num=8, global_rank=0)
context.set_auto_parallel_context(parallel_mode="auto_parallel")
set_algo_parameters(elementwise_op_strategy_follow=True)
reset_op_id()
_executor.compile(net, x, phase='train')
strategies = _executor._get_strategy(net)
assert len(strategies) == 4
for (k, v) in strategies.items():
if re.search('BatchNorm-op', k) is not None:
assert v == [[8, 1], [1], [1], [1], [1]]
elif re.search('TensorAdd-op', k) is not None:
assert v == [[8, 1], [8, 1]]
elif re.search('ReLU-op', k) is not None:
assert v == [[8, 1]]
|
[
"mindspore.common.api._executor._get_strategy",
"numpy.ones",
"mindspore.ops.operations.ReLU",
"mindspore.context.set_context",
"mindspore.nn.BatchNorm2d",
"mindspore.parallel.set_algo_parameters",
"tests.ut.python.ops.test_math_ops.VirtualLoss",
"mindspore.parallel._utils._reset_op_id",
"mindspore.common.api._executor.compile",
"mindspore.ops.operations.TensorAdd",
"mindspore.context.set_auto_parallel_context",
"re.search"
] |
[((1523, 1560), 'mindspore.context.set_context', 'context.set_context', ([], {'save_graphs': '(True)'}), '(save_graphs=True)\n', (1542, 1560), False, 'from mindspore import context\n'), ((1565, 1627), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'device_num': '(8)', 'global_rank': '(0)'}), '(device_num=8, global_rank=0)\n', (1598, 1627), False, 'from mindspore import context\n'), ((1632, 1696), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'parallel_mode': '"""auto_parallel"""'}), "(parallel_mode='auto_parallel')\n", (1665, 1696), False, 'from mindspore import context\n'), ((1701, 1757), 'mindspore.parallel.set_algo_parameters', 'set_algo_parameters', ([], {'elementwise_op_strategy_follow': '(True)'}), '(elementwise_op_strategy_follow=True)\n', (1720, 1757), False, 'from mindspore.parallel import set_algo_parameters\n'), ((1762, 1775), 'mindspore.parallel._utils._reset_op_id', 'reset_op_id', ([], {}), '()\n', (1773, 1775), True, 'from mindspore.parallel._utils import _reset_op_id as reset_op_id\n'), ((1781, 1821), 'mindspore.common.api._executor.compile', '_executor.compile', (['net', 'x'], {'phase': '"""train"""'}), "(net, x, phase='train')\n", (1798, 1821), False, 'from mindspore.common.api import _executor\n'), ((1839, 1867), 'mindspore.common.api._executor._get_strategy', '_executor._get_strategy', (['net'], {}), '(net)\n', (1862, 1867), False, 'from mindspore.common.api import _executor\n'), ((523, 536), 'tests.ut.python.ops.test_math_ops.VirtualLoss', 'VirtualLoss', ([], {}), '()\n', (534, 536), False, 'from tests.ut.python.ops.test_math_ops import VirtualLoss\n'), ((776, 808), 'mindspore.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {'momentum': '(0.9)'}), '(64, momentum=0.9)\n', (790, 808), True, 'import mindspore.nn as nn\n'), ((1482, 1499), 'numpy.ones', 'np.ones', (['[64, 64]'], {}), '([64, 64])\n', (1489, 1499), True, 'import numpy as np\n'), ((1123, 1131), 'mindspore.ops.operations.ReLU', 'P.ReLU', ([], {}), '()\n', (1129, 1131), True, 'from mindspore.ops import operations as P\n'), ((1155, 1168), 'mindspore.ops.operations.TensorAdd', 'P.TensorAdd', ([], {}), '()\n', (1166, 1168), True, 'from mindspore.ops import operations as P\n'), ((1950, 1978), 're.search', 're.search', (['"""BatchNorm-op"""', 'k'], {}), "('BatchNorm-op', k)\n", (1959, 1978), False, 'import re\n'), ((1200, 1217), 'numpy.ones', 'np.ones', (['[64, 64]'], {}), '([64, 64])\n', (1207, 1217), True, 'import numpy as np\n'), ((2058, 2086), 're.search', 're.search', (['"""TensorAdd-op"""', 'k'], {}), "('TensorAdd-op', k)\n", (2067, 2086), False, 'import re\n'), ((2154, 2177), 're.search', 're.search', (['"""ReLU-op"""', 'k'], {}), "('ReLU-op', k)\n", (2163, 2177), False, 'import re\n')]
|
import tensorflow as tf
import numpy as np
tf.set_random_seed(777)
data = np.loadtxt('data-04-zoo.csv', delimiter=',', dtype=np.float32)
x_data = data[:, 0:-1]
y_data = data[:, [-1]]
x = tf.placeholder(dtype=tf.float32, shape=[None, 16])
y = tf.placeholder(dtype=tf.int32, shape=[None, 1])
y_ont_hot = tf.one_hot(y, 7)
print(y_ont_hot.get_shape())
y_ont_hot = tf.reshape(y_ont_hot, [-1, 7])
print(y_ont_hot.get_shape())
w = tf.Variable(tf.random_normal([16, 7]), name='weight')
b = tf.Variable(tf.random_normal([7]), name='bias')
logit = tf.matmul(x, w) + b
hypothesis = tf.nn.softmax(logit)
cost_i = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logit, labels=y_ont_hot)
cost = tf.reduce_mean(cost_i)
train = tf.train.GradientDescentOptimizer(learning_rate=0.1).minimize(cost)
prediction = tf.argmax(hypothesis, axis=1)
accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, tf.argmax(y_ont_hot, axis=1)), dtype=tf.float32))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for step in range(2000):
_, _cost, _a, _p = sess.run([train, cost, accuracy, prediction], feed_dict={x:x_data, y:y_data})
if step % 100 == 0:
print("step:{}\nprediction:\n{}\n\ncost:{}\naccuracy:{}".format(step, _p, _cost, _a))
_p = sess.run(prediction, feed_dict={x:x_data})
for p, y in zip(_p, y_data):
print('prediction:{} target:{}'.format(p, y))
|
[
"tensorflow.one_hot",
"tensorflow.random_normal",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.global_variables_initializer",
"tensorflow.argmax",
"tensorflow.matmul",
"tensorflow.nn.softmax",
"tensorflow.reshape",
"tensorflow.reduce_mean",
"numpy.loadtxt",
"tensorflow.set_random_seed"
] |
[((43, 66), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(777)'], {}), '(777)\n', (61, 66), True, 'import tensorflow as tf\n'), ((75, 137), 'numpy.loadtxt', 'np.loadtxt', (['"""data-04-zoo.csv"""'], {'delimiter': '""","""', 'dtype': 'np.float32'}), "('data-04-zoo.csv', delimiter=',', dtype=np.float32)\n", (85, 137), True, 'import numpy as np\n'), ((189, 239), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None, 16]'}), '(dtype=tf.float32, shape=[None, 16])\n', (203, 239), True, 'import tensorflow as tf\n'), ((244, 291), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.int32', 'shape': '[None, 1]'}), '(dtype=tf.int32, shape=[None, 1])\n', (258, 291), True, 'import tensorflow as tf\n'), ((304, 320), 'tensorflow.one_hot', 'tf.one_hot', (['y', '(7)'], {}), '(y, 7)\n', (314, 320), True, 'import tensorflow as tf\n'), ((362, 392), 'tensorflow.reshape', 'tf.reshape', (['y_ont_hot', '[-1, 7]'], {}), '(y_ont_hot, [-1, 7])\n', (372, 392), True, 'import tensorflow as tf\n'), ((575, 595), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logit'], {}), '(logit)\n', (588, 595), True, 'import tensorflow as tf\n'), ((605, 679), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'logit', 'labels': 'y_ont_hot'}), '(logits=logit, labels=y_ont_hot)\n', (647, 679), True, 'import tensorflow as tf\n'), ((687, 709), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cost_i'], {}), '(cost_i)\n', (701, 709), True, 'import tensorflow as tf\n'), ((801, 830), 'tensorflow.argmax', 'tf.argmax', (['hypothesis'], {'axis': '(1)'}), '(hypothesis, axis=1)\n', (810, 830), True, 'import tensorflow as tf\n'), ((439, 464), 'tensorflow.random_normal', 'tf.random_normal', (['[16, 7]'], {}), '([16, 7])\n', (455, 464), True, 'import tensorflow as tf\n'), ((497, 518), 'tensorflow.random_normal', 'tf.random_normal', (['[7]'], {}), '([7])\n', (513, 518), True, 'import tensorflow as tf\n'), ((542, 557), 'tensorflow.matmul', 'tf.matmul', (['x', 'w'], {}), '(x, w)\n', (551, 557), True, 'import tensorflow as tf\n'), ((942, 954), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (952, 954), True, 'import tensorflow as tf\n'), ((719, 771), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.1)'}), '(learning_rate=0.1)\n', (752, 771), True, 'import tensorflow as tf\n'), ((977, 1010), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1008, 1010), True, 'import tensorflow as tf\n'), ((886, 914), 'tensorflow.argmax', 'tf.argmax', (['y_ont_hot'], {'axis': '(1)'}), '(y_ont_hot, axis=1)\n', (895, 914), True, 'import tensorflow as tf\n')]
|
from cmx import doc
import gym
import numpy as np
from env_wrappers.flat_env import FlatGoalEnv
from sawyer.misc import space2dict, obs2dict
def test_start():
doc @ """
# Sawyer Blocks Environment
## To-do
- [ ] automatically generate the environment table
We include the following domains in this test:
"""
doc.csv @ """
Name, goal_keys, Action Space, Observation Space
Reach-v0, "hand"
Push-v0, "obj_0"
PushMove-v0, "hand", "obj_0"
"""
def test_reach_reward():
doc @ """
## sawyer:Reach-v0
"""
with doc:
env = gym.make("sawyer:Reach-v0")
env.seed(100)
frames = []
obs = env.reset()
for step in range(100):
# gripper dimension does not matter
act = env.goal['hand'] - obs['hand']
obs, r, done, info = env.step(np.array([*act, 0]) * 10)
img = env.render('rgb')
frames.append(img)
if done:
break
else:
raise RuntimeError("Reach failed to terminate")
doc.video(frames, f"videos/reach.gif")
doc.flush()
def test_reach_flat_goal():
doc @ """
### Using FlatGoalEnv Wrapper
"""
with doc:
env = gym.make("sawyer:Reach-v0")
env.seed(100)
env = FlatGoalEnv(env)
obs = env.reset()
with doc("Make sure that the spec agrees with what it returns"):
doc.yaml(space2dict(env.observation_space))
with doc:
doc.yaml(obs2dict(obs))
def test_push():
doc @ """
## sawyer:Push-v0
"""
with doc:
env = gym.make("sawyer:Push-v0")
env.seed(100)
obs = env.reset()
with doc("Make sure that the spec agrees with what it returns"):
doc.yaml(space2dict(env.observation_space))
with doc:
doc.yaml(obs2dict(obs))
def test_push_flat_goal():
doc @ """
### with FlatGoalEnv Wrapper
"""
with doc:
env = gym.make("sawyer:Push-v0")
env.seed(100)
env = FlatGoalEnv(env, )
obs = env.reset()
with doc("Make sure that the spec agrees with what it returns"):
doc.yaml(space2dict(env.observation_space))
with doc:
doc.yaml(obs2dict(obs))
def test_push_move():
doc @ """
## sawyer:PushMove-v0 Domain
    This differs from the Push domain in that an
    additional goal key specifies the final
    position of the hand.
"""
with doc:
env = gym.make("sawyer:PushMove-v0")
env.seed(100)
env = FlatGoalEnv(env, )
obs = env.reset()
with doc("Make sure that the spec agrees with what it returns"):
doc.yaml(space2dict(env.observation_space))
with doc:
doc.yaml(obs2dict(obs))
def test_pick_place():
doc @ """
## sawyer:PickPlace-v0 Domain
"""
with doc:
env = gym.make("sawyer:PickPlace-v0")
env.seed(100)
env = FlatGoalEnv(env, )
obs = env.reset()
with doc("Make sure that the spec agrees with what it returns"):
doc.yaml(space2dict(env.observation_space))
with doc:
doc.yaml(obs2dict(obs))
def test_pick_place_reward():
doc @ """
## sawyer:PickPlace-v0
We set the goal_key to ['hand',] (the same as the reaching
task) to test the termination.
"""
with doc:
env = gym.make("sawyer:PickPlace-v0", goal_keys=["hand"])
env.seed(100)
frames = []
obs = env.reset()
for step in range(100):
act = env.goal['hand'] - obs['hand']
obs, r, done, info = env.step(np.array([*act, 0]) * 10)
img = env.render('rgb')
frames.append(img)
if done:
break
else:
# raise RuntimeError("Reach failed to terminate")
print('failed')
pass
doc.video(frames, f"videos/pick_place.gif")
doc.flush()
def test_block_distribution():
doc @ """
Show the distribution of the block after initialization
"""
with doc:
env = gym.make("sawyer:PickPlace-v0", width=240, height=160)
env.seed(100)
frames = []
for step in range(20):
obs = env.reset()
frames.append(env.render('rgb'))
doc.image(np.min(frames, axis=0))
doc.flush()
# def test_fetch():
# with doc:
# import gym
# env = gym.make("FetchReach-v1")
#
# assert env.compute_reward is not None
|
[
"env_wrappers.flat_env.FlatGoalEnv",
"cmx.doc",
"numpy.array",
"cmx.doc.video",
"sawyer.misc.space2dict",
"numpy.min",
"sawyer.misc.obs2dict",
"cmx.doc.flush",
"gym.make"
] |
[((1064, 1102), 'cmx.doc.video', 'doc.video', (['frames', 'f"""videos/reach.gif"""'], {}), "(frames, f'videos/reach.gif')\n", (1073, 1102), False, 'from cmx import doc\n'), ((1107, 1118), 'cmx.doc.flush', 'doc.flush', ([], {}), '()\n', (1116, 1118), False, 'from cmx import doc\n'), ((3801, 3844), 'cmx.doc.video', 'doc.video', (['frames', 'f"""videos/pick_place.gif"""'], {}), "(frames, f'videos/pick_place.gif')\n", (3810, 3844), False, 'from cmx import doc\n'), ((3849, 3860), 'cmx.doc.flush', 'doc.flush', ([], {}), '()\n', (3858, 3860), False, 'from cmx import doc\n'), ((4231, 4242), 'cmx.doc.flush', 'doc.flush', ([], {}), '()\n', (4240, 4242), False, 'from cmx import doc\n'), ((633, 660), 'gym.make', 'gym.make', (['"""sawyer:Reach-v0"""'], {}), "('sawyer:Reach-v0')\n", (641, 660), False, 'import gym\n'), ((1233, 1260), 'gym.make', 'gym.make', (['"""sawyer:Reach-v0"""'], {}), "('sawyer:Reach-v0')\n", (1241, 1260), False, 'import gym\n'), ((1297, 1313), 'env_wrappers.flat_env.FlatGoalEnv', 'FlatGoalEnv', (['env'], {}), '(env)\n', (1308, 1313), False, 'from env_wrappers.flat_env import FlatGoalEnv\n'), ((1349, 1407), 'cmx.doc', 'doc', (['"""Make sure that the spec agrees with what it returns"""'], {}), "('Make sure that the spec agrees with what it returns')\n", (1352, 1407), False, 'from cmx import doc\n'), ((1598, 1624), 'gym.make', 'gym.make', (['"""sawyer:Push-v0"""'], {}), "('sawyer:Push-v0')\n", (1606, 1624), False, 'import gym\n'), ((1682, 1740), 'cmx.doc', 'doc', (['"""Make sure that the spec agrees with what it returns"""'], {}), "('Make sure that the spec agrees with what it returns')\n", (1685, 1740), False, 'from cmx import doc\n'), ((1953, 1979), 'gym.make', 'gym.make', (['"""sawyer:Push-v0"""'], {}), "('sawyer:Push-v0')\n", (1961, 1979), False, 'import gym\n'), ((2016, 2032), 'env_wrappers.flat_env.FlatGoalEnv', 'FlatGoalEnv', (['env'], {}), '(env)\n', (2027, 2032), False, 'from env_wrappers.flat_env import FlatGoalEnv\n'), ((2071, 2129), 'cmx.doc', 'doc', (['"""Make sure that the spec agrees with what it returns"""'], {}), "('Make sure that the spec agrees with what it returns')\n", (2074, 2129), False, 'from cmx import doc\n'), ((2468, 2498), 'gym.make', 'gym.make', (['"""sawyer:PushMove-v0"""'], {}), "('sawyer:PushMove-v0')\n", (2476, 2498), False, 'import gym\n'), ((2535, 2551), 'env_wrappers.flat_env.FlatGoalEnv', 'FlatGoalEnv', (['env'], {}), '(env)\n', (2546, 2551), False, 'from env_wrappers.flat_env import FlatGoalEnv\n'), ((2590, 2648), 'cmx.doc', 'doc', (['"""Make sure that the spec agrees with what it returns"""'], {}), "('Make sure that the spec agrees with what it returns')\n", (2593, 2648), False, 'from cmx import doc\n'), ((2863, 2894), 'gym.make', 'gym.make', (['"""sawyer:PickPlace-v0"""'], {}), "('sawyer:PickPlace-v0')\n", (2871, 2894), False, 'import gym\n'), ((2931, 2947), 'env_wrappers.flat_env.FlatGoalEnv', 'FlatGoalEnv', (['env'], {}), '(env)\n', (2942, 2947), False, 'from env_wrappers.flat_env import FlatGoalEnv\n'), ((2986, 3044), 'cmx.doc', 'doc', (['"""Make sure that the spec agrees with what it returns"""'], {}), "('Make sure that the spec agrees with what it returns')\n", (2989, 3044), False, 'from cmx import doc\n'), ((3351, 3402), 'gym.make', 'gym.make', (['"""sawyer:PickPlace-v0"""'], {'goal_keys': "['hand']"}), "('sawyer:PickPlace-v0', goal_keys=['hand'])\n", (3359, 3402), False, 'import gym\n'), ((4004, 4058), 'gym.make', 'gym.make', (['"""sawyer:PickPlace-v0"""'], {'width': '(240)', 'height': '(160)'}), "('sawyer:PickPlace-v0', width=240, 
height=160)\n", (4012, 4058), False, 'import gym\n'), ((4203, 4225), 'numpy.min', 'np.min', (['frames'], {'axis': '(0)'}), '(frames, axis=0)\n', (4209, 4225), True, 'import numpy as np\n'), ((1426, 1459), 'sawyer.misc.space2dict', 'space2dict', (['env.observation_space'], {}), '(env.observation_space)\n', (1436, 1459), False, 'from sawyer.misc import space2dict, obs2dict\n'), ((1492, 1505), 'sawyer.misc.obs2dict', 'obs2dict', (['obs'], {}), '(obs)\n', (1500, 1505), False, 'from sawyer.misc import space2dict, obs2dict\n'), ((1759, 1792), 'sawyer.misc.space2dict', 'space2dict', (['env.observation_space'], {}), '(env.observation_space)\n', (1769, 1792), False, 'from sawyer.misc import space2dict, obs2dict\n'), ((1825, 1838), 'sawyer.misc.obs2dict', 'obs2dict', (['obs'], {}), '(obs)\n', (1833, 1838), False, 'from sawyer.misc import space2dict, obs2dict\n'), ((2148, 2181), 'sawyer.misc.space2dict', 'space2dict', (['env.observation_space'], {}), '(env.observation_space)\n', (2158, 2181), False, 'from sawyer.misc import space2dict, obs2dict\n'), ((2214, 2227), 'sawyer.misc.obs2dict', 'obs2dict', (['obs'], {}), '(obs)\n', (2222, 2227), False, 'from sawyer.misc import space2dict, obs2dict\n'), ((2667, 2700), 'sawyer.misc.space2dict', 'space2dict', (['env.observation_space'], {}), '(env.observation_space)\n', (2677, 2700), False, 'from sawyer.misc import space2dict, obs2dict\n'), ((2733, 2746), 'sawyer.misc.obs2dict', 'obs2dict', (['obs'], {}), '(obs)\n', (2741, 2746), False, 'from sawyer.misc import space2dict, obs2dict\n'), ((3063, 3096), 'sawyer.misc.space2dict', 'space2dict', (['env.observation_space'], {}), '(env.observation_space)\n', (3073, 3096), False, 'from sawyer.misc import space2dict, obs2dict\n'), ((3129, 3142), 'sawyer.misc.obs2dict', 'obs2dict', (['obs'], {}), '(obs)\n', (3137, 3142), False, 'from sawyer.misc import space2dict, obs2dict\n'), ((873, 892), 'numpy.array', 'np.array', (['[*act, 0]'], {}), '([*act, 0])\n', (881, 892), True, 'import numpy as np\n'), ((3571, 3590), 'numpy.array', 'np.array', (['[*act, 0]'], {}), '([*act, 0])\n', (3579, 3590), True, 'import numpy as np\n')]
|
import sys
import time
import numpy as np
import pandas as pa
import matplotlib.pyplot as plt
from sklearn.metrics import pairwise_distances
from scipy.sparse import csr_matrix
class Kmeans:
def __init__(self,data,k,geneNames,cellNames,cluster_label=None,seed=None):
self.data=data
self.k=k
self.geneNames=geneNames
self.cellNames=cellNames
self.seed=seed
self.centroids=None
self.cluster_assignment=None
self.cluster_label=cluster_label
self.heterogeneity=0.0
self.get_initial_centroids()
self.heterogeneities=None
def getCentroids(self):
return self.centroids
def getCluster_assignment(self):
return self.cluster_assignment
def getHeterogenity(self):
return self.heterogeneity
def getHetrogenities(self):
return self.heterogeneities
def get_initial_centroids(self):
'''Randomly choose k data points as initial centroids'''
if self.seed is not None: # useful for obtaining consistent results
np.random.seed(self.seed)
n = self.data.shape[0] # number of data points
# Pick K indices from range [0, N).
rand_indices = np.random.randint(0, n, self.k)
# Keep centroids as dense format, as many entries will be nonzero due to averaging.
# As long as at least one document in a cluster contains a word,
# it will carry a nonzero weight in the TF-IDF vector of the centroid.
centroids = self.data[rand_indices,:].toarray()
self.centroids=centroids
return centroids
def smart_initialize(self):
'''Use k-means++ to initialize a good set of centroids'''
if self.seed is not None: # useful for obtaining consistent results
np.random.seed(self.seed)
centroids = np.zeros((self.k, self.data.shape[1]))
# Randomly choose the first centroid.
# Since we have no prior knowledge, choose uniformly at random
idx = np.random.randint(self.data.shape[0])
centroids[0] = self.data[idx,:].toarray()
# Compute distances from the first centroid chosen to all the other data points
squared_distances = pairwise_distances(self.data, centroids[0:1], metric='euclidean').flatten()**2
for i in range(1, self.k):
# Choose the next centroid randomly, so that the probability for each data point to be chosen
# is directly proportional to its squared distance from the nearest centroid.
            # Roughly speaking, a new centroid should be as far from the other centroids as possible.
idx = np.random.choice(self.data.shape[0], 1, p=squared_distances/sum(squared_distances))
centroids[i] = self.data[idx,:].toarray()
# Now compute distances from the centroids to all data points
squared_distances = np.min(pairwise_distances(self.data, centroids[0:i+1], metric='euclidean')**2,axis=1)
self.centroids=centroids
return centroids
def assign_clusters(self):
# Compute distances between each data point and the set of centroids:
# Fill in the blank (RHS only)
distances_from_centroids = pairwise_distances(self.data,self.centroids,metric='euclidean')
# Compute cluster assignments for each data point:
# Fill in the blank (RHS only)
cluster_assignment = np.apply_along_axis(np.argmin, 1, distances_from_centroids)
self.cluster_assignment=cluster_assignment
return cluster_assignment
def revise_centroids(self):
new_centroids = []
for i in range(self.k):
# Select all data points that belong to cluster i. Fill in the blank (RHS only)
member_data_points = self.data[self.cluster_assignment==i]
# Compute the mean of the data points. Fill in the blank (RHS only)
centroid = member_data_points.mean(axis=0)
# Convert numpy.matrix type to numpy.ndarray type
centroid = centroid.A1
new_centroids.append(centroid)
new_centroids = np.array(new_centroids)
self.centroids=new_centroids
return new_centroids
def kmeans(self, maxiter, record_heterogeneity=None, verbose=False):
'''This function runs k-means on given data and initial set of centroids.
maxiter: maximum number of iterations to run.
record_heterogeneity: (optional) a list, to store the history of heterogeneity as function of iterations
if None, do not store the history.
verbose: if True, print how many data points changed their cluster labels in each iteration'''
centroids = self.centroids[:]
prev_cluster_assignment = None
for itr in range(int(maxiter)):
if verbose:
print(itr)
# 1. Make cluster assignments using nearest centroids
# YOUR CODE HERE
cluster_assignment = self.assign_clusters()
# 2. Compute a new centroid for each of the k clusters, averaging all data points assigned to that cluster.
# YOUR CODE HERE
centroids = self.revise_centroids()
# Check for convergence: if none of the assignments changed, stop
if prev_cluster_assignment is not None and \
(prev_cluster_assignment==self.cluster_assignment).all():
break
# Print number of new assignments
if prev_cluster_assignment is not None:
num_changed = np.sum(prev_cluster_assignment!=self.cluster_assignment)
if verbose:
print(' {0:5d} elements changed their cluster assignment.'.format(num_changed))
# Record heterogeneity convergence metric
if record_heterogeneity is not None:
# YOUR CODE HERE
                score = self.compute_heterogeneity()
record_heterogeneity.append(score)
prev_cluster_assignment = cluster_assignment[:]
self.centroids=centroids
self.cluster_assignment=cluster_assignment
return centroids, cluster_assignment
def kmeans_multiple_runs(self, maxiter, num_runs, seed_list=None, verbose=False):
heterogeneity = {}
min_heterogeneity_achieved = float('inf')
best_seed = None
final_centroids = None
final_cluster_assignment = None
for i in range(num_runs):
# Use UTC time if no seeds are provided
if seed_list is not None:
seed = seed_list[i]
np.random.seed(seed)
else:
seed = int(time.time())
np.random.seed(seed)
# Use k-means++ initialization
self.initial_centroids = self.smart_initialize()
# Run k-means
centroids, cluster_assignment = self.kmeans(maxiter, record_heterogeneity=None, verbose=False)
# To save time, compute heterogeneity only once in the end
heterogeneity[seed] = self.compute_heterogeneity()
if verbose:
print('seed={0:06d}, heterogeneity={1:.5f}'.format(seed, heterogeneity[seed]))
sys.stdout.flush()
# if current measurement of heterogeneity is lower than previously seen,
# update the minimum record of heterogeneity.
if heterogeneity[seed] < min_heterogeneity_achieved:
min_heterogeneity_achieved = heterogeneity[seed]
best_seed = seed
final_centroids = centroids
final_cluster_assignment = cluster_assignment
self.centroids=final_centroids
self.cluster_assignment=final_cluster_assignment
self.heterogeneities=heterogeneity
return final_centroids, final_cluster_assignment
def clusterEvaluation(self):
clustMaxDist={}
clustMinDist={}
clustMeanDist={}
for i in range(self.k):
binMaxDist=[]
binMinDist=[]
binMeanDist=[]
for j in np.concatenate(np.argwhere(self.cluster_assignment==i)):
dist=pairwise_distances(self.data[np.concatenate(np.argwhere(self.cluster_assignment==i))], self.data[j], metric='euclidean').flatten()
dist=dist**2
binMaxDist.append(np.max(dist))
binMinDist.append(np.min(dist))
binMeanDist.append(np.mean(dist))
clustMaxDist[i]=np.max(binMaxDist)
clustMinDist[i]=np.min(binMinDist)
clustMeanDist[i]=np.mean(binMeanDist)
plt.figure(figsize=(7,4.5))
        plt.plot(list(clustMaxDist.keys()), list(clustMaxDist.values()), linewidth=2, label='Maximum distance among clusters')
        plt.plot(list(clustMaxDist.keys()), list(clustMinDist.values()), linewidth=2, label='Minimum distance among clusters')
        plt.plot(list(clustMaxDist.keys()), list(clustMeanDist.values()), linewidth=2, label='average distance among clusters')
        plt.xlabel('Cluster number')
        plt.ylabel('Euclidean distance')
plt.legend(loc='best', prop={'size':15})
plt.rcParams.update({'font.size':16})
plt.tight_layout()
plt.show()
        return np.sum(list(clustMeanDist.values()))
def compute_heterogeneity(self):
heterogeneity = 0.0
for i in range(self.k):
# Select all data points that belong to cluster i. Fill in the blank (RHS only)
member_data_points = self.data[self.cluster_assignment==i, :]
if member_data_points.shape[0] > 0: # check if i-th cluster is non-empty
# Compute distances from centroid to data points (RHS only)
distances = pairwise_distances(member_data_points, [self.centroids[i]], metric='euclidean')
squared_distances = distances**2
heterogeneity += np.sum(squared_distances)
self.heterogeneity=heterogeneity
return heterogeneity
def plot_k_vs_heterogeneity(self, k_values, heterogeneity_values):
plt.figure(figsize=(7,4))
plt.plot(k_values, heterogeneity_values, linewidth=4)
plt.xlabel('K')
plt.ylabel('Heterogeneity')
plt.title('K vs. Heterogeneity')
plt.rcParams.update({'font.size': 16})
plt.tight_layout()
plt.show()
return None
def get_cluster_data(self, cluster_number):
return self.data[np.in1d(np.array(self.cluster_assignment), cluster_number),:], self.cellNames[np.in1d(np.array(self.cluster_assignment), cluster_number)]
def select_K(self):
cluster_centroids={}
cluster_assignments={}
hetroginity_score=float('inf')
delta_k={}
max_K_value=self.k
hetro_Per_K={}
deltaHetro=None
for i in range(max_K_value):
self.k=i+1
print("going for k=", i+1)
cluster_centroid, cluster_assignment=self.kmeans_multiple_runs(5,100)
hetro=self.compute_heterogeneity()
hetro_Per_K[i+1]=hetro
if hetro<hetroginity_score:
if hetroginity_score==float('inf'):
hetroginity_score=hetro
deltaHetro=0
else:
deltaHetro=hetroginity_score-hetro
hetroginity_score=hetro
cluster_centroids[i+1]=cluster_centroid
cluster_assignments[i+1]=cluster_assignment
delta_k[i+1]=deltaHetro
        threshold = sum(delta_k.values()) / (2 * len(delta_k))
        best_k = sum(1 for v in list(delta_k.values())[1:] if v > threshold)
print("best k value:", best_k, delta_k)
self.centroids=cluster_centroids[best_k]
self.cluster_assignment=cluster_assignments[best_k]
self.k=best_k
self.getVisualization(method="tsne")
        self.plot_k_vs_heterogeneity(list(hetro_Per_K.keys()), list(hetro_Per_K.values()))
return self.k
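# A minimal usage sketch (illustrative only: the random matrix and label arrays below are
# made-up stand-ins, not part of the class) showing a single k-means run on sparse data.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    expression = csr_matrix(rng.rand(30, 10))
    gene_names = np.array(['gene_%d' % i for i in range(10)])
    cell_names = np.array(['cell_%d' % i for i in range(30)])
    km = Kmeans(expression, 3, gene_names, cell_names, seed=0)
    centroids, assignment = km.kmeans(maxiter=20)
    print('heterogeneity:', km.compute_heterogeneity())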
|
[
"numpy.mean",
"sklearn.metrics.pairwise_distances",
"numpy.max",
"numpy.array",
"numpy.random.randint",
"numpy.apply_along_axis",
"numpy.zeros",
"numpy.sum",
"numpy.random.seed",
"numpy.min",
"numpy.argwhere",
"time.time"
] |
[((1207, 1238), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n', 'self.k'], {}), '(0, n, self.k)\n', (1224, 1238), True, 'import numpy as np\n'), ((1839, 1877), 'numpy.zeros', 'np.zeros', (['(self.k, self.data.shape[1])'], {}), '((self.k, self.data.shape[1]))\n', (1847, 1877), True, 'import numpy as np\n'), ((2009, 2046), 'numpy.random.randint', 'np.random.randint', (['self.data.shape[0]'], {}), '(self.data.shape[0])\n', (2026, 2046), True, 'import numpy as np\n'), ((3228, 3293), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['self.data', 'self.centroids'], {'metric': '"""euclidean"""'}), "(self.data, self.centroids, metric='euclidean')\n", (3246, 3293), False, 'from sklearn.metrics import pairwise_distances\n'), ((3424, 3483), 'numpy.apply_along_axis', 'np.apply_along_axis', (['np.argmin', '(1)', 'distances_from_centroids'], {}), '(np.argmin, 1, distances_from_centroids)\n', (3443, 3483), True, 'import numpy as np\n'), ((4145, 4168), 'numpy.array', 'np.array', (['new_centroids'], {}), '(new_centroids)\n', (4153, 4168), True, 'import numpy as np\n'), ((9452, 9473), 'numpy.sum', 'np.sum', (['clustMeanDist'], {}), '(clustMeanDist)\n', (9458, 9473), True, 'import numpy as np\n'), ((1050, 1075), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (1064, 1075), True, 'import numpy as np\n'), ((1793, 1818), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (1807, 1818), True, 'import numpy as np\n'), ((8723, 8741), 'numpy.max', 'np.max', (['binMaxDist'], {}), '(binMaxDist)\n', (8729, 8741), True, 'import numpy as np\n'), ((8770, 8788), 'numpy.min', 'np.min', (['binMinDist'], {}), '(binMinDist)\n', (8776, 8788), True, 'import numpy as np\n'), ((8818, 8838), 'numpy.mean', 'np.mean', (['binMeanDist'], {}), '(binMeanDist)\n', (8825, 8838), True, 'import numpy as np\n'), ((5623, 5681), 'numpy.sum', 'np.sum', (['(prev_cluster_assignment != self.cluster_assignment)'], {}), '(prev_cluster_assignment != self.cluster_assignment)\n', (5629, 5681), True, 'import numpy as np\n'), ((6751, 6771), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6765, 6771), True, 'import numpy as np\n'), ((6847, 6867), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6861, 6867), True, 'import numpy as np\n'), ((8326, 8367), 'numpy.argwhere', 'np.argwhere', (['(self.cluster_assignment == i)'], {}), '(self.cluster_assignment == i)\n', (8337, 8367), True, 'import numpy as np\n'), ((9937, 10016), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['member_data_points', '[self.centroids[i]]'], {'metric': '"""euclidean"""'}), "(member_data_points, [self.centroids[i]], metric='euclidean')\n", (9955, 10016), False, 'from sklearn.metrics import pairwise_distances\n'), ((10099, 10124), 'numpy.sum', 'np.sum', (['squared_distances'], {}), '(squared_distances)\n', (10105, 10124), True, 'import numpy as np\n'), ((2213, 2278), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['self.data', 'centroids[0:1]'], {'metric': '"""euclidean"""'}), "(self.data, centroids[0:1], metric='euclidean')\n", (2231, 2278), False, 'from sklearn.metrics import pairwise_distances\n'), ((2894, 2963), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['self.data', 'centroids[0:i + 1]'], {'metric': '"""euclidean"""'}), "(self.data, centroids[0:i + 1], metric='euclidean')\n", (2912, 2963), False, 'from sklearn.metrics import pairwise_distances\n'), ((6818, 6829), 'time.time', 'time.time', ([], {}), '()\n', (6827, 6829), 
False, 'import time\n'), ((8583, 8595), 'numpy.max', 'np.max', (['dist'], {}), '(dist)\n', (8589, 8595), True, 'import numpy as np\n'), ((8631, 8643), 'numpy.min', 'np.min', (['dist'], {}), '(dist)\n', (8637, 8643), True, 'import numpy as np\n'), ((8680, 8693), 'numpy.mean', 'np.mean', (['dist'], {}), '(dist)\n', (8687, 8693), True, 'import numpy as np\n'), ((10744, 10777), 'numpy.array', 'np.array', (['self.cluster_assignment'], {}), '(self.cluster_assignment)\n', (10752, 10777), True, 'import numpy as np\n'), ((10666, 10699), 'numpy.array', 'np.array', (['self.cluster_assignment'], {}), '(self.cluster_assignment)\n', (10674, 10699), True, 'import numpy as np\n'), ((8433, 8474), 'numpy.argwhere', 'np.argwhere', (['(self.cluster_assignment == i)'], {}), '(self.cluster_assignment == i)\n', (8444, 8474), True, 'import numpy as np\n')]
|
from flask import Flask, render_template, request
from keras.preprocessing.image import img_to_array, load_img
from keras.models import load_model
import cv2
import os
import numpy as np
from flask_cors import CORS, cross_origin
import tensorflow.keras
from PIL import Image, ImageOps
import base64
import json
import dlib
import imutils
from imutils import face_utils
handLabels = ["Stretched", "NotStretched"]
faceLabels = ["MildPain", "NoPain"]
facialLabels = ["No Face Droop", "Face Droop"]
model_face = tensorflow.keras.models.load_model('keras_face_model.h5')
model_hand = tensorflow.keras.models.load_model('keras_hand_model.h5')
model_facial_path = tensorflow.keras.models.load_model('Model.h5')
# Process image and predict label
def processImgFacial(IMG_PATH):
global shape
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
image = cv2.imread(IMG_PATH)
image = imutils.resize(image, width=500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 0)
coord = []
print(rects)
if len(rects) > 0:
for (i, rect) in enumerate(rects):
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
i = 0
for (x, y) in shape:
if i > 36:
coord.append(x)
coord.append(y)
i += 1
t2 = np.array([coord])
normalized_image_array = (t2.astype(np.float32) / 127.0) - 1
model_facial_path.load_weights('weight.h5')
prediction = model_facial_path.predict(normalized_image_array)
print("pred", prediction)
lastfacialLabel = facialLabels[np.argmax(np.squeeze(prediction[0]))]
print(lastfacialLabel)
confidence = np.max(np.squeeze(prediction))
writeList = [str(confidence), lastfacialLabel]
with open('facialdroop.txt', 'w') as filehandle:
json.dump(writeList, filehandle)
return lastfacialLabel
# Process image and predict label
def processImgFace(IMG_PATH):
# Disable scientific notation for clarity
np.set_printoptions(suppress=True)
# determined by the first position in the shape tuple, in this case 1.
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
# Replace this with the path to your image
image = Image.open(IMG_PATH)
# resize the image to a 224x224 with the same strategy as in TM2:
# resizing the image to be at least 224x224 and then cropping from the center
size = (224, 224)
image = ImageOps.fit(image, size, Image.ANTIALIAS)
# turn the image into a numpy array
image_array = np.asarray(image)
# Normalize the image
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
# run the inference
prediction = model_face.predict(data)
print(prediction)
lastpainLabel = faceLabels[np.argmax(np.squeeze(prediction))]
confidence = np.max(np.squeeze(prediction))
writeList = [str(confidence), lastpainLabel]
with open('face.txt', 'w') as filehandle:
json.dump(writeList, filehandle)
return lastpainLabel
# Process image and predict label
def processImgHand(IMG_PATH):
# Disable scientific notation for clarity
np.set_printoptions(suppress=True)
# Load the model
    # the number of images that can be fed in is determined by the first position in the shape tuple, in this case 1
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
# Replace this with the path to your image
image = Image.open(IMG_PATH)
# resize the image to a 224x224 with the same strategy as in TM2:
# resizing the image to be at least 224x224 and then cropping from the center
size = (224, 224)
image = ImageOps.fit(image, size, Image.ANTIALIAS)
# turn the image into a numpy array
image_array = np.asarray(image)
# Normalize the image
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# Load the image into the array
data[0] = normalized_image_array
# run the inference
prediction = model_hand.predict(data)
lasthandLabel = handLabels[np.argmax(np.squeeze(prediction))]
confidence = np.max(np.squeeze(prediction))
writeList = [str(confidence), lasthandLabel]
with open('hand.txt', 'w') as filehandle:
json.dump(writeList, filehandle)
return lasthandLabel
# Initializing flask application
app = Flask(__name__)
cors = CORS(app)
@app.route("/")
def main():
return """
Application is working
"""
# About page with render template
@app.route("/about")
def postsPage():
return render_template("about.html")
@app.route("/analysisreport", methods=["POST"])
def resultPage():
# open output file for reading
with open('face.txt', 'r') as filehandle:
faceResult = json.load(filehandle)
# open output file for reading
with open('hand.txt', 'r') as filehandle:
handResult = json.load(filehandle)
with open('facialdroop.txt', 'r') as filehandle:
FacialDroop = json.load(filehandle)
dictRecult = {}
dictRecult["hand_lbl"] = handResult[1]
dictRecult["face_lbl"] = faceResult[1]
dictRecult["facial_lbl"] = FacialDroop[1]
dictRecult["hand_acc"] = str(round(float(handResult[0]) * 100, 2))
dictRecult["face_acc"] = str(round(float(faceResult[0]) * 100, 2))
dictRecult["facial_acc"] = str(round(float(FacialDroop[0]) * 100, 2))
app_json = json.dumps(dictRecult)
return app_json
@app.route("/processfacial", methods=["POST"])
def processReqFacial():
if request.user_agent.browser is None:
data = request.files["img"]
data.save("temp.jpg")
else:
data = request.form["photo"]
data = data.split(",")[1]
buff = np.fromstring(base64.b64decode(data), np.uint8)
data = cv2.imdecode(buff, cv2.IMREAD_COLOR)
im = Image.fromarray(data)
im.save("temp.jpg")
resp = processImgFacial("temp.jpg")
return resp
@app.route("/processface", methods=["POST"])
def processReqFace():
if request.user_agent.browser is None:
data = request.files["img"]
data.save("temp.jpg")
else:
data = request.form["photo"]
data = data.split(",")[1]
buff = np.fromstring(base64.b64decode(data), np.uint8)
data = cv2.imdecode(buff, cv2.IMREAD_COLOR)
im = Image.fromarray(data)
im.save("temp.jpg")
resp = processImgFace("temp.jpg")
return resp
@app.route("/processhand", methods=["POST"])
def processReqHand():
if request.user_agent.browser is None:
data = request.files["img"]
data.save("temp.jpg")
else:
data = request.form["photo"]
data = data.split(",")[1]
buff = np.fromstring(base64.b64decode(data), np.uint8)
data = cv2.imdecode(buff, cv2.IMREAD_COLOR)
im = Image.fromarray(data)
im.save("temp.jpg")
resp = processImgHand("temp.jpg")
return resp
if __name__ == "__main__":
app.run(debug=True)
|
[
"flask.render_template",
"flask_cors.CORS",
"flask.Flask",
"PIL.ImageOps.fit",
"numpy.array",
"cv2.imdecode",
"json.dumps",
"numpy.asarray",
"dlib.shape_predictor",
"dlib.get_frontal_face_detector",
"numpy.squeeze",
"cv2.cvtColor",
"imutils.face_utils.shape_to_np",
"cv2.imread",
"numpy.set_printoptions",
"PIL.Image.fromarray",
"PIL.Image.open",
"base64.b64decode",
"imutils.resize",
"numpy.ndarray",
"json.load",
"json.dump"
] |
[((4465, 4480), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (4470, 4480), False, 'from flask import Flask, render_template, request\n'), ((4488, 4497), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (4492, 4497), False, 'from flask_cors import CORS, cross_origin\n'), ((807, 839), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (837, 839), False, 'import dlib\n'), ((856, 917), 'dlib.shape_predictor', 'dlib.shape_predictor', (['"""shape_predictor_68_face_landmarks.dat"""'], {}), "('shape_predictor_68_face_landmarks.dat')\n", (876, 917), False, 'import dlib\n'), ((931, 951), 'cv2.imread', 'cv2.imread', (['IMG_PATH'], {}), '(IMG_PATH)\n', (941, 951), False, 'import cv2\n'), ((964, 996), 'imutils.resize', 'imutils.resize', (['image'], {'width': '(500)'}), '(image, width=500)\n', (978, 996), False, 'import imutils\n'), ((1008, 1047), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1020, 1047), False, 'import cv2\n'), ((1451, 1468), 'numpy.array', 'np.array', (['[coord]'], {}), '([coord])\n', (1459, 1468), True, 'import numpy as np\n'), ((2119, 2153), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (2138, 2153), True, 'import numpy as np\n'), ((2240, 2292), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(1, 224, 224, 3)', 'dtype': 'np.float32'}), '(shape=(1, 224, 224, 3), dtype=np.float32)\n', (2250, 2292), True, 'import numpy as np\n'), ((2352, 2372), 'PIL.Image.open', 'Image.open', (['IMG_PATH'], {}), '(IMG_PATH)\n', (2362, 2372), False, 'from PIL import Image, ImageOps\n'), ((2559, 2601), 'PIL.ImageOps.fit', 'ImageOps.fit', (['image', 'size', 'Image.ANTIALIAS'], {}), '(image, size, Image.ANTIALIAS)\n', (2571, 2601), False, 'from PIL import Image, ImageOps\n'), ((2660, 2677), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (2670, 2677), True, 'import numpy as np\n'), ((3330, 3364), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (3349, 3364), True, 'import numpy as np\n'), ((3472, 3524), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(1, 224, 224, 3)', 'dtype': 'np.float32'}), '(shape=(1, 224, 224, 3), dtype=np.float32)\n', (3482, 3524), True, 'import numpy as np\n'), ((3584, 3604), 'PIL.Image.open', 'Image.open', (['IMG_PATH'], {}), '(IMG_PATH)\n', (3594, 3604), False, 'from PIL import Image, ImageOps\n'), ((3791, 3833), 'PIL.ImageOps.fit', 'ImageOps.fit', (['image', 'size', 'Image.ANTIALIAS'], {}), '(image, size, Image.ANTIALIAS)\n', (3803, 3833), False, 'from PIL import Image, ImageOps\n'), ((3892, 3909), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (3902, 3909), True, 'import numpy as np\n'), ((4667, 4696), 'flask.render_template', 'render_template', (['"""about.html"""'], {}), "('about.html')\n", (4682, 4696), False, 'from flask import Flask, render_template, request\n'), ((5495, 5517), 'json.dumps', 'json.dumps', (['dictRecult'], {}), '(dictRecult)\n', (5505, 5517), False, 'import json\n'), ((1806, 1828), 'numpy.squeeze', 'np.squeeze', (['prediction'], {}), '(prediction)\n', (1816, 1828), True, 'import numpy as np\n'), ((1943, 1975), 'json.dump', 'json.dump', (['writeList', 'filehandle'], {}), '(writeList, filehandle)\n', (1952, 1975), False, 'import json\n'), ((3029, 3051), 'numpy.squeeze', 'np.squeeze', (['prediction'], {}), '(prediction)\n', (3039, 3051), True, 'import numpy as np\n'), ((3156, 3188), 'json.dump', 'json.dump', (['writeList', 
'filehandle'], {}), '(writeList, filehandle)\n', (3165, 3188), False, 'import json\n'), ((4239, 4261), 'numpy.squeeze', 'np.squeeze', (['prediction'], {}), '(prediction)\n', (4249, 4261), True, 'import numpy as np\n'), ((4366, 4398), 'json.dump', 'json.dump', (['writeList', 'filehandle'], {}), '(writeList, filehandle)\n', (4375, 4398), False, 'import json\n'), ((4867, 4888), 'json.load', 'json.load', (['filehandle'], {}), '(filehandle)\n', (4876, 4888), False, 'import json\n'), ((4991, 5012), 'json.load', 'json.load', (['filehandle'], {}), '(filehandle)\n', (5000, 5012), False, 'import json\n'), ((5088, 5109), 'json.load', 'json.load', (['filehandle'], {}), '(filehandle)\n', (5097, 5109), False, 'import json\n'), ((5879, 5915), 'cv2.imdecode', 'cv2.imdecode', (['buff', 'cv2.IMREAD_COLOR'], {}), '(buff, cv2.IMREAD_COLOR)\n', (5891, 5915), False, 'import cv2\n'), ((5929, 5950), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (5944, 5950), False, 'from PIL import Image, ImageOps\n'), ((6372, 6408), 'cv2.imdecode', 'cv2.imdecode', (['buff', 'cv2.IMREAD_COLOR'], {}), '(buff, cv2.IMREAD_COLOR)\n', (6384, 6408), False, 'import cv2\n'), ((6422, 6443), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (6437, 6443), False, 'from PIL import Image, ImageOps\n'), ((6863, 6899), 'cv2.imdecode', 'cv2.imdecode', (['buff', 'cv2.IMREAD_COLOR'], {}), '(buff, cv2.IMREAD_COLOR)\n', (6875, 6899), False, 'import cv2\n'), ((6913, 6934), 'PIL.Image.fromarray', 'Image.fromarray', (['data'], {}), '(data)\n', (6928, 6934), False, 'from PIL import Image, ImageOps\n'), ((1238, 1267), 'imutils.face_utils.shape_to_np', 'face_utils.shape_to_np', (['shape'], {}), '(shape)\n', (1260, 1267), False, 'from imutils import face_utils\n'), ((1726, 1751), 'numpy.squeeze', 'np.squeeze', (['prediction[0]'], {}), '(prediction[0])\n', (1736, 1751), True, 'import numpy as np\n'), ((2980, 3002), 'numpy.squeeze', 'np.squeeze', (['prediction'], {}), '(prediction)\n', (2990, 3002), True, 'import numpy as np\n'), ((4190, 4212), 'numpy.squeeze', 'np.squeeze', (['prediction'], {}), '(prediction)\n', (4200, 4212), True, 'import numpy as np\n'), ((5830, 5852), 'base64.b64decode', 'base64.b64decode', (['data'], {}), '(data)\n', (5846, 5852), False, 'import base64\n'), ((6323, 6345), 'base64.b64decode', 'base64.b64decode', (['data'], {}), '(data)\n', (6339, 6345), False, 'import base64\n'), ((6814, 6836), 'base64.b64decode', 'base64.b64decode', (['data'], {}), '(data)\n', (6830, 6836), False, 'import base64\n')]
|
import pandas as pd
import nltk
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import os
nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet_ic')
nltk.download('genesis')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
from src.Preprocess import Utils
from src.Preprocess import Lexical_Features
from src.Preprocess import WordNet_Features
from src.Normalization import Normalization
# Set seed for all libraries
np.random.seed(123)
# To print the whole df
pd.options.display.width = None
pd.options.display.max_columns = None
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 100)
# Load the datasets
names = [
'PhrasIS_test_h_n',
'PhrasIS_test_h_p',
'PhrasIS_test_i_n',
'PhrasIS_test_i_p',
'PhrasIS_train_h_n',
'PhrasIS_train_h_p',
'PhrasIS_train_i_n',
'PhrasIS_train_i_p'
]
paths = [
'dataset/PhrasIS.test.headlines.negatives.txt',
'dataset/PhrasIS.test.headlines.positives.txt',
'dataset/PhrasIS.test.images.negatives.txt',
'dataset/PhrasIS.test.images.positives.txt',
'dataset/PhrasIS.train.headlines.negatives.txt',
'dataset/PhrasIS.train.headlines.positives.txt',
'dataset/PhrasIS.train.images.negatives.txt',
'dataset/PhrasIS.train.images.positives.txt',
]
# For development only
nrows=30
datasets = dict( {name : Utils.readDataset(path, nrows=nrows) for (name,path) in zip(names,paths)})
# Preprocess dataset
preprocess_pipeline = [
Utils.addColumnsLower,
Utils.addColumnsStrip,
Utils.addColumnsTokenized,
Utils.addColumnsNoPunctuations,
Utils.addColumnsPOStags,
Utils.addColumnsLemmatized,
Utils.addColumnsContentWords,
Utils.addColumnsStopWords
]
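# each addColumns* step is expected to mutate the DataFrame in place, adding its derived columns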
step=1
for name,dataset in datasets.items():
for func in preprocess_pipeline:
func(dataset)
print("Processing dataset {}/{}".format(step, len(datasets.keys())))
step+=1
# Compute lexical features
lexical_pipeline = [
Lexical_Features.addColumnsJaccardStripTokenized,
Lexical_Features.addColumnsJaccardContentWords,
Lexical_Features.addColumnsJaccardStopwords,
Lexical_Features.addColumnsLength,
Lexical_Features.addColumnsLeftRight,
Lexical_Features.addColumnsRightLeft
]
step=1
for name,dataset in datasets.items():
for func in lexical_pipeline:
func(dataset)
print("Processing lexical features {}/{}".format(step, len(datasets.keys())))
step+=1
# Compute wordnet features
wordnet_pipeline = [
WordNet_Features.addColumnsPathSimilarity,
WordNet_Features.addColumnsLchSimilarityNouns,
WordNet_Features.addColumnsLchSimilarityVerbs,
WordNet_Features.addColumnsJcnSimilarityBrownNouns,
WordNet_Features.addColumnsJcnSimilarityBrownVerbs,
WordNet_Features.addColumnsJcnSimilarityGenesisNouns,
WordNet_Features.addColumnsJcnSimilarityGenesisVerbs,
WordNet_Features.addColumnsWupSimilarity,
WordNet_Features.addColumnsPathSimilarityRoot,
WordNet_Features.addColumnsLchSimilarityNounsRoot,
WordNet_Features.addColumnsLchSimilarityVerbsRoot,
WordNet_Features.addColumnsWupSimilarityRoot,
WordNet_Features.addColumnsChunkMaximum,
WordNet_Features.addColumnsChunk1Specific,
WordNet_Features.addColumnsChunk2Specific,
WordNet_Features.addColumnsDifference,
WordNet_Features.addColumnsMinimumDifference,
WordNet_Features.addColumnsMaximumDifference
]
step=1
for name,dataset in datasets.items():
for func in wordnet_pipeline:
func(dataset)
print("Processing wordnet features {}/{}".format(step, len(datasets.keys())))
step+=1
# Normalization
normalization_pipeline= [
Normalization.miniMaxNormalization
#Normalization.standardNormalization
]
step=1
for name,dataset in datasets.items():
for func in normalization_pipeline:
func(dataset)
print("Normalizing {}/{}".format(step, len(datasets.keys())))
step += 1
# Save files
saveFolder ="dirty"
if not os.path.exists(saveFolder):
os.makedirs(saveFolder+"/bin")
os.makedirs(saveFolder+ "/csv")
for name, df in datasets.items():
Utils.saveDatasetCSV(df, os.path.join("dirty/csv", name + ".csv"))
Utils.saveDatasetPickle(df, os.path.join("dirty/bin" , name + ".pickle"))
|
[
"os.path.exists",
"os.makedirs",
"nltk.download",
"src.Preprocess.Utils.readDataset",
"os.path.join",
"pandas.set_option",
"numpy.random.seed"
] |
[((116, 142), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (129, 142), False, 'import nltk\n'), ((143, 165), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (156, 165), False, 'import nltk\n'), ((166, 193), 'nltk.download', 'nltk.download', (['"""wordnet_ic"""'], {}), "('wordnet_ic')\n", (179, 193), False, 'import nltk\n'), ((194, 218), 'nltk.download', 'nltk.download', (['"""genesis"""'], {}), "('genesis')\n", (207, 218), False, 'import nltk\n'), ((219, 262), 'nltk.download', 'nltk.download', (['"""averaged_perceptron_tagger"""'], {}), "('averaged_perceptron_tagger')\n", (232, 262), False, 'import nltk\n'), ((263, 287), 'nltk.download', 'nltk.download', (['"""wordnet"""'], {}), "('wordnet')\n", (276, 287), False, 'import nltk\n'), ((485, 504), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (499, 504), True, 'import numpy as np\n'), ((598, 636), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(100)'], {}), "('display.max_rows', 100)\n", (611, 636), True, 'import pandas as pd\n'), ((637, 678), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', '(100)'], {}), "('display.max_columns', 100)\n", (650, 678), True, 'import pandas as pd\n'), ((3992, 4018), 'os.path.exists', 'os.path.exists', (['saveFolder'], {}), '(saveFolder)\n', (4006, 4018), False, 'import os\n'), ((4024, 4056), 'os.makedirs', 'os.makedirs', (["(saveFolder + '/bin')"], {}), "(saveFolder + '/bin')\n", (4035, 4056), False, 'import os\n'), ((4059, 4091), 'os.makedirs', 'os.makedirs', (["(saveFolder + '/csv')"], {}), "(saveFolder + '/csv')\n", (4070, 4091), False, 'import os\n'), ((1386, 1422), 'src.Preprocess.Utils.readDataset', 'Utils.readDataset', (['path'], {'nrows': 'nrows'}), '(path, nrows=nrows)\n', (1403, 1422), False, 'from src.Preprocess import Utils\n'), ((4155, 4195), 'os.path.join', 'os.path.join', (['"""dirty/csv"""', "(name + '.csv')"], {}), "('dirty/csv', name + '.csv')\n", (4167, 4195), False, 'import os\n'), ((4229, 4272), 'os.path.join', 'os.path.join', (['"""dirty/bin"""', "(name + '.pickle')"], {}), "('dirty/bin', name + '.pickle')\n", (4241, 4272), False, 'import os\n')]
|
import numpy as np
from prml.nn.function import Function
class Product(Function):
def __init__(self, axis=None, keepdims=False):
if isinstance(axis, int):
axis = (axis,)
elif isinstance(axis, tuple):
axis = tuple(sorted(axis))
self.axis = axis
self.keepdims = keepdims
def _forward(self, x):
self.output = np.prod(x, axis=self.axis, keepdims=True)
if not self.keepdims:
return np.squeeze(self.output)
else:
return self.output
def backward(self, delta, x):
if not self.keepdims and self.axis is not None:
for ax in self.axis:
delta = np.expand_dims(delta, ax)
dx = delta * self.output / x
return dx
def prod(x, axis=None, keepdims=False):
"""
product of all element in the array
Parameters
----------
x : tensor_like
input array
axis : int, tuple of ints
axis or axes along which a product is performed
keepdims : bool
keep dimensionality or not
Returns
-------
product : tensor_like
product of all element
"""
return Product(axis=axis, keepdims=keepdims).forward(x)
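# minimal usage sketch (kept as a comment; assumes a plain numpy array input, as _forward above suggests):
#   prod(np.array([[1., 2.], [3., 4.]]), axis=0)   # column-wise products -> [3., 8.]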
|
[
"numpy.prod",
"numpy.expand_dims",
"numpy.squeeze"
] |
[((382, 423), 'numpy.prod', 'np.prod', (['x'], {'axis': 'self.axis', 'keepdims': '(True)'}), '(x, axis=self.axis, keepdims=True)\n', (389, 423), True, 'import numpy as np\n'), ((473, 496), 'numpy.squeeze', 'np.squeeze', (['self.output'], {}), '(self.output)\n', (483, 496), True, 'import numpy as np\n'), ((690, 715), 'numpy.expand_dims', 'np.expand_dims', (['delta', 'ax'], {}), '(delta, ax)\n', (704, 715), True, 'import numpy as np\n')]
|
import numpy as np
from scipy import sparse
from sklearn.model_selection import train_test_split
rows = [0,1,2,8]
cols = [1,0,4,8]
vals = [1,2,1,4]
A = sparse.coo_matrix((vals, (rows, cols)))
print(A.todense())
B = A.tocsr()
C = sparse.csr_matrix(np.array([0,1,0,0,2,0,0,0,1]).reshape(1,9))
print(B.shape,C.shape)
D = sparse.vstack([B,C])
print(D.todense())
## read and write
file_name = "sparse_matrix.npz"
sparse.save_npz(file_name, D)
E = sparse.load_npz(file_name)
X = E
y = np.random.randint(0,2,E.shape[0])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
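# the sparse matrix is passed directly to train_test_split alongside the dense label vector y; the returned X splits stay sparse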
|
[
"sklearn.model_selection.train_test_split",
"scipy.sparse.load_npz",
"numpy.array",
"numpy.random.randint",
"scipy.sparse.coo_matrix",
"scipy.sparse.save_npz",
"scipy.sparse.vstack"
] |
[((156, 195), 'scipy.sparse.coo_matrix', 'sparse.coo_matrix', (['(vals, (rows, cols))'], {}), '((vals, (rows, cols)))\n', (173, 195), False, 'from scipy import sparse\n'), ((325, 346), 'scipy.sparse.vstack', 'sparse.vstack', (['[B, C]'], {}), '([B, C])\n', (338, 346), False, 'from scipy import sparse\n'), ((417, 446), 'scipy.sparse.save_npz', 'sparse.save_npz', (['file_name', 'D'], {}), '(file_name, D)\n', (432, 446), False, 'from scipy import sparse\n'), ((451, 477), 'scipy.sparse.load_npz', 'sparse.load_npz', (['file_name'], {}), '(file_name)\n', (466, 477), False, 'from scipy import sparse\n'), ((490, 525), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'E.shape[0]'], {}), '(0, 2, E.shape[0])\n', (507, 525), True, 'import numpy as np\n'), ((559, 614), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(X, y, test_size=0.33, random_state=42)\n', (575, 614), False, 'from sklearn.model_selection import train_test_split\n'), ((254, 291), 'numpy.array', 'np.array', (['[0, 1, 0, 0, 2, 0, 0, 0, 1]'], {}), '([0, 1, 0, 0, 2, 0, 0, 0, 1])\n', (262, 291), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
# mass, spring constant, initial position and velocity
m = 1
k = 1
x = 0
v = 1
# Creating first two data using Euler's method
t_max = 0.2
dt = 0.1
t_array = np.arange(0, t_max, dt)
x_list = []
v_list = []
for t in t_array:
x_list.append(x)
v_list.append(v)
a = -k * x / m
x = x + dt * v
v = v + dt * a
# Verlet method
t_max = 10
dt = 0.1
t_array = np.arange(0.2, t_max, dt)
# the trajectory lists x_list and v_list already hold the first two Euler steps and are extended below
counter = 1
# Verlet integration
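# position update: x_new = 2*x[n] - x[n-1] + dt**2 * a, velocity from the finite difference v = (x_new - x[n]) / dt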
for t in t_array:
    # calculate the new position and velocity from the two most recent positions, then append them to the trajectories
a = -k * x / m
x = 2*x_list[counter]-x_list[counter-1]+(dt**2)*a
v = (1/dt)*(x-x_list[counter])
x_list.append(x)
v_list.append(v)
counter +=1
# convert trajectory lists into arrays, so they can be sliced (useful for Assignment 2)
x_array = np.array(x_list)
v_array = np.array(v_list)
t_array_plot = np.arange(0, t_max, dt)
# plot the position-time graph
plt.figure(1)
plt.clf()
plt.xlabel('time (s)')
plt.grid()
plt.plot(t_array_plot, x_array, label='x (m)')
plt.plot(t_array_plot, v_array, label='v (m/s)')
plt.legend()
plt.show()
|
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((210, 233), 'numpy.arange', 'np.arange', (['(0)', 't_max', 'dt'], {}), '(0, t_max, dt)\n', (219, 233), True, 'import numpy as np\n'), ((426, 451), 'numpy.arange', 'np.arange', (['(0.2)', 't_max', 'dt'], {}), '(0.2, t_max, dt)\n', (435, 451), True, 'import numpy as np\n'), ((905, 921), 'numpy.array', 'np.array', (['x_list'], {}), '(x_list)\n', (913, 921), True, 'import numpy as np\n'), ((932, 948), 'numpy.array', 'np.array', (['v_list'], {}), '(v_list)\n', (940, 948), True, 'import numpy as np\n'), ((966, 989), 'numpy.arange', 'np.arange', (['(0)', 't_max', 'dt'], {}), '(0, t_max, dt)\n', (975, 989), True, 'import numpy as np\n'), ((1021, 1034), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1031, 1034), True, 'import matplotlib.pyplot as plt\n'), ((1035, 1044), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1042, 1044), True, 'import matplotlib.pyplot as plt\n'), ((1045, 1067), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (s)"""'], {}), "('time (s)')\n", (1055, 1067), True, 'import matplotlib.pyplot as plt\n'), ((1068, 1078), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1076, 1078), True, 'import matplotlib.pyplot as plt\n'), ((1079, 1125), 'matplotlib.pyplot.plot', 'plt.plot', (['t_array_plot', 'x_array'], {'label': '"""x (m)"""'}), "(t_array_plot, x_array, label='x (m)')\n", (1087, 1125), True, 'import matplotlib.pyplot as plt\n'), ((1126, 1174), 'matplotlib.pyplot.plot', 'plt.plot', (['t_array_plot', 'v_array'], {'label': '"""v (m/s)"""'}), "(t_array_plot, v_array, label='v (m/s)')\n", (1134, 1174), True, 'import matplotlib.pyplot as plt\n'), ((1175, 1187), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1185, 1187), True, 'import matplotlib.pyplot as plt\n'), ((1188, 1198), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1196, 1198), True, 'import matplotlib.pyplot as plt\n')]
|
#!/usr/bin/env python3
import sys
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime as dt
import numpy as np
import argparse
global pred_map,sat_map,sen_map
pred_map = { 1 : '1 (constant) ',
2 : '1000-300hPa thickness',
3 : '200-50hPa thickness ',
4 : 'T_skin ',
5 : 'total column water ',
6 : '10-2hPa thickness ',
7 : '50-5hPa thickness ',
8 : 'surface wind speed ',
9 : 'nadir view angle ',
10 : 'nadir view angle **2 ',
11 : 'nadir view angle **3 ',
12 : 'nadir view angle **4 ',
13 : 'cos solar zen angle ',
14 : 'solar elevation ',
15 : 'TMI diurnal bias ',
16 : 'land or sea ice mask ',
17 : 'view angle (land) ',
18 : 'view angle **2 (land)',
19 : 'view angle **3 (land)',
20 : 'ln(rain rate+1) (1)',
21 : 'ln(rain rate+1)**2(1)',
22 : 'ln(rain rate+1)**3(1)',
23 : 'ln(rain rate+1) (2)',
24 : 'ln(rain rate+1)**2(2)',
25 : 'ln(rain rate+1)**3(2)',
26 : 'ascent rate (hPa/s) ',
27 : 'descent rate (hPa/s) ',
28 : 'land mask times winds',
29 : 'day/night ',
30 : 'thermal contrast ',
31 : 'Radiosonde T 100-850 ',
32 : 'Radiosonde T 30-200 ',
33 : 'Radiosonde T 0- 60 ',
34 : 'Radiosonde T s.elv**1',
35 : 'Radiosonde T s.elv**2',
36 : 'Radiosonde log press ',
37 : 'cos solar zen (full) ',
}
pred_cols = { 1 : 'black',
2 : 'red',
3 : 'orange',
4 : 'black',
5 : 'red',
6 : 'black',
7 : 'black',
8 : 'green',
9 : 'purple',
10 : 'magenta',
11 : 'blue',
12 : 'black',
13 : 'black',
14 : 'black',
15 : 'black',
16 : 'black',
17 : 'black',
18 : 'black',
19 : 'black',
20 : 'black',
21 : 'black',
22 : 'black',
23 : 'black',
24 : 'black',
25 : 'black',
26 : 'black',
27 : 'black',
28 : 'black',
29 : 'black',
30 : 'black',
31 : 'black',
32 : 'black',
33 : 'black',
34 : 'black',
35 : 'black',
36 : 'black',
37 : 'black',
}
sat_map = {
3 : "Metop-B",
4 : "Metop-A",
5 : "Metop-C",
70 : "METEOSAT-11",
206 : "NOAA-15",
207 : "NOAA-16",
209 : "NOAA-18",
223 : "NOAA-19",
225 : "NOAA-20",
523 : "FY-3D",
}
sen_map = {
0 : "HIRS",
1 : "MSU",
2 : "SSU",
3 : "AMSU-A",
4 : "AMSU-B",
11 : "AIRS",
15 : "MHS",
16 : "IASI",
19 : "ATMS",
27 : "CRIS",
21 : "SEVIRI",
29 : "SEVIRI HR",
34 : "SAPHIR",
72 : "MWTS2",
73 : "MWHS2",
}
#############################################################################################
def plot_varbc_pred_ts(datetime,data,labels,lloc,batch) :
# Grab meta data from the first row of data
nsat=int(data[1][1])
nsen=int(data[2][1])
nchn=int(data[3][1])
npred=int(data[5][1])
# Number of observations
    nobs=data[4]
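    # columns 6 onward appear to hold two blocks of npred predictor values (summed into totpred below) followed by npred predictor ids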
fig, ax = plt.subplots(figsize=(8.27,3.6))
title_string='VarBC Predictors for '+sat_map[nsat]+': ' +sen_map[nsen]+ ' Channel '+str(nchn)
plt.title(title_string)
ax2=ax.twinx()
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
# Only show ticks on the left and bottom spines
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
ax.xaxis.set_visible(True)
ax.yaxis.set_visible(True)
    ax.yaxis.grid() #horizontal grid lines (at the y ticks)
    ax.xaxis.grid() #vertical grid lines (at the x ticks)
dfmt = mdates.DateFormatter('%d')
ax.xaxis.set_major_formatter(dfmt)
ax2.xaxis.set_major_formatter(dfmt)
ax2.plot_date(x=datetime,y=data[4],fmt=':',color='lightgrey',label='nobs',linewidth=1)
for pred in range(1,npred+1):
totpred=np.add(data[pred+5],data[5+npred+pred])
label_entry=int(data[5+npred+npred+pred][1])
ax.plot_date(x=datetime,y=totpred,fmt='-',color=pred_cols[label_entry+1],label=pred_map[label_entry+1],linewidth=2)
ax.set_xlabel('Day',fontsize=10)
majdfmt = mdates.DateFormatter("%b\n%d")
ax.xaxis.set_major_formatter(majdfmt)
# Ensure a major tick for each week using (interval=1)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(interval=1))
ax.set_ylabel('Normalised Predictor Value',fontsize=10)
ax2.set_ylabel('Number of observations',fontsize=10)
#plot legend
ax.legend(loc=lloc,prop={'size':10},labelspacing=0,fancybox=False, frameon=True, ncol=1)
#defining display layout
plt.tight_layout()
figsuffix=str(nsat)+'_'+str(nsen)+'_'+str(nchn)
figname = 'varbc_pred_'+figsuffix+'.png'
plt.savefig(figname)
print("Output:",figname)
if not batch :
plt.show()
#############################################################################################
def read_data(filename):
data = {}
dtdata = []
print("Read:",filename)
with open(filename, "r") as a_file:
for line in a_file:
line = line.strip()
tmp = line.split()
dto = dt.datetime.strptime(tmp[0], '%Y%m%d:%H%M%S')
dtdata.append(dto)
for x in range(1,len(tmp)) :
if x not in data :
data[x] = []
data[x].append(float(tmp[x]))
a_file.close()
return dtdata, data
#############################################################################################
def main(argv) :
parser = argparse.ArgumentParser(description='Plot VarBC predictor time-series')
parser.add_argument('-i',dest="ipath",help='Input file name',default=None,required=True)
parser.add_argument('-l',dest="lloc",help='Legend location using matplotlib syntax',default=None,required=False)
parser.add_argument('-d',dest="labels",help='Optional experiment description',default=None,required=False)
parser.add_argument('-b',action="store_true",help='Batch mode, produce png only',default=False,required=False)
if len(argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if args.labels is None :
labels = None
else:
labels = args.labels
data = {}
ipath = args.ipath
tsdatetime,tsdata = read_data(ipath)
plot_varbc_pred_ts(tsdatetime,tsdata,args.labels,args.lloc,args.b)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
[
"matplotlib.pyplot.savefig",
"numpy.add",
"argparse.ArgumentParser",
"matplotlib.dates.WeekdayLocator",
"datetime.datetime.strptime",
"matplotlib.dates.DateFormatter",
"matplotlib.pyplot.tight_layout",
"sys.exit",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((3935, 3968), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8.27, 3.6)'}), '(figsize=(8.27, 3.6))\n', (3947, 3968), True, 'import matplotlib.pyplot as plt\n'), ((4067, 4090), 'matplotlib.pyplot.title', 'plt.title', (['title_string'], {}), '(title_string)\n', (4076, 4090), True, 'import matplotlib.pyplot as plt\n'), ((4490, 4516), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%d"""'], {}), "('%d')\n", (4510, 4516), True, 'import matplotlib.dates as mdates\n'), ((4987, 5017), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%b\n%d"""'], {}), "('%b\\n%d')\n", (5007, 5017), True, 'import matplotlib.dates as mdates\n'), ((5431, 5449), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5447, 5449), True, 'import matplotlib.pyplot as plt\n'), ((5546, 5566), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figname'], {}), '(figname)\n', (5557, 5566), True, 'import matplotlib.pyplot as plt\n'), ((6266, 6337), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot VarBC predictor time-series"""'}), "(description='Plot VarBC predictor time-series')\n", (6289, 6337), False, 'import argparse\n'), ((4727, 4773), 'numpy.add', 'np.add', (['data[pred + 5]', 'data[5 + npred + pred]'], {}), '(data[pred + 5], data[5 + npred + pred])\n', (4733, 4773), True, 'import numpy as np\n'), ((5146, 5179), 'matplotlib.dates.WeekdayLocator', 'mdates.WeekdayLocator', ([], {'interval': '(1)'}), '(interval=1)\n', (5167, 5179), True, 'import matplotlib.dates as mdates\n'), ((5615, 5625), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5623, 5625), True, 'import matplotlib.pyplot as plt\n'), ((6824, 6835), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6832, 6835), False, 'import sys\n'), ((5917, 5962), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['tmp[0]', '"""%Y%m%d:%H%M%S"""'], {}), "(tmp[0], '%Y%m%d:%H%M%S')\n", (5937, 5962), True, 'import datetime as dt\n')]
|
'''
This code is used for testing MoDL on JPEG-compressed data, for the results shown in figures 6, 7 and 8c in the paper.
Before running this script you should update the following:
basic_data_folder - it should be the same as the output folder defined in the script /crime_2_jpeg/data_prep/jpeg_data_prep.py
(c) <NAME>, UC Berkeley, 2021
'''
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
from MoDL_single import UnrolledModel
from subtle_data_crimes.functions.error_funcs import error_metrics
from utils import complex_utils as cplx
from utils.datasets import create_data_loaders
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# create a folder for the test figures
if not os.path.exists('test_figs'):
os.makedirs('test_figs')
##################### create test loader ###########################
class Namespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
# Hyper parameters
params = Namespace()
params.batch_size = 1
# image dimensions
params.NX = 640
params.NY = 372
# calib is assumed to be 12 for NX=640
calib_x = int(12)
calib_y = int(12 * params.NY / params.NX)
params.calib = np.array([calib_x, calib_y])
params.shuffle_flag = False  # should be True for training, False for testing. Note that this is a boolean, not a string, and no trailing semicolon is needed.
params.sampling_flag = 'var_dens_2D'
params.var_dens_flag = 'strong' # 'weak' / 'strong'
checkpoint_num = int(69) # load saved model (trained network)
q_vec = np.array([20, 50, 75, 999])
R_vec = np.array([4])
N_examples_4display = 15 # number of examples to display
N_examples_stats = 15 # number of examples over which the mean and STD will be computed
NRMSE_av_vs_q_and_R = np.zeros((R_vec.shape[0], q_vec.shape[0]))
NRMSE_std_vs_q_and_R = np.zeros((R_vec.shape[0], q_vec.shape[0]))
SSIM_av_vs_q_and_R = np.zeros((R_vec.shape[0], q_vec.shape[0]))
SSIM_std_vs_q_and_R = np.zeros((R_vec.shape[0], q_vec.shape[0]))
N_calc_err = 200
NRMSE_examples_4display = np.zeros((R_vec.shape[0], q_vec.shape[0], N_calc_err))
SSIM_examples_4display = np.zeros((R_vec.shape[0], q_vec.shape[0], N_calc_err))
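# per-example error values kept for display, indexed as [R index, q index, example index]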
small_dataset_flag = 0
for r in range(R_vec.shape[0]):
R = R_vec[r]
print('================================================== ')
print(' R={} '.format(R))
print('================================================== ')
# Important - here we update R in the params in order to create masks with appropriate sampling
# The mask is created in the DataTransform (utils/datasets
params.R = R
for qi in range(q_vec.shape[0]):
q = q_vec[qi]
params.q = q
# update the next path to YOUR path
basic_data_folder = "/mikQNAP/NYU_knee_data/multicoil_efrat/5_JPEG_compressed_data/"
data_type = 'test'
im_type_str = 'full_im' # training & validation is done on blocks (to accelerate training). Test is done on full-size images.
params.data_path = basic_data_folder + data_type + "/q" + str(params.q) + "/" + im_type_str + "/"
test_loader = create_data_loaders(params)
N_test_batches = len(test_loader.dataset)
print('N_test_batches =', N_test_batches)
checkpoint_file = 'R{}_q{}/checkpoints/model_{}.pt'.format(R, q, checkpoint_num)
checkpoint = torch.load(checkpoint_file, map_location=device)
# load the parameters of the trained network
params_loaded = checkpoint["params"]
single_MoDL = UnrolledModel(params_loaded).to(device)
single_MoDL.load_state_dict(checkpoint['model'])
single_MoDL.eval()
NRMSE_test_list = []
SSIM_test_list = []
cnt = 0
with torch.no_grad():
for iter, data in enumerate(test_loader):
if iter % 10 == 0:
print('loading test batch ', iter)
# input_batch, target_batch, mask_batch, target_no_JPEG_batch = data
input_batch, target_batch, mask_batch = data
# display the mask (before converting it to torch tensor)
if (iter == 0):
# print('mask_batch shape:',mask_batch.shape)
mask_squeezed = mask_batch[0, :, :, 0].squeeze()
# fig = plt.figure()
# plt.imshow(mask_squeezed, cmap="gray")
# plt.title(params.sampling_flag + ' epoch 0, iter {}'.format(iter))
# plt.show()
# fig.savefig('mask_iter{}.png'.format(iter))
# move data to GPU
input_batch = input_batch.to(device)
target_batch = target_batch.to(device)
mask_batch = mask_batch.to(device)
# forward pass - for the full batch
out_batch = single_MoDL(input_batch.float(), mask=mask_batch)
for i in range(params.batch_size):
cnt += 1 # counts the number of test images
print('cnt={}'.format(cnt))
im_input = cplx.to_numpy(input_batch.cpu())[i, :, :]
im_target = cplx.to_numpy(target_batch.cpu())[i, :, :]
im_out = cplx.to_numpy(out_batch.cpu())[i, :, :]
MoDL_err = error_metrics(np.abs(im_target), np.abs(im_out))
MoDL_err.calc_NRMSE()
MoDL_err.calc_SSIM()
NRMSE_test_list.append(MoDL_err.NRMSE)
SSIM_test_list.append(MoDL_err.SSIM)
if cnt < N_calc_err:
NRMSE_examples_4display[r, qi, cnt - 1] = MoDL_err.NRMSE
SSIM_examples_4display[r, qi, cnt - 1] = MoDL_err.SSIM
if cnt <= N_examples_4display:
target_im_rotated = np.rot90(np.abs(im_target), 2)
im_out_rotated = np.rot90(np.abs(im_out), 2)
NX = im_out_rotated.shape[0]
NY = im_out_rotated.shape[1]
if (r == 0) & (qi == 0) & (iter == 0):
TARGETS = np.zeros((NX, NY, q_vec.shape[0], N_examples_4display))
RECS = np.zeros((NX, NY, R_vec.shape[0], q_vec.shape[0], N_examples_4display))
TARGETS[:, :, qi, iter] = target_im_rotated
RECS[:, :, r, qi, iter] = im_out_rotated
# if iter==0:
fig = plt.figure()
plt.imshow(target_im_rotated, cmap="gray")
plt.colorbar(shrink=0.5)
plt.axis('off')
plt.title('target - iter={} - R{} q{}'.format(iter, R, q))
plt.show()
figname = 'check3_target_R{}_q{}_iter{}'.format(R, q, iter)
fig.savefig(figname)
if iter >= N_examples_stats:
break
# NRMSE - calc av & std
NRMSE_test_array = np.asarray(NRMSE_test_list)
NRMSE_av = np.mean(NRMSE_test_array[0:N_examples_stats].squeeze())
NRMSE_std = np.std(NRMSE_test_array[0:N_examples_stats].squeeze())
NRMSE_av_vs_q_and_R[r, qi] = NRMSE_av
NRMSE_std_vs_q_and_R[r, qi] = NRMSE_std
# SSIM - calc av & std
SSIM_test_array = np.asarray(SSIM_test_list)
SSIM_av = np.mean(SSIM_test_array[0:N_examples_stats].squeeze())
SSIM_std = np.std(SSIM_test_array[0:N_examples_stats].squeeze())
SSIM_av_vs_q_and_R[r, qi] = SSIM_av
SSIM_std_vs_q_and_R[r, qi] = SSIM_std
print('q={} NRMSE_av = {}, SSIM_av = {}'.format(q, NRMSE_av, SSIM_av))
# save NRMSE_av & SSIM
print('saving results')
results_filename = 'Res_for_Fig6.npz'
np.savez(results_filename, R_vec=R_vec, q_vec=q_vec, params=params, checkpoint_num=checkpoint_num,
NRMSE_av_vs_q_and_R=NRMSE_av_vs_q_and_R,
NRMSE_std_vs_q_and_R=NRMSE_std_vs_q_and_R,
SSIM_av_vs_q_and_R=SSIM_av_vs_q_and_R,
SSIM_std_vs_q_and_R=SSIM_std_vs_q_and_R,
NRMSE_examples_4display=NRMSE_examples_4display,
SSIM_examples_4display=SSIM_examples_4display,
N_examples_stats=N_examples_stats,
N_examples_4display=N_examples_4display,
TARGETS=TARGETS,
RECS=RECS,
)
|
[
"logging.getLogger",
"utils.datasets.create_data_loaders",
"MoDL_single.UnrolledModel",
"numpy.array",
"torch.cuda.is_available",
"matplotlib.pyplot.imshow",
"os.path.exists",
"numpy.savez",
"numpy.asarray",
"matplotlib.pyplot.axis",
"numpy.abs",
"matplotlib.pyplot.show",
"logging.basicConfig",
"os.makedirs",
"torch.load",
"matplotlib.pyplot.colorbar",
"numpy.zeros",
"matplotlib.pyplot.figure",
"torch.no_grad"
] |
[((652, 691), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (671, 691), False, 'import logging\n'), ((702, 729), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (719, 729), False, 'import logging\n'), ((1324, 1352), 'numpy.array', 'np.array', (['[calib_x, calib_y]'], {}), '([calib_x, calib_y])\n', (1332, 1352), True, 'import numpy as np\n'), ((1667, 1694), 'numpy.array', 'np.array', (['[20, 50, 75, 999]'], {}), '([20, 50, 75, 999])\n', (1675, 1694), True, 'import numpy as np\n'), ((1704, 1717), 'numpy.array', 'np.array', (['[4]'], {}), '([4])\n', (1712, 1717), True, 'import numpy as np\n'), ((1894, 1936), 'numpy.zeros', 'np.zeros', (['(R_vec.shape[0], q_vec.shape[0])'], {}), '((R_vec.shape[0], q_vec.shape[0]))\n', (1902, 1936), True, 'import numpy as np\n'), ((1961, 2003), 'numpy.zeros', 'np.zeros', (['(R_vec.shape[0], q_vec.shape[0])'], {}), '((R_vec.shape[0], q_vec.shape[0]))\n', (1969, 2003), True, 'import numpy as np\n'), ((2026, 2068), 'numpy.zeros', 'np.zeros', (['(R_vec.shape[0], q_vec.shape[0])'], {}), '((R_vec.shape[0], q_vec.shape[0]))\n', (2034, 2068), True, 'import numpy as np\n'), ((2092, 2134), 'numpy.zeros', 'np.zeros', (['(R_vec.shape[0], q_vec.shape[0])'], {}), '((R_vec.shape[0], q_vec.shape[0]))\n', (2100, 2134), True, 'import numpy as np\n'), ((2184, 2238), 'numpy.zeros', 'np.zeros', (['(R_vec.shape[0], q_vec.shape[0], N_calc_err)'], {}), '((R_vec.shape[0], q_vec.shape[0], N_calc_err))\n', (2192, 2238), True, 'import numpy as np\n'), ((2265, 2319), 'numpy.zeros', 'np.zeros', (['(R_vec.shape[0], q_vec.shape[0], N_calc_err)'], {}), '((R_vec.shape[0], q_vec.shape[0], N_calc_err))\n', (2273, 2319), True, 'import numpy as np\n'), ((8231, 8719), 'numpy.savez', 'np.savez', (['results_filename'], {'R_vec': 'R_vec', 'q_vec': 'q_vec', 'params': 'params', 'checkpoint_num': 'checkpoint_num', 'NRMSE_av_vs_q_and_R': 'NRMSE_av_vs_q_and_R', 'NRMSE_std_vs_q_and_R': 'NRMSE_std_vs_q_and_R', 'SSIM_av_vs_q_and_R': 'SSIM_av_vs_q_and_R', 'SSIM_std_vs_q_and_R': 'SSIM_std_vs_q_and_R', 'NRMSE_examples_4display': 'NRMSE_examples_4display', 'SSIM_examples_4display': 'SSIM_examples_4display', 'N_examples_stats': 'N_examples_stats', 'N_examples_4display': 'N_examples_4display', 'TARGETS': 'TARGETS', 'RECS': 'RECS'}), '(results_filename, R_vec=R_vec, q_vec=q_vec, params=params,\n checkpoint_num=checkpoint_num, NRMSE_av_vs_q_and_R=NRMSE_av_vs_q_and_R,\n NRMSE_std_vs_q_and_R=NRMSE_std_vs_q_and_R, SSIM_av_vs_q_and_R=\n SSIM_av_vs_q_and_R, SSIM_std_vs_q_and_R=SSIM_std_vs_q_and_R,\n NRMSE_examples_4display=NRMSE_examples_4display, SSIM_examples_4display\n =SSIM_examples_4display, N_examples_stats=N_examples_stats,\n N_examples_4display=N_examples_4display, TARGETS=TARGETS, RECS=RECS)\n', (8239, 8719), True, 'import numpy as np\n'), ((853, 880), 'os.path.exists', 'os.path.exists', (['"""test_figs"""'], {}), "('test_figs')\n", (867, 880), False, 'import os\n'), ((887, 911), 'os.makedirs', 'os.makedirs', (['"""test_figs"""'], {}), "('test_figs')\n", (898, 911), False, 'import os\n'), ((765, 790), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (788, 790), False, 'import torch\n'), ((3318, 3345), 'utils.datasets.create_data_loaders', 'create_data_loaders', (['params'], {}), '(params)\n', (3337, 3345), False, 'from utils.datasets import create_data_loaders\n'), ((3566, 3614), 'torch.load', 'torch.load', (['checkpoint_file'], {'map_location': 'device'}), '(checkpoint_file, map_location=device)\n', 
(3576, 3614), False, 'import torch\n'), ((3964, 3979), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3977, 3979), False, 'import torch\n'), ((7394, 7421), 'numpy.asarray', 'np.asarray', (['NRMSE_test_list'], {}), '(NRMSE_test_list)\n', (7404, 7421), True, 'import numpy as np\n'), ((7771, 7797), 'numpy.asarray', 'np.asarray', (['SSIM_test_list'], {}), '(SSIM_test_list)\n', (7781, 7797), True, 'import numpy as np\n'), ((3740, 3768), 'MoDL_single.UnrolledModel', 'UnrolledModel', (['params_loaded'], {}), '(params_loaded)\n', (3753, 3768), False, 'from MoDL_single import UnrolledModel\n'), ((5593, 5610), 'numpy.abs', 'np.abs', (['im_target'], {}), '(im_target)\n', (5599, 5610), True, 'import numpy as np\n'), ((5612, 5626), 'numpy.abs', 'np.abs', (['im_out'], {}), '(im_out)\n', (5618, 5626), True, 'import numpy as np\n'), ((6825, 6837), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6835, 6837), True, 'import matplotlib.pyplot as plt\n'), ((6863, 6905), 'matplotlib.pyplot.imshow', 'plt.imshow', (['target_im_rotated'], {'cmap': '"""gray"""'}), "(target_im_rotated, cmap='gray')\n", (6873, 6905), True, 'import matplotlib.pyplot as plt\n'), ((6931, 6955), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'shrink': '(0.5)'}), '(shrink=0.5)\n', (6943, 6955), True, 'import matplotlib.pyplot as plt\n'), ((6981, 6996), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (6989, 6996), True, 'import matplotlib.pyplot as plt\n'), ((7106, 7116), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7114, 7116), True, 'import matplotlib.pyplot as plt\n'), ((6147, 6164), 'numpy.abs', 'np.abs', (['im_target'], {}), '(im_target)\n', (6153, 6164), True, 'import numpy as np\n'), ((6220, 6234), 'numpy.abs', 'np.abs', (['im_out'], {}), '(im_out)\n', (6226, 6234), True, 'import numpy as np\n'), ((6452, 6507), 'numpy.zeros', 'np.zeros', (['(NX, NY, q_vec.shape[0], N_examples_4display)'], {}), '((NX, NY, q_vec.shape[0], N_examples_4display))\n', (6460, 6507), True, 'import numpy as np\n'), ((6544, 6615), 'numpy.zeros', 'np.zeros', (['(NX, NY, R_vec.shape[0], q_vec.shape[0], N_examples_4display)'], {}), '((NX, NY, R_vec.shape[0], q_vec.shape[0], N_examples_4display))\n', (6552, 6615), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.spatial import cKDTree as KDTree
import math
import argparse
# ref: https://github.com/facebookresearch/DeepSDF/blob/master/deep_sdf/metrics/chamfer.py
# takes one pair of reconstructed and gt point cloud and return the cd
def compute_cd(gt_points, gen_points):
# one direction
gen_points_kd_tree = KDTree(gen_points)
one_distances, one_vertex_ids = gen_points_kd_tree.query(gt_points)
gt_to_gen_chamfer = np.mean(np.square(one_distances))
# other direction
gt_points_kd_tree = KDTree(gt_points)
two_distances, two_vertex_ids = gt_points_kd_tree.query(gen_points)
gen_to_gt_chamfer = np.mean(np.square(two_distances))
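    # symmetric Chamfer distance: sum of the two mean squared nearest-neighbour distances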
return gt_to_gen_chamfer + gen_to_gt_chamfer
if __name__ == '__main__':
num_gen_pts_sample = 30000
parser = argparse.ArgumentParser()
parser.add_argument('--gt_pts_path', type=str, help='Path to ground truth point clouds (numpy array)')
parser.add_argument('--gen_pts_path', type=str, help='Path to corresponsing reconstructed point clouds (numpy array)')
args = parser.parse_args()
test_gt_pts = np.load(args.gt_pts_path, allow_pickle=True)
test_gen_pts = np.load(args.gen_pts_path, allow_pickle=True)
assert test_gen_pts.shape[0] == test_gt_pts.shape[0]
num_instances = test_gen_pts.shape[0]
chamfer_results = []
print('Might take a few minutes ...')
for instance_idx in range(num_instances):
gt_pts_instance = test_gt_pts[instance_idx]
gen_pts_instance = test_gen_pts[instance_idx]
if gen_pts_instance.shape[0] < 2000:
continue
# if the number of points in reconstructed point cloud is < num_gen_pts_sample,
# repeat the points randomly to make number of points = num_gen_pts_sample
if gen_pts_instance.shape[0]<num_gen_pts_sample:
pt_indices = np.concatenate([
np.arange(len(gen_pts_instance)),
np.random.choice(len(gen_pts_instance), num_gen_pts_sample-len(gen_pts_instance))
])
gen_pts_instance = gen_pts_instance[pt_indices]
np.random.shuffle(gt_pts_instance)
np.random.shuffle(gen_pts_instance)
cd = compute_cd(gt_pts_instance, gen_pts_instance)
if math.isnan(cd):
continue
chamfer_results.append(cd)
chamfer_results.sort()
print('Ground truth point cloud: {}'.format(args.gt_pts_path))
print('Reconstructed point cloud: {}'.format(args.gen_pts_path))
cd_avg = sum(chamfer_results) / float(len(chamfer_results))
print('Average Chamfer Distance: {}'.format(cd_avg))
print('Median Chamfer Distance: {}'.format(chamfer_results[len(chamfer_results)//2]))
print('-'*80)
|
[
"argparse.ArgumentParser",
"scipy.spatial.cKDTree",
"numpy.square",
"math.isnan",
"numpy.load",
"numpy.random.shuffle"
] |
[((341, 359), 'scipy.spatial.cKDTree', 'KDTree', (['gen_points'], {}), '(gen_points)\n', (347, 359), True, 'from scipy.spatial import cKDTree as KDTree\n'), ((537, 554), 'scipy.spatial.cKDTree', 'KDTree', (['gt_points'], {}), '(gt_points)\n', (543, 554), True, 'from scipy.spatial import cKDTree as KDTree\n'), ((810, 835), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (833, 835), False, 'import argparse\n'), ((1117, 1161), 'numpy.load', 'np.load', (['args.gt_pts_path'], {'allow_pickle': '(True)'}), '(args.gt_pts_path, allow_pickle=True)\n', (1124, 1161), True, 'import numpy as np\n'), ((1181, 1226), 'numpy.load', 'np.load', (['args.gen_pts_path'], {'allow_pickle': '(True)'}), '(args.gen_pts_path, allow_pickle=True)\n', (1188, 1226), True, 'import numpy as np\n'), ((464, 488), 'numpy.square', 'np.square', (['one_distances'], {}), '(one_distances)\n', (473, 488), True, 'import numpy as np\n'), ((659, 683), 'numpy.square', 'np.square', (['two_distances'], {}), '(two_distances)\n', (668, 683), True, 'import numpy as np\n'), ((2137, 2171), 'numpy.random.shuffle', 'np.random.shuffle', (['gt_pts_instance'], {}), '(gt_pts_instance)\n', (2154, 2171), True, 'import numpy as np\n'), ((2180, 2215), 'numpy.random.shuffle', 'np.random.shuffle', (['gen_pts_instance'], {}), '(gen_pts_instance)\n', (2197, 2215), True, 'import numpy as np\n'), ((2288, 2302), 'math.isnan', 'math.isnan', (['cd'], {}), '(cd)\n', (2298, 2302), False, 'import math\n')]
|
#!/usr/bin/env python3
# This is the master ImageAnalysis processing script. For DJI and
# Sentera cameras it should typically be able to run through with
# default settings and produce a good result with no further input.
#
# If something goes wrong, there are usually specific sub-scripts that
# can be run to fix the problem and then this script can be re-run to
# continue.
#
# If your camera isn't yet supported, you can run a script that mostly
# automates the process of adding a new camera (possibly with a small
# amount of extra info that you can usually research by googling).
#
# If you run into an unsolvable glitch and are willing to share your
# data set, I may be able to look at the issue and make some sort of
# determination or fix.
import argparse
import numpy as np
import os
import pickle
import socket # gethostname()
import time
from lib import camera
from lib import groups
from lib.logger import log
from lib import matcher
from lib import match_cleanup
from lib import optimizer
from lib import pose
from lib import project
from lib import render_panda3d
from lib import smart
from lib import srtm
from lib import state
from props import getNode, PropertyNode # from the aura-props python package
import props_json
parser = argparse.ArgumentParser(description='Create an empty project.')
parser.add_argument('project', help='Directory with a set of aerial images.')
# camera setup options
parser.add_argument('--camera', help='camera config file')
parser.add_argument('--yaw-deg', type=float, default=0.0,
help='camera yaw mounting offset from aircraft')
parser.add_argument('--pitch-deg', type=float, default=-90.0,
help='camera pitch mounting offset from aircraft')
parser.add_argument('--roll-deg', type=float, default=0.0,
help='camera roll mounting offset from aircraft')
# pose setup options
parser.add_argument('--max-angle', type=float, default=25.0, help='max pitch or roll angle for image inclusion')
parser.add_argument('--force-altitude', type=float, help='Fudge altitude geotag for stupid dji phantom 4 pro v2.0')
# feature detection options
parser.add_argument('--scale', type=float, default=0.4, help='scale images before detecting features, this acts much like a noise filter')
parser.add_argument('--detector', default='SIFT',
choices=['SIFT', 'SURF', 'ORB', 'Star'])
parser.add_argument('--surf-hessian-threshold', default=600,
help='hessian threshold for surf method')
parser.add_argument('--surf-noctaves', default=4,
help='use a bigger number to detect bigger features')
parser.add_argument('--orb-max-features', default=20000,
help='maximum ORB features')
parser.add_argument('--grid-detect', default=1,
help='run detect on gridded squares for (maybe) better feature distribution, 4 is a good starting value, only affects ORB method')
parser.add_argument('--star-max-size', default=16,
help='4, 6, 8, 11, 12, 16, 22, 23, 32, 45, 46, 64, 90, 128')
parser.add_argument('--star-response-threshold', default=30)
parser.add_argument('--star-line-threshold-projected', default=10)
parser.add_argument('--star-line-threshold-binarized', default=8)
parser.add_argument('--star-suppress-nonmax-size', default=5)
parser.add_argument('--reject-margin', default=0, help='reject features within this distance of the image outer edge margin')
# feature matching arguments
parser.add_argument('--match-strategy', default='traditional',
choices=['smart', 'bestratio', 'traditional', 'bruteforce'])
parser.add_argument('--match-ratio', default=0.75, type=float,
help='match ratio')
parser.add_argument('--min-pairs', default=25, type=int,
help='minimum matches between image pairs to keep')
parser.add_argument('--min-dist', type=float,
help='minimum 2d camera distance for pair comparison')
parser.add_argument('--max-dist', type=float,
help='maximum 2d camera distance for pair comparison')
parser.add_argument('--filter', default='gms',
choices=['gms', 'homography', 'fundamental', 'essential', 'none'])
parser.add_argument('--min-chain-length', type=int, default=3, help='minimum match chain length (3 recommended)')
# for smart matching
parser.add_argument('--ground', type=float, help="ground elevation")
# optimizer arguments
parser.add_argument('--group', type=int, default=0, help='group number')
parser.add_argument('--cam-calibration', action='store_true', help='include camera calibration in the optimization.')
parser.add_argument('--refine', action='store_true', help='refine a previous optimization.')
args = parser.parse_args()
log("Project processed on host:", socket.gethostname())
log("Project processed with arguments:", args)
############################################################################
log("Step 1: setup the project", fancy=True)
############################################################################
### 1a. initialize a new project workspace
# test if images directory exists
if not os.path.isdir(args.project):
print("Images directory doesn't exist:", args.project)
quit()
# create an empty project and save...
proj = project.ProjectMgr(args.project, create=True)
proj.save()
log("Created project:", args.project)
### 1b. intialize camera
if args.camera:
# specified on command line
camera_file = args.camera
else:
# auto detect camera from image meta data
camera_name, make, model, lens_model = proj.detect_camera()
camera_file = os.path.join("..", "cameras", camera_name + ".json")
log("Camera auto-detected:", camera_name, make, model, lens_model)
log("Camera file:", camera_file)
# copy/overlay/update the specified camera config into the existing
# project configuration
cam_node = getNode('/config/camera', True)
tmp_node = PropertyNode()
if props_json.load(camera_file, tmp_node):
props_json.overlay(cam_node, tmp_node)
if cam_node.getString("make") == "DJI":
# phantom, et al.
camera.set_mount_params(0.0, 0.0, 0.0)
elif cam_node.getString("make") == "Hasselblad":
# mavic pro
camera.set_mount_params(0.0, 0.0, 0.0)
else:
# assume a nadir camera rigidly mounted to airframe
camera.set_mount_params(args.yaw_deg, args.pitch_deg, args.roll_deg)
# note: dist_coeffs = array[5] = k1, k2, p1, p2, k3
# ... and save
proj.save()
else:
# failed to load camera config file
if not args.camera:
log("Camera autodetection failed. Consider running the new camera script to create a camera config and then try running this script again.")
else:
log("Specified camera config not found:", args.camera)
log("Aborting due to camera detection failure.")
quit()
state.update("STEP1")
############################################################################
log("Step 2: configure camera poses and per-image meta data files", fancy=True)
############################################################################
log("Configuring images")
# create pose file (if it doesn't already exist, for example sentera
# cameras will generate the pix4d.csv file automatically, dji does not)
pix4d_file = os.path.join(args.project, 'pix4d.csv')
meta_file = os.path.join(args.project, 'image-metadata.txt')
if os.path.exists(pix4d_file):
log("Found a pose file:", pix4d_file)
elif os.path.exists(meta_file):
log("Found a pose file:", meta_file)
else:
pose.make_pix4d(args.project, args.force_altitude)
pix4d_file = os.path.join(args.project, 'pix4d.csv')
meta_file = os.path.join(args.project, 'image-metadata.txt')
if os.path.exists(pix4d_file):
pose.set_aircraft_poses(proj, pix4d_file, order='rpy',
max_angle=args.max_angle)
elif os.path.exists(meta_file):
pose.set_aircraft_poses(proj, meta_file, order='ypr',
max_angle=args.max_angle)
else:
log("Error: no pose file found in image directory:", args.project)
quit()
# save the initial meta .json file for each posed image
proj.save_images_info()
# now, load the image meta data and init the proj.image_list
proj.load_images_info()
# compute the project's NED reference location (based on average of
# aircraft poses)
proj.compute_ned_reference_lla()
ref_node = getNode('/config/ned_reference', True)
ref = [ ref_node.getFloat('lat_deg'),
ref_node.getFloat('lon_deg'),
ref_node.getFloat('alt_m') ]
log("NED reference location:", ref)
# set the camera poses (fixed offset from aircraft pose) Camera pose
# location is specfied in ned, so do this after computing the ned
# reference point for this project.
pose.compute_camera_poses(proj)
# local surface approximation
srtm.initialize( ref, 6000, 6000, 30)
smart.load(proj.analysis_dir)
smart.update_srtm_elevations(proj)
smart.save(proj.analysis_dir)
# save the poses
proj.save_images_info()
# save initial proejct config (mainly the ned reference)
proj.save()
state.update("STEP2")
############################################################################
log("Step 3: feature matching", fancy=True)
############################################################################
if not state.check("STEP3a"):
proj.load_images_info()
proj.load_match_pairs()
smart.load(proj.analysis_dir)
smart.set_yaw_error_estimates(proj)
# setup project detector parameters
detector_node = getNode('/config/detector', True)
detector_node.setString('detector', args.detector)
detector_node.setString('scale', args.scale)
if args.detector == 'SIFT':
pass
elif args.detector == 'SURF':
detector_node.setInt('surf_hessian_threshold', args.surf_hessian_threshold)
detector_node.setInt('surf_noctaves', args.surf_noctaves)
elif args.detector == 'ORB':
detector_node.setInt('grid_detect', args.grid_detect)
detector_node.setInt('orb_max_features', args.orb_max_features)
elif args.detector == 'Star':
detector_node.setInt('star_max_size', args.star_max_size)
detector_node.setInt('star_response_threshold',
args.star_response_threshold)
detector_node.setInt('star_line_threshold_projected',
                             args.star_line_threshold_projected)
detector_node.setInt('star_line_threshold_binarized',
args.star_line_threshold_binarized)
detector_node.setInt('star_suppress_nonmax_size',
args.star_suppress_nonmax_size)
log("detector:", args.detector)
log("image scale for fearture detection/matching:", args.scale)
matcher_node = getNode('/config/matcher', True)
matcher_node.setFloat('match_ratio', args.match_ratio)
matcher_node.setString('filter', args.filter)
matcher_node.setInt('min_pairs', args.min_pairs)
if args.min_dist:
matcher_node.setFloat('min_dist', args.min_dist)
if args.max_dist:
matcher_node.setFloat('max_dist', args.max_dist)
matcher_node.setInt('min_chain_len', args.min_chain_length)
if args.ground:
matcher_node.setFloat('ground_m', args.ground)
# save any config changes
proj.save()
# camera calibration
K = camera.get_K()
# print("K:", K)
log("Matching features")
# fire up the matcher
matcher.configure()
matcher.find_matches(proj, K, strategy=args.match_strategy,
transform=args.filter, sort=True, review=False)
feature_count = 0
image_count = 0
for image in proj.image_list:
feature_count += image.num_features
image_count += 1
log("Average # of features per image found = %.0f" % (feature_count / image_count))
state.update("STEP3a")
matches_name = os.path.join(proj.analysis_dir, "matches_grouped")
if not state.check("STEP3b"):
proj.load_images_info()
proj.load_features(descriptors=False)
proj.load_match_pairs()
match_cleanup.merge_duplicates(proj)
match_cleanup.check_for_pair_dups(proj)
match_cleanup.check_for_1vn_dups(proj)
matches_direct = match_cleanup.make_match_structure(proj)
matches_grouped = match_cleanup.link_matches(proj, matches_direct)
log("Writing full group chain file:", matches_name)
pickle.dump(matches_grouped, open(matches_name, "wb"))
state.update("STEP3b")
if not state.check("STEP3c"):
proj.load_images_info()
K = camera.get_K(optimized=False)
IK = np.linalg.inv(K)
log("Loading source matches:", matches_name)
matches_grouped = pickle.load( open(matches_name, 'rb') )
match_cleanup.triangulate_smart(proj, matches_grouped)
log("Writing triangulated group file:", matches_name)
pickle.dump(matches_grouped, open(matches_name, "wb"))
state.update("STEP3c")
if not state.check("STEP3d"):
proj.load_images_info()
log("Loading source matches:", matches_name)
matches = pickle.load( open( matches_name, 'rb' ) )
log("matched features:", len(matches))
# compute the group connections within the image set.
group_list = groups.compute(proj.image_list, matches)
groups.save(proj.analysis_dir, group_list)
log("Total images:", len(proj.image_list))
line = "Group sizes:"
for g in group_list:
line += " " + str(len(g))
log(line)
log("Counting allocated features...")
count = 0
for i, match in enumerate(matches):
if match[1] >= 0:
count += 1
print("Features: %d/%d" % (count, len(matches)))
log("Writing grouped tagged matches:", matches_name)
pickle.dump(matches, open(matches_name, "wb"))
state.update("STEP3d")
############################################################################
log("Step 4: Optimization (fit)", fancy=True)
############################################################################
if not state.check("STEP4"):
proj.load_images_info()
log("Loading source matches:", matches_name)
matches = pickle.load( open( matches_name, 'rb' ) )
log("matched features:", len(matches))
# load the group connections within the image set
group_list = groups.load(proj.analysis_dir)
opt = optimizer.Optimizer(args.project)
# setup the data structures
opt.setup( proj, group_list, args.group, matches, optimized=args.refine,
cam_calib=args.cam_calibration)
# run the optimization (fit)
cameras, features, cam_index_map, feat_index_map, \
fx_opt, fy_opt, cu_opt, cv_opt, distCoeffs_opt \
= opt.run()
# update camera poses
opt.update_camera_poses(proj)
# update and save the optimized camera calibration
camera.set_K(fx_opt, fy_opt, cu_opt, cv_opt, optimized=True)
camera.set_dist_coeffs(distCoeffs_opt.tolist(), optimized=True)
proj.save()
# reposition the optimized data set to best fit the original gps
# locations of the camera poses.
opt.refit(proj, matches, group_list, args.group)
# write out the updated match_dict
log("Writing optimized (fitted) matches:", matches_name)
pickle.dump(matches, open(matches_name, 'wb'))
state.update("STEP4")
############################################################################
log("Step 5: Create the map", fancy=True)
############################################################################
if not state.check("STEP6"):
# load the group connections within the image set
group_list = groups.load(proj.analysis_dir)
render_panda3d.build_map(proj, group_list, args.group)
#state.update("STEP6")
|
[
"lib.smart.update_srtm_elevations",
"lib.pose.set_aircraft_poses",
"lib.optimizer.Optimizer",
"lib.match_cleanup.triangulate_smart",
"props_json.load",
"lib.match_cleanup.merge_duplicates",
"os.path.exists",
"lib.state.check",
"argparse.ArgumentParser",
"lib.pose.make_pix4d",
"lib.matcher.find_matches",
"lib.groups.compute",
"os.path.isdir",
"lib.matcher.configure",
"lib.project.ProjectMgr",
"props.PropertyNode",
"socket.gethostname",
"lib.pose.compute_camera_poses",
"props_json.overlay",
"lib.camera.set_K",
"lib.camera.get_K",
"lib.smart.set_yaw_error_estimates",
"props.getNode",
"lib.groups.save",
"lib.srtm.initialize",
"lib.groups.load",
"lib.match_cleanup.link_matches",
"lib.smart.load",
"lib.match_cleanup.check_for_pair_dups",
"os.path.join",
"lib.state.update",
"lib.match_cleanup.make_match_structure",
"numpy.linalg.inv",
"lib.logger.log",
"lib.render_panda3d.build_map",
"lib.camera.set_mount_params",
"lib.match_cleanup.check_for_1vn_dups",
"lib.smart.save"
] |
[((1275, 1338), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create an empty project."""'}), "(description='Create an empty project.')\n", (1298, 1338), False, 'import argparse\n'), ((4865, 4911), 'lib.logger.log', 'log', (['"""Project processed with arguments:"""', 'args'], {}), "('Project processed with arguments:', args)\n", (4868, 4911), False, 'from lib.logger import log\n'), ((4990, 5034), 'lib.logger.log', 'log', (['"""Step 1: setup the project"""'], {'fancy': '(True)'}), "('Step 1: setup the project', fancy=True)\n", (4993, 5034), False, 'from lib.logger import log\n'), ((5343, 5388), 'lib.project.ProjectMgr', 'project.ProjectMgr', (['args.project'], {'create': '(True)'}), '(args.project, create=True)\n', (5361, 5388), False, 'from lib import project\n'), ((5402, 5439), 'lib.logger.log', 'log', (['"""Created project:"""', 'args.project'], {}), "('Created project:', args.project)\n", (5405, 5439), False, 'from lib.logger import log\n'), ((5803, 5835), 'lib.logger.log', 'log', (['"""Camera file:"""', 'camera_file'], {}), "('Camera file:', camera_file)\n", (5806, 5835), False, 'from lib.logger import log\n'), ((5940, 5971), 'props.getNode', 'getNode', (['"""/config/camera"""', '(True)'], {}), "('/config/camera', True)\n", (5947, 5971), False, 'from props import getNode, PropertyNode\n'), ((5983, 5997), 'props.PropertyNode', 'PropertyNode', ([], {}), '()\n', (5995, 5997), False, 'from props import getNode, PropertyNode\n'), ((6001, 6039), 'props_json.load', 'props_json.load', (['camera_file', 'tmp_node'], {}), '(camera_file, tmp_node)\n', (6016, 6039), False, 'import props_json\n'), ((6917, 6938), 'lib.state.update', 'state.update', (['"""STEP1"""'], {}), "('STEP1')\n", (6929, 6938), False, 'from lib import state\n'), ((7018, 7097), 'lib.logger.log', 'log', (['"""Step 2: configure camera poses and per-image meta data files"""'], {'fancy': '(True)'}), "('Step 2: configure camera poses and per-image meta data files', fancy=True)\n", (7021, 7097), False, 'from lib.logger import log\n'), ((7176, 7201), 'lib.logger.log', 'log', (['"""Configuring images"""'], {}), "('Configuring images')\n", (7179, 7201), False, 'from lib.logger import log\n'), ((7357, 7396), 'os.path.join', 'os.path.join', (['args.project', '"""pix4d.csv"""'], {}), "(args.project, 'pix4d.csv')\n", (7369, 7396), False, 'import os\n'), ((7409, 7457), 'os.path.join', 'os.path.join', (['args.project', '"""image-metadata.txt"""'], {}), "(args.project, 'image-metadata.txt')\n", (7421, 7457), False, 'import os\n'), ((7461, 7487), 'os.path.exists', 'os.path.exists', (['pix4d_file'], {}), '(pix4d_file)\n', (7475, 7487), False, 'import os\n'), ((7683, 7722), 'os.path.join', 'os.path.join', (['args.project', '"""pix4d.csv"""'], {}), "(args.project, 'pix4d.csv')\n", (7695, 7722), False, 'import os\n'), ((7735, 7783), 'os.path.join', 'os.path.join', (['args.project', '"""image-metadata.txt"""'], {}), "(args.project, 'image-metadata.txt')\n", (7747, 7783), False, 'import os\n'), ((7787, 7813), 'os.path.exists', 'os.path.exists', (['pix4d_file'], {}), '(pix4d_file)\n', (7801, 7813), False, 'import os\n'), ((8457, 8495), 'props.getNode', 'getNode', (['"""/config/ned_reference"""', '(True)'], {}), "('/config/ned_reference', True)\n", (8464, 8495), False, 'from props import getNode, PropertyNode\n'), ((8609, 8644), 'lib.logger.log', 'log', (['"""NED reference location:"""', 'ref'], {}), "('NED reference location:', ref)\n", (8612, 8644), False, 'from lib.logger import log\n'), ((8817, 8848), 
'lib.pose.compute_camera_poses', 'pose.compute_camera_poses', (['proj'], {}), '(proj)\n', (8842, 8848), False, 'from lib import pose\n'), ((8880, 8916), 'lib.srtm.initialize', 'srtm.initialize', (['ref', '(6000)', '(6000)', '(30)'], {}), '(ref, 6000, 6000, 30)\n', (8895, 8916), False, 'from lib import srtm\n'), ((8918, 8947), 'lib.smart.load', 'smart.load', (['proj.analysis_dir'], {}), '(proj.analysis_dir)\n', (8928, 8947), False, 'from lib import smart\n'), ((8948, 8982), 'lib.smart.update_srtm_elevations', 'smart.update_srtm_elevations', (['proj'], {}), '(proj)\n', (8976, 8982), False, 'from lib import smart\n'), ((8983, 9012), 'lib.smart.save', 'smart.save', (['proj.analysis_dir'], {}), '(proj.analysis_dir)\n', (8993, 9012), False, 'from lib import smart\n'), ((9126, 9147), 'lib.state.update', 'state.update', (['"""STEP2"""'], {}), "('STEP2')\n", (9138, 9147), False, 'from lib import state\n'), ((9227, 9270), 'lib.logger.log', 'log', (['"""Step 3: feature matching"""'], {'fancy': '(True)'}), "('Step 3: feature matching', fancy=True)\n", (9230, 9270), False, 'from lib.logger import log\n'), ((11928, 11978), 'os.path.join', 'os.path.join', (['proj.analysis_dir', '"""matches_grouped"""'], {}), "(proj.analysis_dir, 'matches_grouped')\n", (11940, 11978), False, 'import os\n'), ((13901, 13946), 'lib.logger.log', 'log', (['"""Step 4: Optimization (fit)"""'], {'fancy': '(True)'}), "('Step 4: Optimization (fit)', fancy=True)\n", (13904, 13946), False, 'from lib.logger import log\n'), ((15387, 15428), 'lib.logger.log', 'log', (['"""Step 5: Create the map"""'], {'fancy': '(True)'}), "('Step 5: Create the map', fancy=True)\n", (15390, 15428), False, 'from lib.logger import log\n'), ((4843, 4863), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (4861, 4863), False, 'import socket\n'), ((5198, 5225), 'os.path.isdir', 'os.path.isdir', (['args.project'], {}), '(args.project)\n', (5211, 5225), False, 'import os\n'), ((5679, 5731), 'os.path.join', 'os.path.join', (['""".."""', '"""cameras"""', "(camera_name + '.json')"], {}), "('..', 'cameras', camera_name + '.json')\n", (5691, 5731), False, 'import os\n'), ((5736, 5802), 'lib.logger.log', 'log', (['"""Camera auto-detected:"""', 'camera_name', 'make', 'model', 'lens_model'], {}), "('Camera auto-detected:', camera_name, make, model, lens_model)\n", (5739, 5802), False, 'from lib.logger import log\n'), ((6045, 6083), 'props_json.overlay', 'props_json.overlay', (['cam_node', 'tmp_node'], {}), '(cam_node, tmp_node)\n', (6063, 6083), False, 'import props_json\n'), ((6856, 6904), 'lib.logger.log', 'log', (['"""Aborting due to camera detection failure."""'], {}), "('Aborting due to camera detection failure.')\n", (6859, 6904), False, 'from lib.logger import log\n'), ((7493, 7530), 'lib.logger.log', 'log', (['"""Found a pose file:"""', 'pix4d_file'], {}), "('Found a pose file:', pix4d_file)\n", (7496, 7530), False, 'from lib.logger import log\n'), ((7536, 7561), 'os.path.exists', 'os.path.exists', (['meta_file'], {}), '(meta_file)\n', (7550, 7561), False, 'import os\n'), ((7819, 7904), 'lib.pose.set_aircraft_poses', 'pose.set_aircraft_poses', (['proj', 'pix4d_file'], {'order': '"""rpy"""', 'max_angle': 'args.max_angle'}), "(proj, pix4d_file, order='rpy', max_angle=args.max_angle\n )\n", (7842, 7904), False, 'from lib import pose\n'), ((7933, 7958), 'os.path.exists', 'os.path.exists', (['meta_file'], {}), '(meta_file)\n', (7947, 7958), False, 'import os\n'), ((9356, 9377), 'lib.state.check', 'state.check', (['"""STEP3a"""'], {}), "('STEP3a')\n", 
(9367, 9377), False, 'from lib import state\n'), ((9439, 9468), 'lib.smart.load', 'smart.load', (['proj.analysis_dir'], {}), '(proj.analysis_dir)\n', (9449, 9468), False, 'from lib import smart\n'), ((9473, 9508), 'lib.smart.set_yaw_error_estimates', 'smart.set_yaw_error_estimates', (['proj'], {}), '(proj)\n', (9502, 9508), False, 'from lib import smart\n'), ((9574, 9607), 'props.getNode', 'getNode', (['"""/config/detector"""', '(True)'], {}), "('/config/detector', True)\n", (9581, 9607), False, 'from props import getNode, PropertyNode\n'), ((10695, 10726), 'lib.logger.log', 'log', (['"""detector:"""', 'args.detector'], {}), "('detector:', args.detector)\n", (10698, 10726), False, 'from lib.logger import log\n'), ((10731, 10794), 'lib.logger.log', 'log', (['"""image scale for fearture detection/matching:"""', 'args.scale'], {}), "('image scale for fearture detection/matching:', args.scale)\n", (10734, 10794), False, 'from lib.logger import log\n'), ((10815, 10847), 'props.getNode', 'getNode', (['"""/config/matcher"""', '(True)'], {}), "('/config/matcher', True)\n", (10822, 10847), False, 'from props import getNode, PropertyNode\n'), ((11392, 11406), 'lib.camera.get_K', 'camera.get_K', ([], {}), '()\n', (11404, 11406), False, 'from lib import camera\n'), ((11433, 11457), 'lib.logger.log', 'log', (['"""Matching features"""'], {}), "('Matching features')\n", (11436, 11457), False, 'from lib.logger import log\n'), ((11493, 11512), 'lib.matcher.configure', 'matcher.configure', ([], {}), '()\n', (11510, 11512), False, 'from lib import matcher\n'), ((11517, 11629), 'lib.matcher.find_matches', 'matcher.find_matches', (['proj', 'K'], {'strategy': 'args.match_strategy', 'transform': 'args.filter', 'sort': '(True)', 'review': '(False)'}), '(proj, K, strategy=args.match_strategy, transform=args.\n filter, sort=True, review=False)\n', (11537, 11629), False, 'from lib import matcher\n'), ((11800, 11887), 'lib.logger.log', 'log', (["('Average # of features per image found = %.0f' % (feature_count / image_count)\n )"], {}), "('Average # of features per image found = %.0f' % (feature_count /\n image_count))\n", (11803, 11887), False, 'from lib.logger import log\n'), ((11889, 11911), 'lib.state.update', 'state.update', (['"""STEP3a"""'], {}), "('STEP3a')\n", (11901, 11911), False, 'from lib import state\n'), ((11987, 12008), 'lib.state.check', 'state.check', (['"""STEP3b"""'], {}), "('STEP3b')\n", (11998, 12008), False, 'from lib import state\n'), ((12117, 12153), 'lib.match_cleanup.merge_duplicates', 'match_cleanup.merge_duplicates', (['proj'], {}), '(proj)\n', (12147, 12153), False, 'from lib import match_cleanup\n'), ((12158, 12197), 'lib.match_cleanup.check_for_pair_dups', 'match_cleanup.check_for_pair_dups', (['proj'], {}), '(proj)\n', (12191, 12197), False, 'from lib import match_cleanup\n'), ((12202, 12240), 'lib.match_cleanup.check_for_1vn_dups', 'match_cleanup.check_for_1vn_dups', (['proj'], {}), '(proj)\n', (12234, 12240), False, 'from lib import match_cleanup\n'), ((12262, 12302), 'lib.match_cleanup.make_match_structure', 'match_cleanup.make_match_structure', (['proj'], {}), '(proj)\n', (12296, 12302), False, 'from lib import match_cleanup\n'), ((12325, 12373), 'lib.match_cleanup.link_matches', 'match_cleanup.link_matches', (['proj', 'matches_direct'], {}), '(proj, matches_direct)\n', (12351, 12373), False, 'from lib import match_cleanup\n'), ((12379, 12430), 'lib.logger.log', 'log', (['"""Writing full group chain file:"""', 'matches_name'], {}), "('Writing full group chain file:', 
matches_name)\n", (12382, 12430), False, 'from lib.logger import log\n'), ((12495, 12517), 'lib.state.update', 'state.update', (['"""STEP3b"""'], {}), "('STEP3b')\n", (12507, 12517), False, 'from lib import state\n'), ((12526, 12547), 'lib.state.check', 'state.check', (['"""STEP3c"""'], {}), "('STEP3c')\n", (12537, 12547), False, 'from lib import state\n'), ((12590, 12619), 'lib.camera.get_K', 'camera.get_K', ([], {'optimized': '(False)'}), '(optimized=False)\n', (12602, 12619), False, 'from lib import camera\n'), ((12629, 12645), 'numpy.linalg.inv', 'np.linalg.inv', (['K'], {}), '(K)\n', (12642, 12645), True, 'import numpy as np\n'), ((12651, 12695), 'lib.logger.log', 'log', (['"""Loading source matches:"""', 'matches_name'], {}), "('Loading source matches:', matches_name)\n", (12654, 12695), False, 'from lib.logger import log\n'), ((12762, 12816), 'lib.match_cleanup.triangulate_smart', 'match_cleanup.triangulate_smart', (['proj', 'matches_grouped'], {}), '(proj, matches_grouped)\n', (12793, 12816), False, 'from lib import match_cleanup\n'), ((12821, 12874), 'lib.logger.log', 'log', (['"""Writing triangulated group file:"""', 'matches_name'], {}), "('Writing triangulated group file:', matches_name)\n", (12824, 12874), False, 'from lib.logger import log\n'), ((12939, 12961), 'lib.state.update', 'state.update', (['"""STEP3c"""'], {}), "('STEP3c')\n", (12951, 12961), False, 'from lib import state\n'), ((12970, 12991), 'lib.state.check', 'state.check', (['"""STEP3d"""'], {}), "('STEP3d')\n", (12981, 12991), False, 'from lib import state\n'), ((13026, 13070), 'lib.logger.log', 'log', (['"""Loading source matches:"""', 'matches_name'], {}), "('Loading source matches:', matches_name)\n", (13029, 13070), False, 'from lib.logger import log\n'), ((13246, 13286), 'lib.groups.compute', 'groups.compute', (['proj.image_list', 'matches'], {}), '(proj.image_list, matches)\n', (13260, 13286), False, 'from lib import groups\n'), ((13291, 13333), 'lib.groups.save', 'groups.save', (['proj.analysis_dir', 'group_list'], {}), '(proj.analysis_dir, group_list)\n', (13302, 13333), False, 'from lib import groups\n'), ((13471, 13480), 'lib.logger.log', 'log', (['line'], {}), '(line)\n', (13474, 13480), False, 'from lib.logger import log\n'), ((13486, 13523), 'lib.logger.log', 'log', (['"""Counting allocated features..."""'], {}), "('Counting allocated features...')\n", (13489, 13523), False, 'from lib.logger import log\n'), ((13690, 13742), 'lib.logger.log', 'log', (['"""Writing grouped tagged matches:"""', 'matches_name'], {}), "('Writing grouped tagged matches:', matches_name)\n", (13693, 13742), False, 'from lib.logger import log\n'), ((13799, 13821), 'lib.state.update', 'state.update', (['"""STEP3d"""'], {}), "('STEP3d')\n", (13811, 13821), False, 'from lib import state\n'), ((14032, 14052), 'lib.state.check', 'state.check', (['"""STEP4"""'], {}), "('STEP4')\n", (14043, 14052), False, 'from lib import state\n'), ((14087, 14131), 'lib.logger.log', 'log', (['"""Loading source matches:"""', 'matches_name'], {}), "('Loading source matches:', matches_name)\n", (14090, 14131), False, 'from lib.logger import log\n'), ((14303, 14333), 'lib.groups.load', 'groups.load', (['proj.analysis_dir'], {}), '(proj.analysis_dir)\n', (14314, 14333), False, 'from lib import groups\n'), ((14345, 14378), 'lib.optimizer.Optimizer', 'optimizer.Optimizer', (['args.project'], {}), '(args.project)\n', (14364, 14378), False, 'from lib import optimizer\n'), ((14824, 14884), 'lib.camera.set_K', 'camera.set_K', (['fx_opt', 'fy_opt', 'cu_opt', 
'cv_opt'], {'optimized': '(True)'}), '(fx_opt, fy_opt, cu_opt, cv_opt, optimized=True)\n', (14836, 14884), False, 'from lib import camera\n'), ((15173, 15229), 'lib.logger.log', 'log', (['"""Writing optimized (fitted) matches:"""', 'matches_name'], {}), "('Writing optimized (fitted) matches:', matches_name)\n", (15176, 15229), False, 'from lib.logger import log\n'), ((15286, 15307), 'lib.state.update', 'state.update', (['"""STEP4"""'], {}), "('STEP4')\n", (15298, 15307), False, 'from lib import state\n'), ((15514, 15534), 'lib.state.check', 'state.check', (['"""STEP6"""'], {}), "('STEP6')\n", (15525, 15534), False, 'from lib import state\n'), ((15607, 15637), 'lib.groups.load', 'groups.load', (['proj.analysis_dir'], {}), '(proj.analysis_dir)\n', (15618, 15637), False, 'from lib import groups\n'), ((15643, 15697), 'lib.render_panda3d.build_map', 'render_panda3d.build_map', (['proj', 'group_list', 'args.group'], {}), '(proj, group_list, args.group)\n', (15667, 15697), False, 'from lib import render_panda3d\n'), ((6162, 6200), 'lib.camera.set_mount_params', 'camera.set_mount_params', (['(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0)\n', (6185, 6200), False, 'from lib import camera\n'), ((6637, 6783), 'lib.logger.log', 'log', (['"""Camera autodetection failed. Consider running the new camera script to create a camera config and then try running this script again."""'], {}), "('Camera autodetection failed. Consider running the new camera script to create a camera config and then try running this script again.'\n )\n", (6640, 6783), False, 'from lib.logger import log\n'), ((6797, 6851), 'lib.logger.log', 'log', (['"""Specified camera config not found:"""', 'args.camera'], {}), "('Specified camera config not found:', args.camera)\n", (6800, 6851), False, 'from lib.logger import log\n'), ((7567, 7603), 'lib.logger.log', 'log', (['"""Found a pose file:"""', 'meta_file'], {}), "('Found a pose file:', meta_file)\n", (7570, 7603), False, 'from lib.logger import log\n'), ((7614, 7664), 'lib.pose.make_pix4d', 'pose.make_pix4d', (['args.project', 'args.force_altitude'], {}), '(args.project, args.force_altitude)\n', (7629, 7664), False, 'from lib import pose\n'), ((7964, 8043), 'lib.pose.set_aircraft_poses', 'pose.set_aircraft_poses', (['proj', 'meta_file'], {'order': '"""ypr"""', 'max_angle': 'args.max_angle'}), "(proj, meta_file, order='ypr', max_angle=args.max_angle)\n", (7987, 8043), False, 'from lib import pose\n'), ((8082, 8148), 'lib.logger.log', 'log', (['"""Error: no pose file found in image directory:"""', 'args.project'], {}), "('Error: no pose file found in image directory:', args.project)\n", (8085, 8148), False, 'from lib.logger import log\n'), ((6282, 6320), 'lib.camera.set_mount_params', 'camera.set_mount_params', (['(0.0)', '(0.0)', '(0.0)'], {}), '(0.0, 0.0, 0.0)\n', (6305, 6320), False, 'from lib import camera\n'), ((6399, 6467), 'lib.camera.set_mount_params', 'camera.set_mount_params', (['args.yaw_deg', 'args.pitch_deg', 'args.roll_deg'], {}), '(args.yaw_deg, args.pitch_deg, args.roll_deg)\n', (6422, 6467), False, 'from lib import camera\n')]
|
import pytorch_lightning as pl
import torch
from torch.utils.data import Dataset, DataLoader, random_split
from sklearn.preprocessing import LabelEncoder
from PIL import Image
import numpy as np
import pandas as pd
def encode_labels(labels):
le = LabelEncoder()
encoded = le.fit_transform(labels)
return le, encoded
def read_image(path, w, h):
img = Image.open(path)
img = img.resize((w, h))
img = img.convert('RGB')
return np.array(img)
class ImageNetDataset(Dataset):
def __init__(self, width, height, img_paths, labels):
super().__init__()
self.width, self.height = width, height
self.img_paths = img_paths
self.encoder, self.labels = encode_labels(labels)
def __len__(self):
return self.img_paths.shape[0]
def __getitem__(self, i):
img = read_image(self.img_paths[i], self.width, self.height)
label = self.labels[i]
img = np.transpose(img, (2, 0, 1)).astype('float32')/255
return img, label
class DataModule(pl.LightningDataModule):
def __init__(self, path, n, width, height, batch_size=32, num_workers=8, split={'train':.8, 'test':.1, 'val':.1}):
super().__init__()
self.path = path
self.n = n
self.width, self.height = width, height
self.batch_size = batch_size
self.num_workers = num_workers
self.split = split
def prepare_data(self):
        # read the first n rows of the index CSV: column 0 holds the image
        # paths, column 1 holds the class labels
        df = pd.read_csv(self.path).to_numpy()[:self.n]
        img_paths, labels = df[:, 0], df[:, 1]
self.dataset = ImageNetDataset(self.width, self.height, img_paths, labels)
def setup(self, stage=None):
if stage=='fit' or stage is None:
n = len(self.dataset)
l = [self.split['val'], self.split['test']]
val, test = map(lambda x: int(x*n), l)
train = n - (val + test)
self.train, self.val, self.test = random_split(self.dataset, [train, val, test], generator=torch.Generator().manual_seed(42))
def train_dataloader(self):
return DataLoader(self.train, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True)
def val_dataloader(self):
return DataLoader(self.val, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True)
def test_dataloader(self):
return DataLoader(self.test, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=True)
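# Illustrative usage sketch (not part of the original module); the CSV path,
# image size and sample count below are assumed values:
#   dm = DataModule('imagenet_index.csv', n=10000, width=224, height=224,
#                   batch_size=64, num_workers=4)
#   dm.prepare_data()
#   dm.setup('fit')
#   images, labels = next(iter(dm.train_dataloader()))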
|
[
"sklearn.preprocessing.LabelEncoder",
"PIL.Image.open",
"pandas.read_csv",
"numpy.array",
"torch.utils.data.DataLoader",
"numpy.transpose",
"torch.Generator"
] |
[((256, 270), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (268, 270), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((384, 400), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (394, 400), False, 'from PIL import Image\n'), ((470, 483), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (478, 483), True, 'import numpy as np\n'), ((2125, 2227), 'torch.utils.data.DataLoader', 'DataLoader', (['self.train'], {'batch_size': 'self.batch_size', 'num_workers': 'self.num_workers', 'pin_memory': '(True)'}), '(self.train, batch_size=self.batch_size, num_workers=self.\n num_workers, pin_memory=True)\n', (2135, 2227), False, 'from torch.utils.data import Dataset, DataLoader, random_split\n'), ((2269, 2369), 'torch.utils.data.DataLoader', 'DataLoader', (['self.val'], {'batch_size': 'self.batch_size', 'num_workers': 'self.num_workers', 'pin_memory': '(True)'}), '(self.val, batch_size=self.batch_size, num_workers=self.\n num_workers, pin_memory=True)\n', (2279, 2369), False, 'from torch.utils.data import Dataset, DataLoader, random_split\n'), ((2412, 2513), 'torch.utils.data.DataLoader', 'DataLoader', (['self.test'], {'batch_size': 'self.batch_size', 'num_workers': 'self.num_workers', 'pin_memory': '(True)'}), '(self.test, batch_size=self.batch_size, num_workers=self.\n num_workers, pin_memory=True)\n', (2422, 2513), False, 'from torch.utils.data import Dataset, DataLoader, random_split\n'), ((974, 1002), 'numpy.transpose', 'np.transpose', (['img', '(2, 0, 1)'], {}), '(img, (2, 0, 1))\n', (986, 1002), True, 'import numpy as np\n'), ((1503, 1525), 'pandas.read_csv', 'pd.read_csv', (['self.path'], {}), '(self.path)\n', (1514, 1525), True, 'import pandas as pd\n'), ((2042, 2059), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (2057, 2059), False, 'import torch\n')]
|
import argparse
import shutil
import numpy as np
from project.data_preprocessing.preprocessing import Preprocessor
from project.data_preprocessing.data_loader import Loader
from project.models.model import Model
from prostagma.techniques.grid_search import GridSearch
from prostagma.performances.cross_validation import CrossValidation
parser = argparse.ArgumentParser()
""" General Parameters """
parser.add_argument('--test', type=bool, default=False,
help='if True test the model.')
parser.add_argument('--train', type=bool, default=False,
help='if True train the model.')
""" Model Parameters """
parser.add_argument('--log_dir', type=str, default='./tensorbaord',
	help='directory where to store TensorBoard values.')
parser.add_argument('--model_path', type=str,
default='./project/weights/model',
help='model checkpoints directory.')
parser.add_argument('--epochs', type=int, default=10,
help='number of batch iterations.')
parser.add_argument('--batch_size', type=int, default=32,
help='number of samples in the training batch.')
parser.add_argument('--number_of_samples', type=int, default=1500,
help='number of samples to load in memory.')
parser.add_argument('--train_samples', type=int, default=50,
	help='number of training samples.')
parser.add_argument('--val_samples', type=int, default=50,
	help='number of validation samples.')
parser.add_argument('--test_samples', type=int, default=100,
	help='number of test samples.')
args = parser.parse_args()
def main():
# Remove Tensorboard Folder
try:
shutil.rmtree('./tensorbaord')
except FileNotFoundError:
pass
# Fix the seed
np.random.seed(0)
# Load the data
loader = Loader(number_of_samples=args.number_of_samples)
X, y = loader.load_data()
print("Loaded the data...")
# Preprocess the data
preprocessor = Preprocessor(
train_samples=args.train_samples,
test_samples=args.test_samples,
val_samples=args.val_samples)
X_train, y_train, X_test, y_test, X_val, y_val = preprocessor.fit_transform(X, y)
# Define the Model
model = Model(
log_dir=args.log_dir,
model_path=args.model_path
)
# Directly Validate the model
validator = CrossValidation(
k_fold=5,
epochs=args.epochs,
batch_size=args.batch_size)
results = validator.fit(X_train, y_train, model.build_model)
print("Mean: %f Std(%f)" % (results.mean(), results.std()))
# Tuning The Parameters
# Define the dictionary of parameters
parameters = {
"dropout" : [0.25, 0.5, 0.75],
"learning_rate" : [0.1, 0.01, 0.001, 0.0001]
}
# Define the Strategy to use
strategy = GridSearch(
parameters=parameters,
model=model.build_model,
performance_validator=CrossValidation(
k_fold=5,
epochs=args.epochs,
batch_size=args.batch_size
)
)
strategy.fit(X_train, y_train)
# Show the results
print("Best Parameters: ")
print(strategy.best_param)
print("Best Score Obtained: ")
print(strategy.best_score)
return
main()
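# Example invocation (illustrative; the script/module name is an assumption):
#   python run_training.py --epochs 20 --batch_size 64 --number_of_samples 1500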
|
[
"project.data_preprocessing.data_loader.Loader",
"argparse.ArgumentParser",
"project.data_preprocessing.preprocessing.Preprocessor",
"numpy.random.seed",
"shutil.rmtree",
"prostagma.performances.cross_validation.CrossValidation",
"project.models.model.Model"
] |
[((348, 373), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (371, 373), False, 'import argparse\n'), ((1718, 1735), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1732, 1735), True, 'import numpy as np\n'), ((1770, 1818), 'project.data_preprocessing.data_loader.Loader', 'Loader', ([], {'number_of_samples': 'args.number_of_samples'}), '(number_of_samples=args.number_of_samples)\n', (1776, 1818), False, 'from project.data_preprocessing.data_loader import Loader\n'), ((1927, 2040), 'project.data_preprocessing.preprocessing.Preprocessor', 'Preprocessor', ([], {'train_samples': 'args.train_samples', 'test_samples': 'args.test_samples', 'val_samples': 'args.val_samples'}), '(train_samples=args.train_samples, test_samples=args.\n test_samples, val_samples=args.val_samples)\n', (1939, 2040), False, 'from project.data_preprocessing.preprocessing import Preprocessor\n'), ((2176, 2231), 'project.models.model.Model', 'Model', ([], {'log_dir': 'args.log_dir', 'model_path': 'args.model_path'}), '(log_dir=args.log_dir, model_path=args.model_path)\n', (2181, 2231), False, 'from project.models.model import Model\n'), ((2309, 2382), 'prostagma.performances.cross_validation.CrossValidation', 'CrossValidation', ([], {'k_fold': '(5)', 'epochs': 'args.epochs', 'batch_size': 'args.batch_size'}), '(k_fold=5, epochs=args.epochs, batch_size=args.batch_size)\n', (2324, 2382), False, 'from prostagma.performances.cross_validation import CrossValidation\n'), ((1616, 1646), 'shutil.rmtree', 'shutil.rmtree', (['"""./tensorbaord"""'], {}), "('./tensorbaord')\n", (1629, 1646), False, 'import shutil\n'), ((2874, 2947), 'prostagma.performances.cross_validation.CrossValidation', 'CrossValidation', ([], {'k_fold': '(5)', 'epochs': 'args.epochs', 'batch_size': 'args.batch_size'}), '(k_fold=5, epochs=args.epochs, batch_size=args.batch_size)\n', (2889, 2947), False, 'from prostagma.performances.cross_validation import CrossValidation\n')]
|
import numpy as np
import requests
import talib
class stock_ins:
BASE_URL = "https://paper-api.alpaca.markets"
DATA_URL = "https://data.alpaca.markets"
def __init__(self, stock_name, save_len, api_key, secret_key):
self.stock_name = stock_name
self.save_len = save_len
self.ask_data = []
self.bid_data = []
self.HEADERS = {'APCA-API-KEY-ID': api_key, 'APCA-API-SECRET-KEY': secret_key}
def __get_bid(self):
return requests.get("{}/v1/last/stocks/{}".format(self.DATA_URL, self.stock_name), headers=self.HEADERS).json()["last"]["price"]
def __get_ask(self):
return requests.get("{}/v1/last_quote/stocks/{}".format(self.DATA_URL, self.stock_name), headers=self.HEADERS).json()["last"]["askprice"]
def update(self):
# this will get new bid and ask data and resize it
bid = self.__get_bid()
ask = self.__get_ask()
if len(self.ask_data) >= self.save_len:
self.ask_data.pop(self.save_len-1)
self.bid_data.pop(self.save_len-1)
self.bid_data.insert(0, bid)
self.ask_data.insert(0, ask)
def get_indicator(self, ind, *, period_len=None, data=None):
# this will return any indicator available in talib in right format
data = self.ask_data if data is None else data
data = np.array(data, dtype="double")[::-1]
if period_len is None:
ind = getattr(talib, ind)(data)
else:
ind = getattr(talib, ind)(data, period_len)
return ind[::-1]
def order(self, data):
return requests.post("{}/v2/orders".format(self.BASE_URL), json=data, headers=self.HEADERS)
if __name__ == "__main__":
# test run, if everything is working
import config
import time
import market
stocks = ["DB", "TSLA", "MSFT"]
interval = 60 # interval time in seconds: minute data=60
save_len = 200 # length of saved prices
Key = config.key
sKey = config.sKey
stock_list = []
for stock in stocks:
stock_list.append(stock_ins(stock, save_len, Key, sKey))
while True:
if market.is_open():
start_timer = time.time()
for stock in stock_list:
stock.update() # this will update the bid and ask price
print(stock.get_indicator("EMA", period_len=2, data=[1, 2, 3, 4, 5]))
data = {
"side": "buy",
"symbol": stock.stock_name,
"type": "market",
"qty": "1",
"time_in_force": "gtc",
} # settings for order: order type etc.
# print(stock.order(data)) # this will order a stock with json=data
                print(stock.bid_data[0], stock.ask_data[0], len(stock.ask_data))  # latest bid/ask and history length
sleep_time = interval - (time.time()-start_timer)
print("Waiting {:.2f}".format(sleep_time))
time.sleep(sleep_time)
|
[
"numpy.array",
"market.is_open",
"time.sleep",
"time.time"
] |
[((2142, 2158), 'market.is_open', 'market.is_open', ([], {}), '()\n', (2156, 2158), False, 'import market\n'), ((1350, 1380), 'numpy.array', 'np.array', (['data'], {'dtype': '"""double"""'}), "(data, dtype='double')\n", (1358, 1380), True, 'import numpy as np\n'), ((2186, 2197), 'time.time', 'time.time', ([], {}), '()\n', (2195, 2197), False, 'import time\n'), ((2985, 3007), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (2995, 3007), False, 'import time\n'), ((2893, 2904), 'time.time', 'time.time', ([], {}), '()\n', (2902, 2904), False, 'import time\n')]
|
import numpy as np
from rotations import rot2, rot3
import mavsim_python_parameters_aerosonde_parameters as P
class Gravity:
def __init__(self, state):
self.mass = P.mass
self.gravity = P.gravity
self.state = state
    # gravitational force resolved in the body frame
@property
def force(self):
R_ib = self.state.rot
R_bi = R_ib.T
W_i = np.array([0, 0, P.mass*P.gravity])
F = R_bi @ W_i
return F.flatten()
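# Quick sanity check (illustrative; assumes state.rot holds the body-to-inertial
# rotation matrix): with a level attitude, state.rot = np.eye(3), the returned
# force is [0, 0, P.mass * P.gravity], i.e. weight pointing along +z ("down" in
# an NED inertial frame).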
|
[
"numpy.array"
] |
[((385, 421), 'numpy.array', 'np.array', (['[0, 0, P.mass * P.gravity]'], {}), '([0, 0, P.mass * P.gravity])\n', (393, 421), True, 'import numpy as np\n')]
|
from tensorflow.python.framework import ops
import tensorflow as tf
from utilities import model as md
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
import os
import time
import cv2
def model(photos_train, Y_train, photos_test, Y_test, learning_rate=0.0005,
num_epochs=1500, minibatch_size=128, print_cost=True):
"""
Implements a three-layer tensorflow neural network:
LINEAR->RELU->LINEAR->RELU->LINEAR->SOFTMAX.
Arguments:
    photos_train -- training set (table of photo ids)
    Y_train -- training set labels
    photos_test -- test set (table of photo ids)
    Y_test -- test set labels
learning_rate -- learning rate of the optimization
num_epochs -- number of epochs of the optimization loop
minibatch_size -- size of a minibatch
print_cost -- True to print the cost every 100 epochs
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
im_size = 64
ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables
tf.set_random_seed(1) # to keep consistent results
seed = 3 # to keep consistent results
(n_x, m) = (im_size * im_size * 3, len(photos_train)) # (n_x: input size, m : number of examples in the train set)
n_y = 6 # n_y : output size
costs = [] # To keep track of the cost
# Create Placeholders of shape (n_x, n_y)
X, Y = md.create_placeholders(n_x, n_y)
# Initialize parameters
parameters = md.initialize_parameters()
# Forward propagation: Build the forward propagation in the tensorflow graph
Z3 = md.forward_propagation(X, parameters)
# Cost function: Add cost function to tensorflow graph
cost = md.compute_cost(Z3, Y)
# Backpropagation: Define the tensorflow optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Initialize all the variables
init = tf.global_variables_initializer()
# Start the session to compute the tensorflow graph
with tf.Session() as sess:
# Run the initialization
sess.run(init)
path = os.path.dirname(__file__)
# Do the training loop
for epoch in range(num_epochs):
epoch_cost = 0. # Defines a cost related to an epoch
num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
seed = seed + 1
minibatches = md.random_mini_batches_chunk(photos_train, Y_train, minibatch_size, seed)
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
x_train_temp = np.array([cv2.imread(os.path.join(path, '../yelpData/resized64/') + i + '.png')
.reshape((1, im_size * im_size * 3)).T
for i in minibatch_X['photo_id']]).T[0]
# Flatten the training and test images
x_train_flatten = x_train_temp
# Normalize image vectors
x_train = x_train_flatten / 255.
# Convert training and test labels to one hot matrices
y_train = md.convert_to_one_hot(minibatch_Y.T, 6)
# IMPORTANT: The line that runs the graph on a minibatch.
# Run the session to execute the "optimizer" and the "cost",
# the feedict should contain a minibatch for (X,Y).
_, minibatch_cost = sess.run([optimizer, cost], feed_dict={X: x_train, Y: y_train})
epoch_cost += minibatch_cost / num_minibatches
# Print the cost every epoch
if print_cost == True and epoch % 5 == 0:
print("Cost after epoch %i: %f" % (epoch, epoch_cost))
if print_cost == True and epoch % 5 == 0:
costs.append(epoch_cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
# lets save the parameters in a variable
parameters = sess.run(parameters)
print("Parameters have been trained!")
# Calculate the correct predictions
correct_prediction = tf.equal(tf.argmax(Z3), tf.argmax(Y))
# Calculate accuracy on the test set
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
length = int(len(photos_train))
Y_train = md.convert_to_one_hot(Y_train.T, 6)
acc = 0
        # evaluate train accuracy in three chunks to keep memory usage bounded
        for chunk in range(0, 3):
            lo, hi = int(chunk * length / 3), int((chunk + 1) * length / 3)
            X_train_temp = np.array([cv2.imread(os.path.join(path, '../yelpData/resized64/') + i + '.png')
                                     .reshape((1, im_size * im_size * 3)).T
                                     for i in photos_train['photo_id'][lo:hi]]).T[0]
            accuracy_temp = accuracy.eval(
                {X: X_train_temp / 255, Y: Y_train[:, lo:hi]})
            acc += accuracy_temp
print("Train Accuracy: ", acc / 3)
X_test = np.array([cv2.imread(os.path.join(path, '../yelpData/resized64/') + i + '.png')
.reshape((1, im_size * im_size * 3)).T
for i in photos_test['photo_id']]).T[0]
X_test = X_test / 255.
Y_test = md.convert_to_one_hot(Y_test.T, 6)
print("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))
return parameters
if __name__ == '__main__':
x, y = md.get_data_chunk()
x_train, x_test, y_train, y_test = train_test_split(x, y,
test_size=.05,
random_state=42,
shuffle=True, stratify=y)
start = time.time()
parameters = model(x_train, y_train, x_test, y_test, num_epochs=500, learning_rate=0.001)
stop = time.time()
print("Time elapsed: " + str(stop - start))
path = os.path.dirname(__file__)
param_path = os.path.join(path, '../yelpData/') + 'parameters_5.npy'
np.save(param_path, parameters)
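# The saved parameters can be restored later (illustrative); np.save pickles the
# dict, so allow_pickle is required when loading:
#   parameters = np.load(param_path, allow_pickle=True).item()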
|
[
"tensorflow.python.framework.ops.reset_default_graph",
"matplotlib.pyplot.ylabel",
"utilities.model.forward_propagation",
"utilities.model.get_data_chunk",
"tensorflow.set_random_seed",
"tensorflow.cast",
"numpy.save",
"utilities.model.compute_cost",
"utilities.model.initialize_parameters",
"tensorflow.Session",
"matplotlib.pyplot.xlabel",
"tensorflow.train.AdamOptimizer",
"sklearn.model_selection.train_test_split",
"numpy.squeeze",
"os.path.dirname",
"utilities.model.convert_to_one_hot",
"time.time",
"matplotlib.pyplot.show",
"os.path.join",
"tensorflow.global_variables_initializer",
"tensorflow.argmax",
"utilities.model.create_placeholders",
"utilities.model.random_mini_batches_chunk"
] |
[((969, 994), 'tensorflow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', ([], {}), '()\n', (992, 994), False, 'from tensorflow.python.framework import ops\n'), ((1065, 1086), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1)'], {}), '(1)\n', (1083, 1086), True, 'import tensorflow as tf\n'), ((1415, 1447), 'utilities.model.create_placeholders', 'md.create_placeholders', (['n_x', 'n_y'], {}), '(n_x, n_y)\n', (1437, 1447), True, 'from utilities import model as md\n'), ((1494, 1520), 'utilities.model.initialize_parameters', 'md.initialize_parameters', ([], {}), '()\n', (1518, 1520), True, 'from utilities import model as md\n'), ((1612, 1649), 'utilities.model.forward_propagation', 'md.forward_propagation', (['X', 'parameters'], {}), '(X, parameters)\n', (1634, 1649), True, 'from utilities import model as md\n'), ((1721, 1743), 'utilities.model.compute_cost', 'md.compute_cost', (['Z3', 'Y'], {}), '(Z3, Y)\n', (1736, 1743), True, 'from utilities import model as md\n'), ((1930, 1963), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1961, 1963), True, 'import tensorflow as tf\n'), ((5647, 5666), 'utilities.model.get_data_chunk', 'md.get_data_chunk', ([], {}), '()\n', (5664, 5666), True, 'from utilities import model as md\n'), ((5706, 5791), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.05)', 'random_state': '(42)', 'shuffle': '(True)', 'stratify': 'y'}), '(x, y, test_size=0.05, random_state=42, shuffle=True,\n stratify=y)\n', (5722, 5791), False, 'from sklearn.model_selection import train_test_split\n'), ((5968, 5979), 'time.time', 'time.time', ([], {}), '()\n', (5977, 5979), False, 'import time\n'), ((6085, 6096), 'time.time', 'time.time', ([], {}), '()\n', (6094, 6096), False, 'import time\n'), ((6156, 6181), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (6171, 6181), False, 'import os\n'), ((6259, 6290), 'numpy.save', 'np.save', (['param_path', 'parameters'], {}), '(param_path, parameters)\n', (6266, 6290), True, 'import numpy as np\n'), ((2030, 2042), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2040, 2042), True, 'import tensorflow as tf\n'), ((2124, 2149), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2139, 2149), False, 'import os\n'), ((3987, 4005), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cost"""'], {}), "('cost')\n", (3997, 4005), True, 'import matplotlib.pyplot as plt\n'), ((4014, 4049), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""iterations (per tens)"""'], {}), "('iterations (per tens)')\n", (4024, 4049), True, 'import matplotlib.pyplot as plt\n'), ((4116, 4126), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4124, 4126), True, 'import matplotlib.pyplot as plt\n'), ((4555, 4590), 'utilities.model.convert_to_one_hot', 'md.convert_to_one_hot', (['Y_train.T', '(6)'], {}), '(Y_train.T, 6)\n', (4576, 4590), True, 'from utilities import model as md\n'), ((5474, 5508), 'utilities.model.convert_to_one_hot', 'md.convert_to_one_hot', (['Y_test.T', '(6)'], {}), '(Y_test.T, 6)\n', (5495, 5508), True, 'from utilities import model as md\n'), ((6199, 6233), 'os.path.join', 'os.path.join', (['path', '"""../yelpData/"""'], {}), "(path, '../yelpData/')\n", (6211, 6233), False, 'import os\n'), ((1816, 1867), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (1838, 1867), True, 'import tensorflow as 
tf\n'), ((2462, 2535), 'utilities.model.random_mini_batches_chunk', 'md.random_mini_batches_chunk', (['photos_train', 'Y_train', 'minibatch_size', 'seed'], {}), '(photos_train, Y_train, minibatch_size, seed)\n', (2490, 2535), True, 'from utilities import model as md\n'), ((3960, 3977), 'numpy.squeeze', 'np.squeeze', (['costs'], {}), '(costs)\n', (3970, 3977), True, 'import numpy as np\n'), ((4349, 4362), 'tensorflow.argmax', 'tf.argmax', (['Z3'], {}), '(Z3)\n', (4358, 4362), True, 'import tensorflow as tf\n'), ((4364, 4376), 'tensorflow.argmax', 'tf.argmax', (['Y'], {}), '(Y)\n', (4373, 4376), True, 'import tensorflow as tf\n'), ((4458, 4494), 'tensorflow.cast', 'tf.cast', (['correct_prediction', '"""float"""'], {}), "(correct_prediction, 'float')\n", (4465, 4494), True, 'import tensorflow as tf\n'), ((3233, 3272), 'utilities.model.convert_to_one_hot', 'md.convert_to_one_hot', (['minibatch_Y.T', '(6)'], {}), '(minibatch_Y.T, 6)\n', (3254, 3272), True, 'from utilities import model as md\n'), ((5234, 5278), 'os.path.join', 'os.path.join', (['path', '"""../yelpData/resized64/"""'], {}), "(path, '../yelpData/resized64/')\n", (5246, 5278), False, 'import os\n'), ((4686, 4730), 'os.path.join', 'os.path.join', (['path', '"""../yelpData/resized64/"""'], {}), "(path, '../yelpData/resized64/')\n", (4698, 4730), False, 'import os\n'), ((2723, 2767), 'os.path.join', 'os.path.join', (['path', '"""../yelpData/resized64/"""'], {}), "(path, '../yelpData/resized64/')\n", (2735, 2767), False, 'import os\n')]
|
import os
import scipy
import numpy as np
from ImageStatistics import UsefulImDirectory
import scipy.stats  # imported explicitly so that sp.stats.iqr is available
import scipy as sp
import ast
from bokeh.charts import Histogram, show
import pandas as pd
class Game(object):
def __init__(self, gamefolder):
self.gamefolder = os.path.abspath(gamefolder)
file = open(os.path.join(gamefolder, "sales"))
self.releaseplayers = int(file.readline().rstrip("\n").split(": ")[1])
file.close()
file = open(os.path.join(gamefolder, "imagelinks"))
self.gameid = int(file.readline().rstrip("\n"))
file.close()
self.images = UsefulImDirectory.ImAggregate(os.path.join(gamefolder, "imgs"))
self.stats = ["means", "variances", "medians", "iqrs", "stddevs", "contrast"]
self.data = None
def getdata(self):
if self.data is None:
self.data = [self.images.getdata("reds"), self.images.getdata("greens"), self.images.getdata("blues")]
return self.data
else:
return self.data
def getcontrast(self):
return self.images.getdata("contrast")
def getplayers(self):
return self.releaseplayers
def calcstat(self, stat):
if stat not in self.stats:
raise AssertionError("Please choose a valid stat")
if stat == "means":
return [np.mean(x) for x in self.getdata()]
elif stat == "variances":
return [np.var(x) for x in self.getdata()]
elif stat == "medians":
return [np.median(x) for x in self.getdata()]
elif stat == "iqrs":
return [sp.stats.iqr(x) for x in self.getdata()]
elif stat == "stddevs":
return [np.std(x) for x in self.getdata()]
elif stat == "contrast":
return self.getcontrast()
def storestats(self):
file = open(os.path.join(self.gamefolder, "stats"), 'w+')
for x in self.stats:
towrite = self.calcstat(x)
file.write(x + ": " + str(towrite) + "\n")
file.close()
def readstats(self):
file = open(os.path.join(self.gamefolder, "stats"))
means = ast.literal_eval(file.readline().rstrip("\n").split(": ")[1])
variances = ast.literal_eval(file.readline().rstrip("\n").split(": ")[1])
medians = ast.literal_eval(file.readline().rstrip("\n").split(": ")[1])
iqrs = ast.literal_eval(file.readline().rstrip("\n").split(": ")[1])
stddevs = ast.literal_eval(file.readline().rstrip("\n").split(": ")[1])
line = file.readline().rstrip("\n").split(": ")[1]
try:
contrast = ast.literal_eval(line)
except ValueError:
tocont = line.replace("nan, ", "")
contrast = ast.literal_eval(tocont)
file.close()
return {"means": means, "variances": variances, "medians": medians, "iqrs": iqrs, "stddevs": stddevs, "contrast": contrast}
def colorhistogram(self, color):
colors = ["red", "green", "blue"]
if color.lower() not in colors:
raise AssertionError("Please pick a valid color")
self.histograms = {}
tohist = {"red": 0, "green": 1, "blue": 2}
self.histograms[color.lower()] = Histogram(pd.DataFrame(self.getdata()[tohist[color.lower()]], columns=[color.lower()]),values=color.lower(),color=color.capitalize(),bins=255)
show(self.histograms[color.lower()])
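# Illustrative usage (the game folder path is an assumption; the folder is expected
# to contain "sales", "imagelinks" and an "imgs" directory, as read above):
#   g = Game('data/games/some_game')
#   g.storestats()                     # compute and cache per-channel statistics
#   print(g.readstats()['means'])
#   g.colorhistogram('red')            # show a bokeh histogram of the red channel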
|
[
"numpy.mean",
"numpy.median",
"scipy.stats.iqr",
"os.path.join",
"ast.literal_eval",
"numpy.std",
"os.path.abspath",
"numpy.var"
] |
[((262, 289), 'os.path.abspath', 'os.path.abspath', (['gamefolder'], {}), '(gamefolder)\n', (277, 289), False, 'import os\n'), ((310, 343), 'os.path.join', 'os.path.join', (['gamefolder', '"""sales"""'], {}), "(gamefolder, 'sales')\n", (322, 343), False, 'import os\n'), ((465, 503), 'os.path.join', 'os.path.join', (['gamefolder', '"""imagelinks"""'], {}), "(gamefolder, 'imagelinks')\n", (477, 503), False, 'import os\n'), ((634, 666), 'os.path.join', 'os.path.join', (['gamefolder', '"""imgs"""'], {}), "(gamefolder, 'imgs')\n", (646, 666), False, 'import os\n'), ((1885, 1923), 'os.path.join', 'os.path.join', (['self.gamefolder', '"""stats"""'], {}), "(self.gamefolder, 'stats')\n", (1897, 1923), False, 'import os\n'), ((2129, 2167), 'os.path.join', 'os.path.join', (['self.gamefolder', '"""stats"""'], {}), "(self.gamefolder, 'stats')\n", (2141, 2167), False, 'import os\n'), ((2661, 2683), 'ast.literal_eval', 'ast.literal_eval', (['line'], {}), '(line)\n', (2677, 2683), False, 'import ast\n'), ((1354, 1364), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (1361, 1364), True, 'import numpy as np\n'), ((2781, 2805), 'ast.literal_eval', 'ast.literal_eval', (['tocont'], {}), '(tocont)\n', (2797, 2805), False, 'import ast\n'), ((1444, 1453), 'numpy.var', 'np.var', (['x'], {}), '(x)\n', (1450, 1453), True, 'import numpy as np\n'), ((1531, 1543), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (1540, 1543), True, 'import numpy as np\n'), ((1618, 1633), 'scipy.stats.iqr', 'sp.stats.iqr', (['x'], {}), '(x)\n', (1630, 1633), True, 'import scipy as sp\n'), ((1711, 1720), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (1717, 1720), True, 'import numpy as np\n')]
|
from .ranking import CreditRanking
from .interleaving_method import InterleavingMethod
import numpy as np
from scipy.optimize import linprog
class Optimized(InterleavingMethod):
'''
Optimized Interleaving
Args:
lists: lists of document IDs
max_length: the maximum length of resultant interleaving.
If this is None (default), it is set to the minimum length
of the given lists.
sample_num: If this is None (default), an interleaved ranking is
generated every time when `interleave` is called.
Otherwise, `sample_num` rankings are sampled in the
initialization, one of which is returned when `interleave`
is called.
credit_func: either 'inverse' (1/rank) or 'negative' (-rank)
'''
def __init__(self, lists, max_length=None, sample_num=None,
credit_func='inverse', secure_sampling=False):
'''
lists: lists of document IDs
max_length: the maximum length of resultant interleaving.
If this is None (default), it is set to the minimum length
of the given lists.
sample_num: If this is None (default), an interleaved ranking is
generated every time when `interleave` is called.
Otherwise, `sample_num` rankings are sampled in the
initialization, one of which is returned when `interleave`
is called.
credit_func: either 'inverse' (1/rank) or 'negative' (-rank)
'''
if sample_num is None:
raise ValueError('sample_num cannot be None, '
+ 'i.e. the initial sampling is necessary')
if credit_func == 'inverse':
self._credit_func = lambda x: 1.0 / x
elif credit_func == 'negative':
self._credit_func = lambda x: -x
else:
raise ValueError('credit_func should be either inverse or negative')
self._secure_sampling = secure_sampling
super(Optimized, self).__init__(lists,
max_length=max_length, sample_num=sample_num)
# self._rankings (sampled rankings) is obtained here
res = self._compute_probabilities(lists, self._rankings)
is_success, self._probabilities, _ = res
self._probabilities /= np.sum(self._probabilities)
if not is_success:
raise ValueError('Optimization failed')
def _sample_rankings(self):
'''
Sample `sample_num` rankings
'''
distribution = {}
if self._secure_sampling:
rankings = set()
for _ in range(self.sample_num):
rankings.add(self._sample(self.max_length, self.lists))
for ranking in rankings:
distribution[ranking] = 1.0 / len(rankings)
else:
while len(distribution) < self.sample_num:
ranking = self._sample(self.max_length, self.lists)
distribution[ranking] = 1.0 / self.sample_num
self._rankings, self._probabilities = zip(*distribution.items())
def _sample(self, max_length, lists):
'''
Prefix constraint sampling
(Multileaved Comparisons for Fast Online Evaluation, CIKM'14)
max_length: the maximum length of resultant interleaving
lists: lists of document IDs
Return an instance of Ranking
'''
num_rankers = len(lists)
result = CreditRanking(num_rankers)
teams = set(range(num_rankers))
while len(result) < max_length:
if len(teams) == 0:
break
selected_team = np.random.choice(list(teams))
docs = [x for x in lists[selected_team] if not x in result]
if len(docs) > 0:
selected_doc = docs[0]
result.append(selected_doc)
else:
teams.remove(selected_team)
# assign credits
for docid in result:
for team in result.credits:
if docid in lists[team]:
rank = lists[team].index(docid) + 1
else:
rank = len(lists[team]) + 1
result.credits[team][docid] = self._credit_func(rank)
return result
def _compute_probabilities(self, lists, rankings):
'''
Solve the optimization problem in
(Multileaved Comparisons for Fast Online Evaluation, CIKM'14)
lists: lists of document IDs
rankings: a list of Ranking instances
Return a list of probabilities for input rankings
'''
# probability constraints
A_p_sum = np.array([1]*len(rankings))
# unbiasedness constraints
ub_cons = self._unbiasedness_constraints(lists, rankings)
# sensitivity
sensitivity = self._sensitivity(lists, rankings)
# constraints
A_eq = np.vstack((A_p_sum, ub_cons))
b_eq = np.array([1.0] + [0.0]*ub_cons.shape[0])
# solving the optimization problem
res = linprog(sensitivity, # objective function
A_eq=A_eq, b_eq=b_eq, # constraints
bounds=[(0, 1)]*len(rankings) # 0 <= p <= 1
)
return res.success, res.x, res.fun
def _unbiasedness_constraints(self, lists, rankings):
'''
for each k and team x, for a certain c_k:
sum_{L_i} {p_i} * sum^k_{j=1} ranking.credits[x][d_j] = c_k
In other words,
sum_{L_i} {p_i} * sum^k_{j=1}
(ranking.credits[x][d_j] - ranking.credits[x+1][d_j]) = 0
'''
result = []
credits = np.zeros((self.max_length, len(rankings), len(lists)))
for rid, ranking in enumerate(rankings):
for idx, docid in enumerate(ranking):
for team in ranking.credits:
credits[idx, rid, team] = ranking.credits[team][docid]
if idx > 0:
credits[idx, rid, team] += credits[idx-1, rid, team]
for i in range(len(lists) - 1):
result.append(credits[:, :, i] - credits[:, :, i+1])
result = np.vstack(result)
return result
def _sensitivity(self, lists, rankings):
'''
Expected variance
'''
# compute the mean of each ranking
mu = np.zeros(len(rankings))
for rid, ranking in enumerate(rankings):
for idx, docid in enumerate(ranking):
click_prob = 1.0 / (idx + 1)
credit = np.sum(
[ranking.credits[x][docid] for x in ranking.credits])
mu[rid] += click_prob * credit
mu /= len(lists)
# compute the variance
var = np.zeros(len(rankings))
for rid, ranking in enumerate(rankings):
for x in ranking.credits:
v = 0.0
for idx, docid in enumerate(ranking):
click_prob = 1.0 / (idx + 1)
if docid in ranking.credits[x]:
v += click_prob * ranking.credits[x][docid]
v -= mu[rid]
var[rid] += v ** 2
return var
@classmethod
def compute_scores(cls, ranking, clicks):
'''
ranking: an instance of Ranking
clicks: a list of indices clicked by a user
Return a list of scores of each ranker.
'''
return {i: sum([ranking.credits[i][ranking[c]] for c in clicks])
for i in ranking.credits}
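# Illustrative usage, relying on the `interleave` entry point mentioned in the
# class docstring (the base InterleavingMethod class is not shown here); the
# document IDs and click positions are made-up values:
#   method = Optimized(lists=[[1, 2, 3, 4], [2, 3, 1, 5]], sample_num=100)
#   ranking = method.interleave()
#   scores = Optimized.compute_scores(ranking, clicks=[0, 2])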
|
[
"numpy.sum",
"numpy.array",
"numpy.vstack"
] |
[((2386, 2413), 'numpy.sum', 'np.sum', (['self._probabilities'], {}), '(self._probabilities)\n', (2392, 2413), True, 'import numpy as np\n'), ((4982, 5011), 'numpy.vstack', 'np.vstack', (['(A_p_sum, ub_cons)'], {}), '((A_p_sum, ub_cons))\n', (4991, 5011), True, 'import numpy as np\n'), ((5027, 5069), 'numpy.array', 'np.array', (['([1.0] + [0.0] * ub_cons.shape[0])'], {}), '([1.0] + [0.0] * ub_cons.shape[0])\n', (5035, 5069), True, 'import numpy as np\n'), ((6217, 6234), 'numpy.vstack', 'np.vstack', (['result'], {}), '(result)\n', (6226, 6234), True, 'import numpy as np\n'), ((6602, 6662), 'numpy.sum', 'np.sum', (['[ranking.credits[x][docid] for x in ranking.credits]'], {}), '([ranking.credits[x][docid] for x in ranking.credits])\n', (6608, 6662), True, 'import numpy as np\n')]
|
# import key libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from wordcloud import WordCloud, STOPWORDS
import nltk
import re
from nltk.stem import PorterStemmer, WordNetLemmatizer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
import gensim
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS
import plotly.express as px
# Tensorflow
import tensorflow as tf
from tensorflow.keras.preprocessing.text import one_hot,Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Embedding, Input, LSTM, Conv1D, MaxPool1D, Bidirectional, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.utils import to_categorical
stock_df = pd.read_csv("stock_sentiment.csv")
# check for null values
stock_df.isnull().sum()
import string
string.punctuation
Test = '$I love AI & Machine learning!!'
Test_punc_removed = [char for char in Test if char not in string.punctuation]
Test_punc_removed_join = ''.join(Test_punc_removed)
Test_punc_removed_join
Test = 'Good morning beautiful people :)... #I am having fun learning Finance with Python!!'
Test_punc_removed = [char for char in Test if char not in string.punctuation]
Test_punc_removed
# Join the characters again to form the string.
Test_punc_removed_join = ''.join(Test_punc_removed)
Test_punc_removed_join
# Let's define a function to remove punctuations
def remove_punc(message):
Test_punc_removed = [char for char in message if char not in string.punctuation]
Test_punc_removed_join = ''.join(Test_punc_removed)
return Test_punc_removed_join
# Let's remove punctuations from our dataset
stock_df['Text Without Punctuation'] = stock_df['Text'].apply(remove_punc)
stock_df['Text'][2]
stock_df['Text Without Punctuation'][2]
# download stopwords
nltk.download("stopwords")
stopwords.words('english')
# Obtain additional stopwords from nltk
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
stop_words.extend(['from', 'subject', 're', 'edu', 'use','will','aap','co','day','user','stock','today','week','year'])
# stop_words.extend(['from', 'subject', 're', 'edu', 'use','will','aap','co','day','user','stock','today','week','year', 'https'])
# Remove stopwords and remove short words (fewer than 3 characters)
def preprocess(text):
result = []
for token in gensim.utils.simple_preprocess(text):
if len(token) >= 3 and token not in stop_words:
result.append(token)
return result
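# Hedged sanity check (not part of the original notebook): what preprocess()
# does to a made-up sentence -- it lower-cases, tokenizes, and drops stopwords
# and tokens shorter than 3 characters.
print(preprocess("Stocks will rally today, says the user"))  # typically ['stocks', 'rally', 'says']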
# apply pre-processing to the text column
stock_df['Text Without Punc & Stopwords'] = stock_df['Text Without Punctuation'].apply(preprocess)
stock_df['Text'][0]
stock_df['Text Without Punc & Stopwords'][0]
# join the words into a string
#stock_df['Processed Text 2'] = stock_df['Processed Text 2'].apply(lambda x: " ".join(x))
# join the words into a string
stock_df['Text Without Punc & Stopwords Joined'] = stock_df['Text Without Punc & Stopwords'].apply(lambda x: " ".join(x))
# plot the word cloud for text with positive sentiment
plt.figure(figsize = (20, 20))
wc = WordCloud(max_words = 1000 , width = 1600 , height = 800).generate(" ".join(stock_df[stock_df['Sentiment'] == 1]['Text Without Punc & Stopwords Joined']))
plt.imshow(wc, interpolation = 'bilinear');
nltk.download('punkt')
# word_tokenize is used to break up a string into words
print(stock_df['Text Without Punc & Stopwords Joined'][0])
print(nltk.word_tokenize(stock_df['Text Without Punc & Stopwords Joined'][0]))
# Obtain the maximum length of data in the document
# This will be later used when word embeddings are generated
maxlen = -1
for doc in stock_df['Text Without Punc & Stopwords Joined']:
tokens = nltk.word_tokenize(doc)
if(maxlen < len(tokens)):
maxlen = len(tokens)
print("The maximum number of words in any document is:", maxlen)
tweets_length = [ len(nltk.word_tokenize(x)) for x in stock_df['Text Without Punc & Stopwords Joined'] ]
# Plot the distribution for the number of words in a text
fig = px.histogram(x = tweets_length, nbins = 50)
# Obtain the total words present in the dataset
list_of_words = []
for i in stock_df['Text Without Punc & Stopwords']:
for j in i:
list_of_words.append(j)
# Obtain the total number of unique words
total_words = len(list(set(list_of_words)))
total_words
# split the data into test and train
X = stock_df['Text Without Punc & Stopwords']
y = stock_df['Sentiment']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1)
# Create a tokenizer to tokenize the words and create sequences of tokenized words
tokenizer = Tokenizer(num_words = total_words)
tokenizer.fit_on_texts(X_train)
# Training data
train_sequences = tokenizer.texts_to_sequences(X_train)
# Testing data
test_sequences = tokenizer.texts_to_sequences(X_test)
print("The encoding for document\n", X_train[1:2],"\n is: ", train_sequences[1])
# Add padding to training and testing
padded_train = pad_sequences(train_sequences, maxlen = 29, padding = 'post', truncating = 'post')
padded_test = pad_sequences(test_sequences, maxlen = 29, truncating = 'post')
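# Hedged illustration (not part of the original notebook): what pad_sequences
# does to a toy batch with the same post-padding/truncating settings used above.
_toy_padded = pad_sequences([[5, 3], [7, 2, 9, 1]], maxlen = 3, padding = 'post', truncating = 'post')
# -> [[5, 3, 0], [7, 2, 9]]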
for i, doc in enumerate(padded_train[:3]):
print("The padded encoding for document:", i+1," is:", doc)
# Convert the data to categorical 2D representation
y_train_cat = to_categorical(y_train, 2)
y_test_cat = to_categorical(y_test, 2)
# Add padding to training and testing
padded_train = pad_sequences(train_sequences, maxlen = 15, padding = 'post', truncating = 'post')
padded_test = pad_sequences(test_sequences, maxlen = 15, truncating = 'post')
# Sequential Model
model = Sequential()
# embedding layer
model.add(Embedding(total_words, output_dim = 512))
# LSTM layer (a plain LSTM; the Bidirectional wrapper imported above is not used here)
model.add(LSTM(256))
# Dense layers
model.add(Dense(128, activation = 'relu'))
model.add(Dropout(0.3))
model.add(Dense(2,activation = 'softmax'))
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['acc'])
model.summary()
# train the model
model.fit(padded_train, y_train_cat, batch_size = 32, validation_split = 0.2, epochs = 2)
# make prediction
pred = model.predict(padded_test)
# make prediction
prediction = []
for i in pred:
prediction.append(np.argmax(i))
# list containing original values
original = []
for i in y_test_cat:
original.append(np.argmax(i))
# accuracy score on test data
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(original, prediction)
accuracy
# Plot the confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(original, prediction)
sns.heatmap(cm, annot = True)
|
[
"pandas.read_csv",
"nltk.download",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"tensorflow.keras.layers.Dense",
"gensim.utils.simple_preprocess",
"matplotlib.pyplot.imshow",
"nltk.corpus.stopwords.words",
"tensorflow.keras.models.Sequential",
"sklearn.metrics.confusion_matrix",
"tensorflow.keras.utils.to_categorical",
"plotly.express.histogram",
"nltk.word_tokenize",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.layers.Dropout",
"numpy.argmax",
"seaborn.heatmap",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.preprocessing.text.Tokenizer",
"sklearn.metrics.accuracy_score",
"wordcloud.WordCloud",
"matplotlib.pyplot.figure"
] |
[((925, 959), 'pandas.read_csv', 'pd.read_csv', (['"""stock_sentiment.csv"""'], {}), "('stock_sentiment.csv')\n", (936, 959), True, 'import pandas as pd\n'), ((2040, 2066), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (2053, 2066), False, 'import nltk\n'), ((2068, 2094), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2083, 2094), False, 'from nltk.corpus import stopwords\n'), ((2187, 2213), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2202, 2213), False, 'from nltk.corpus import stopwords\n'), ((3306, 3334), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (3316, 3334), True, 'import matplotlib.pyplot as plt\n'), ((3500, 3540), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wc'], {'interpolation': '"""bilinear"""'}), "(wc, interpolation='bilinear')\n", (3510, 3540), True, 'import matplotlib.pyplot as plt\n'), ((3547, 3569), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (3560, 3569), False, 'import nltk\n'), ((4296, 4335), 'plotly.express.histogram', 'px.histogram', ([], {'x': 'tweets_length', 'nbins': '(50)'}), '(x=tweets_length, nbins=50)\n', (4308, 4335), True, 'import plotly.express as px\n'), ((4818, 4855), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.1)'}), '(X, y, test_size=0.1)\n', (4834, 4855), False, 'from sklearn.model_selection import train_test_split\n'), ((4955, 4987), 'tensorflow.keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'total_words'}), '(num_words=total_words)\n', (4964, 4987), False, 'from tensorflow.keras.preprocessing.text import one_hot, Tokenizer\n'), ((5309, 5385), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['train_sequences'], {'maxlen': '(29)', 'padding': '"""post"""', 'truncating': '"""post"""'}), "(train_sequences, maxlen=29, padding='post', truncating='post')\n", (5322, 5385), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((5407, 5466), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['test_sequences'], {'maxlen': '(29)', 'truncating': '"""post"""'}), "(test_sequences, maxlen=29, truncating='post')\n", (5420, 5466), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((5649, 5675), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_train', '(2)'], {}), '(y_train, 2)\n', (5663, 5675), False, 'from tensorflow.keras.utils import to_categorical\n'), ((5690, 5715), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (['y_test', '(2)'], {}), '(y_test, 2)\n', (5704, 5715), False, 'from tensorflow.keras.utils import to_categorical\n'), ((5771, 5847), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['train_sequences'], {'maxlen': '(15)', 'padding': '"""post"""', 'truncating': '"""post"""'}), "(train_sequences, maxlen=15, padding='post', truncating='post')\n", (5784, 5847), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((5869, 5928), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['test_sequences'], {'maxlen': '(15)', 'truncating': '"""post"""'}), "(test_sequences, maxlen=15, truncating='post')\n", (5882, 5928), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((5962, 5974), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', 
(5972, 5974), False, 'from tensorflow.keras.models import Sequential\n'), ((6785, 6821), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['original', 'prediction'], {}), '(original, prediction)\n', (6799, 6821), False, 'from sklearn.metrics import accuracy_score\n'), ((6913, 6951), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['original', 'prediction'], {}), '(original, prediction)\n', (6929, 6951), False, 'from sklearn.metrics import confusion_matrix\n'), ((6953, 6980), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': '(True)'}), '(cm, annot=True)\n', (6964, 6980), True, 'import seaborn as sns\n'), ((2595, 2631), 'gensim.utils.simple_preprocess', 'gensim.utils.simple_preprocess', (['text'], {}), '(text)\n', (2625, 2631), False, 'import gensim\n'), ((3694, 3765), 'nltk.word_tokenize', 'nltk.word_tokenize', (["stock_df['Text Without Punc & Stopwords Joined'][0]"], {}), "(stock_df['Text Without Punc & Stopwords Joined'][0])\n", (3712, 3765), False, 'import nltk\n'), ((3973, 3996), 'nltk.word_tokenize', 'nltk.word_tokenize', (['doc'], {}), '(doc)\n', (3991, 3996), False, 'import nltk\n'), ((6007, 6045), 'tensorflow.keras.layers.Embedding', 'Embedding', (['total_words'], {'output_dim': '(512)'}), '(total_words, output_dim=512)\n', (6016, 6045), False, 'from tensorflow.keras.layers import Dense, Flatten, Embedding, Input, LSTM, Conv1D, MaxPool1D, Bidirectional, Dropout\n'), ((6093, 6102), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(256)'], {}), '(256)\n', (6097, 6102), False, 'from tensorflow.keras.layers import Dense, Flatten, Embedding, Input, LSTM, Conv1D, MaxPool1D, Bidirectional, Dropout\n'), ((6133, 6162), 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (6138, 6162), False, 'from tensorflow.keras.layers import Dense, Flatten, Embedding, Input, LSTM, Conv1D, MaxPool1D, Bidirectional, Dropout\n'), ((6177, 6189), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (6184, 6189), False, 'from tensorflow.keras.layers import Dense, Flatten, Embedding, Input, LSTM, Conv1D, MaxPool1D, Bidirectional, Dropout\n'), ((6202, 6232), 'tensorflow.keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (6207, 6232), False, 'from tensorflow.keras.layers import Dense, Flatten, Embedding, Input, LSTM, Conv1D, MaxPool1D, Bidirectional, Dropout\n'), ((3344, 3393), 'wordcloud.WordCloud', 'WordCloud', ([], {'max_words': '(1000)', 'width': '(1600)', 'height': '(800)'}), '(max_words=1000, width=1600, height=800)\n', (3353, 3393), False, 'from wordcloud import WordCloud, STOPWORDS\n'), ((4147, 4168), 'nltk.word_tokenize', 'nltk.word_tokenize', (['x'], {}), '(x)\n', (4165, 4168), False, 'import nltk\n'), ((6578, 6590), 'numpy.argmax', 'np.argmax', (['i'], {}), '(i)\n', (6587, 6590), True, 'import numpy as np\n'), ((6683, 6695), 'numpy.argmax', 'np.argmax', (['i'], {}), '(i)\n', (6692, 6695), True, 'import numpy as np\n')]
|
from ...isa.inst import *
import numpy as np
class Vwmacc_vv(Inst):
name = 'vwmacc.vv'
# vwmacc.vv vd, vs1, vs2, vm
def golden(self):
if self['vl']==0:
return self['ori']
result = self['ori'].copy()
maskflag = 1 if 'mask' in self else 0
vstart = self['vstart'] if 'vstart' in self else 0
for ii in range(vstart, self['vl']):
if (maskflag == 0) or (maskflag == 1 and np.unpackbits(self['mask'], bitorder='little')[ii] ):
result[ii] = self['vs2'][ii].astype(object) * self['vs1'][ii]+ self['ori'][ii].astype(object)
return result
class Vwmaccu_vv(Vwmacc_vv):
name = 'vwmaccu.vv'
class Vwmaccsu_vv(Vwmacc_vv):
name = 'vwmaccsu.vv'
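# Hedged illustration (not part of the original golden model): the widening
# multiply-accumulate that golden() implements, shown on plain numpy arrays.
# The operand values are made up; the real operands come from the Inst dict.
if __name__ == '__main__':
    vs1 = np.array([100, -3], dtype=np.int8)   # SEW-wide source
    vs2 = np.array([50, 7], dtype=np.int8)     # SEW-wide source
    ori = np.array([1, 2], dtype=np.int16)     # 2*SEW-wide destination (accumulator)
    vd = vs2.astype(object) * vs1 + ori.astype(object)
    print(vd)  # [5001 -19] -- products kept at full width before accumulating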
|
[
"numpy.unpackbits"
] |
[((455, 501), 'numpy.unpackbits', 'np.unpackbits', (["self['mask']"], {'bitorder': '"""little"""'}), "(self['mask'], bitorder='little')\n", (468, 501), True, 'import numpy as np\n')]
|
import collections
import re
import numpy
import pytest
import random
import time
import nidaqmx
from nidaqmx.constants import (
AcquisitionType, BusType, RegenerationMode)
from nidaqmx.error_codes import DAQmxErrors
from nidaqmx.utils import flatten_channel_string
from nidaqmx.tests.fixtures import x_series_device
from nidaqmx.tests.helpers import generate_random_seed
class TestWriteExceptions(object):
"""
Contains a collection of pytest tests that validate the Write error behavior
in the NI-DAQmx Python API.
These tests use only a single X Series device by utilizing the internal
loopback routes on the device.
"""
def test_overwrite(self, x_series_device):
# USB streaming is very tricky.
if not (x_series_device.bus_type == BusType.PCIE or x_series_device.bus_type == BusType.PXIE):
pytest.skip("Requires a plugin device.")
number_of_samples = 100
sample_rate = 1000
fifo_size = 8191
host_buffer_size = 1000
with nidaqmx.Task() as write_task:
samp_clk_terminal = '/{0}/Ctr0InternalOutput'.format(
x_series_device.name)
write_task.ao_channels.add_ao_voltage_chan(
x_series_device.ao_physical_chans[0].name, max_val=10, min_val=-10)
write_task.timing.cfg_samp_clk_timing(
sample_rate, source=samp_clk_terminal, sample_mode=AcquisitionType.CONTINUOUS,
samps_per_chan=number_of_samples)
# Don't allow regeneration - this enables explicit hardware flow control.
write_task.out_stream.regen_mode = RegenerationMode.DONT_ALLOW_REGENERATION
# This is the only entrypoint that correctly sets number_of_samples_written in error
# conditions prior to DAQmx 21.8.
writer = nidaqmx.stream_writers.AnalogUnscaledWriter(write_task.out_stream, auto_start=False)
# Fill up the host buffer first.
initial_write_data = numpy.zeros((1, host_buffer_size), dtype=numpy.int16)
writer.write_int16(initial_write_data)
# Start the write task. All data from the host buffer should be in the FIFO.
write_task.start()
# Now write more data than can fit in the FIFO + host buffer.
large_write_data = numpy.zeros((1, fifo_size*2), dtype=numpy.int16)
with pytest.raises(nidaqmx.DaqWriteError) as timeout_exception:
writer.write_int16(large_write_data, timeout=2.0)
assert timeout_exception.value.error_code == DAQmxErrors.SAMPLES_CAN_NOT_YET_BE_WRITTEN
# Some of the data should have been written successfully. This test doesn't
# need to get into the nitty gritty device details on how much.
assert timeout_exception.value.samps_per_chan_written > 0
def test_overwrite_during_prime(self, x_series_device):
# USB streaming is very tricky.
if not (x_series_device.bus_type == BusType.PCIE or x_series_device.bus_type == BusType.PXIE):
pytest.skip("Requires a plugin device.")
number_of_samples = 100
sample_rate = 1000
fifo_size = 8191
host_buffer_size = 1000
total_buffer_size = fifo_size + host_buffer_size
with nidaqmx.Task() as write_task:
samp_clk_terminal = '/{0}/Ctr0InternalOutput'.format(
x_series_device.name)
write_task.ao_channels.add_ao_voltage_chan(
x_series_device.ao_physical_chans[0].name, max_val=10, min_val=-10)
write_task.timing.cfg_samp_clk_timing(
sample_rate, source=samp_clk_terminal, sample_mode=AcquisitionType.CONTINUOUS,
samps_per_chan=number_of_samples)
# Don't allow regeneration - this enables explicit hardware flow control.
write_task.out_stream.regen_mode = RegenerationMode.DONT_ALLOW_REGENERATION
# Make the host buffer small.
write_task.out_stream.output_buf_size = number_of_samples
# This is the only entrypoint that correctly sets number_of_samples_written in error
# conditions prior to DAQmx 21.8.
writer = nidaqmx.stream_writers.AnalogUnscaledWriter(write_task.out_stream, auto_start=False)
# This is more data than can be primed, so this should fail.
initial_write_data = numpy.zeros((1, total_buffer_size*2), dtype=numpy.int16)
with pytest.raises(nidaqmx.DaqWriteError) as timeout_exception:
writer.write_int16(initial_write_data)
assert timeout_exception.value.error_code == DAQmxErrors.NO_MORE_SPACE
# The driver detects that the write will fail immediately, so no data was written.
assert timeout_exception.value.samps_per_chan_written == 0
|
[
"nidaqmx.Task",
"nidaqmx.stream_writers.AnalogUnscaledWriter",
"numpy.zeros",
"pytest.raises",
"pytest.skip"
] |
[((859, 899), 'pytest.skip', 'pytest.skip', (['"""Requires a plugin device."""'], {}), "('Requires a plugin device.')\n", (870, 899), False, 'import pytest\n'), ((1031, 1045), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (1043, 1045), False, 'import nidaqmx\n'), ((1842, 1930), 'nidaqmx.stream_writers.AnalogUnscaledWriter', 'nidaqmx.stream_writers.AnalogUnscaledWriter', (['write_task.out_stream'], {'auto_start': '(False)'}), '(write_task.out_stream,\n auto_start=False)\n', (1885, 1930), False, 'import nidaqmx\n'), ((2006, 2059), 'numpy.zeros', 'numpy.zeros', (['(1, host_buffer_size)'], {'dtype': 'numpy.int16'}), '((1, host_buffer_size), dtype=numpy.int16)\n', (2017, 2059), False, 'import numpy\n'), ((2338, 2388), 'numpy.zeros', 'numpy.zeros', (['(1, fifo_size * 2)'], {'dtype': 'numpy.int16'}), '((1, fifo_size * 2), dtype=numpy.int16)\n', (2349, 2388), False, 'import numpy\n'), ((3080, 3120), 'pytest.skip', 'pytest.skip', (['"""Requires a plugin device."""'], {}), "('Requires a plugin device.')\n", (3091, 3120), False, 'import pytest\n'), ((3309, 3323), 'nidaqmx.Task', 'nidaqmx.Task', ([], {}), '()\n', (3321, 3323), False, 'import nidaqmx\n'), ((4232, 4320), 'nidaqmx.stream_writers.AnalogUnscaledWriter', 'nidaqmx.stream_writers.AnalogUnscaledWriter', (['write_task.out_stream'], {'auto_start': '(False)'}), '(write_task.out_stream,\n auto_start=False)\n', (4275, 4320), False, 'import nidaqmx\n'), ((4424, 4482), 'numpy.zeros', 'numpy.zeros', (['(1, total_buffer_size * 2)'], {'dtype': 'numpy.int16'}), '((1, total_buffer_size * 2), dtype=numpy.int16)\n', (4435, 4482), False, 'import numpy\n'), ((2404, 2440), 'pytest.raises', 'pytest.raises', (['nidaqmx.DaqWriteError'], {}), '(nidaqmx.DaqWriteError)\n', (2417, 2440), False, 'import pytest\n'), ((4498, 4534), 'pytest.raises', 'pytest.raises', (['nidaqmx.DaqWriteError'], {}), '(nidaqmx.DaqWriteError)\n', (4511, 4534), False, 'import pytest\n')]
|
import io
import os
import time
import urllib.request
import zipfile
import numpy as np
from scipy.io.wavfile import read as wav_read
from tqdm import tqdm
class dclde:
"""
The high-frequency dataset consists of marked encounters with echolocation
clicks of species commonly found along the US Atlantic Coast, and in the
Gulf of Mexico:
Mesoplodon europaeus - Gervais' beaked whale
Ziphius cavirostris - Cuvier's beaked whale
Mesoplodon bidens - Sowerby's beaked whale
Lagenorhynchus acutus - Atlantic white-sided dolphin
Grampus griseus - Risso's dolphin
Globicephala macrorhynchus - Short-finned pilot whale
Stenella sp. - Stenellid dolphins
Delphinid type A
Delphinid type B
Unidentified delphinid - delphinid other than those described above
The goal for these datasets is to identify acoustic encounters by species
during times when animals were echolocating. Analysts examined data for
echolocation clicks and approximated the start and end times of acoustic
encounters. Any period that was separated from another one by five minutes
or more was marked as a separate encounter. Whistle activity was not
considered. Consequently, while the use of whistle information during
echolocation activity is appropriate, reporting a species based on whistles
in the absence of echolocation activity will be considered a false positive
for this classification task.
"""
def download(path):
"""ToDo"""
# Load the dataset (download if necessary) and set
# the class attributes.
print("Loading DCLDE")
t = time.time()
if not os.path.isdir(path + "DCLDE"):
print("\tCreating Directory")
os.mkdir(path + "DCLDE")
if not os.path.exists(path + "DCLDE/DCLDE_LF_Dev.zip"):
url = "http://sabiod.univ-tln.fr/workspace/DCLDE2018/DCLDE_LF_Dev.zip"
with DownloadProgressBar(
unit="B", unit_scale=True, miniters=1, desc="Wav files"
) as t:
urllib.request.urlretrieve(url, path + "DCLDE/DCLDE_LF_Dev.zip")
def load(window_size=441000, path=None):
"""ToDo"""
if path is None:
path = os.environ["DATASET_path"]
        dclde.download(path)
        t = time.time()  # start timing here; the t set inside download() is not in this scope
        # Loading the files
f = zipfile.ZipFile(path + "DCLDE/DCLDE_LF_Dev.zip")
wavs = list()
# labels = list()
for zipf in tqdm(f.filelist, ascii=True):
if ".wav" in zipf.filename and ".d100." in zipf.filename:
wavfile = f.read(zipf)
byt = io.BytesIO(wavfile)
wav = wav_read(byt)[1].astype("float32")
for s in range(len(wav) // window_size):
wavs.append(wav[s * window_size : (s + 1) * window_size])
# labels.append(zipf.filename.split('/')[2])
# return wavs,labels
wavs = np.expand_dims(np.asarray(wavs), 1)
dataset.add_variable({"signals": {"train_set": wavs}})
print(
"Dataset freefield1010 loaded in", "{0:.2f}".format(time.time() - t), "s."
)
return dataset
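# Hedged illustration (not part of the original loader): the windowing done in
# load() -- a recording is cut into len(wav) // window_size non-overlapping
# windows and any remainder is dropped.
if __name__ == "__main__":
    _wav = np.zeros(1000000, dtype="float32")
    _window_size = 441000
    _windows = [_wav[s * _window_size : (s + 1) * _window_size]
                for s in range(len(_wav) // _window_size)]
    print(len(_windows))  # 2 windows kept; the trailing 118000 samples are discarded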
|
[
"os.path.exists",
"zipfile.ZipFile",
"tqdm.tqdm",
"numpy.asarray",
"io.BytesIO",
"os.path.isdir",
"scipy.io.wavfile.read",
"os.mkdir",
"time.time"
] |
[((1646, 1657), 'time.time', 'time.time', ([], {}), '()\n', (1655, 1657), False, 'import time\n'), ((2348, 2396), 'zipfile.ZipFile', 'zipfile.ZipFile', (["(path + 'DCLDE/DCLDE_LF_Dev.zip')"], {}), "(path + 'DCLDE/DCLDE_LF_Dev.zip')\n", (2363, 2396), False, 'import zipfile\n'), ((2469, 2497), 'tqdm.tqdm', 'tqdm', (['f.filelist'], {'ascii': '(True)'}), '(f.filelist, ascii=True)\n', (2473, 2497), False, 'from tqdm import tqdm\n'), ((1674, 1703), 'os.path.isdir', 'os.path.isdir', (["(path + 'DCLDE')"], {}), "(path + 'DCLDE')\n", (1687, 1703), False, 'import os\n'), ((1759, 1783), 'os.mkdir', 'os.mkdir', (["(path + 'DCLDE')"], {}), "(path + 'DCLDE')\n", (1767, 1783), False, 'import os\n'), ((1799, 1846), 'os.path.exists', 'os.path.exists', (["(path + 'DCLDE/DCLDE_LF_Dev.zip')"], {}), "(path + 'DCLDE/DCLDE_LF_Dev.zip')\n", (1813, 1846), False, 'import os\n'), ((2968, 2984), 'numpy.asarray', 'np.asarray', (['wavs'], {}), '(wavs)\n', (2978, 2984), True, 'import numpy as np\n'), ((2630, 2649), 'io.BytesIO', 'io.BytesIO', (['wavfile'], {}), '(wavfile)\n', (2640, 2649), False, 'import io\n'), ((3132, 3143), 'time.time', 'time.time', ([], {}), '()\n', (3141, 3143), False, 'import time\n'), ((2672, 2685), 'scipy.io.wavfile.read', 'wav_read', (['byt'], {}), '(byt)\n', (2680, 2685), True, 'from scipy.io.wavfile import read as wav_read\n')]
|
# This file generates random polynomial functions as strings
import numpy as np
import random
import matplotlib.pyplot as plt
class generateFunctions():
#the initial function taking 4 inputs
def __init__(self, x_vector, high_degree_vector, rangeLow, rangeHigh):
#the input processing
self.x_vector = x_vector
self.high_degree_vector = high_degree_vector
self.rangeLow = rangeLow
self.rangeHigh = rangeHigh
self.functionString = ""
def generate(self):
        # allowed values for the leading coefficient (the other coefficients may be zero)
allowed_values = list(range(self.rangeLow,self.rangeHigh))
allowed_values.remove(0)
for i in range(len(self.x_vector)):
highestVar = self.high_degree_vector[i]
ppar = np.random.randint(low=self.rangeLow,high=self.rangeHigh,size=(highestVar+1))
            # make sure the leading (highest-degree) coefficient is not zero
if ppar[0] == 0:
ppar[0] = random.choice(allowed_values)
for j in range(len(ppar)):
add = ""
if ppar[j] != 0:
add = str(ppar[j])
if (highestVar-j) != 0:
add = add +"*"+self.x_vector[i]
if(highestVar-j)!=1:
add = add +"^"+str(highestVar-j)
if ppar[j] > 0:
add = "+" + add
self.functionString = self.functionString + add
return self.functionString
# Example usage (commented out): one variable 'x' of degree 3, coefficients drawn from [-5, 5)
# p = generateFunctions(['x'], [3], -5, 5)
# function = p.generate()
# print(function)
|
[
"numpy.random.randint",
"random.choice"
] |
[((710, 788), 'numpy.random.randint', 'np.random.randint', ([], {'low': 'self.rangeLow', 'high': 'self.rangeHigh', 'size': '(highestVar + 1)'}), '(low=self.rangeLow, high=self.rangeHigh, size=highestVar + 1)\n', (727, 788), True, 'import numpy as np\n'), ((872, 901), 'random.choice', 'random.choice', (['allowed_values'], {}), '(allowed_values)\n', (885, 901), False, 'import random\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 15 17:22:01 2020
@author: Kamil
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import morse_decoder
import iir_filter
class RealtimeWindow:
def __init__(self, channel: str):
# create a plot window
self.fig, (self.ax, self.ax1)= plt.subplots(2)
plt.title(f"Channel: {channel}")
        self.ax.set_title('Luminance')
self.ax1.set_title('Filtered Signal')
self.plotbuffer = np.zeros(800)
self.plotbuffer1 = np.zeros(800)
# Create empty lines
line, = self.ax.plot(self.plotbuffer)
line2, = self.ax1.plot(self.plotbuffer1)
self.line = [line, line2]
# Set axis limits
self.ax.set_ylim(-1, 1)
self.ax1.set_ylim(-1, 1)
        # Initialize ring buffers
self.ringbuffer = []
self.ringbuffer1 = []
# add any initialisation code here (filters etc)
# start the animation
self.decodedSequence = ''
# Design High Pass filter
samplingFrequency = 30
# Define cut off frequency
cutOfFrequencyHighPass = 0.1 #Hz
        # Order of the high-pass filter that the second-order sections implement
        order = 2
        # Generate second-order sections
sos = np.array(iir_filter.GenerateHighPassCoeff( cutOfFrequencyHighPass, samplingFrequency, order ))
        # Initialize Morse code decoder object
self.decoder = morse_decoder.MorseCodeDecoder()
# Create IIR Array object
self.iirFilter = iir_filter.IIRFilter(sos)
# Initialize filter output variable
self.filterOutput = 0
# start the animation
self.ani = animation.FuncAnimation(self.fig, self.update, interval=100)
# updates the plot
def update(self, data):
# add new data to the buffer
self.plotbuffer = np.append(self.plotbuffer, self.ringbuffer)
self.plotbuffer1 = np.append(self.plotbuffer1, self.ringbuffer1)
        # only keep the 800 newest samples and discard the old ones
self.plotbuffer = self.plotbuffer[-800:]
self.plotbuffer1 = self.plotbuffer1[-800:]
self.ringbuffer = []
self.ringbuffer1 = []
        # update the plotted lines with the latest buffers
self.line[0].set_ydata(self.plotbuffer)
self.line[1].set_ydata(self.plotbuffer1)
self.ax.set_ylim((min(self.plotbuffer)-1), max(self.plotbuffer)+1)
self.ax1.set_ylim((min(self.plotbuffer1)-1), max(self.plotbuffer1)+1)
# Update the decoded sequence
        self.ax.set_title('Luminance - Detected Sequence: ' + self.decoder.morseSequence)
self.ax1.set_title('Filtered Signal - Decoded Sequence: '+ self.decoder.decodedLetters)
return self.line
# appends data to the ringbuffer
def addData(self, signal):
self.ringbuffer.append(signal)
self.filterOutput = self.iirFilter.Filter(signal)
self.ringbuffer1.append(self.filterOutput)
self.decoder.Detect(self.filterOutput)
|
[
"matplotlib.animation.FuncAnimation",
"iir_filter.GenerateHighPassCoeff",
"numpy.append",
"numpy.zeros",
"iir_filter.IIRFilter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"morse_decoder.MorseCodeDecoder"
] |
[((349, 364), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {}), '(2)\n', (361, 364), True, 'import matplotlib.pyplot as plt\n'), ((373, 405), 'matplotlib.pyplot.title', 'plt.title', (['f"""Channel: {channel}"""'], {}), "(f'Channel: {channel}')\n", (382, 405), True, 'import matplotlib.pyplot as plt\n'), ((517, 530), 'numpy.zeros', 'np.zeros', (['(800)'], {}), '(800)\n', (525, 530), True, 'import numpy as np\n'), ((558, 571), 'numpy.zeros', 'np.zeros', (['(800)'], {}), '(800)\n', (566, 571), True, 'import numpy as np\n'), ((1498, 1530), 'morse_decoder.MorseCodeDecoder', 'morse_decoder.MorseCodeDecoder', ([], {}), '()\n', (1528, 1530), False, 'import morse_decoder\n'), ((1595, 1620), 'iir_filter.IIRFilter', 'iir_filter.IIRFilter', (['sos'], {}), '(sos)\n', (1615, 1620), False, 'import iir_filter\n'), ((1753, 1813), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['self.fig', 'self.update'], {'interval': '(100)'}), '(self.fig, self.update, interval=100)\n', (1776, 1813), True, 'import matplotlib.animation as animation\n'), ((1930, 1973), 'numpy.append', 'np.append', (['self.plotbuffer', 'self.ringbuffer'], {}), '(self.plotbuffer, self.ringbuffer)\n', (1939, 1973), True, 'import numpy as np\n'), ((2001, 2046), 'numpy.append', 'np.append', (['self.plotbuffer1', 'self.ringbuffer1'], {}), '(self.plotbuffer1, self.ringbuffer1)\n', (2010, 2046), True, 'import numpy as np\n'), ((1342, 1428), 'iir_filter.GenerateHighPassCoeff', 'iir_filter.GenerateHighPassCoeff', (['cutOfFrequencyHighPass', 'samplingFrequency', 'order'], {}), '(cutOfFrequencyHighPass, samplingFrequency,\n order)\n', (1374, 1428), False, 'import iir_filter\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import numpy as np
MAXLINE = 10000
MAXFRAME = 10000
def read_xyz(xyz,natoms):
#
fopen = open(xyz,'r')
frames = []
for i in range(MAXLINE):
line = fopen.readline()
if line.strip():
assert int(line.strip().split()[0]) == natoms
line = fopen.readline() # comment
forces, poses = [], []
for j in range(natoms):
line = fopen.readline() #
data = line.strip().split()
poses.append(data[1:4])
forces.append(data[4:7])
poses = np.array(poses,dtype=float)
forces = np.array(forces,dtype=float)
frames.append([poses,forces])
if not line:
break
else:
raise ValueError('Too many lines in %s' %xyz)
fopen.close()
return frames
def read_outcar(outcar='OUTCAR',natoms=100,verbose=True,wdat=False,**kwargs):
# how many steps to read
nframes = MAXFRAME
if kwargs:
if 'nframes' in kwargs.keys():
if kwargs['nframes'] > 0:
nframes = int(kwargs['nframes'])
# read OUTCAR
energy = []
free_energy = []
frames = []
fopen = open(outcar, 'r')
count, flag = 0, True
while flag:
line = fopen.readline()
if line.startswith(' POSITION'):
fopen.readline() # segment line ---...---
poses, forces = [], []
for n in range(natoms):
data = fopen.readline().strip().split()
poses.append(data[:3]) # x y z
forces.append(data[3:]) # fx fy fz
poses = np.array(poses, dtype=float)
forces = np.array(forces, dtype=float)
frames.append((poses, forces))
count += 1
if line.strip().startswith('FREE ENERGIE OF THE ION-ELECTRON SYSTEM'):
fopen.readline() # segment line ---...---
# free energy F
data = fopen.readline().strip().split()
free_energy.append(float(data[-2]))
fopen.readline() # blank line
# energy E0
data = fopen.readline().strip().split()
energy.append(float(data[-1]))
if count == nframes or (not line):
flag = False
fopen.close()
return frames, energy, free_energy
def read_poscar(poscar='POSCAR',format='vasp5',verbose=True):
"""read POSCAR"""
with open(poscar, 'r') as reader:
lines = reader.readlines()
lines = [line.strip().split() for line in lines]
fname = ' '.join(lines[0]) # file description
scaling = float(lines[1][0])
lattice = np.array(lines[2:5], dtype=float)
symbols = lines[5]
numbers = [int(i) for i in lines[6]]
natoms = np.sum(numbers)
dyntype = ' '.join(lines[7]) # dynamic type
coorsys = lines[8] # coordinate system
poses, fixes = [], []
for coord in lines[9:9+natoms]:
poses.append(coord[:3])
fixes.append(coord[3:])
poses = np.array(poses, dtype=float)
# TODO: read velocity
if verbose:
print('Successfully read POSCAR, taking it as the reference...')
return fname, scaling, lattice, symbols, numbers, poses, fixes
def write_xyz(symbols,lattice,positions,forces,energy):
"""
positions in cartesian (AA) and forces in eV/AA
energy is always force-consistent energy for training purpose
"""
natoms = len(symbols)
content = "{:<d}\n".format(natoms)
content += ("Lattice=\""+"{:<.2f} "*9+"\""+\
" Properties=species:S:1:pos:R:3:forces:R:3 pbc=\"T T T\""+\
" energy={:<12.6f}"+"\n") \
.format(*list(lattice),energy)
for i in range(natoms):
content += ('{:<4s} '+'{:>12.6f} '*6+'\n')\
.format(symbols[i],*list(positions[i]),*list(forces[i]))
return content
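def _write_xyz_example():
    """Hedged illustration (not called anywhere): write_xyz() on a made-up two-atom frame."""
    lattice = np.eye(3) * 10.0
    positions = np.array([[0.0, 0.0, 0.0], [1.1, 0.0, 0.0]])
    forces = np.zeros((2, 3))
    # energy is the force-consistent energy, as the write_xyz docstring requires
    return write_xyz(['C', 'O'], lattice.ravel(), positions, forces, -14.5)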
def find_outcars():
fname, scaling, lattice, formula, numbers, refposes, fixes = \
read_poscar('POSCAR')
symbols = []
for s, n in zip(formula, numbers):
symbols.extend([s]*n)
print(symbols)
#
outcars = []
cur_dir = './POSCARs/'
for cur_fname in os.listdir(cur_dir):
if cur_fname.startswith('OUTCAR_'):
outcars.append(cur_fname)
outcars.sort(key = lambda fname: int(fname.split('_')[-1]))
print(outcars)
outcars = outcars[1::3]
print(outcars)
content = ''
for outcar in outcars:
print(os.path.abspath(outcar))
outcar = os.path.join(cur_dir,outcar)
frames, energy, free_energy = read_outcar(outcar=outcar,natoms=np.sum(numbers))
positions, forces = frames[0][0], frames[0][1]
en = free_energy[0]
content += write_xyz(symbols,lattice.ravel(),positions,forces,en)
with open('data.xyz', 'w') as writer:
writer.write(content)
return
if __name__ == '__main__':
#frames = read_xyz('bonds.xyz',2)
#print(frames[0])
find_outcars()
|
[
"os.listdir",
"os.path.join",
"numpy.array",
"numpy.sum",
"os.path.abspath"
] |
[((2693, 2726), 'numpy.array', 'np.array', (['lines[2:5]'], {'dtype': 'float'}), '(lines[2:5], dtype=float)\n', (2701, 2726), True, 'import numpy as np\n'), ((2804, 2819), 'numpy.sum', 'np.sum', (['numbers'], {}), '(numbers)\n', (2810, 2819), True, 'import numpy as np\n'), ((3051, 3079), 'numpy.array', 'np.array', (['poses'], {'dtype': 'float'}), '(poses, dtype=float)\n', (3059, 3079), True, 'import numpy as np\n'), ((4201, 4220), 'os.listdir', 'os.listdir', (['cur_dir'], {}), '(cur_dir)\n', (4211, 4220), False, 'import os\n'), ((4537, 4566), 'os.path.join', 'os.path.join', (['cur_dir', 'outcar'], {}), '(cur_dir, outcar)\n', (4549, 4566), False, 'import os\n'), ((637, 665), 'numpy.array', 'np.array', (['poses'], {'dtype': 'float'}), '(poses, dtype=float)\n', (645, 665), True, 'import numpy as np\n'), ((686, 715), 'numpy.array', 'np.array', (['forces'], {'dtype': 'float'}), '(forces, dtype=float)\n', (694, 715), True, 'import numpy as np\n'), ((1688, 1716), 'numpy.array', 'np.array', (['poses'], {'dtype': 'float'}), '(poses, dtype=float)\n', (1696, 1716), True, 'import numpy as np\n'), ((1738, 1767), 'numpy.array', 'np.array', (['forces'], {'dtype': 'float'}), '(forces, dtype=float)\n', (1746, 1767), True, 'import numpy as np\n'), ((4495, 4518), 'os.path.abspath', 'os.path.abspath', (['outcar'], {}), '(outcar)\n', (4510, 4518), False, 'import os\n'), ((4637, 4652), 'numpy.sum', 'np.sum', (['numbers'], {}), '(numbers)\n', (4643, 4652), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012, <NAME>
# All rights reserved.
# This file is part of PyDSM.
# PyDSM is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# PyDSM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with PyDSM. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division, print_function
from numpy.testing import TestCase, run_module_suite
import numpy as np
from pkg_resources import resource_stream
from pydsm.delsig import simulateDSM
__all__ = ["TestSimulateDSM"]
class TestSimulateDSM(TestCase):
def setUp(self):
pass
def test_default(self):
f = resource_stream('pydsm.delsig',
'tests/Data/test_simulateDSM_0.npz')
d = np.load(f)['arr_0']
f.close()
# Take H as in H = synthesizeNTF(5, 32, 1)
H = (np.array([0.99604531+0.08884669j, 0.99604531-0.08884669j,
0.99860302+0.05283948j, 0.99860302-0.05283948j,
1.00000000+0.j]),
np.array([0.80655696+0.11982271j, 0.80655696-0.11982271j,
0.89807098+0.21981939j, 0.89807098-0.21981939j,
0.77776708+0.j]),
1)
N = 8192
f = 85
u = 0.5*np.sin(2.*np.pi*f/N*np.arange(N))
v, d1, d2, d3 = simulateDSM(u, H)
np.testing.assert_equal(v, d)
if __name__ == '__main__':
run_module_suite()
|
[
"numpy.testing.assert_equal",
"numpy.arange",
"numpy.array",
"pydsm.delsig.simulateDSM",
"numpy.testing.run_module_suite",
"numpy.load",
"pkg_resources.resource_stream"
] |
[((1841, 1859), 'numpy.testing.run_module_suite', 'run_module_suite', ([], {}), '()\n', (1857, 1859), False, 'from numpy.testing import TestCase, run_module_suite\n'), ((1063, 1131), 'pkg_resources.resource_stream', 'resource_stream', (['"""pydsm.delsig"""', '"""tests/Data/test_simulateDSM_0.npz"""'], {}), "('pydsm.delsig', 'tests/Data/test_simulateDSM_0.npz')\n", (1078, 1131), False, 'from pkg_resources import resource_stream\n'), ((1753, 1770), 'pydsm.delsig.simulateDSM', 'simulateDSM', (['u', 'H'], {}), '(u, H)\n', (1764, 1770), False, 'from pydsm.delsig import simulateDSM\n'), ((1779, 1808), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['v', 'd'], {}), '(v, d)\n', (1802, 1808), True, 'import numpy as np\n'), ((1172, 1182), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (1179, 1182), True, 'import numpy as np\n'), ((1274, 1405), 'numpy.array', 'np.array', (['[0.99604531 + 0.08884669j, 0.99604531 - 0.08884669j, 0.99860302 + \n 0.05283948j, 0.99860302 - 0.05283948j, 1.0 + 0.0j]'], {}), '([0.99604531 + 0.08884669j, 0.99604531 - 0.08884669j, 0.99860302 + \n 0.05283948j, 0.99860302 - 0.05283948j, 1.0 + 0.0j])\n', (1282, 1405), True, 'import numpy as np\n'), ((1459, 1597), 'numpy.array', 'np.array', (['[0.80655696 + 0.11982271j, 0.80655696 - 0.11982271j, 0.89807098 + \n 0.21981939j, 0.89807098 - 0.21981939j, 0.77776708 + 0.0j]'], {}), '([0.80655696 + 0.11982271j, 0.80655696 - 0.11982271j, 0.89807098 + \n 0.21981939j, 0.89807098 - 0.21981939j, 0.77776708 + 0.0j])\n', (1467, 1597), True, 'import numpy as np\n'), ((1715, 1727), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1724, 1727), True, 'import numpy as np\n')]
|
## Clustering script for CaM_Trials ##
# Clusters the last 1 microsecond of the simulation using HDBSCAN.
# Uses backbone RMSD to the native structure (excluding the flexible tails but including the peptide) as the distance metric.
import mdtraj as md
import numpy as np
import matplotlib.pyplot as plt
import hdbscan
MIN_SAMPLES = 200 #determined from data/trial and error
#Calculate rmsd to native
def compute_rmsd_matrix(traj):
distances = np.empty((traj.n_frames, traj.n_frames))
for i in range(traj.n_frames):
distances[i] = md.rmsd(traj, traj, i, atom_indices=traj.top.select('backbone'))
return distances
#Determines the k-plot (helpful in determining MIN_SAMPLES)
def plot_k_dist(distances):
print('plotting dists')
s = np.sort(distances, axis=0)
counts = s[:, MIN_SAMPLES]
plt.plot(counts)
plt.xlabel('distance')
plt.ylabel('num_steps')
plt.savefig('k-dist.png')
#Clusters data using HDBSCAN
def make_clusters(native, traj):
distances = compute_rmsd_matrix(traj)
plot_k_dist(distances)
#clustering set up
clusterer = hdbscan.HDBSCAN(min_cluster_size=MIN_SAMPLES)
cluster_indices = clusterer.fit_predict(distances)
min_index = 0
max_index = np.max(cluster_indices) + 1
#clustering
clusters = [traj[np.where(cluster_indices == index)]
for index in range(min_index, max_index)]
clusters = sorted(clusters, key=lambda x: x.n_frames, reverse=True)
#now add the unclustered frames to last cluster
clusters.append(traj[np.where(cluster_indices == -1)])
cluster_sizes = [c.n_frames for c in clusters]
total_frames = traj.n_frames
print('Found {} total clusters.'.format(len(clusters)))
#calculates important values and outputs to files
for i, c in enumerate(clusters):
rmsds_to_native = md.rmsd(c, native)*10
mean = np.mean(rmsds_to_native)
median = np.median(rmsds_to_native)
min_ = np.min(rmsds_to_native)
max_ = np.max(rmsds_to_native)
std_ = np.std(rmsds_to_native)
np.savetxt("clusters_0"+str(i)+".dat", rmsds_to_native, fmt="%f")
print('Cluster {:02d} has population {:.1f}; RMSD: {:.2f} {:.2f} {:.2f} {:.2f} {:.2f}'.format(i, 100 * cluster_sizes[i] / float(total_frames), mean, median, min_, max_, std_))
c.save('cluster_{:02d}.pdb'.format(i))
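# Hedged illustration (not part of the original script): how frames are grouped
# by the HDBSCAN labels above, using a toy label array instead of a trajectory.
if __name__ == '__main__':
    toy_labels = np.array([0, 1, 0, -1, 1, 1])
    toy_groups = [np.where(toy_labels == k)[0] for k in range(np.max(toy_labels) + 1)]
    toy_groups.append(np.where(toy_labels == -1)[0])  # unclustered frames go last
    print(toy_groups)  # [array([0, 2]), array([1, 4, 5]), array([3])]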
# native (reference) structure
native = md.load('cam_fill.pdb')
native_indices = native.top.select('backbone and (resid 4 to 146 or resid>=149)')
#last 1 microsecond of simulation
traj = md.load_dcd('CaM_Trial3.dcd', top='trajectory_1.pdb')[-10000:]
traj_indices = traj.top.select('backbone and (resid 4 to 146 or resid >=149)')
#gets indices of subsection
ref = native.atom_slice(atom_indices=native_indices)
cam = traj.atom_slice(atom_indices = traj_indices)
make_clusters(ref, cam)
|
[
"numpy.mean",
"numpy.median",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"numpy.where",
"numpy.sort",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.std",
"numpy.max",
"mdtraj.load_dcd",
"numpy.empty",
"numpy.min",
"mdtraj.rmsd",
"mdtraj.load",
"hdbscan.HDBSCAN"
] |
[((2359, 2382), 'mdtraj.load', 'md.load', (['"""cam_fill.pdb"""'], {}), "('cam_fill.pdb')\n", (2366, 2382), True, 'import mdtraj as md\n'), ((418, 458), 'numpy.empty', 'np.empty', (['(traj.n_frames, traj.n_frames)'], {}), '((traj.n_frames, traj.n_frames))\n', (426, 458), True, 'import numpy as np\n'), ((729, 755), 'numpy.sort', 'np.sort', (['distances'], {'axis': '(0)'}), '(distances, axis=0)\n', (736, 755), True, 'import numpy as np\n'), ((792, 808), 'matplotlib.pyplot.plot', 'plt.plot', (['counts'], {}), '(counts)\n', (800, 808), True, 'import matplotlib.pyplot as plt\n'), ((813, 835), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""distance"""'], {}), "('distance')\n", (823, 835), True, 'import matplotlib.pyplot as plt\n'), ((840, 863), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""num_steps"""'], {}), "('num_steps')\n", (850, 863), True, 'import matplotlib.pyplot as plt\n'), ((868, 893), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""k-dist.png"""'], {}), "('k-dist.png')\n", (879, 893), True, 'import matplotlib.pyplot as plt\n'), ((1066, 1111), 'hdbscan.HDBSCAN', 'hdbscan.HDBSCAN', ([], {'min_cluster_size': 'MIN_SAMPLES'}), '(min_cluster_size=MIN_SAMPLES)\n', (1081, 1111), False, 'import hdbscan\n'), ((2507, 2560), 'mdtraj.load_dcd', 'md.load_dcd', (['"""CaM_Trial3.dcd"""'], {'top': '"""trajectory_1.pdb"""'}), "('CaM_Trial3.dcd', top='trajectory_1.pdb')\n", (2518, 2560), True, 'import mdtraj as md\n'), ((1202, 1225), 'numpy.max', 'np.max', (['cluster_indices'], {}), '(cluster_indices)\n', (1208, 1225), True, 'import numpy as np\n'), ((1843, 1867), 'numpy.mean', 'np.mean', (['rmsds_to_native'], {}), '(rmsds_to_native)\n', (1850, 1867), True, 'import numpy as np\n'), ((1885, 1911), 'numpy.median', 'np.median', (['rmsds_to_native'], {}), '(rmsds_to_native)\n', (1894, 1911), True, 'import numpy as np\n'), ((1927, 1950), 'numpy.min', 'np.min', (['rmsds_to_native'], {}), '(rmsds_to_native)\n', (1933, 1950), True, 'import numpy as np\n'), ((1966, 1989), 'numpy.max', 'np.max', (['rmsds_to_native'], {}), '(rmsds_to_native)\n', (1972, 1989), True, 'import numpy as np\n'), ((2005, 2028), 'numpy.std', 'np.std', (['rmsds_to_native'], {}), '(rmsds_to_native)\n', (2011, 2028), True, 'import numpy as np\n'), ((1268, 1302), 'numpy.where', 'np.where', (['(cluster_indices == index)'], {}), '(cluster_indices == index)\n', (1276, 1302), True, 'import numpy as np\n'), ((1508, 1539), 'numpy.where', 'np.where', (['(cluster_indices == -1)'], {}), '(cluster_indices == -1)\n', (1516, 1539), True, 'import numpy as np\n'), ((1806, 1824), 'mdtraj.rmsd', 'md.rmsd', (['c', 'native'], {}), '(c, native)\n', (1813, 1824), True, 'import mdtraj as md\n')]
|
import typing as t
import numpy as np
import pandas as pd
from house_prices_regression_model import __version__ as VERSION
from house_prices_regression_model.processing.data_manager import load_pipeline
from house_prices_regression_model.config.core import load_config_file, SETTINGS_PATH
from house_prices_regression_model.processing.data_validation import validate_inputs
# Config files
config = load_config_file(SETTINGS_PATH)
PIPELINE_ARTIFACT_NAME = config["PIPELINE_ARTIFACT_NAME"]
pipeline_file_name = f"{PIPELINE_ARTIFACT_NAME}_v{VERSION}.pkl"
cb_pipe = load_pipeline(file_name=pipeline_file_name)
#Function
def make_prediction(*,input_data: t.Union[pd.DataFrame, dict],) -> list:
"""Make a prediction using a saved model pipeline."""
df = pd.DataFrame(input_data)
validated_df, error_dict = validate_inputs(input_data=df)
errors_list = list(error_dict.values())
results = {'model_output': None}
if error_dict == {}:
log_predictions = cb_pipe.predict(validated_df)
predictions = [np.exp(pred) for pred in log_predictions]
results['model_output'] = predictions
else:
results['model_output'] = 'Errors making prediction:' + ' '.join(map(str, errors_list))
return results
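# Hedged usage sketch (not part of the original module; the feature names below
# are made up -- the real schema is enforced by validate_inputs):
#
#   example = {"OverallQual": [7], "GrLivArea": [1710], "YearBuilt": [2003]}
#   result = make_prediction(input_data=example)
#   # result["model_output"] is a list of prices (np.exp undoes the log target)
#   # on success, or an error string when validation fails.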
|
[
"house_prices_regression_model.config.core.load_config_file",
"house_prices_regression_model.processing.data_manager.load_pipeline",
"numpy.exp",
"house_prices_regression_model.processing.data_validation.validate_inputs",
"pandas.DataFrame"
] |
[((400, 431), 'house_prices_regression_model.config.core.load_config_file', 'load_config_file', (['SETTINGS_PATH'], {}), '(SETTINGS_PATH)\n', (416, 431), False, 'from house_prices_regression_model.config.core import load_config_file, SETTINGS_PATH\n'), ((564, 607), 'house_prices_regression_model.processing.data_manager.load_pipeline', 'load_pipeline', ([], {'file_name': 'pipeline_file_name'}), '(file_name=pipeline_file_name)\n', (577, 607), False, 'from house_prices_regression_model.processing.data_manager import load_pipeline\n'), ((760, 784), 'pandas.DataFrame', 'pd.DataFrame', (['input_data'], {}), '(input_data)\n', (772, 784), True, 'import pandas as pd\n'), ((816, 846), 'house_prices_regression_model.processing.data_validation.validate_inputs', 'validate_inputs', ([], {'input_data': 'df'}), '(input_data=df)\n', (831, 846), False, 'from house_prices_regression_model.processing.data_validation import validate_inputs\n'), ((1032, 1044), 'numpy.exp', 'np.exp', (['pred'], {}), '(pred)\n', (1038, 1044), True, 'import numpy as np\n')]
|
import numpy as np
from amlearn.utils.basetest import AmLearnTest
from amlearn.utils.data import get_isometric_lists
class test_data(AmLearnTest):
def setUp(self):
pass
def test_get_isometric_lists(self):
test_lists= [[1, 2, 3], [4], [5, 6], [1, 2, 3]]
isometric_lists = \
get_isometric_lists(test_lists, limit_width=80, fill_value=0)
self.assertEqual(np.array(isometric_lists).shape, (4, 80))
test_arrays = np.array([np.array([1, 2, 3]), np.array([4]),
np.array([5, 6]), np.array([1, 2, 3])])
isometric_arrays = \
get_isometric_lists(test_arrays, limit_width=80, fill_value=0)
self.assertEqual(np.array(isometric_arrays).shape, (4, 80))
|
[
"numpy.array",
"amlearn.utils.data.get_isometric_lists"
] |
[((320, 381), 'amlearn.utils.data.get_isometric_lists', 'get_isometric_lists', (['test_lists'], {'limit_width': '(80)', 'fill_value': '(0)'}), '(test_lists, limit_width=80, fill_value=0)\n', (339, 381), False, 'from amlearn.utils.data import get_isometric_lists\n'), ((630, 692), 'amlearn.utils.data.get_isometric_lists', 'get_isometric_lists', (['test_arrays'], {'limit_width': '(80)', 'fill_value': '(0)'}), '(test_arrays, limit_width=80, fill_value=0)\n', (649, 692), False, 'from amlearn.utils.data import get_isometric_lists\n'), ((407, 432), 'numpy.array', 'np.array', (['isometric_lists'], {}), '(isometric_lists)\n', (415, 432), True, 'import numpy as np\n'), ((482, 501), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (490, 501), True, 'import numpy as np\n'), ((503, 516), 'numpy.array', 'np.array', (['[4]'], {}), '([4])\n', (511, 516), True, 'import numpy as np\n'), ((549, 565), 'numpy.array', 'np.array', (['[5, 6]'], {}), '([5, 6])\n', (557, 565), True, 'import numpy as np\n'), ((567, 586), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (575, 586), True, 'import numpy as np\n'), ((718, 744), 'numpy.array', 'np.array', (['isometric_arrays'], {}), '(isometric_arrays)\n', (726, 744), True, 'import numpy as np\n')]
|
import json
import torch
import numpy as np
import os
#from pytorch_pretrained_bert import BertTokenizer
from transformers import BertTokenizer
class BertWordFormatter:
def __init__(self, config, mode):
self.max_question_len = config.getint("data", "max_question_len")
self.max_option_len = config.getint("data", "max_option_len")
self.tokenizer = BertTokenizer.from_pretrained(config.get("model", "bert_path"))
def convert_tokens_to_ids(self, tokens):
arr = []
for a in range(0, len(tokens)):
if tokens[a] in self.word2id:
arr.append(self.word2id[tokens[a]])
else:
arr.append(self.word2id["UNK"])
return arr
def convert(self, tokens, l, bk=False):
tokens = "".join(tokens)
# while len(tokens) < l:
# tokens.append("PAD")
# if bk:
# tokens = tokens[len(tokens) - l:]
# else:
# tokens = tokens[:l]
ids = self.tokenizer.tokenize(tokens)
return ids
def _convert_sentence_pair_to_bert_dataset(
self, context, max_len):
"""Convert sentence pairs to dataset for BERT model.
Args:
sc_list, bc_list: List[List[str]], list of word tokens list
label_list: train: List[int], list of labels
test: []
Returns:
Train:
torch.utils.data.TensorDataset
each record: (input_ids, input_mask, segment_ids, label)
Test:
torch.utils.data.TensorDataset
each record: (input_ids, input_mask, segment_ids)
"""
all_input_ids, all_input_mask, all_segment_ids = [], [], []
for i, _ in enumerate(context):
if len(context[i]) > max_len:
context[i] = context[i][-max_len:]
tokens = ['[CLS]'] + context[i] + ['[SEP]']
segment_ids = [i%2] * len(tokens)
if len(tokens) > max_len:
tokens = tokens[:max_len]
assert len(tokens) == max_len
segment_ids = segment_ids[:max_len]
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
tokens_len = len(input_ids)
input_ids += [0] * (max_len - tokens_len)
segment_ids += [0] * (max_len - tokens_len)
input_mask += [0] * (max_len - tokens_len)
all_input_ids.append(input_ids)
all_input_mask.append(input_mask)
all_segment_ids.append(segment_ids)
all_input_ids = torch.tensor(all_input_ids, dtype=torch.long)
all_input_mask = torch.tensor(all_input_mask, dtype=torch.long)
all_segment_ids = torch.tensor(all_segment_ids, dtype=torch.long)
# test
return (
all_input_ids, all_input_mask, all_segment_ids)
def process(self, data, config, mode, *args, **params):
context = []
question = []
label = []
idx = []
for temp_data in data:
idx.append(temp_data["id"])
if mode != "test":
# label_x = []
# for opt in list("ABCD"):
# if opt in temp_data["answer"]:
# label_x.append(1)
# else:
# label_x.append(0)
label_x = -1
if "A" in temp_data["answer"]:
label_x += 1
if "B" in temp_data["answer"]:
label_x += 2
if "C" in temp_data["answer"]:
label_x += 4
if "D" in temp_data["answer"]:
label_x += 8
label.append(label_x)
temp_context = []
temp_question = []
temp_question.append(self.convert(temp_data["statement"], self.max_question_len, bk=True))
for option in ["A", "B", "C", "D"]:
temp_context.append(self.convert(temp_data["option_list"][option], self.max_option_len))
context.extend(temp_context)
question.extend(temp_question)
# question = torch.tensor(question, dtype=torch.long)
# context = torch.tensor(context, dtype=torch.long)
question = self._convert_sentence_pair_to_bert_dataset(question, self.max_question_len)
context = self._convert_sentence_pair_to_bert_dataset(context, self.max_option_len)
if mode != "test":
label = torch.LongTensor(np.array(label, dtype=np.int))
return {"context": context, "question": question, 'label': label, "id": idx}
else:
return {"context": context, "question": question, "id": idx}
|
[
"torch.tensor",
"numpy.array"
] |
[((2646, 2691), 'torch.tensor', 'torch.tensor', (['all_input_ids'], {'dtype': 'torch.long'}), '(all_input_ids, dtype=torch.long)\n', (2658, 2691), False, 'import torch\n'), ((2717, 2763), 'torch.tensor', 'torch.tensor', (['all_input_mask'], {'dtype': 'torch.long'}), '(all_input_mask, dtype=torch.long)\n', (2729, 2763), False, 'import torch\n'), ((2790, 2837), 'torch.tensor', 'torch.tensor', (['all_segment_ids'], {'dtype': 'torch.long'}), '(all_segment_ids, dtype=torch.long)\n', (2802, 2837), False, 'import torch\n'), ((4584, 4613), 'numpy.array', 'np.array', (['label'], {'dtype': 'np.int'}), '(label, dtype=np.int)\n', (4592, 4613), True, 'import numpy as np\n')]
|
"""A filter block.
"""
import control
import numpy as np
import scipy
from .base import Block
class Filter(Block):
"""A Filter block class
This is simply a single-input-single-output LTI system defined by a
single TransferFunction object.
Parameters
----------
tf : control.TransferFunction
The transfer function of the filter (continuous).
fs : float
Sampling frequency in Hz.
method : str, optional
Method used to convert the continuous system to
discrete system.
Argument is passed to ``scipy.signal.cont2discrete``.
Defaults to "bilinear".
label : str, optional
Label for this filter.
Defaults to None.
Note
----
When ``inputs.setter`` the current input and output is saved into a register
for next cycle.
This means that calling ``inputs.setter`` indicates the end of a cycle.
"""
def __init__(self, tf, fs, method="bilinear", label=None):
"""Constructor
Parameters
----------
tf : control.TransferFunction
The transfer function of the filter (continuous).
fs : float
Sampling frequency in Hz.
method : str, optional
Method used to convert the continuous system to
discrete system.
Argument is passed to ``scipy.signal.cont2discrete``.
Defaults to "bilinear".
label : str, optional
Label for this filter.
Defaults to None.
"""
self._tf = None
self._fs = None
self._method = None
self._num_d = None
self._den_d = None
self._input_register = None
self._output_register = None
self.tf = tf
self.fs = fs
self.method = method
super().__init__(label=label)
def _i2o(self):
"""Pass input through filter and return the output"""
input_register = self.input_register
output_register = self.output_register
num_d = self.num_d
den_d = self.den_d
out = (np.dot(num_d, input_register)
- np.dot(den_d[1:], output_register[1:]))
return out
@property
def inputs(self):
"""Input of the block."""
return self._inputs
@inputs.setter
def inputs(self, _inputs):
"""input.setter"""
self._latch_output_register()
self._inputs = _inputs
self._latch_input_register()
@property
def tf(self):
"""The transfer function of the filter (continuous)."""
return self._tf
@tf.setter
def tf(self, _tf):
"""tf.setter"""
if not isinstance(_tf, control.TransferFunction):
raise TypeError("tf must be a TransferFunction object.")
if len(_tf.zero()) > len(_tf.pole()):
raise ValueError("tf must be a proper transfer function.")
if np.any(_tf.pole().real >= 0):
raise ValueError("tf must be a stable transfer function.")
self._tf = _tf
self._set_coefs()
self._reset_register()
@property
def fs(self):
"""Sampling frequency in Hz"""
return self._fs
@fs.setter
def fs(self, _fs):
"""fs.setter"""
self._fs = _fs
self._set_coefs()
self._reset_register()
@property
def method(self):
"""Method used to convert the continuous system discrete system."""
return self._method
@method.setter
def method(self, _method):
"""method.setter"""
self._method = _method
self._set_coefs()
self._reset_register()
@property
def num_d(self):
"""Discrete transfer function numerators"""
return self._num_d
@num_d.setter
def num_d(self, _num_d):
"""num_d.setter"""
self._num_d = _num_d
@property
def den_d(self):
"""Discrete transfer function denominator"""
return self._den_d
@den_d.setter
def den_d(self, _den_d):
"""den_d.setter"""
self._den_d = _den_d
@property
def input_register(self):
"""Input register (history of the input)"""
return self._input_register
@input_register.setter
def input_register(self, _input_register):
"""input_register.setter"""
self._input_register = _input_register
@property
def output_register(self):
"""output register (history of the output)"""
return self._output_register
@output_register.setter
def output_register(self, _output_register):
"""input_register.setter"""
self._output_register = _output_register
def _set_coefs(self):
"""Set discrete filter coefficients."""
if (self.tf is not None
and self.fs is not None
and self.method is not None):
# Set coefficients for discrete filters.
# Note: H(z) = (b0 + b1*z^1...)/(1 + a1*z^1...)
# print("set coefs")
num = self.tf.num[0][0]
den = self.tf.den[0][0]
dt = 1/self.fs
method = self.method
num_d, den_d, _ = scipy.signal.cont2discrete(
(num, den), dt=dt, method=method)
num_d = num_d.reshape(-1)
den_d = den_d.reshape(-1)
self.num_d = num_d
self.den_d = den_d
def _reset_register(self):
"""Reset the input/output register"""
if self.num_d is not None and self.den_d is not None:
self.input_register = np.zeros_like(self.num_d)
self.output_register = np.zeros_like(self.den_d)
def _latch_input_register(self):
"""Shift and then put input value into input register
"""
if self.input_register is not None:
self.input_register[1:] = self.input_register[:-1]
self.input_register[0] = self.inputs
def _latch_output_register(self):
"""Shift and then put input value into input register
"""
if self.output_register is not None:
self.output_register[0] = self.output
self.output_register[1:] = self.output_register[:-1]
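# A hedged usage sketch, not part of the original module. It relies only on what
# is shown above, plus the assumption that the ``Block`` base class accepts the
# ``label`` keyword it is passed; the first-order low-pass transfer function and
# the sampling rate are made up for illustration.
if __name__ == "__main__":
    import math
    # Illustrative 10 Hz first-order low-pass: H(s) = 1 / (s/(2*pi*10) + 1)
    lowpass = control.TransferFunction([1.0], [1.0 / (2.0 * math.pi * 10.0), 1.0])
    filt = Filter(tf=lowpass, fs=1000.0, method="bilinear", label="lp10")
    print("discrete coefficients:", filt.num_d, filt.den_d)
    filt.input_register[0] = 1.0   # pretend the newest input sample is 1.0
    print("one filtered sample:", filt._i2o())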
|
[
"numpy.dot",
"numpy.zeros_like",
"scipy.signal.cont2discrete"
] |
[((2083, 2112), 'numpy.dot', 'np.dot', (['num_d', 'input_register'], {}), '(num_d, input_register)\n', (2089, 2112), True, 'import numpy as np\n'), ((2130, 2168), 'numpy.dot', 'np.dot', (['den_d[1:]', 'output_register[1:]'], {}), '(den_d[1:], output_register[1:])\n', (2136, 2168), True, 'import numpy as np\n'), ((5163, 5223), 'scipy.signal.cont2discrete', 'scipy.signal.cont2discrete', (['(num, den)'], {'dt': 'dt', 'method': 'method'}), '((num, den), dt=dt, method=method)\n', (5189, 5223), False, 'import scipy\n'), ((5553, 5578), 'numpy.zeros_like', 'np.zeros_like', (['self.num_d'], {}), '(self.num_d)\n', (5566, 5578), True, 'import numpy as np\n'), ((5614, 5639), 'numpy.zeros_like', 'np.zeros_like', (['self.den_d'], {}), '(self.den_d)\n', (5627, 5639), True, 'import numpy as np\n')]
|
import pandas as pd
import numpy as np
import xml.etree.ElementTree as ElementTree
from traffic_analysis.d00_utils.bbox_helpers import bboxcv2_to_bboxcvlib
from traffic_analysis.d05_evaluation.parse_annotation import parse_annotation
from traffic_analysis.d05_evaluation.compute_mean_average_precision import get_avg_precision_at_iou
class FrameLevelEvaluator:
"""
    Conduct frame-level evaluation for the videos in videos_to_eval.
"""
def __init__(self,
videos_to_eval: pd.DataFrame,
frame_level_df: pd.DataFrame,
selected_labels: list
):
# data frames to work with
self.videos_to_eval = videos_to_eval
self.frame_level_df = frame_level_df
self.frame_level_ground_truth = pd.DataFrame({})
self.frame_level_preds = pd.DataFrame({})
# parameters
self.selected_labels = selected_labels
def evaluate(self) -> pd.DataFrame:
"""Compute mean average precision for each vehicle type on multiple videos
"""
self.frame_level_ground_truth = self.get_ground_truth()
self.frame_level_preds = self.filter_frame_level_df()
frame_level_map_dfs = []
for (gt_camera_id, gt_video_upload_datetime), ground_truth_df in \
self.frame_level_ground_truth.groupby(["camera_id", "video_upload_datetime"]):
# get corresponding predictions for this video
pred_df = self.frame_level_preds[(self.frame_level_preds["camera_id"] == gt_camera_id) &
(self.frame_level_preds["video_upload_datetime"] ==
gt_video_upload_datetime)].copy()
ground_truth_dict = self.reparse_bboxes_df(ground_truth_df,
include_confidence=False)
predicted_dict = self.reparse_bboxes_df(pred_df,
include_confidence=True,
bbox_format="cv2")
map_dict = self.compute_map_video(ground_truth_dict, predicted_dict)
map_df = pd.DataFrame.from_dict(map_dict,
orient="index",
columns=["mean_avg_precision"])
map_df["camera_id"] = gt_camera_id
map_df["video_upload_datetime"] = gt_video_upload_datetime
frame_level_map_dfs.append(map_df)
frame_level_map_df = pd.concat(frame_level_map_dfs,
axis=0)
frame_level_map_df.index.name = "vehicle_type"
frame_level_map_df.reset_index(inplace=True)
return frame_level_map_df
def filter_frame_level_df(self) -> pd.DataFrame:
"""
Get preds for videos which are in videos_to_eval
"""
frame_level_df_filt = pd.merge(left=self.videos_to_eval[['camera_id', 'video_upload_datetime']],
right=self.frame_level_df,
on=['camera_id', 'video_upload_datetime'],
how='inner')
zeros_mask = frame_level_df_filt.bboxes.apply(
            lambda x: all(bbox_entry == 0.0 for bbox_entry in x))
frame_level_df_filt = (frame_level_df_filt[~zeros_mask]
.sort_values(by=["camera_id", "video_upload_datetime"])
.reset_index(drop=True))
return frame_level_df_filt
def get_ground_truth(self) -> pd.DataFrame:
"""Read in annotation xmls from paths stored in self.videos_to_eval
"""
frame_level_ground_truth_dfs = []
for idx, video in self.videos_to_eval.iterrows():
# get frame level ground truth
xml_root = ElementTree.parse(video["xml_path"]).getroot()
frame_level_ground_truth = parse_annotation(xml_root)
frame_level_ground_truth["camera_id"] = video["camera_id"]
frame_level_ground_truth["video_upload_datetime"] = video["video_upload_datetime"]
frame_level_ground_truth_dfs.append(frame_level_ground_truth)
frame_level_ground_truth = pd.concat(frame_level_ground_truth_dfs,
axis=0)
return frame_level_ground_truth
def reparse_bboxes_df(self,
df: pd.DataFrame,
include_confidence: bool,
bbox_format: str = "cvlib") -> dict:
"""Restructures dfs containing bboxes for each frame (i.e. frame level df,
ground truth df) to a dictionary of dictionaries. This format is what
compute_mean_average_precision.py functions take as input.
Args:
df: frame_level_df which contains bboxes corresponding to each frame of
a video.
include_confidence: If this df contains the confidence corresponding to
the bbox predictions, this should be specified (the
reparser will construct a sub-dict for this case)
            bbox_format: "cvlib" boxes are (xmin, ymin, xmin+width, ymin+height);
                         "cv2" boxes are (xmin, ymin, width, height)
Returns:
df as a nested dictionary
"""
# dict of dict of dicts, with outermost layer being the vehicle type
n_frames = df["frame_id"].nunique()
bboxes_np = np.array(df["bboxes"].values.tolist())
assert bboxes_np.shape[1] == 4
if bbox_format == "cv2":
# convert to format cvlib
bboxes_cvlib = pd.Series(bboxcv2_to_bboxcvlib(bboxes_np, vectorized=True).tolist()).values
df.loc[:, "bboxes"] = bboxes_cvlib
# initialize dictionaries to correct shape
if include_confidence:
df_as_dict = {
vehicle_type: {
"frame" + str(i): {"bboxes": [], "scores": []}
for i in range(n_frames)
}
for vehicle_type in self.selected_labels
}
else:
df_as_dict = {
vehicle_type: {"frame" + str(i): [] for i in range(n_frames)}
for vehicle_type in self.selected_labels
}
for (vehicle_type, frame_id), vehicle_frame_df in df.groupby(
["vehicle_type", "frame_id"]):
if vehicle_type not in self.selected_labels:
continue
frame_id = int(frame_id)
if include_confidence:
df_as_dict[vehicle_type]["frame" + str(frame_id)]["bboxes"] = \
vehicle_frame_df["bboxes"].tolist()
df_as_dict[vehicle_type]["frame" + str(frame_id)]["scores"] = \
vehicle_frame_df["confidence"].tolist()
else:
df_as_dict[vehicle_type]["frame" + str(frame_id)] = \
vehicle_frame_df["bboxes"].tolist()
return df_as_dict
def compute_map_video(self, ground_truth_dict, predicted_dict) -> dict:
""" Function computes the mean average precision for each vehicle type for a video
Args:
ground_truth_dict: ground_truth_df reparsed by reparse_bboxes_df
predicted_dict: frame_level_df reparsed by reparse_bboxes_df
Returns:
map_dict: dictionary with vehicle_types as keys and maps as values
"""
map_dict = {vehicle_type: -1.0 for vehicle_type in self.selected_labels}
for vehicle_type in self.selected_labels:
vehicle_gt_dict = ground_truth_dict[vehicle_type]
vehicle_pred_dict = predicted_dict[vehicle_type]
avg_precs = []
iou_thrs = []
# compute avg precision for 10 IOU thresholds from .5 to .95 (COCO challenge standard)
for idx, iou_thr in enumerate(np.linspace(0.5, 0.95, 10)):
data_dict = get_avg_precision_at_iou(
gt_bboxes=vehicle_gt_dict,
pred_bboxes=vehicle_pred_dict,
iou_thr=iou_thr,
)
avg_precs.append(data_dict["avg_prec"])
iou_thrs.append(iou_thr)
# avg the avg precision for each IOU value
mean_avg_precision = 100 * np.mean(avg_precs)
map_dict[vehicle_type] = mean_avg_precision
return map_dict
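# For orientation, a hypothetical illustration (not from the original source) of the
# nested-dictionary layout that reparse_bboxes_df produces and get_avg_precision_at_iou
# consumes; the "car" label, frame count, boxes and scores below are invented.
# With include_confidence=True each frame maps to a {"bboxes", "scores"} sub-dict:
example_predicted_dict = {
    "car": {
        "frame0": {"bboxes": [[10, 20, 50, 80]], "scores": [0.9]},
        "frame1": {"bboxes": [[12, 22, 52, 82], [100, 40, 140, 90]],
                   "scores": [0.8, 0.6]},
    }
}
# With include_confidence=False each frame maps directly to its list of bboxes:
example_ground_truth_dict = {
    "car": {
        "frame0": [[11, 21, 51, 81]],
        "frame1": [[13, 23, 53, 83]],
    }
}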
|
[
"traffic_analysis.d00_utils.bbox_helpers.bboxcv2_to_bboxcvlib",
"numpy.mean",
"traffic_analysis.d05_evaluation.parse_annotation.parse_annotation",
"xml.etree.ElementTree.parse",
"pandas.merge",
"pandas.DataFrame.from_dict",
"traffic_analysis.d05_evaluation.compute_mean_average_precision.get_avg_precision_at_iou",
"numpy.linspace",
"pandas.DataFrame",
"pandas.concat"
] |
[((781, 797), 'pandas.DataFrame', 'pd.DataFrame', (['{}'], {}), '({})\n', (793, 797), True, 'import pandas as pd\n'), ((831, 847), 'pandas.DataFrame', 'pd.DataFrame', (['{}'], {}), '({})\n', (843, 847), True, 'import pandas as pd\n'), ((2556, 2594), 'pandas.concat', 'pd.concat', (['frame_level_map_dfs'], {'axis': '(0)'}), '(frame_level_map_dfs, axis=0)\n', (2565, 2594), True, 'import pandas as pd\n'), ((2945, 3110), 'pandas.merge', 'pd.merge', ([], {'left': "self.videos_to_eval[['camera_id', 'video_upload_datetime']]", 'right': 'self.frame_level_df', 'on': "['camera_id', 'video_upload_datetime']", 'how': '"""inner"""'}), "(left=self.videos_to_eval[['camera_id', 'video_upload_datetime']],\n right=self.frame_level_df, on=['camera_id', 'video_upload_datetime'],\n how='inner')\n", (2953, 3110), True, 'import pandas as pd\n'), ((4297, 4344), 'pandas.concat', 'pd.concat', (['frame_level_ground_truth_dfs'], {'axis': '(0)'}), '(frame_level_ground_truth_dfs, axis=0)\n', (4306, 4344), True, 'import pandas as pd\n'), ((2188, 2273), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['map_dict'], {'orient': '"""index"""', 'columns': "['mean_avg_precision']"}), "(map_dict, orient='index', columns=['mean_avg_precision']\n )\n", (2210, 2273), True, 'import pandas as pd\n'), ((3993, 4019), 'traffic_analysis.d05_evaluation.parse_annotation.parse_annotation', 'parse_annotation', (['xml_root'], {}), '(xml_root)\n', (4009, 4019), False, 'from traffic_analysis.d05_evaluation.parse_annotation import parse_annotation\n'), ((8039, 8065), 'numpy.linspace', 'np.linspace', (['(0.5)', '(0.95)', '(10)'], {}), '(0.5, 0.95, 10)\n', (8050, 8065), True, 'import numpy as np\n'), ((8096, 8200), 'traffic_analysis.d05_evaluation.compute_mean_average_precision.get_avg_precision_at_iou', 'get_avg_precision_at_iou', ([], {'gt_bboxes': 'vehicle_gt_dict', 'pred_bboxes': 'vehicle_pred_dict', 'iou_thr': 'iou_thr'}), '(gt_bboxes=vehicle_gt_dict, pred_bboxes=\n vehicle_pred_dict, iou_thr=iou_thr)\n', (8120, 8200), False, 'from traffic_analysis.d05_evaluation.compute_mean_average_precision import get_avg_precision_at_iou\n'), ((8467, 8485), 'numpy.mean', 'np.mean', (['avg_precs'], {}), '(avg_precs)\n', (8474, 8485), True, 'import numpy as np\n'), ((3907, 3943), 'xml.etree.ElementTree.parse', 'ElementTree.parse', (["video['xml_path']"], {}), "(video['xml_path'])\n", (3924, 3943), True, 'import xml.etree.ElementTree as ElementTree\n'), ((5769, 5817), 'traffic_analysis.d00_utils.bbox_helpers.bboxcv2_to_bboxcvlib', 'bboxcv2_to_bboxcvlib', (['bboxes_np'], {'vectorized': '(True)'}), '(bboxes_np, vectorized=True)\n', (5789, 5817), False, 'from traffic_analysis.d00_utils.bbox_helpers import bboxcv2_to_bboxcvlib\n')]
|
# -*- coding: utf-8 -*-
'''
Copyright (c) 2021, Trustworthy AI, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder(s) nor the names of any contributors
may be used to endorse or promote products derived from this software without
specific prior written permission. No license is granted to the trademarks of
the copyright holders even if such marks are included in this software.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
###############################################################################
# transforms.py
# Purpose: utilities for converting between carla and ROS coordinate systems
# Notes:
# to make sure my editor saves in utf-8 here is a nice character: é
###############################################################################
import math
import numpy
import tf
from geometry_msgs.msg import Vector3, Quaternion, Transform, Pose, Point, Twist, Accel
def carla_location_to_numpy_vector(carla_location):
"""
    Convert a carla location to a numpy vector
Considers the conversion from left-handed system (unreal) to right-handed
system (ROS)
:param carla_location: the carla location
:type carla_location: carla.Location
:return: a numpy.array with 3 elements
:rtype: numpy.array
"""
return numpy.array([
carla_location.x,
-carla_location.y,
carla_location.z
])
def carla_location_to_ros_vector3(carla_location):
"""
Convert a carla location to a ROS vector3
Considers the conversion from left-handed system (unreal) to right-handed
system (ROS)
:param carla_location: the carla location
:type carla_location: carla.Location
:return: a ROS vector3
:rtype: geometry_msgs.msg.Vector3
"""
ros_translation = Vector3()
ros_translation.x = carla_location.x
ros_translation.y = -carla_location.y
ros_translation.z = carla_location.z
return ros_translation
def carla_location_to_ros_point(carla_location):
"""
Convert a carla location to a ROS point
Considers the conversion from left-handed system (unreal) to right-handed
system (ROS)
:param carla_location: the carla location
:type carla_location: carla.Location
:return: a ROS point
:rtype: geometry_msgs.msg.Point
"""
ros_point = Point()
ros_point.x = carla_location.x
ros_point.y = -carla_location.y
ros_point.z = carla_location.z
return ros_point
def numpy_quaternion_to_ros_quaternion(numpy_quaternion):
"""
Convert a quaternion from transforms to a ROS msg quaternion
:param numpy_quaternion: a numpy quaternion
:type numpy_quaternion: numpy.array
:return: a ROS quaternion
:rtype: geometry_msgs.msg.Quaternion
"""
ros_quaternion = Quaternion()
ros_quaternion.x = numpy_quaternion[0]
ros_quaternion.y = numpy_quaternion[1]
ros_quaternion.z = numpy_quaternion[2]
ros_quaternion.w = numpy_quaternion[3]
return ros_quaternion
def carla_rotation_to_RPY(carla_rotation):
"""
Convert a carla rotation to a roll, pitch, yaw tuple
Considers the conversion from left-handed system (unreal) to right-handed
system (ROS).
Considers the conversion from degrees (carla) to radians (ROS).
:param carla_rotation: the carla rotation
:type carla_rotation: carla.Rotation
:return: a tuple with 3 elements (roll, pitch, yaw)
:rtype: tuple
"""
roll = math.radians(carla_rotation.roll)
pitch = -math.radians(carla_rotation.pitch)
yaw = -math.radians(carla_rotation.yaw)
return (roll, pitch, yaw)
def carla_rotation_to_numpy_quaternion(carla_rotation):
"""
Convert a carla rotation to a numpy quaternion
Considers the conversion from left-handed system (unreal) to right-handed
system (ROS).
Considers the conversion from degrees (carla) to radians (ROS).
:param carla_rotation: the carla rotation
:type carla_rotation: carla.Rotation
:return: a numpy.array with 4 elements (quaternion)
:rtype: numpy.array
"""
roll, pitch, yaw = carla_rotation_to_RPY(carla_rotation)
quat = tf.transformations.quaternion_from_euler(roll, pitch, yaw)
return quat
def carla_rotation_to_ros_quaternion(carla_rotation):
"""
Convert a carla rotation to a ROS quaternion
Considers the conversion from left-handed system (unreal) to right-handed
system (ROS).
Considers the conversion from degrees (carla) to radians (ROS).
:param carla_rotation: the carla rotation
:type carla_rotation: carla.Rotation
:return: a ROS quaternion
:rtype: geometry_msgs.msg.Quaternion
"""
quat = carla_rotation_to_numpy_quaternion(carla_rotation)
ros_quaternion = numpy_quaternion_to_ros_quaternion(quat)
return ros_quaternion
def carla_rotation_to_numpy_rotation_matrix(carla_rotation):
"""
    Convert a carla rotation to a numpy rotation matrix
Considers the conversion from left-handed system (unreal) to right-handed
system (ROS).
Considers the conversion from degrees (carla) to radians (ROS).
:param carla_rotation: the carla rotation
:type carla_rotation: carla.Rotation
:return: a numpy.array with 3x3 elements
:rtype: numpy.array
"""
roll, pitch, yaw = carla_rotation_to_RPY(carla_rotation)
numpy_array = tf.transformations.euler_matrix(roll, pitch, yaw)
rotation_matrix = numpy_array[:3, :3]
return rotation_matrix
def carla_rotation_to_directional_numpy_vector(carla_rotation):
"""
    Convert a carla rotation (as orientation) into a numpy directional vector
:param carla_rotation: the carla rotation
:type carla_rotation: carla.Rotation
:return: a numpy.array with 3 elements as directional vector
representation of the orientation
:rtype: numpy.array
"""
rotation_matrix = carla_rotation_to_numpy_rotation_matrix(carla_rotation)
directional_vector = numpy.array([1, 0, 0])
rotated_directional_vector = rotation_matrix.dot(directional_vector)
return rotated_directional_vector
def carla_vector_to_ros_vector_rotated(carla_vector, carla_rotation):
"""
Rotate carla vector, return it as ros vector
:param carla_vector: the carla vector
:type carla_vector: carla.Vector3D
:param carla_rotation: the carla rotation
:type carla_rotation: carla.Rotation
:return: rotated ros vector
:rtype: Vector3
"""
rotation_matrix = carla_rotation_to_numpy_rotation_matrix(carla_rotation)
tmp_array = rotation_matrix.dot(numpy.array([carla_vector.x, carla_vector.y, carla_vector.z]))
ros_vector = Vector3()
ros_vector.x = tmp_array[0]
ros_vector.y = -tmp_array[1]
ros_vector.z = tmp_array[2]
return ros_vector
def carla_velocity_to_ros_twist(carla_linear_velocity, carla_angular_velocity, carla_rotation):
"""
Convert a carla velocity to a ROS twist
Considers the conversion from left-handed system (unreal) to right-handed
system (ROS).
    :param carla_linear_velocity: the carla linear velocity
    :type carla_linear_velocity: carla.Vector3D
:param carla_angular_velocity: the carla angular velocity
:type carla_angular_velocity: carla.Vector3D
:param carla_rotation: the carla rotation
:type carla_rotation: carla.Rotation
:return: a ROS twist (with rotation)
:rtype: geometry_msgs.msg.Twist
"""
ros_twist = Twist()
ros_twist.linear = carla_vector_to_ros_vector_rotated(carla_linear_velocity, carla_rotation)
ros_twist.angular.x = math.radians(carla_angular_velocity.x)
ros_twist.angular.y = -math.radians(carla_angular_velocity.y)
ros_twist.angular.z = -math.radians(carla_angular_velocity.z)
return ros_twist
def carla_velocity_to_numpy_vector(carla_velocity):
"""
Convert a carla velocity to a numpy array
Considers the conversion from left-handed system (unreal) to right-handed
system (ROS)
:param carla_velocity: the carla velocity
:type carla_velocity: carla.Vector3D
:return: a numpy.array with 3 elements
:rtype: numpy.array
"""
return numpy.array([
carla_velocity.x,
-carla_velocity.y,
carla_velocity.z
])
def carla_acceleration_to_ros_accel(carla_acceleration):
"""
Convert a carla acceleration to a ROS accel
Considers the conversion from left-handed system (unreal) to right-handed
system (ROS)
The angular accelerations remain zero.
:param carla_acceleration: the carla acceleration
:type carla_acceleration: carla.Vector3D
:return: a ROS accel
:rtype: geometry_msgs.msg.Accel
"""
ros_accel = Accel()
ros_accel.linear.x = carla_acceleration.x
ros_accel.linear.y = -carla_acceleration.y
ros_accel.linear.z = carla_acceleration.z
return ros_accel
def carla_transform_to_ros_transform(carla_transform):
"""
Convert a carla transform to a ROS transform
See carla_location_to_ros_vector3() and carla_rotation_to_ros_quaternion() for details
:param carla_transform: the carla transform
:type carla_transform: carla.Transform
:return: a ROS transform
:rtype: geometry_msgs.msg.Transform
"""
ros_transform = Transform()
ros_transform.translation = carla_location_to_ros_vector3(
carla_transform.location)
ros_transform.rotation = carla_rotation_to_ros_quaternion(
carla_transform.rotation)
return ros_transform
def carla_transform_to_ros_pose(carla_transform):
"""
Convert a carla transform to a ROS pose
See carla_location_to_ros_point() and carla_rotation_to_ros_quaternion() for details
:param carla_transform: the carla transform
:type carla_transform: carla.Transform
:return: a ROS pose
:rtype: geometry_msgs.msg.Pose
"""
ros_pose = Pose()
ros_pose.position = carla_location_to_ros_point(
carla_transform.location)
ros_pose.orientation = carla_rotation_to_ros_quaternion(
carla_transform.rotation)
return ros_pose
def carla_location_to_pose(carla_location):
"""
Convert a carla location to a ROS pose
See carla_location_to_ros_point() for details.
pose quaternion remains zero.
:param carla_location: the carla location
:type carla_location: carla.Location
:return: a ROS pose
:rtype: geometry_msgs.msg.Pose
"""
ros_pose = Pose()
ros_pose.position = carla_location_to_ros_point(carla_location)
return ros_pose
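# A hedged sanity-check sketch, not part of the original module: only the x/y/z
# attributes of carla.Location are used by the functions above, so a namedtuple
# stands in for it here to show the left-handed to right-handed sign flip on y.
if __name__ == '__main__':
    from collections import namedtuple
    # Illustrative stand-in for carla.Location (only .x/.y/.z are accessed).
    FakeLocation = namedtuple('FakeLocation', ['x', 'y', 'z'])
    location = FakeLocation(x=1.0, y=2.0, z=3.0)
    print(carla_location_to_numpy_vector(location))  # expected: [ 1. -2.  3.]
    print(carla_location_to_ros_point(location))     # the y component is negated as well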
|
[
"geometry_msgs.msg.Vector3",
"geometry_msgs.msg.Twist",
"math.radians",
"geometry_msgs.msg.Transform",
"numpy.array",
"geometry_msgs.msg.Point",
"geometry_msgs.msg.Quaternion",
"tf.transformations.quaternion_from_euler",
"tf.transformations.euler_matrix",
"geometry_msgs.msg.Accel",
"geometry_msgs.msg.Pose"
] |
[((2477, 2545), 'numpy.array', 'numpy.array', (['[carla_location.x, -carla_location.y, carla_location.z]'], {}), '([carla_location.x, -carla_location.y, carla_location.z])\n', (2488, 2545), False, 'import numpy\n'), ((2962, 2971), 'geometry_msgs.msg.Vector3', 'Vector3', ([], {}), '()\n', (2969, 2971), False, 'from geometry_msgs.msg import Vector3, Quaternion, Transform, Pose, Point, Twist, Accel\n'), ((3496, 3503), 'geometry_msgs.msg.Point', 'Point', ([], {}), '()\n', (3501, 3503), False, 'from geometry_msgs.msg import Vector3, Quaternion, Transform, Pose, Point, Twist, Accel\n'), ((3954, 3966), 'geometry_msgs.msg.Quaternion', 'Quaternion', ([], {}), '()\n', (3964, 3966), False, 'from geometry_msgs.msg import Vector3, Quaternion, Transform, Pose, Point, Twist, Accel\n'), ((4621, 4654), 'math.radians', 'math.radians', (['carla_rotation.roll'], {}), '(carla_rotation.roll)\n', (4633, 4654), False, 'import math\n'), ((5308, 5366), 'tf.transformations.quaternion_from_euler', 'tf.transformations.quaternion_from_euler', (['roll', 'pitch', 'yaw'], {}), '(roll, pitch, yaw)\n', (5348, 5366), False, 'import tf\n'), ((6509, 6558), 'tf.transformations.euler_matrix', 'tf.transformations.euler_matrix', (['roll', 'pitch', 'yaw'], {}), '(roll, pitch, yaw)\n', (6540, 6558), False, 'import tf\n'), ((7169, 7191), 'numpy.array', 'numpy.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (7180, 7191), False, 'import numpy\n'), ((7855, 7864), 'geometry_msgs.msg.Vector3', 'Vector3', ([], {}), '()\n', (7862, 7864), False, 'from geometry_msgs.msg import Vector3, Quaternion, Transform, Pose, Point, Twist, Accel\n'), ((8618, 8625), 'geometry_msgs.msg.Twist', 'Twist', ([], {}), '()\n', (8623, 8625), False, 'from geometry_msgs.msg import Vector3, Quaternion, Transform, Pose, Point, Twist, Accel\n'), ((8749, 8787), 'math.radians', 'math.radians', (['carla_angular_velocity.x'], {}), '(carla_angular_velocity.x)\n', (8761, 8787), False, 'import math\n'), ((9319, 9387), 'numpy.array', 'numpy.array', (['[carla_velocity.x, -carla_velocity.y, carla_velocity.z]'], {}), '([carla_velocity.x, -carla_velocity.y, carla_velocity.z])\n', (9330, 9387), False, 'import numpy\n'), ((9857, 9864), 'geometry_msgs.msg.Accel', 'Accel', ([], {}), '()\n', (9862, 9864), False, 'from geometry_msgs.msg import Vector3, Quaternion, Transform, Pose, Point, Twist, Accel\n'), ((10421, 10432), 'geometry_msgs.msg.Transform', 'Transform', ([], {}), '()\n', (10430, 10432), False, 'from geometry_msgs.msg import Vector3, Quaternion, Transform, Pose, Point, Twist, Accel\n'), ((11022, 11028), 'geometry_msgs.msg.Pose', 'Pose', ([], {}), '()\n', (11026, 11028), False, 'from geometry_msgs.msg import Vector3, Quaternion, Transform, Pose, Point, Twist, Accel\n'), ((11586, 11592), 'geometry_msgs.msg.Pose', 'Pose', ([], {}), '()\n', (11590, 11592), False, 'from geometry_msgs.msg import Vector3, Quaternion, Transform, Pose, Point, Twist, Accel\n'), ((4668, 4702), 'math.radians', 'math.radians', (['carla_rotation.pitch'], {}), '(carla_rotation.pitch)\n', (4680, 4702), False, 'import math\n'), ((4714, 4746), 'math.radians', 'math.radians', (['carla_rotation.yaw'], {}), '(carla_rotation.yaw)\n', (4726, 4746), False, 'import math\n'), ((7775, 7836), 'numpy.array', 'numpy.array', (['[carla_vector.x, carla_vector.y, carla_vector.z]'], {}), '([carla_vector.x, carla_vector.y, carla_vector.z])\n', (7786, 7836), False, 'import numpy\n'), ((8815, 8853), 'math.radians', 'math.radians', (['carla_angular_velocity.y'], {}), '(carla_angular_velocity.y)\n', (8827, 8853), False, 'import math\n'), ((8881, 8919), 'math.radians', 'math.radians', (['carla_angular_velocity.z'], {}), '(carla_angular_velocity.z)\n', (8893, 8919), False, 'import math\n')]
|
import numpy as np
class KNearestNeighbors:
def __init__(self, distances, labels, k=10):
self.distances = distances
self.labels = labels
self.k = k
def _kNN(self, instance, train, k):
nearest = np.argpartition(self.distances[instance][train], k)
nearest_labels = self.labels[train][nearest[:k]]
unique, counts = np.unique(nearest_labels, return_counts=True)
counts = dict(zip(unique, counts))
probabilities = np.zeros(10)
for i in range(10):
probabilities[i] = 0 if i not in counts else counts[i] / k
return probabilities
def score(self, train, test):
correct = 0
total = 0
confusion = np.zeros((10,10))
# choose k to be at most as large as supported by the training dataset
# or as configured, if enough training samples are available
k = min(len(train)//10, self.k)
for i in test:
probs = self._kNN(i, train, k)
pred = np.argmax(probs)
confusion[self.labels[i]][pred] += 1
if pred == self.labels[i]:
correct += 1
total += 1
accuracy = correct / total
return accuracy, confusion
class KNearestNeighborsTrainTest(KNearestNeighbors):
def __init__(self, distances, train_labels, test_labels, k=10):
self.test_labels = test_labels
super().__init__(distances, train_labels, k)
def score(self, train):
correct = 0
total = 0
confusion = np.zeros((10,10))
# choose k to be at most as large as supported by the training dataset
# or as configured, if enough training samples are available
k = min(len(train)//10, self.k)
for i, label in enumerate(self.test_labels):
probs = self._kNN(i, train, k)
pred = np.argmax(probs)
confusion[label][pred] += 1
if pred == label:
correct += 1
total += 1
accuracy = correct / total
return accuracy, confusion
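# A small hedged demonstration, not from the original source: the classifier expects a
# precomputed pairwise distance matrix, so the toy example below builds one with
# scipy.spatial.distance.cdist over random features; all data here is invented.
if __name__ == "__main__":
    from scipy.spatial.distance import cdist
    rng = np.random.default_rng(0)
    features = rng.normal(size=(60, 5))       # 60 toy samples with 5 features each
    labels = rng.integers(0, 10, size=60)      # class labels in 0..9
    distances = cdist(features, features)      # full pairwise distance matrix
    knn = KNearestNeighbors(distances, labels, k=5)
    train_idx = np.arange(50)                  # first 50 samples act as the training pool
    test_idx = np.arange(50, 60)               # last 10 samples are scored
    accuracy, confusion = knn.score(train_idx, test_idx)
    print("accuracy:", accuracy)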
|
[
"numpy.argmax",
"numpy.zeros",
"numpy.unique",
"numpy.argpartition"
] |
[((238, 289), 'numpy.argpartition', 'np.argpartition', (['self.distances[instance][train]', 'k'], {}), '(self.distances[instance][train], k)\n', (253, 289), True, 'import numpy as np\n'), ((372, 417), 'numpy.unique', 'np.unique', (['nearest_labels'], {'return_counts': '(True)'}), '(nearest_labels, return_counts=True)\n', (381, 417), True, 'import numpy as np\n'), ((494, 506), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (502, 506), True, 'import numpy as np\n'), ((741, 759), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (749, 759), True, 'import numpy as np\n'), ((1565, 1583), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (1573, 1583), True, 'import numpy as np\n'), ((1032, 1048), 'numpy.argmax', 'np.argmax', (['probs'], {}), '(probs)\n', (1041, 1048), True, 'import numpy as np\n'), ((1886, 1902), 'numpy.argmax', 'np.argmax', (['probs'], {}), '(probs)\n', (1895, 1902), True, 'import numpy as np\n')]
|
# coding: utf-8
# In[1]:
from path import Path
from matplotlib import pyplot as plt
import numpy as np
import skimage.io as io
import os
from PIL import Image
import cv2
import random
import shutil
def crop_by_sequence(image_path,img_class_path,crop_size_w,crop_size_h,prefix,save_dir ,same_scale = False):
"""
image_path : the image you want to crop
img_class_path: the mask you want to crop
crop_size_h: the size of height you want
    crop_size_w: the size of width you want
save_dir: the dir you want to save
prefix: the special word you want to add
    same_scale: if True, crop a window twice the requested size and resize it back down
"""
raw_img = io.imread(image_path,)[:,:,1:]
raw_img_class = io.imread(img_class_path,)
if same_scale == True:
crop_size_w = crop_size_w * 2
crop_size_h = crop_size_h * 2
print(raw_img.shape,raw_img_class.shape)
h,w,c = raw_img.shape[0],raw_img.shape[1],raw_img.shape[2]
index = 0
x2,y2 = 0,0
x0,y0 = 0,0
while(y2<h):
while(x2<w):
x1 = x0
x2 = x1 + crop_size_w
y1 = y0
y2 = y1 +crop_size_h
if(x2>w or y2>h):
x2 = min(x2,w)
y2 = min(y2,h)
if((x2-x1)>10 and (y2-y1)>10):
backgroud = np.zeros((crop_size_h,crop_size_w,c),dtype=np.uint8)
backgroud[:y2-y1,:x2-x1,:] = raw_img[y1:y2,x1:x2,:]
patch = backgroud
backgroud_label = np.zeros((crop_size_h,crop_size_w),dtype=np.uint8)
backgroud_label[:y2-y1,:x2-x1] = raw_img_class[y1:y2,x1:x2]
patch_label = backgroud_label
else:
break
else:
patch = raw_img[y1:y2,x1:x2,:]
patch_label = raw_img_class[y1:y2,x1:x2]
#stride_h = auto_stride(patch_label)
stride_h = crop_size_h
stride_w = crop_size_w
#print "current stride: ",stride_h
x0 = x1 + stride_w
if same_scale == True:
patch = cv2.resize(patch,(int(crop_size_w/2), int(crop_size_h/2)))
patch_label = cv2.resize(patch_label,(int(crop_size_w/2), int(crop_size_h/2)))
success = cv2.imwrite(save_dir + f'/images/{prefix}_sequence_{index}.png',patch)
success_1 = cv2.imwrite(save_dir + f'/labels/{prefix}_sequence_{index}.png',patch_label)
if success == 1 and success_1 ==1 :
pass
else:
print('seq_save err')
index = index + 1
x0,x1,x2 = 0,0,0
y0 = y1 + stride_h
def crop_by_random(num,image_path,img_class_path,crop_size_w,crop_size_h,prefix,save_dir, same_scale = False ):
"""
    num: the number of random crops to generate
    image_path : the image you want to crop
img_class_path: the mask you want to crop
crop_size_h: the size of height you want
    crop_size_w: the size of width you want
save_dir: the dir you want to save
prefix: the special word you want to add
    same_scale: if True, crop a window twice the requested size and resize it back down
"""
if same_scale == True:
crop_size_w = crop_size_w * 2
crop_size_h = crop_size_h * 2
raw_img = io.imread(image_path,)[:,:,1:]
raw_img_class = io.imread(img_class_path)
print(raw_img.shape, raw_img_class.shape)
h,w,c = raw_img.shape[0],raw_img.shape[1],raw_img.shape[2]
index = 0
range_h = h - crop_size_h - 1
range_w = w - crop_size_w - 1
list_x = np.random.randint(low = 0, high = range_h, size = num)
list_y = np.random.randint(low = 0, high = range_w, size = num)
combine = list(zip(list_x,list_y))
for i in combine:
patch = raw_img[i[0]:i[0] + crop_size_h, i[1]:i[1] + crop_size_w,:]
patch_label = raw_img_class[i[0]:i[0] + crop_size_h, i[1]:i[1] + crop_size_w]
if same_scale == True:
patch = cv2.resize(patch,(int(crop_size_w/2), int(crop_size_h/2)))
patch_label = cv2.resize(patch_label,(int(crop_size_w/2), int(crop_size_h/2)))
success = cv2.imwrite(save_dir + f'/images/{prefix}_random_{index}.png',patch)
success_1 = cv2.imwrite(f'{save_dir}/labels/{prefix}_random_{index}.png',patch_label)
if success == 1 and success_1 ==1 :
pass
else:
print('random save err', success, success_1)
index = index + 1
def generate(ds_file:list, num = 1000,split = 5, crop_size_h = 512, crop_size_w = 512, save_dir = 'dataset',string = '', same_scale = False, ):
"""
num: the number of pictures split by random crop
split: trainset : validationset
crop_size_h: the size of height you want
    crop_size_w: the size of width you want
save_dir: the dir you want to save
string: the special word you want to add
    same_scale: if True, crop a window twice the requested size and resize it back down
"""
print(crop_size_h, crop_size_w)
os.mkdir(f'./{save_dir}/')
os.mkdir(f'./{save_dir}/training')
os.mkdir(f'./{save_dir}/training/images')
os.mkdir(f'./{save_dir}/training/labels')
os.mkdir(f'./{save_dir}/validation')
os.mkdir(f'./{save_dir}/validation/images')
os.mkdir(f'./{save_dir}/validation/labels')
for f in ds_file:
images = [i for i in Path(f'{f}/').files() if len(str(i.name)) == 45]
if 'train' in f:
ge_save_dir = save_dir + '/training'
else:
ge_save_dir = save_dir +'/validation'
for i in range(len(images)):
image_path = images[i]
img_class_path = f'{f}/' + f'{images[i].stem[:-4]}'+ '_label_mask.png'
prefix = f"picture_{i}"
prefix = string + prefix
print(image_path)
print(img_class_path)
crop_by_random(num,image_path,img_class_path,crop_size_w,crop_size_h,prefix,ge_save_dir, same_scale = same_scale )
crop_by_sequence(image_path,img_class_path,crop_size_w,crop_size_h,prefix,ge_save_dir, same_scale = same_scale)
    if split:
## split the train dataset and validation dataset
img_sample = random.sample(Path(f'./{save_dir}/training/images/').files(),len(Path(f'./{save_dir}/training/images/').files())//split )
train_img_dir = f'./{save_dir}/training/images/'
train_label_dir = f'./{save_dir}/training/labels/'
val_img_dir = f'./{save_dir}/validation/images/'
val_label_dir = f'./{save_dir}/validation/labels/'
        for i in img_sample:
shutil.move(train_img_dir + i.name,f'{val_img_dir}{i.name}')
shutil.move(train_label_dir + i.name ,f'{val_label_dir}{i.name}')
generate(ds_file = ['train_set', 'val_set'])
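# A hedged note on the expected inputs, inferred from the path handling above (the
# 45-character filename filter and the "_label_mask.png" suffix come straight from the
# code; the concrete file names are placeholders only):
#
# train_set/                                     # one directory per entry in ds_file
#     <image name, 45 characters long>.png       # raw image; the first channel is dropped on load
#     <image stem minus its last 4 chars>_label_mask.png   # per-pixel label mask of the same size
# val_set/
#     ... same layout ...
# dataset/                                       # created by generate(); crops are written here
#     training/images/   training/labels/
#     validation/images/ validation/labels/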
|
[
"cv2.imwrite",
"shutil.move",
"path.Path",
"skimage.io.imread",
"numpy.random.randint",
"numpy.zeros",
"os.mkdir"
] |
[((703, 728), 'skimage.io.imread', 'io.imread', (['img_class_path'], {}), '(img_class_path)\n', (712, 728), True, 'import skimage.io as io\n'), ((3340, 3365), 'skimage.io.imread', 'io.imread', (['img_class_path'], {}), '(img_class_path)\n', (3349, 3365), True, 'import skimage.io as io\n'), ((3576, 3624), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'range_h', 'size': 'num'}), '(low=0, high=range_h, size=num)\n', (3593, 3624), True, 'import numpy as np\n'), ((3644, 3692), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'range_w', 'size': 'num'}), '(low=0, high=range_w, size=num)\n', (3661, 3692), True, 'import numpy as np\n'), ((4995, 5021), 'os.mkdir', 'os.mkdir', (['f"""./{save_dir}/"""'], {}), "(f'./{save_dir}/')\n", (5003, 5021), False, 'import os\n'), ((5026, 5060), 'os.mkdir', 'os.mkdir', (['f"""./{save_dir}/training"""'], {}), "(f'./{save_dir}/training')\n", (5034, 5060), False, 'import os\n'), ((5065, 5106), 'os.mkdir', 'os.mkdir', (['f"""./{save_dir}/training/images"""'], {}), "(f'./{save_dir}/training/images')\n", (5073, 5106), False, 'import os\n'), ((5111, 5152), 'os.mkdir', 'os.mkdir', (['f"""./{save_dir}/training/labels"""'], {}), "(f'./{save_dir}/training/labels')\n", (5119, 5152), False, 'import os\n'), ((5157, 5193), 'os.mkdir', 'os.mkdir', (['f"""./{save_dir}/validation"""'], {}), "(f'./{save_dir}/validation')\n", (5165, 5193), False, 'import os\n'), ((5198, 5241), 'os.mkdir', 'os.mkdir', (['f"""./{save_dir}/validation/images"""'], {}), "(f'./{save_dir}/validation/images')\n", (5206, 5241), False, 'import os\n'), ((5246, 5289), 'os.mkdir', 'os.mkdir', (['f"""./{save_dir}/validation/labels"""'], {}), "(f'./{save_dir}/validation/labels')\n", (5254, 5289), False, 'import os\n'), ((652, 673), 'skimage.io.imread', 'io.imread', (['image_path'], {}), '(image_path)\n', (661, 673), True, 'import skimage.io as io\n'), ((3289, 3310), 'skimage.io.imread', 'io.imread', (['image_path'], {}), '(image_path)\n', (3298, 3310), True, 'import skimage.io as io\n'), ((4160, 4229), 'cv2.imwrite', 'cv2.imwrite', (["(save_dir + f'/images/{prefix}_random_{index}.png')", 'patch'], {}), "(save_dir + f'/images/{prefix}_random_{index}.png', patch)\n", (4171, 4229), False, 'import cv2\n'), ((4249, 4323), 'cv2.imwrite', 'cv2.imwrite', (['f"""{save_dir}/labels/{prefix}_random_{index}.png"""', 'patch_label'], {}), "(f'{save_dir}/labels/{prefix}_random_{index}.png', patch_label)\n", (4260, 4323), False, 'import cv2\n'), ((2339, 2410), 'cv2.imwrite', 'cv2.imwrite', (["(save_dir + f'/images/{prefix}_sequence_{index}.png')", 'patch'], {}), "(save_dir + f'/images/{prefix}_sequence_{index}.png', patch)\n", (2350, 2410), False, 'import cv2\n'), ((2434, 2511), 'cv2.imwrite', 'cv2.imwrite', (["(save_dir + f'/labels/{prefix}_sequence_{index}.png')", 'patch_label'], {}), "(save_dir + f'/labels/{prefix}_sequence_{index}.png', patch_label)\n", (2445, 2511), False, 'import cv2\n'), ((6581, 6642), 'shutil.move', 'shutil.move', (['(train_img_dir + i.name)', 'f"""{val_img_dir}{i.name}"""'], {}), "(train_img_dir + i.name, f'{val_img_dir}{i.name}')\n", (6592, 6642), False, 'import shutil\n'), ((6654, 6719), 'shutil.move', 'shutil.move', (['(train_label_dir + i.name)', 'f"""{val_label_dir}{i.name}"""'], {}), "(train_label_dir + i.name, f'{val_label_dir}{i.name}')\n", (6665, 6719), False, 'import shutil\n'), ((1324, 1379), 'numpy.zeros', 'np.zeros', (['(crop_size_h, crop_size_w, c)'], {'dtype': 'np.uint8'}), '((crop_size_h, crop_size_w, c), dtype=np.uint8)\n', (1332, 1379), True, 'import numpy as np\n'), ((1526, 1578), 'numpy.zeros', 'np.zeros', (['(crop_size_h, crop_size_w)'], {'dtype': 'np.uint8'}), '((crop_size_h, crop_size_w), dtype=np.uint8)\n', (1534, 1578), True, 'import numpy as np\n'), ((6204, 6242), 'path.Path', 'Path', (['f"""./{save_dir}/training/images/"""'], {}), "(f'./{save_dir}/training/images/')\n", (6208, 6242), False, 'from path import Path\n'), ((5346, 5359), 'path.Path', 'Path', (['f"""{f}/"""'], {}), "(f'{f}/')\n", (5350, 5359), False, 'from path import Path\n'), ((6255, 6293), 'path.Path', 'Path', (['f"""./{save_dir}/training/images/"""'], {}), "(f'./{save_dir}/training/images/')\n", (6259, 6293), False, 'from path import Path\n')]
|
import copy
import logging
import torch
import numpy as np
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
log = logging.getLogger(__name__)
def balanced_batches(dataset, batch_size):
unlabled_idx = dataset.unlabeled_idx
labeled_idx = list(filter(lambda _: _ not in unlabled_idx,
np.arange(len(dataset.targets))))
labeled_idx = np.array(labeled_idx)
# construct batches - half of them should be from unlabled, half from labeled
n_batches = (len(unlabled_idx) // (batch_size//2)) + 1
for ulb in np.array_split(unlabled_idx, n_batches):
batch_idx = list(ulb)
lb = np.random.choice(labeled_idx, size=(
batch_size // 2), replace=True)
batch_idx.extend(lb)
x_batch = []
y_batch = []
for idx in batch_idx:
x, y = dataset[idx]
x_batch.append(x)
y_batch.append(y)
yield torch.stack(x_batch), torch.LongTensor(y_batch)
def balanced_batches_heirarchy(dataset, heirarchy, batch_size):
unlabled_idx = dataset.unlabeled_idx
labeled_idx = list(filter(lambda _: _ not in unlabled_idx,
np.arange(len(dataset.targets))))
labeled_idx = np.array(labeled_idx)
# construct batches - half of them should be from unlabled, half from labeled
n_batches = (len(unlabled_idx) // (batch_size//2)) + 1
for ulb in np.array_split(unlabled_idx, n_batches):
batch_idx = list(ulb)
lb = np.random.choice(labeled_idx, size=(
batch_size // 2), replace=True)
batch_idx.extend(lb)
x_batch = []
y_batch = []
for idx in batch_idx:
x, y = dataset[idx]
x_batch.append(x)
y_batch.append(y)
y_batch = heirarchy.to_vec(torch.LongTensor(y_batch))
yield torch.stack(x_batch), y_batch
class FashionMNIST(datasets.FashionMNIST):
UNLABLED = -1
def __init__(self, root, percent_unlabeled, train=True, transform=None, target_transform=None, download=False):
super().__init__(root, train=train, transform=transform,
target_transform=target_transform, download=download)
assert percent_unlabeled >= 0.0 and percent_unlabeled <= 1.0
if not train:
# no unlabled data in the test set
assert percent_unlabeled == 0.0
self.true_targets = copy.deepcopy(self.targets)
self.percent_unlabeled = percent_unlabeled
log.info("Setting {}% of the targets to UNLABELED".format(
self.percent_unlabeled * 100))
self.unlabeled_idx = np.random.permutation(
np.arange(0, len(self.targets)))[:int(self.percent_unlabeled * len(self.targets))]
self.targets[self.unlabeled_idx] = self.UNLABLED
self.n_classes = len(self.classes)
def sample_labels(self, n):
"""Sample n targets from the labeled data
Arguments:
n {int} -- Number of samples
"""
pass
@staticmethod
def separate_unlabeled(x_raw, y_raw):
if y_raw.ndimension() == 2:
unlabeled_idx = (y_raw == -1).sum(1) > 0
else:
unlabeled_idx = y_raw == FashionMNIST.UNLABLED
x, y = x_raw[~unlabeled_idx], y_raw[~unlabeled_idx]
x_unlab, y_unlab = x_raw[unlabeled_idx], y_raw[unlabeled_idx]
return x, y, x_unlab, y_unlab
class Hierarchy:
def __init__(self, fmnist):
self.org_class_to_idx = fmnist.class_to_idx
self.org_idx_to_class = {
v: k for (k, v) in self.org_class_to_idx.items()}
self.org_n_classes = len(self.org_class_to_idx)
Top = {"T-shirt/top", "Pullover", "Coat", "Shirt"}
Shoes = {"Sandal", "Sneaker", "Ankle boot"}
# simple one level heirarchy for now
self.heirarchy = {
"Top": Top,
"Shoes": Shoes,
"Bag": "Bag",
"Dress": "Dress",
"Trouser": "Trouser",
}
self.class_to_idx = copy.deepcopy(self.org_class_to_idx)
# add new top level classes
self.class_to_idx["Top"] = len(self.class_to_idx)
self.class_to_idx["Shoes"] = len(self.class_to_idx)
self.idx_to_class = {v: k for (k, v) in self.class_to_idx.items()}
self.n_classes = len(self.class_to_idx)
assoc_idx = {}
neg_assoc_idx = {}
all_idx = set(range(self.n_classes))
for clz in self.class_to_idx:
cls_idx = self.class_to_idx[clz]
assoc_idx[cls_idx] = [self.class_to_idx[c]
for c in self.find_classes(clz)]
neg_assoc_idx[cls_idx] = []
for idx in all_idx:
if idx not in assoc_idx[cls_idx]:
neg_assoc_idx[cls_idx].append(idx)
self.assoc_idx = assoc_idx
self.neg_assoc_idx = neg_assoc_idx
def find_classes(self, y, new_classes=None):
if new_classes is None:
new_classes = set()
for k, v in self.heirarchy.items():
if isinstance(v, set) and y in v:
new_classes.add(k)
new_classes.add(y)
elif k == y:
new_classes.add(k)
return new_classes
def to_vec(self, y):
new_y = torch.zeros(y.size(0), self.n_classes)
for idx, y_sub in enumerate(y.detach().numpy()):
if y_sub == -1:
new_y[idx, :] = -1.0
continue
classes = self.find_classes(self.org_idx_to_class[y_sub])
classes_idx = [self.class_to_idx[c] for c in classes]
new_y[idx, classes_idx] = 1.0
return new_y
def from_vector(self, v):
pass
if __name__ == '__main__':
fmnist_transforms = transforms.Compose([
transforms.ToTensor()
])
fmnist = FashionMNIST("./fashion-mnist", 0.5,
transform=fmnist_transforms, download=True)
for x_raw, y_raw in DataLoader(fmnist, batch_size=10):
x, y, x_unlab, y_unlab = FashionMNIST.separate_unlabeled(x_raw, y_raw)
print(x.size(), y.size())
print(x_unlab.size(), y_unlab.size())
break
fmnist = FashionMNIST("./fashion-mnist", 0.998, train=True,
transform=fmnist_transforms, download=True)
for x, y in balanced_batches(fmnist, 16):
print(x.size(), y)
# for x_raw, y_raw in DataLoader(fmnist, batch_size=10):
# x, y, x_unlab, y_unlab = FashionMNIST.separate_unlabeled(x_raw, y_raw)
# print(y, y_unlab)
# print(x.size(), y.size())
# print(x_unlab.size(), y_unlab.size())
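    # Hedged addition, not in the original demo: Hierarchy.to_vec expands integer
    # targets into multi-hot vectors over the extended label set and passes -1
    # (unlabeled) rows through as all -1. "Sneaker" is a standard FashionMNIST class.
    hierarchy = Hierarchy(fmnist)
    y_vec = hierarchy.to_vec(torch.LongTensor([fmnist.class_to_idx["Sneaker"], -1]))
    print(y_vec)  # the "Sneaker" row also activates the added "Shoes" super-class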
|
[
"logging.getLogger",
"copy.deepcopy",
"numpy.random.choice",
"torch.LongTensor",
"torch.stack",
"numpy.array_split",
"numpy.array",
"torch.utils.data.DataLoader",
"torchvision.transforms.ToTensor"
] |
[((152, 179), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (169, 179), False, 'import logging\n'), ((411, 432), 'numpy.array', 'np.array', (['labeled_idx'], {}), '(labeled_idx)\n', (419, 432), True, 'import numpy as np\n'), ((590, 629), 'numpy.array_split', 'np.array_split', (['unlabled_idx', 'n_batches'], {}), '(unlabled_idx, n_batches)\n', (604, 629), True, 'import numpy as np\n'), ((1262, 1283), 'numpy.array', 'np.array', (['labeled_idx'], {}), '(labeled_idx)\n', (1270, 1283), True, 'import numpy as np\n'), ((1441, 1480), 'numpy.array_split', 'np.array_split', (['unlabled_idx', 'n_batches'], {}), '(unlabled_idx, n_batches)\n', (1455, 1480), True, 'import numpy as np\n'), ((6024, 6057), 'torch.utils.data.DataLoader', 'DataLoader', (['fmnist'], {'batch_size': '(10)'}), '(fmnist, batch_size=10)\n', (6034, 6057), False, 'from torch.utils.data import DataLoader\n'), ((674, 739), 'numpy.random.choice', 'np.random.choice', (['labeled_idx'], {'size': '(batch_size // 2)', 'replace': '(True)'}), '(labeled_idx, size=batch_size // 2, replace=True)\n', (690, 739), True, 'import numpy as np\n'), ((1525, 1590), 'numpy.random.choice', 'np.random.choice', (['labeled_idx'], {'size': '(batch_size // 2)', 'replace': '(True)'}), '(labeled_idx, size=batch_size // 2, replace=True)\n', (1541, 1590), True, 'import numpy as np\n'), ((2442, 2469), 'copy.deepcopy', 'copy.deepcopy', (['self.targets'], {}), '(self.targets)\n', (2455, 2469), False, 'import copy\n'), ((4063, 4099), 'copy.deepcopy', 'copy.deepcopy', (['self.org_class_to_idx'], {}), '(self.org_class_to_idx)\n', (4076, 4099), False, 'import copy\n'), ((1835, 1860), 'torch.LongTensor', 'torch.LongTensor', (['y_batch'], {}), '(y_batch)\n', (1851, 1860), False, 'import torch\n'), ((5850, 5871), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5869, 5871), False, 'from torchvision import datasets, transforms\n'), ((962, 982), 'torch.stack', 'torch.stack', (['x_batch'], {}), '(x_batch)\n', (973, 982), False, 'import torch\n'), ((984, 1009), 'torch.LongTensor', 'torch.LongTensor', (['y_batch'], {}), '(y_batch)\n', (1000, 1009), False, 'import torch\n'), ((1876, 1896), 'torch.stack', 'torch.stack', (['x_batch'], {}), '(x_batch)\n', (1887, 1896), False, 'import torch\n')]
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Unit tests for PyMVPA GNB classifier"""
import numpy as np
from mvpa2.testing import *
from mvpa2.testing.datasets import *
from mvpa2.clfs.gnb import GNB
from mvpa2.measures.base import TransferMeasure
from mvpa2.generators.splitters import Splitter
class GNBTests(unittest.TestCase):
def test_gnb(self):
gnb = GNB()
gnb_nc = GNB(common_variance=False)
gnb_n = GNB(normalize=True)
gnb_n_nc = GNB(normalize=True, common_variance=False)
ds = datasets['uni2medium']
# Generic silly coverage just to assure that it works in all
# possible scenarios:
bools = (True, False)
# There should be better way... heh
for cv in bools: # common_variance?
for prior in ('uniform', 'laplacian_smoothing', 'ratio'):
tp = None # predictions -- all above should
# result in the same predictions
for n in bools: # normalized?
for ls in bools: # logspace?
for es in ((), ('estimates')):
gnb_ = GNB(common_variance=cv,
prior=prior,
normalize=n,
logprob=ls,
enable_ca=es)
tm = TransferMeasure(gnb_, Splitter('train'))
predictions = tm(ds).samples[:,0]
if tp is None:
tp = predictions
assert_array_equal(predictions, tp)
# if normalized -- check if estimates are such
if n and 'estimates' in es:
v = gnb_.ca.estimates
if ls: # in log space -- take exp ;)
v = np.exp(v)
d1 = np.sum(v, axis=1) - 1.0
self.assertTrue(np.max(np.abs(d1)) < 1e-5)
def suite():
return unittest.makeSuite(GNBTests)
if __name__ == '__main__':
import runner
|
[
"numpy.abs",
"mvpa2.clfs.gnb.GNB",
"numpy.exp",
"numpy.sum",
"mvpa2.generators.splitters.Splitter"
] |
[((712, 717), 'mvpa2.clfs.gnb.GNB', 'GNB', ([], {}), '()\n', (715, 717), False, 'from mvpa2.clfs.gnb import GNB\n'), ((735, 761), 'mvpa2.clfs.gnb.GNB', 'GNB', ([], {'common_variance': '(False)'}), '(common_variance=False)\n', (738, 761), False, 'from mvpa2.clfs.gnb import GNB\n'), ((778, 797), 'mvpa2.clfs.gnb.GNB', 'GNB', ([], {'normalize': '(True)'}), '(normalize=True)\n', (781, 797), False, 'from mvpa2.clfs.gnb import GNB\n'), ((817, 859), 'mvpa2.clfs.gnb.GNB', 'GNB', ([], {'normalize': '(True)', 'common_variance': '(False)'}), '(normalize=True, common_variance=False)\n', (820, 859), False, 'from mvpa2.clfs.gnb import GNB\n'), ((1525, 1600), 'mvpa2.clfs.gnb.GNB', 'GNB', ([], {'common_variance': 'cv', 'prior': 'prior', 'normalize': 'n', 'logprob': 'ls', 'enable_ca': 'es'}), '(common_variance=cv, prior=prior, normalize=n, logprob=ls, enable_ca=es)\n', (1528, 1600), False, 'from mvpa2.clfs.gnb import GNB\n'), ((1772, 1789), 'mvpa2.generators.splitters.Splitter', 'Splitter', (['"""train"""'], {}), "('train')\n", (1780, 1789), False, 'from mvpa2.generators.splitters import Splitter\n'), ((2240, 2249), 'numpy.exp', 'np.exp', (['v'], {}), '(v)\n', (2246, 2249), True, 'import numpy as np\n'), ((2279, 2296), 'numpy.sum', 'np.sum', (['v'], {'axis': '(1)'}), '(v, axis=1)\n', (2285, 2296), True, 'import numpy as np\n'), ((2350, 2360), 'numpy.abs', 'np.abs', (['d1'], {}), '(d1)\n', (2356, 2360), True, 'import numpy as np\n')]
|
"""
amplitude.py
measure the maximum peak-to-peak amplitude
"""
import obspy
import types
import numpy as np
import pandas as pd
import madpy.noise as n
from typing import Tuple
import madpy.checks as ch
import madpy.config as config
import matplotlib.pyplot as plt
import madpy.plotting.amp as plot
def measure_amplitude(
st: obspy.Stream,
cfg: types.ModuleType = config,
) -> pd.DataFrame:
"""Measure noise level and amplitude
Args:
st: stream containing one or more time series
cfg: configuration file
Returns:
df: dataframe of time series amplitude information
"""
output = []
for tr in st:
preliminary_checks(tr, cfg)
noise = n.rms_noise(tr, 'amplitude', cfg)
amplitude = max_amplitude(tr, noise, cfg)
date_formatted = tr.stats.o.strftime('%Y-%m-%d')
time_formatted = tr.stats.o.strftime('%H:%M:%S.%f')
output.append([date_formatted, time_formatted[:11],
tr.stats.network, tr.stats.station, tr.stats.channel,
amplitude, noise])
df = format_output(output)
if cfg.Amplitude.save_output:
df.to_csv(f'{cfg.Amplitude.output_path}/amp-output.csv',
float_format='%0.5f', index=False)
return df
def preliminary_checks(
tr: obspy.Trace,
cfg: types.ModuleType = config
) -> None:
"""Make sure all necessary information is present
This function checks...
1. The configuration file is setup correctly
2. The trace has all relevant information
3. There is sufficient time series data
Args:
tr: time series
cfg: configuration file
Returns:
None
"""
ch.check_config(cfg.Amplitude())
ch.check_waveform(tr)
return None
def max_amplitude(
tr: obspy.Trace,
noise: float,
cfg: types.ModuleType = config
) -> float:
"""Measure maximum peak-to-peak amplitude
Args:
tr: time series
noise: noise level
cfg: configuration file
Returns:
amp: maximum peak-to-peak amplitude
Raises:
ValueError: if max amplitude is not real and positive
"""
acfg = cfg.Amplitude()
tr_signal = trim_waveform_signal(tr.copy())
peaks_nan = inflection_points(tr_signal.data)
peaks = remove_nan(peaks_nan)
p2p_amplitudes = np.diff(peaks)
amp = np.max(np.abs(p2p_amplitudes)) * acfg.amp_factor
ch.check_amplitude(amp)
if acfg.plot:
indices = p2p_indices(tr_signal, peaks, p2p_amplitudes)
plot.amplitude_plot(tr, tr_signal, amp, indices, noise, acfg)
return amp
def trim_waveform_signal(
tr: obspy.Trace,
cfg: types.ModuleType = config
) -> obspy.Trace:
"""Cut the time series to signal window
Args:
tr: time series
cfg: configuration file
Returns:
tr: trimmed time series
"""
starttime, endtime = signal_window(tr, cfg)
tr.trim(starttime=starttime, endtime=endtime)
return tr
def signal_window(
tr: obspy.Trace,
cfg: types.ModuleType = config
) -> Tuple[obspy.UTCDateTime, obspy.UTCDateTime]:
"""Get the starttimes and endtimes of signal window
Args:
tr: time series
cfg: configuration file
Returns:
starttime: signal window beginning date
endtime: signal window ending date
Raises:
AssertionError: Window begins before time series begins
AssertionError: Window ends after time series ends
"""
acfg = cfg.Amplitude()
arrival = n.arrival_time_utc(tr, acfg.signal_phase)
starttime = arrival + acfg.signal_window_begin
endtime = arrival + acfg.signal_window_end
ch.check_window(tr, starttime, endtime)
return starttime, endtime
def inflection_points(data: np.ndarray) -> np.ndarray:
"""Isolate the peaks of an array
Args:
data: time series
Returns:
inflection_points: peaks of the time series
"""
nan_points = np.concatenate([[0], np.diff(np.sign(np.diff(data))), [0]])
nan_points[nan_points == 0] = np.nan
nan_points[~np.isnan(nan_points)] = 0
inflection_points = nan_points + data
return inflection_points
def remove_nan(array: np.ndarray) -> np.ndarray:
"""Remove NaN values
Args:
array: time series
Returns:
the time series without NaN values
"""
return array[~np.isnan(array)]
def p2p_indices(
tr: obspy.Trace,
peaks: np.ndarray,
amplitudes: np.ndarray
) -> Tuple[float, float]:
"""Get peak indices of max peak-to-peak amplitude
Args:
tr: time series
peaks: the inflection points of the time series
amplitudes: values of each peak to peak
Return:
idx: indices of two peaks associated with maximum amplitude
"""
i_diff = np.where(np.abs(amplitudes) == np.max(np.abs(amplitudes)))
i_peak1 = i_diff[0][0]
i_peak2 = i_peak1 + 1
peak1 = peaks[i_peak1]
peak2 = peaks[i_peak2]
nan_points = inflection_points(tr.data)
i_p1_0 = np.where(nan_points == peak1)
i_p2_0 = np.where(nan_points == peak2)
idx = p2p_indices_check(i_p1_0, i_p2_0)
return idx
def p2p_indices_check(i_p1_0: float, i_p2_0: float) -> Tuple[float, float]:
"""Verify the indices are associated with the peaks
Args:
i_p1_0: preliminary peak 1
i_p2_0: preliminary peak 2
Returns:
idx: final peaks
"""
if len(i_p1_0[0]) > 1 or len(i_p2_0[0]) > 1:
x_p1 = np.repeat(i_p1_0[0], len(i_p2_0[0]))
x_p2 = np.tile(i_p2_0[0], len(i_p2_0[0]))
x_p2_p1 = np.subtract(x_p2, x_p1)
x_p2_p1 = np.divide(x_p2_p1, 1.0)
x_p2_p1[x_p2_p1 < 0] = np.nan
i_x = np.where(x_p2_p1 == np.nanmin(x_p2_p1))
i_p1 = x_p1[int(i_x[0])]
i_p2 = x_p2[int(i_x[0])]
idx = np.array([i_p1, i_p2])
else:
i_p1 = i_p1_0[0]
i_p2 = i_p2_0[0]
idx = np.array([i_p1, i_p2])
return idx
def format_output(data: list) -> pd.DataFrame:
"""Turn list into dataframe
Args:
data: list of amplitude information
Returns:
df: dataframe of amplitude information
Raises:
AssertionError: if data size does not match column size
"""
column_names = ['date', 'time', 'network', 'station', 'channel',
'amplitude', 'noise']
assert len(data[0]) == len(column_names), \
'(ValueError) Data length must match column length'
df = pd.DataFrame(data, columns=column_names)
return df
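# A small self-contained illustration, not part of the original module, of the
# peak-to-peak logic inside max_amplitude: inflection_points keeps only the local
# extrema (every other sample becomes NaN), remove_nan drops that padding, and the
# largest absolute difference between consecutive extrema is the raw peak-to-peak
# value that max_amplitude then scales by the configured amp_factor. The two-tone
# signal below is synthetic and purely illustrative.
if __name__ == '__main__':
    t = np.linspace(0.0, 1.0, 501)
    synthetic = np.sin(2 * np.pi * 5 * t) + 0.3 * np.sin(2 * np.pi * 40 * t)
    extrema = remove_nan(inflection_points(synthetic))
    print('raw peak-to-peak:', np.max(np.abs(np.diff(extrema))))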
|
[
"numpy.abs",
"madpy.noise.arrival_time_utc",
"madpy.checks.check_amplitude",
"madpy.noise.rms_noise",
"numpy.divide",
"numpy.where",
"madpy.checks.check_window",
"numpy.diff",
"numpy.subtract",
"madpy.plotting.amp.amplitude_plot",
"numpy.array",
"numpy.isnan",
"pandas.DataFrame",
"numpy.nanmin",
"madpy.checks.check_waveform"
] |
[((1808, 1829), 'madpy.checks.check_waveform', 'ch.check_waveform', (['tr'], {}), '(tr)\n', (1825, 1829), True, 'import madpy.checks as ch\n'), ((2460, 2474), 'numpy.diff', 'np.diff', (['peaks'], {}), '(peaks)\n', (2467, 2474), True, 'import numpy as np\n'), ((2538, 2561), 'madpy.checks.check_amplitude', 'ch.check_amplitude', (['amp'], {}), '(amp)\n', (2556, 2561), True, 'import madpy.checks as ch\n'), ((3733, 3774), 'madpy.noise.arrival_time_utc', 'n.arrival_time_utc', (['tr', 'acfg.signal_phase'], {}), '(tr, acfg.signal_phase)\n', (3751, 3774), True, 'import madpy.noise as n\n'), ((3877, 3916), 'madpy.checks.check_window', 'ch.check_window', (['tr', 'starttime', 'endtime'], {}), '(tr, starttime, endtime)\n', (3892, 3916), True, 'import madpy.checks as ch\n'), ((5324, 5353), 'numpy.where', 'np.where', (['(nan_points == peak1)'], {}), '(nan_points == peak1)\n', (5332, 5353), True, 'import numpy as np\n'), ((5367, 5396), 'numpy.where', 'np.where', (['(nan_points == peak2)'], {}), '(nan_points == peak2)\n', (5375, 5396), True, 'import numpy as np\n'), ((6841, 6881), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'column_names'}), '(data, columns=column_names)\n', (6853, 6881), True, 'import pandas as pd\n'), ((730, 763), 'madpy.noise.rms_noise', 'n.rms_noise', (['tr', '"""amplitude"""', 'cfg'], {}), "(tr, 'amplitude', cfg)\n", (741, 763), True, 'import madpy.noise as n\n'), ((2657, 2718), 'madpy.plotting.amp.amplitude_plot', 'plot.amplitude_plot', (['tr', 'tr_signal', 'amp', 'indices', 'noise', 'acfg'], {}), '(tr, tr_signal, amp, indices, noise, acfg)\n', (2676, 2718), True, 'import madpy.plotting.amp as plot\n'), ((5918, 5941), 'numpy.subtract', 'np.subtract', (['x_p2', 'x_p1'], {}), '(x_p2, x_p1)\n', (5929, 5941), True, 'import numpy as np\n'), ((5960, 5983), 'numpy.divide', 'np.divide', (['x_p2_p1', '(1.0)'], {}), '(x_p2_p1, 1.0)\n', (5969, 5983), True, 'import numpy as np\n'), ((6156, 6178), 'numpy.array', 'np.array', (['[i_p1, i_p2]'], {}), '([i_p1, i_p2])\n', (6164, 6178), True, 'import numpy as np\n'), ((6253, 6275), 'numpy.array', 'np.array', (['[i_p1, i_p2]'], {}), '([i_p1, i_p2])\n', (6261, 6275), True, 'import numpy as np\n'), ((2492, 2514), 'numpy.abs', 'np.abs', (['p2p_amplitudes'], {}), '(p2p_amplitudes)\n', (2498, 2514), True, 'import numpy as np\n'), ((4317, 4337), 'numpy.isnan', 'np.isnan', (['nan_points'], {}), '(nan_points)\n', (4325, 4337), True, 'import numpy as np\n'), ((4645, 4660), 'numpy.isnan', 'np.isnan', (['array'], {}), '(array)\n', (4653, 4660), True, 'import numpy as np\n'), ((5110, 5128), 'numpy.abs', 'np.abs', (['amplitudes'], {}), '(amplitudes)\n', (5116, 5128), True, 'import numpy as np\n'), ((5139, 5157), 'numpy.abs', 'np.abs', (['amplitudes'], {}), '(amplitudes)\n', (5145, 5157), True, 'import numpy as np\n'), ((6056, 6074), 'numpy.nanmin', 'np.nanmin', (['x_p2_p1'], {}), '(x_p2_p1)\n', (6065, 6074), True, 'import numpy as np\n'), ((4237, 4250), 'numpy.diff', 'np.diff', (['data'], {}), '(data)\n', (4244, 4250), True, 'import numpy as np\n')]
|
####################################################
####################################################
# functions and classes used in conjunction with
# pipeline_metaomics.py
####################################################
####################################################
# import libraries
import sys
import re
import os
import itertools
import sqlite3
import CGAT.IOTools as IOTools
import CGATPipelines.Pipeline as P
from rpy2.robjects import r as R
import pandas
import numpy as np
####################################################
####################################################
####################################################
# SECTION 1
####################################################
####################################################
####################################################
def buildDiffStats(infile, outfile, db, connection):
'''
build differential abundance statistics
at different p-value and Fold change
thresholds for each comparison
'''
tablename = P.toTable(os.path.basename(infile))
statement = "ATTACH '%(db)s' as diff;" % locals()
connection.execute(statement)
# build table of results at different thresholds
ps = [0.01, 0.05, 0.1]
fcs = [0, 0.5, 1, 1.5, 2]
# build results for each pair
pairs = [("HhaIL10R", "WT"), ("WT", "aIL10R"), ("Hh", "WT")]
outf = open(outfile, "w")
outf.write("group1\tgroup2\tadj_P_Val\tlogFC\tnumber\n")
for pair in pairs:
p1, p2 = pair[0], pair[1]
for p, fc in itertools.product(ps, fcs):
statement = """SELECT COUNT(*)
FROM diff.%(tablename)s
WHERE group1 == "%(p1)s"
AND group2 == "%(p2)s"
AND adj_P_Val < %(p)f
AND abs(logFC) > %(fc)f""" % locals()
for data in connection.execute(statement).fetchall():
outf.write("\t".join([p1, p2, str(p), str(fc), str(data[0])]) + "\n")
outf.close()
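# Illustrative sketch (not part of the pipeline): the same threshold counting as
# buildDiffStats, done with pandas on an in-memory results table instead of SQL.
# The helper name and the pandas approach are assumptions; column names follow
# the differential tables used above (group1, group2, adj_P_Val, logFC).
def countDiffAtThresholds(df, group1, group2,
                          ps=(0.01, 0.05, 0.1), fcs=(0, 0.5, 1, 1.5, 2)):
    '''count features passing each (p-value, fold-change) threshold pair'''
    sub = df[(df["group1"] == group1) & (df["group2"] == group2)]
    rows = []
    for p, fc in itertools.product(ps, fcs):
        n = int(((sub["adj_P_Val"] < p) & (sub["logFC"].abs() > fc)).sum())
        rows.append({"group1": group1, "group2": group2,
                     "adj_P_Val": p, "logFC": fc, "number": n})
    return pandas.DataFrame(rows)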
####################################################
####################################################
####################################################
# SECTION 2
####################################################
####################################################
####################################################
def buildCommonList(rnadb, dnadb, outfile):
'''
build a list of NOGs/genera that were found in
common after filtering between RNA and
DNA data sets
'''
# select appropriate table depending on
# whether we want genera or NOGs
if "genera" in outfile:
tablename = "genus_diamond_aggregated_counts_diff"
else:
tablename = "gene_counts_diff"
# connect to respective
# databases for RNA and DNA
dbh_rna = sqlite3.connect(rnadb)
cc_rna = dbh_rna.cursor()
dbh_dna = sqlite3.connect(dnadb)
cc_dna = dbh_dna.cursor()
# collect NOGs/genera and write to
# file
outf = open(outfile, "w")
rna = set()
dna = set()
for gene in cc_rna.execute("""
SELECT taxa
FROM %s
WHERE group1 == "HhaIL10R"
AND group2 == "WT"
""" % tablename).fetchall():
rna.add(gene[0])
for gene in cc_dna.execute("""SELECT taxa
FROM %s
WHERE group1 == "HhaIL10R"
AND group2 == "WT"
""" % tablename).fetchall():
dna.add(gene[0])
    for gene in rna.intersection(dna):
        outf.write(gene + "\n")
    outf.close()
####################################################
####################################################
####################################################
def buildDiffList(db,
commonset,
outfile,
fdr=0.05,
l2fold=1,
tablename=None):
'''
build a list of differentially expressed
NOGs between colitis and steady state
'''
# list of common NOGs for sql statement
common = set([x[:-1] for x in open(commonset).readlines()])
common = "(" + ",".join(['"'+x+'"' for x in common]) + ")"
# connect to database
dbh = sqlite3.connect(db)
cc = dbh.cursor()
# remove any genes that are different between Hh and steady state
# or between aIL10R and steady state
hh = set([x[0] for x in cc.execute("""SELECT taxa
FROM %s \
WHERE group1 == "Hh" \
AND group2 == "WT" \
AND adj_P_Val < %f""" % (tablename, fdr)).fetchall()])
# sql list
hh = "(" + ",".join(['"'+x+'"' for x in hh]) + ")"
ail10r = set([x[0] for x in cc.execute("""SELECT taxa
FROM %s
WHERE group1 == "WT"
AND group2 == "aIL10R"
AND adj_P_Val < %f""" % (tablename, fdr)).fetchall()])
# sql list
ail10r = "(" + ",".join(['"'+x+'"' for x in ail10r]) + ")"
outf = open(outfile, "w")
for gene in cc.execute("""SELECT taxa
FROM %s
WHERE group1 == "HhaIL10R"
AND group2 == "WT"
AND adj_P_Val < %f
AND (logFC > %i OR logFC < -%i)
AND taxa IN %s
AND taxa NOT IN %s
AND taxa NOT IN %s
ORDER BY logFC DESC""" % (tablename, fdr, l2fold, l2fold, common, hh, ail10r)).fetchall():
outf.write(gene[0] + "\n")
outf.close()
####################################################
####################################################
####################################################
def heatmapDiffFeatures(diff_list,
matrix,
outfile):
'''
draw heatmap of differentially abundant features
'''
R('''library(gplots)''')
R('''library(gtools)''')
R('''diff <- read.csv("%s", header=F, sep="\t", stringsAsFactors=F)''' % diff_list)
R('''dat <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t")''' % matrix)
R('''rownames(dat) <- dat$taxa''')
R('''dat <- dat[, 1:ncol(dat)-1]''')
R('''dat <- dat[diff[,1],]''')
R('''dat <- na.omit(dat)''')
R('''dat <- dat[, mixedsort(colnames(dat))]''')
R('''samples <- colnames(dat)''')
R('''dat <- t(apply(dat, 1, scale))''')
R('''colnames(dat) <- samples''')
R('''cols <- colorRampPalette(c("blue", "white", "red"))''')
R('''pdf("%s")''' % outfile)
R('''heatmap.2(as.matrix(dat), col = cols, scale = "row", trace = "none", Rowv = F, Colv = F, margins = c(15,15),
distfun = function(x) dist(x, method = "manhattan"),
hclustfun = function(x) hclust(x, method = "ward.D2"))''')
R["dev.off"]()
####################################################
####################################################
####################################################
def buildDiffGeneOverlap(dnafile, rnafile, outfile):
'''
overlap differentially abundant NOGs between
RNA and DNA data sets
'''
dna = set([x[:-1] for x in open(dnafile).readlines()])
rna = set([x[:-1] for x in open(rnafile).readlines()])
ndna = len(dna)
nrna = len(rna)
overlap = len(dna.intersection(rna))
outf = open(outfile, "w")
outf.write("nDNA\tnRNA\tnoverlap\n%(ndna)i\t%(nrna)i\t%(overlap)i\n" % locals())
outf.close()
####################################################
####################################################
####################################################
def testSignificanceOfOverlap(common, overlap, outfile):
'''
Test significance of overlapping lists
    between RNA and DNA using a hypergeometric test
'''
R('''pop <- read.csv("%s", header = F, sep = "\t", stringsAsFactors = F)''' % common)
R('''overlaps <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % overlap)
# total genes in population
R('''npop <- nrow(pop)''')
# x = number of white balls picked = overlap
R('''x <- overlaps$noverlap''')
# m = total number of white balls = total diff in RNA analysis
R('''m <- overlaps$nRNA''')
# n = total number of black balls = total - diff in RNA analysis
R('''n <- npop - m''')
# k = total balls sampled = number of genera different in DNA analysis
R('''k <- overlaps$nDNA''')
# hypergeometric test
R('''p <- 1-phyper(x,m,n,k)''')
# write result
R('''res <- matrix(ncol = 2, nrow = 5)''')
R('''res[1,1] <- "x"''')
R('''res[2,1] <- "m"''')
R('''res[3,1] <- "n"''')
R('''res[4,1] <- "k"''')
R('''res[5,1] <- "p-value"''')
R('''res[1,2] <- x''')
R('''res[2,2] <- m''')
R('''res[3,2] <- n''')
R('''res[4,2] <- k''')
R('''res[5,2] <- p''')
R('''print(res)''')
R('''write.table(as.data.frame(res), file = "%s", quote = F, sep = "\t", row.names = F)''' % outfile)
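# Illustrative sketch (not part of the pipeline): the same hypergeometric overlap
# test as above, computed with scipy rather than R's phyper. scipy is an assumption
# here - the pipeline itself runs the test through rpy2.
def hypergeometricOverlapPvalue(noverlap, nrna, ndna, npop):
    '''P(overlap > observed) when ndna features are drawn from npop features
    of which nrna are "successes"; mirrors 1 - phyper(x, m, n, k)'''
    from scipy.stats import hypergeom
    # sf(x) = P(X > x), i.e. 1 - phyper(x, m, n, k) with m = nrna, n = npop - nrna, k = ndna
    return hypergeom.sf(noverlap, npop, nrna, ndna)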
####################################################
####################################################
####################################################
def scatterplotAbundanceEstimates(dnamatrix,
rnamatrix,
outfile):
'''
scatterplot abundance estimates between DNA and RNA
data sets
'''
R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % rnamatrix)
R('''rownames(rna) <- rna$taxa''')
R('''rna <- rna[,1:ncol(rna)-1]''')
R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % dnamatrix)
R('''rownames(dna) <- dna$taxa''')
R('''dna <- dna[,1:ncol(dna)-1]''')
# intersection of taxa/NOGs present
R('''keep <- intersect(rownames(rna), rownames(dna))''')
# get data where there is rna and dna
R('''rna <- rna[keep,]''')
R('''dna <- dna[keep,]''')
# take averages
R('''rna.ave <- data.frame(apply(rna, 1, mean))''')
R('''dna.ave <- data.frame(apply(dna, 1, mean))''')
R('''print(cor(dna.ave,rna.ave)[[1]])''')
R('''png("%s")''' % outfile)
R('''plot(dna.ave[,1],
rna.ave[,1],
pch = 16,
col = "slateGrey",
xlab = "Mean DNA abundance",
ylab = "Mean RNA abundance",
main = paste("N = ", nrow(dna.ave), sep = ""))
abline(lm(rna[,1]~dna[,1], na.rm = T))''')
R["dev.off"]()
####################################################
####################################################
####################################################
def buildDetectionOverlap(rnacounts, dnacounts, outfile):
'''
build detection overlaps between RNA and DNA
data sets
'''
R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % rnacounts)
R('''rownames(rna) <- rna$taxa''')
R('''rna <- rna[,1:ncol(rna)]''')
R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % dnacounts)
R('''rownames(dna) <- dna$taxa''')
R('''dna <- dna[,1:ncol(dna)]''')
R('''taxa.rna <- rownames(rna)''')
R('''taxa.dna <- rownames(dna)''')
# union of taxa across samples
R('''nrna = length(taxa.rna)''')
R('''ndna = length(taxa.dna)''')
# get overlapping
R('''noverlap = length(intersect(taxa.rna, taxa.dna))''')
R('''result = data.frame(nrna = nrna, ndna = ndna, noverlap = noverlap)''')
R('''write.table(result, file = "%s", sep = "\t", quote = F, row.names = F)''' % outfile)
####################################################
####################################################
####################################################
def plotAbundanceLevelsOfOverlap(rnacounts,
dnacounts,
outfile,
of=None):
'''
    plot abundance levels of taxa/NOGs that do
and don't overlap between data sets
'''
R('''library(ggplot2)''')
# get rna reads per million
R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % rnacounts)
R('''rownames(rna) <- rna$taxa''')
R('''rna <- rna[,2:ncol(rna)]''')
R('''rna <- sweep(rna, 2, colSums(rna)/1000000, "/")''')
# get dna reads per million
R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % dnacounts)
R('''rownames(dna) <- dna$taxa''')
R('''dna <- dna[,2:ncol(dna)]''')
R('''dna <- sweep(dna, 2, colSums(dna)/1000000, "/")''')
# common and distinct sets
R('''common <- intersect(rownames(dna), rownames(rna))''')
R('''rna.only <- setdiff(rownames(rna), rownames(dna))''')
R('''dna.only <- setdiff(rownames(dna), rownames(rna))''')
# boxplot the abundance levels
R('''rna.common <- apply(rna[common,], 1, mean)''')
R('''dna.common <- apply(dna[common,], 1, mean)''')
R('''rna.distinct <- apply(rna[rna.only,], 1, mean)''')
R('''dna.distinct <- apply(dna[dna.only,], 1, mean)''')
if of == "genes":
        # placeholder so the code still runs: the gene-level (NOG)
        # analysis has no RNA-only features, so there is no true
        # rna.distinct set
R('''rna.distinct <- rep(0, 20)''')
else:
R('''rna.distinct <- rna.distinct''')
    # test significance between groups
R('''wtest1 <- wilcox.test(rna.common, rna.distinct)''')
R('''wtest2 <- wilcox.test(dna.common, dna.distinct)''')
R('''wtest3 <- wilcox.test(rna.common, dna.distinct)''')
R('''wtest4 <- wilcox.test(dna.common, rna.distinct)''')
R('''wtest5 <- wilcox.test(dna.common, rna.common)''')
R('''res <- data.frame("rna.common_vs_rna.distinct" = wtest1$p.value,
"dna.common_vs_dna.distinct" = wtest2$p.value,
"rna.common_vs_dna.distinct" = wtest3$p.value,
"dna.common_vs_rna.distinct" = wtest4$p.value,
"dna.common_vs_rna.common" = wtest5$p.value)''')
outname_sig = outfile[:-4] + ".sig"
R('''write.table(res, file = "%s", row.names = F, sep = "\t", quote = F)''' % outname_sig)
# create dataframe for plotting
R('''dat <- data.frame(values = c(dna.distinct, dna.common, rna.common, rna.distinct),
status = c(rep("unique.dna", length(dna.distinct)),
rep("common.dna", length(dna.common)),
rep("common.rna", length(rna.common)),
rep("unique.rna", length(rna.distinct))))''')
R('''plot1 <- ggplot(dat, aes(x = factor(status, levels = status), y = values, stat = "identity"))''')
R('''plot1 + geom_boxplot() + scale_y_log10()''')
R('''ggsave("%s")''' % outfile)
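# Illustrative sketch (not part of the pipeline): R's two-sample wilcox.test used
# above corresponds to the Mann-Whitney U test; scipy offers the same comparison.
# scipy is an assumption, and p-values can differ slightly from R because of
# tie/continuity handling.
def wilcoxonAbundanceComparison(values_a, values_b):
    '''two-sided Mann-Whitney U test between two abundance vectors'''
    from scipy.stats import mannwhitneyu
    statistic, pvalue = mannwhitneyu(values_a, values_b, alternative="two-sided")
    return pvalue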
####################################################
####################################################
####################################################
# SECTION 3
####################################################
####################################################
####################################################
def runPCA(infile, outfile):
'''
run pca analysis - this outputs
a plot coloured by condition and
also the loadings
'''
if "RNA" in infile:
suffix = "rna"
else:
suffix = "dna"
if "gene" in infile:
xlim, ylim = 40,40
else:
xlim, ylim = 12,7
outname_plot = P.snip(outfile, ".loadings.tsv").replace("/", "/%s_" % suffix) + ".pca.pdf"
R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % infile)
R('''rownames(dat) <- dat$taxa''')
R('''dat <- dat[, 1:ncol(dat)-1]''')
R('''pc <- prcomp(t(dat))''')
R('''conds <- unlist(strsplit(colnames(dat), ".R[0-9]"))[seq(1, ncol(dat)*2, 2)]''')
R('''conds <- unlist(strsplit(conds, ".", fixed = T))[seq(2, length(conds)*2, 2)]''')
    # plot the principal components
R('''library(ggplot2)''')
R('''pcs <- data.frame(pc$x)''')
R('''pcs$cond <- conds''')
# get variance explained
R('''imps <- c(summary(pc)$importance[2], summary(pc)$importance[5])''')
R('''p <- ggplot(pcs, aes(x = PC1, y = PC2, colour = cond, size = 3)) + geom_point()''')
R('''p2 <- p + xlab(imps[1]) + ylab(imps[2])''')
R('''p3 <- p2 + scale_colour_manual(values = c("slateGrey", "green", "red", "blue"))''')
R('''p3 + xlim(c(-%i, %i)) + ylim(c(-%i, %i))''' % (xlim, xlim, ylim, ylim))
R('''ggsave("%s")''' % outname_plot)
# get the loadings
R('''loads <- data.frame(pc$rotation)''')
R('''loads$taxa <- rownames(loads)''')
# write out data
R('''write.table(loads, file = "%s", sep = "\t", row.names = F, quote = F)''' % outfile.replace("/", "/%s_" % suffix))
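# Illustrative sketch (not part of the pipeline): the PCA run above via prcomp(t(dat)),
# expressed with numpy so the "variance explained" labels are explicit. The helper
# name and the numpy-only approach are assumptions.
def pcaVarianceExplained(matrix):
    '''matrix is features x samples; returns (sample scores, proportion of variance per PC)'''
    X = np.asarray(matrix, dtype=float).T        # samples x features, as in t(dat)
    X = X - X.mean(axis=0)                        # prcomp centres the data by default
    U, s, Vt = np.linalg.svd(X, full_matrices=False)
    scores = U * s                                # principal component scores (pc$x)
    explained = (s ** 2) / np.sum(s ** 2)         # proportion of variance per component
    return scores, explained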
####################################################
####################################################
####################################################
def plotPCALoadings(infile, outfile):
'''
plot PCA loadings
'''
R('''library(ggplot2)''')
R('''library(grid)''')
R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % infile)
R('''top5pc1 <- dat[order(-dat$PC1),][1:5,]''')
R('''bottom5pc1 <- dat[order(dat$PC1),][1:5,]''')
R('''top5pc2 <- dat[order(-dat$PC2),][1:5,]''')
R('''bottom5pc2 <- dat[order(dat$PC2),][1:5,]''')
R('''totext <- data.frame(rbind(top5pc1, bottom5pc1, top5pc2, bottom5pc2))''')
R('''dat$x <- 0''')
R('''dat$y <- 0''')
R('''p <- ggplot(dat, aes(x = x, y = y, xend = PC1, yend = PC2, colour = taxa))''')
R('''p2 <- p + geom_segment(arrow = arrow(length = unit(0.2, "cm")))''')
R('''p2 + geom_text(data = totext, aes(x = PC1, y = PC2, label = totext$taxa, size = 6)) + xlim(c(-0.5,0.5)) + ylim(c(-0.5,0.25))''')
R('''ggsave("%s")''' % outfile)
####################################################
####################################################
####################################################
def barchartProportions(infile, outfile):
'''
stacked barchart description of percent reads
mapping to each taxon
'''
R('''library(ggplot2)''')
R('''library(gtools)''')
R('''library(reshape)''')
R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % infile)
R('''rownames(dat) <- dat$taxa''')
    # get rid of taxa column
R('''dat <- dat[,1:ncol(dat)-1]''')
R('''dat.percent <- data.frame(apply(dat, 2, function(x) x*100))''')
# candidate genera
R('''candidates <- c("Peptoniphilus",
"Deferribacter",
"Escherichia",
"Lactobacillus",
"Turicibacter",
"Akkermansia",
"Bifidobacterium",
"Methylacidiphilum")''')
R('''dat.percent <- dat.percent[candidates,]''')
R('''dat.percent <- dat.percent[,mixedsort(colnames(dat.percent))]''')
# add taxa column with "other" = < 5% in any sample
R('''dat.percent$taxa <- rownames(dat.percent)''')
# reshape and plot
outname = P.snip(outfile, ".pdf")
R('''dat.percent <- melt(dat.percent)''')
R('''conds <- unlist(strsplit(as.character(dat.percent$variable), ".R[0-9]"))[seq(1, nrow(dat.percent)*2, 2)]''')
R('''conds <- unlist(strsplit(conds, ".", fixed = T))[seq(2, length(conds)*2, 2)]''')
R('''dat.percent$cond <- conds''')
R('''for (taxon in candidates){
outname <- paste("%s", paste("_", taxon, sep=""), ".pdf", sep="")
dat.percent.restrict <- dat.percent[dat.percent$taxa==taxon,]
plot1 <- ggplot(dat.percent.restrict,
aes(x=factor(cond, levels=c("WT","aIL10R", "Hh", "HhaIL10R")),
y=value, group=cond, colour=cond, label=variable))
plot1 + geom_boxplot() + geom_jitter() + geom_text() + scale_colour_manual(values=c("darkGreen", "red", "grey", "blue"))
ggsave(outname)}''' % outname)
####################################################
####################################################
####################################################
# SECTION 4
####################################################
####################################################
####################################################
def buildRNADNARatio(dnadiff, rnadiff, outfile):
'''
build ratio of RNAfold/DNAfold
'''
R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % rnadiff)
R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % dnadiff)
R('''rna <- rna[rna$group1 == "HhaIL10R" & rna$group2 == "WT",]''')
R('''dna <- dna[dna$group1 == "HhaIL10R" & dna$group2 == "WT",]''')
R('''rownames(rna) <- rna$taxa''')
R('''rownames(dna) <- dna$taxa''')
R('''rna <- rna[,1:ncol(rna)-1]''')
R('''dna <- dna[,1:ncol(dna)-1]''')
# only look at those that are present in both
R('''keep <- intersect(rownames(rna), rownames(dna))''')
R('''rna <- rna[keep,]''')
R('''dna <- dna[keep,]''')
R('''rna.ratio <- rna$logFC''')
R('''dna.ratio <- dna$logFC''')
R('''rna.p <- rna$adj.P.Val''')
R('''dna.p <- dna$adj.P.Val''')
R('''ratio <- data.frame(gene = keep,
dna = dna.ratio,
rna = rna.ratio,
pdna = dna.p,
prna = rna.p,
ratio = rna.ratio - dna.ratio)''')
R('''write.table(ratio,
file = "%s",
sep = "\t",
row.names = F,
quote = F)''' % outfile)
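# Illustrative sketch (not part of the pipeline): the same RNA/DNA log fold change
# ratio as buildRNADNARatio, computed with pandas instead of R. The helper name is
# an assumption; column names (taxa, logFC, adj.P.Val) follow the diff tables read above.
def rnaDnaLogFoldRatio(rna_df, dna_df):
    '''merge per-feature fold changes and return a gene/dna/rna/ratio table'''
    rna = rna_df[(rna_df["group1"] == "HhaIL10R") & (rna_df["group2"] == "WT")]
    dna = dna_df[(dna_df["group1"] == "HhaIL10R") & (dna_df["group2"] == "WT")]
    merged = rna.merge(dna, on="taxa", suffixes=(".rna", ".dna"))
    return pandas.DataFrame({"gene": merged["taxa"],
                             "dna": merged["logFC.dna"],
                             "rna": merged["logFC.rna"],
                             "pdna": merged["adj.P.Val.dna"],
                             "prna": merged["adj.P.Val.rna"],
                             "ratio": merged["logFC.rna"] - merged["logFC.dna"]})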
####################################################
####################################################
####################################################
def annotateRNADNARatio(RNADNARatio,
dnalist,
rnalist,
outfile):
'''
annotate NOGs as to whether they were differentially
regulated in metagenomic, metatranscriptomic or both
data sets
'''
rna_diff = set([y[:-1] for y in open(rnalist).readlines()])
dna_diff = set([y[:-1] for y in open(dnalist).readlines()])
inf = IOTools.openFile(RNADNARatio)
inf.readline()
outf = IOTools.openFile(outfile, "w")
outf.write("gene\tdna\trna\tpdna\tprna\tratio\tstatus\n")
for line in inf.readlines():
gene, dna, rna, pdna, prna, ratio = line[:-1].split("\t")
gene = gene.strip('"')
dna, rna = float(dna), float(rna)
if gene in rna_diff and gene in dna_diff and dna > 0 and rna > 0:
status = "up.both"
elif gene in rna_diff and gene in dna_diff and dna < 0 and rna < 0:
status = "down.both"
elif gene in rna_diff and rna > 0:
status = "up.RNA"
elif gene in rna_diff and rna < 0:
status = "down.RNA"
elif gene in dna_diff and dna > 0:
status = "up.DNA"
elif gene in dna_diff and dna < 0:
status = "down.DNA"
else:
status = "NS"
outf.write("%(gene)s\t%(dna)s\t%(rna)s\t%(pdna)s\t%(prna)s\t%(ratio)s\t%(status)s\n" % locals())
outf.close()
####################################################
####################################################
####################################################
def plotSets(infile, outfile):
'''
plot the fold changes in RNA and DNA analyses
and label by how they are regulated in DNA and
RNA analyses
MUST HAVE GOI FILE IN WORKING DIR - not ideal
'''
R('''library(ggplot2)''')
# read in data
R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % infile)
# get nog 2 gene map
R('''cog2gene <- read.csv("goi.tsv", header = F, stringsAsFactors = F, sep = "\t", row.names = 1)''')
    # just get those significant in either DNA or RNA or both
R('''dat$status[dat$status == "NS"] = "z"''')
R('''genes <- dat$gene''')
# regression model
R('''mod1 <- lm(dat$rna~dat$dna)''')
R('''intercept <- mod1[[1]][1]''')
R('''slope = mod1[[1]][2]''')
R('''print(summary(mod1))''')
# prediction intervals
R('''pred.ints <- predict(mod1, interval = "prediction", level = 0.95)''')
# add to data.frame
R('''dat$lwr <- pred.ints[,2]''')
R('''dat$upr <- pred.ints[,3]''')
# add labels
R('''dat$goi <- cog2gene[dat$gene,]''')
R('''dat$pointsize <- ifelse(!(is.na(dat$goi)), 10, 1)''')
# plot
R('''plot1 <- ggplot(dat, aes(x = dna, y = rna, alpha = 1, colour = status))''')
R('''plot2 <- plot1 + geom_point(shape = 18, aes(size = pointsize))''')
R('''plot3 <- plot2 + scale_size_area() + xlim(c(-5,5))''')
R('''plot4 <- plot3 + scale_colour_manual(values = c("blue",
"brown",
"darkGreen",
"orange",
"purple",
"red",
"grey"))''')
R('''plot5 <- plot4 + geom_abline(intercept = intercept, slope = slope)''')
# prediction intervals
R('''plot6 <- plot5 + geom_line(aes(x = dna, y = lwr), linetype = "dashed", colour = "black")''')
R('''plot7 <- plot6 + geom_line(aes(x = dna, y = upr), linetype = "dashed", colour = "black")''')
R('''plot7 + geom_text(aes(label = goi))''')
R('''ggsave("%s")''' % outfile)
####################################################
####################################################
####################################################
def buildGenesOutsidePredictionInterval(infile, outfile):
'''
annotate genes as being outside prediction
interval - these are the NOGs that we are
defining as colitis-responsive
'''
R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % infile)
    # just get those significant in either DNA or RNA or both
R('''genes <- dat$gene''')
# regression model
R('''mod1 <- lm(dat$rna~dat$dna)''')
# prediction intervals
R('''pred.ints <- predict(mod1, interval = "prediction", level = 0.95)''')
# add to data.frame
R('''dat$lwr <- pred.ints[,2]''')
R('''dat$upr <- pred.ints[,3]''')
# annotate with whether or not they are above
# prediction intervals
R('''dat$pi_status[dat$rna > dat$upr & dat$status == "up.RNA"] <- "diff.up.rna"''')
R('''dat$pi_status[dat$rna > dat$upr & dat$status == "down.DNA"] <- "diff.down.dna"''')
R('''dat$pi_status[dat$rna > dat$upr & dat$status == "up.both"] <- "diff.up.rna"''')
R('''dat$pi_status[dat$rna < dat$lwr & dat$status == "down.RNA"] <- "diff.down.rna"''')
R('''dat$pi_status[dat$rna < dat$lwr & dat$status == "up.DNA"] <- "diff.up.dna"''')
R('''dat$pi_status[dat$rna < dat$lwr & dat$status == "down.both"] <- "diff.down.rna"''')
# write results
R('''write.table(dat, file = "%s", sep = "\t", quote = F, row.names = F)''' % outfile)
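# Illustrative sketch (not part of the pipeline): the 95% prediction interval around
# the rna ~ dna regression used above, computed with statsmodels instead of R's
# predict(..., interval = "prediction"). statsmodels is an assumption here.
def regressionPredictionInterval(dna_fold, rna_fold, level=0.95):
    '''fit rna ~ dna by ordinary least squares and return per-point lower/upper bounds'''
    import statsmodels.api as sm
    X = sm.add_constant(np.asarray(dna_fold, dtype=float))
    fit = sm.OLS(np.asarray(rna_fold, dtype=float), X).fit()
    pred = fit.get_prediction(X).summary_frame(alpha=1 - level)
    return pred["obs_ci_lower"].values, pred["obs_ci_upper"].values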
####################################################
####################################################
####################################################
# SECTION 6
####################################################
####################################################
####################################################
def buildGenusCogCountsMatrix(infile, outfile):
'''
build cog x genus proportion
matrix
'''
inf = IOTools.openFile(infile)
header = inf.readline()
result = {}
# create container for results
for line in inf.readlines():
data = line[:-1].split("\t")
cog, taxa = data[0], data[1]
if taxa == "unassigned": continue
result[cog] = {}
# get average % taxa per cog
inf = IOTools.openFile(infile)
header = inf.readline()
for line in inf.readlines():
data = line[:-1].split("\t")
        if len(data) == 19:
            cog, taxa = data[0], data[1]
            values = list(map(float, data[3:]))
        elif len(data) == 20:
            cog, taxa = data[0], data[1]
            values = list(map(float, data[4:]))
        else:
            cog, taxa = data[0], data[1]
            values = list(map(float, data[2:]))
if taxa == "unassigned": continue
ave = np.mean(values)
try:
result[cog][taxa] = ave
except KeyError: continue
df = pandas.DataFrame(result)
df.to_csv(outfile, sep = "\t", na_rep = 0)
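# Illustrative sketch (not part of the pipeline): the same cog x genus matrix of mean
# proportions, built with pandas from a long-format table with 'cog' and 'taxa' columns
# followed by per-sample values. The exact column layout of the real input varies
# (see the 19/20 column handling above), so this layout is an assumption.
def genusCogMeanMatrix(long_df):
    '''return a taxa x cog matrix of per-row means across the sample columns'''
    long_df = long_df[long_df["taxa"] != "unassigned"]
    sample_cols = [c for c in long_df.columns if c not in ("cog", "taxa")]
    means = long_df.set_index(["taxa", "cog"])[sample_cols].mean(axis=1)
    return means.unstack("cog").fillna(0)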
####################################################
####################################################
####################################################
def mergePathwaysAndGenusCogCountsMatrix(annotations,
matrix,
outfile):
'''
merge cog annotations and per taxa cog counts
'''
# read annotations
R('''anno <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t", row.names=1)''' % annotations)
R('''anno.no.pathways <- anno[,1:ncol(anno)-1]''')
R('''anno.p <- sweep(anno.no.pathways, 2, colSums(anno.no.pathways), "/")''')
R('''anno.p$average <- rowMeans(anno.p)''')
R('''anno.p$pathway <- anno$taxa''')
# read matrix
R('''mat <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t", row.names=1)''' % matrix)
R('''mat <- data.frame(t(mat))''')
R('''mat$ref <- rownames(mat)''')
# split pathway annotations
R('''for (pathway in unique(anno.p$pathway)){
if (pathway == "Function unknown"){next}
            # some weirdness with some names
pw <- gsub("/", "_", pathway)
outname <- paste("candidate_pathways.dir", paste(pw, "tsv", sep = "."), sep="/")
outname <- gsub(" ", "_", outname)
print(outname)
anno.p2 <- anno.p[anno.p$pathway == pathway,]
anno.p2 <- anno.p2[order(anno.p2$average, decreasing=T),]
# top 10
# anno.p2 <- anno.p2[1:10,]
# merge with matrix
mat2 <- mat[rownames(anno.p2),]
mat2$pathway <- anno.p2$pathway
write.table(mat2, file=outname, sep="\t", row.names=F)}''')
####################################################
####################################################
####################################################
def plotNumberOfTaxaPerPathway(infiles, outfile):
'''
plot the average number of taxa expressing genes
in each pathway
'''
tmp = P.getTempFilename(".")
infs = " ".join(infiles)
statement = '''awk 'FNR==1 && NR!=1 { while (/ref/) getline; }1 {print}' %(infs)s > %(tmp)s'''
P.run()
R('''library(ggplot2)''')
R('''library(plyr)''')
R('''library(reshape)''')
R('''dat <-read.csv("%s", header=T, stringsAsFactors=F, sep="\t")''' % tmp)
R('''t <- ncol(dat)''')
R('''dat <- na.omit(dat)''')
R('''pathways <- dat$pathway''')
R('''dat2 <- dat[,1:ncol(dat)-1]''')
R('''dat2 <- dat2[,1:ncol(dat2)-1]''')
# colsums gives the total number of taxa expressing each NOG
R('''col.sums <- data.frame(t(sapply(split(dat2, pathways), colSums)))''')
R('''rownames(col.sums) <- unique(pathways)''')
# rowsums gives the total number of taxa expressing
# at least one NOG per pathway
R('''total.taxa <- data.frame(rowSums(col.sums > 0))''')
R('''total.taxa$pathway <- rownames(col.sums)''')
# sort by highest
R('''total.taxa <- total.taxa[order(total.taxa[,1], decreasing=T), ]''')
R('''colnames(total.taxa) <- c("value", "pathway")''')
R('''plot1 <- ggplot(total.taxa, aes(x=factor(pathway,levels=pathway), y=value/t, stat="identity"))''')
R('''plot1 + geom_bar(stat="identity") + theme(axis.text.x=element_text(angle=90))''')
R('''ggsave("%s")''' % outfile)
os.unlink(tmp)
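# Illustrative sketch (not part of the pipeline): the colSums/rowSums logic above -
# counting, per pathway, how many genera express at least one NOG - expressed with
# pandas. The input layout (genus columns plus 'ref' and 'pathway' columns, NOGs as
# rows) is an assumption based on the merged tables written earlier.
def taxaPerPathway(df):
    '''return a Series mapping pathway -> number of genera with any non-zero NOG'''
    genus_cols = [c for c in df.columns if c not in ("ref", "pathway")]
    per_pathway = df.groupby("pathway")[genus_cols].sum()
    return (per_pathway > 0).sum(axis=1).sort_values(ascending=False)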
####################################################
####################################################
####################################################
def plotTaxaContributionsToCandidatePathways(matrix,
outfile):
    '''
    heatmap the contribution of each genus
    to NOGs in the candidate pathways
    '''
R('''library(ggplot2)''')
R('''library(gplots)''')
R('''library(pheatmap)''')
R('''mat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % matrix)
R('''mat <- na.omit(mat)''')
R('''print(mat$ref)''')
# just plot top 10
R('''rownames(mat) <- mat$ref''')
R('''mat2 <- mat[,1:ncol(mat)-1]''')
R('''mat2 <- mat2[,1:ncol(mat2)-1]''')
# only keep those genera that contribute > 5% to
# a NOG
R('''mat2 <- mat2[,colSums(mat2) > 5]''')
R('''cols <- colorRampPalette(c("white", "blue"))(75)''')
R('''pdf("%s")''' % outfile)
R('''pheatmap(mat2,
color=cols,
cluster_cols=T,
cluster_rows=T,
cluster_method="ward.D2")''')
R["dev.off"]()
####################################################
####################################################
####################################################
def plotMaxTaxaContribution(matrix, annotations, outfile):
'''
plot the distribution of maximum genus
contribution per gene set
'''
R('''library(ggplot2)''')
R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % matrix)
R('''annotations <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % annotations)
R('''maximums <- apply(dat, 2, max)''')
R('''dat2 <- data.frame("cog" = colnames(dat), "max" = maximums)''')
R('''dat3 <- merge(dat2, annotations, by.x = "cog", by.y = "gene")''')
R('''dat3$pi_status <- ifelse(dat3$status == "NS", "NS", dat3$pi_status)''')
R('''dat3$pi_status[is.na(dat3$pi_status)] <- "other_significant"''')
R('''plot1 <- ggplot(dat3, aes(x = as.numeric(as.character(max)), group = pi_status, colour = pi_status))''')
R('''plot2 <- plot1 + stat_ecdf(size = 1.1)''')
R('''plot2 + scale_colour_manual(values = c("cyan3",
"darkorchid",
"black",
"darkgoldenrod2",
"grey",
"darkBlue"))''')
R('''ggsave("%s")''' % outfile)
####################################################
####################################################
####################################################
def testSignificanceOfMaxTaxaContribution(matrix, annotations, outfile):
'''
    Test the significance of differences in the distributions
    compared to the NS group
'''
R('''library(ggplot2)''')
R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % matrix)
R('''annotations <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % annotations)
R('''maximums <- apply(dat, 2, max)''')
R('''dat2 <- data.frame("cog" = colnames(dat), "max" = maximums)''')
R('''dat3 <- merge(dat2, annotations, by.x = "cog", by.y = "gene")''')
R('''dat3$pi_status <- ifelse(dat3$status == "NS", "NS", dat3$pi_status)''')
R('''diff.up.rna <- as.numeric(as.character(dat3$max[dat3$pi_status == "diff.up.rna"]))''')
R('''diff.down.rna <- as.numeric(as.character(dat3$max[dat3$pi_status == "diff.down.rna"]))''')
R('''diff.up.dna <- as.numeric(as.character(dat3$max[dat3$pi_status == "diff.up.dna"]))''')
R('''diff.down.dna <- as.numeric(as.character(dat3$max[dat3$pi_status == "diff.down.dna"]))''')
R('''ns <- as.numeric(as.character(dat3$max[dat3$pi_status == "NS"]))''')
# ks tests
R('''ks1 <- ks.test(diff.up.rna, ns)''')
R('''ks2 <- ks.test(diff.down.rna, ns)''')
R('''ks3 <- ks.test(diff.up.dna, ns)''')
R('''ks4 <- ks.test(diff.down.dna, ns)''')
R('''res <- data.frame("RNAGreaterThanDNA.up.pvalue" = ks1$p.value,
"RNAGreaterThanDNA.up.D" = ks1$statistic,
"RNAGreaterThanDNA.down.pvalue" = ks2$p.value,
"RNAGreaterThanDNA.down.D" = ks2$statistic,
"DNAGreaterThanRNA.up.pvalue" = ks3$p.value,
"DNAGreaterThanRNA.up.D" = ks3$statistic,
"DNAGreaterThanRNA.down.pvalue" = ks4$p.value,
"DNAGreaterThanRNA.down.D" = ks4$statistic)''')
R('''write.table(res, file = "%s", sep = "\t", quote = F, row.names = F)''' % outfile)
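# Illustrative sketch (not part of the pipeline): the same two-sample Kolmogorov-Smirnov
# comparison as the ks.test calls above, using scipy. scipy is an assumption - the
# pipeline runs the tests through rpy2, and tie handling can differ slightly.
def ksAgainstNonSignificant(group_values, ns_values):
    '''return (D statistic, p-value) comparing a group to the NS distribution'''
    from scipy.stats import ks_2samp
    statistic, pvalue = ks_2samp(group_values, ns_values)
    return statistic, pvalue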
####################################################
####################################################
####################################################
def heatmapTaxaCogProportionMatrix(matrix, annotations, outfile):
'''
plot the taxa associated with each cog on
a heatmap
'''
R('''library(gplots)''')
R('''library(gtools)''')
R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t", row.names = 1)''' % matrix)
R('''annotations <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % annotations)
R('''rownames(annotations) <- annotations$gene''')
# get genes present in both - not sure why these are different
# in the first place - need to check
R('''genes <- intersect(rownames(annotations), colnames(dat))''')
R('''dat <- dat[, genes]''')
R('''dat <- dat[grep("unassigned", rownames(dat), invert = T),]''')
R('''genera <- rownames(dat)''')
R('''rownames(dat) <- genera''')
R('''colnames(dat) <- genes''')
R('''annotations <- annotations[genes,]''')
R('''annotations <- annotations[order(annotations$pi_status),]''')
# only for the COGs that have RNA fold > DNA fold up-regulated
R('''annotations <- annotations[annotations$pi_status == "diff.up.rna",]''')
R('''annotations <- na.omit(annotations)''')
R('''dat <- dat[,rownames(annotations)]''')
R('''annotation <- data.frame(cluster = as.character(annotations$pi_status))''')
R('''rownames(annotation) <- rownames(annotations)''')
R('''colors1 <- c("grey")''')
R('''names(colors1) <- c("diff.up.rna")''')
R('''anno_colors <- list(cluster = colors1)''')
R('''cols <- colorRampPalette(c("white", "darkBlue"))(150)''')
R('''dat <- dat[,colSums(dat > 50) >= 1]''')
R('''dat <- dat[rowSums(dat > 10) >= 1,]''')
# not reading numeric in all instances
R('''dat2 <- data.frame(t(apply(dat, 1, as.numeric)))''')
R('''colnames(dat2) <- colnames(dat)''')
R('''pdf("%s", height = 10, width = 15)''' % outfile)
R('''library(pheatmap)''')
R('''pheatmap(dat2,
clustering_distance_cols = "manhattan",
clustering_method = "ward",
annotation = annotation,
annotation_colors = anno_colors,
cluster_rows = T,
cluster_cols = F,
color = cols,
fontsize = 8)''')
R["dev.off"]()
####################################################
####################################################
####################################################
def scatterplotPerCogTaxaDNAFoldRNAFold(taxa_cog_rnadiff,
taxa_cog_dnadiff,
cog_rnadiff,
cog_dnadiff):
'''
    scatterplot fold changes of per-genus COG
    differences for NOGs of interest
'''
R('''library(ggplot2)''')
# read in cogs + taxa
R('''dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % taxa_cog_dnadiff)
R('''dna <- dna[dna$group2 == "WT" & dna$group1 == "HhaIL10R",]''')
R('''rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % taxa_cog_rnadiff)
R('''rna <- rna[rna$group2 == "WT" & rna$group1 == "HhaIL10R",]''')
# read in cogs alone
R('''dna.cog <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % cog_dnadiff)
R('''dna.cog <- dna.cog[dna.cog$group2 == "WT" & dna.cog$group1 == "HhaIL10R",]''')
R('''rna.cog <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % cog_rnadiff)
R('''rna.cog <- rna.cog[rna.cog$group2 == "WT" & rna.cog$group1 == "HhaIL10R",]''')
# merge data for cogs + taxa
R('''dat <- merge(dna, rna,
by.x = "taxa",
by.y = "taxa",
all.x = T,
all.y = T,
suffixes = c(".dna.taxa.cog", ".rna.taxa.cog"))''')
# sub NA for 0
R('''dat[is.na(dat)] <- 0''')
# NOTE these are specified and hardcoded
# here - NOGs of interest
R('''cogs <- c("COG0783", "COG2837", "COG0435","COG5520", "COG0508", "COG0852")''')
# iterate over cogs and scatterplot
# fold changes in DNA and RNA analysis.
# if not present in one or other then fold change will
# be 0
R('''for (cog in cogs){
dat2 <- dat[grep(cog, dat$taxa),]
dna.cog2 <- dna.cog[grep(cog, dna.cog$taxa),]
rna.cog2 <- rna.cog[grep(cog, rna.cog$taxa),]
# add the data for COG fold changes and abundance
dat3 <- data.frame("genus" = append(dat2$taxa, cog),
"dna.fold" = append(dat2$logFC.dna.taxa.cog, dna.cog2$logFC),
"rna.fold" = append(dat2$logFC.rna.taxa.cog, rna.cog2$logFC),
"abundance" = append(dat2$AveExpr.rna.taxa.cog, rna.cog2$AveExpr))
suffix <- paste(cog, "scatters.pdf", sep = ".")
outname <- paste("scatterplot_genus_cog_fold.dir", suffix, sep = "/")
plot1 <- ggplot(dat3, aes(x = dna.fold, y = rna.fold, size = log10(abundance), label = genus))
plot2 <- plot1 + geom_point(shape = 18)
plot3 <- plot2 + geom_text(hjust = 0.5, vjust = 1) + scale_size(range = c(3,6))
plot4 <- plot3 + geom_abline(intercept = 0, slope = 1, colour = "blue")
plot5 <- plot4 + geom_hline(yintercept = c(-1,1), linetype = "dashed")
plot6 <- plot5 + geom_vline(xintercept = c(-1,1), linetype = "dashed")
plot7 <- plot6 + geom_hline(yintercept = 0) + geom_vline(xintercept = 0)
ggsave(outname)
}''')
|
[
"numpy.mean",
"CGATPipelines.Pipeline.run",
"sqlite3.connect",
"CGATPipelines.Pipeline.snip",
"itertools.product",
"CGATPipelines.Pipeline.getTempFilename",
"os.unlink",
"os.path.basename",
"pandas.DataFrame",
"CGAT.IOTools.openFile",
"rpy2.robjects.r"
] |
'rpy2.robjects.r', 'R', (['"""dat.percent <- data.frame(apply(dat, 2, function(x) x*100))"""'], {}), "('dat.percent <- data.frame(apply(dat, 2, function(x) x*100))')\n", (19950, 20013), True, 'from rpy2.robjects import r as R\n'), ((20050, 20391), 'rpy2.robjects.r', 'R', (['"""candidates <- c("Peptoniphilus",\n "Deferribacter",\n "Escherichia",\n "Lactobacillus",\n "Turicibacter",\n "Akkermansia",\n "Bifidobacterium",\n "Methylacidiphilum")"""'], {}), '("""candidates <- c("Peptoniphilus",\n "Deferribacter",\n "Escherichia",\n "Lactobacillus",\n "Turicibacter",\n "Akkermansia",\n "Bifidobacterium",\n "Methylacidiphilum")"""\n )\n', (20051, 20391), True, 'from rpy2.robjects import r as R\n'), ((20396, 20440), 'rpy2.robjects.r', 'R', (['"""dat.percent <- dat.percent[candidates,]"""'], {}), "('dat.percent <- dat.percent[candidates,]')\n", (20397, 20440), True, 'from rpy2.robjects import r as R\n'), ((20449, 20515), 'rpy2.robjects.r', 'R', (['"""dat.percent <- dat.percent[,mixedsort(colnames(dat.percent))]"""'], {}), "('dat.percent <- dat.percent[,mixedsort(colnames(dat.percent))]')\n", (20450, 20515), True, 'from rpy2.robjects import r as R\n'), ((20581, 20627), 'rpy2.robjects.r', 'R', (['"""dat.percent$taxa <- rownames(dat.percent)"""'], {}), "('dat.percent$taxa <- rownames(dat.percent)')\n", (20582, 20627), True, 'from rpy2.robjects import r as R\n'), ((20670, 20693), 'CGATPipelines.Pipeline.snip', 'P.snip', (['outfile', '""".pdf"""'], {}), "(outfile, '.pdf')\n", (20676, 20693), True, 'import CGATPipelines.Pipeline as P\n'), ((20698, 20735), 'rpy2.robjects.r', 'R', (['"""dat.percent <- melt(dat.percent)"""'], {}), "('dat.percent <- melt(dat.percent)')\n", (20699, 20735), True, 'from rpy2.robjects import r as R\n'), ((20744, 20858), 'rpy2.robjects.r', 'R', (['"""conds <- unlist(strsplit(as.character(dat.percent$variable), ".R[0-9]"))[seq(1, nrow(dat.percent)*2, 2)]"""'], {}), '(\'conds <- unlist(strsplit(as.character(dat.percent$variable), ".R[0-9]"))[seq(1, nrow(dat.percent)*2, 2)]\'\n )\n', (20745, 20858), True, 'from rpy2.robjects import r as R\n'), ((20862, 20948), 'rpy2.robjects.r', 'R', (['"""conds <- unlist(strsplit(conds, ".", fixed = T))[seq(2, length(conds)*2, 2)]"""'], {}), '(\'conds <- unlist(strsplit(conds, ".", fixed = T))[seq(2, length(conds)*2, 2)]\'\n )\n', (20863, 20948), True, 'from rpy2.robjects import r as R\n'), ((20954, 20984), 'rpy2.robjects.r', 'R', (['"""dat.percent$cond <- conds"""'], {}), "('dat.percent$cond <- conds')\n", (20955, 20984), True, 'from rpy2.robjects import r as R\n'), ((20993, 21563), 'rpy2.robjects.r', 'R', (['("""for (taxon in candidates){\n outname <- paste("%s", paste("_", taxon, sep=""), ".pdf", sep="")\n dat.percent.restrict <- dat.percent[dat.percent$taxa==taxon,]\n plot1 <- ggplot(dat.percent.restrict, \n aes(x=factor(cond, levels=c("WT","aIL10R", "Hh", "HhaIL10R")), \n y=value, group=cond, colour=cond, label=variable))\n plot1 + geom_boxplot() + geom_jitter() + geom_text() + scale_colour_manual(values=c("darkGreen", "red", "grey", "blue"))\n ggsave(outname)}"""\n % outname)'], {}), '(\n """for (taxon in candidates){\n outname <- paste("%s", paste("_", taxon, sep=""), ".pdf", sep="")\n dat.percent.restrict <- dat.percent[dat.percent$taxa==taxon,]\n plot1 <- ggplot(dat.percent.restrict, \n aes(x=factor(cond, levels=c("WT","aIL10R", "Hh", "HhaIL10R")), \n y=value, group=cond, colour=cond, label=variable))\n plot1 + geom_boxplot() + geom_jitter() + geom_text() + scale_colour_manual(values=c("darkGreen", "red", "grey", "blue"))\n 
ggsave(outname)}"""\n % outname)\n', (20994, 21563), True, 'from rpy2.robjects import r as R\n'), ((21990, 22076), 'rpy2.robjects.r', 'R', (['(\'rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' %\n rnadiff)'], {}), '(\'rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' %\n rnadiff)\n', (21991, 22076), True, 'from rpy2.robjects import r as R\n'), ((22081, 22167), 'rpy2.robjects.r', 'R', (['(\'dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' %\n dnadiff)'], {}), '(\'dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' %\n dnadiff)\n', (22082, 22167), True, 'from rpy2.robjects import r as R\n'), ((22172, 22235), 'rpy2.robjects.r', 'R', (['"""rna <- rna[rna$group1 == "HhaIL10R" & rna$group2 == "WT",]"""'], {}), '(\'rna <- rna[rna$group1 == "HhaIL10R" & rna$group2 == "WT",]\')\n', (22173, 22235), True, 'from rpy2.robjects import r as R\n'), ((22244, 22307), 'rpy2.robjects.r', 'R', (['"""dna <- dna[dna$group1 == "HhaIL10R" & dna$group2 == "WT",]"""'], {}), '(\'dna <- dna[dna$group1 == "HhaIL10R" & dna$group2 == "WT",]\')\n', (22245, 22307), True, 'from rpy2.robjects import r as R\n'), ((22321, 22351), 'rpy2.robjects.r', 'R', (['"""rownames(rna) <- rna$taxa"""'], {}), "('rownames(rna) <- rna$taxa')\n", (22322, 22351), True, 'from rpy2.robjects import r as R\n'), ((22360, 22390), 'rpy2.robjects.r', 'R', (['"""rownames(dna) <- dna$taxa"""'], {}), "('rownames(dna) <- dna$taxa')\n", (22361, 22390), True, 'from rpy2.robjects import r as R\n'), ((22400, 22431), 'rpy2.robjects.r', 'R', (['"""rna <- rna[,1:ncol(rna)-1]"""'], {}), "('rna <- rna[,1:ncol(rna)-1]')\n", (22401, 22431), True, 'from rpy2.robjects import r as R\n'), ((22440, 22471), 'rpy2.robjects.r', 'R', (['"""dna <- dna[,1:ncol(dna)-1]"""'], {}), "('dna <- dna[,1:ncol(dna)-1]')\n", (22441, 22471), True, 'from rpy2.robjects import r as R\n'), ((22531, 22583), 'rpy2.robjects.r', 'R', (['"""keep <- intersect(rownames(rna), rownames(dna))"""'], {}), "('keep <- intersect(rownames(rna), rownames(dna))')\n", (22532, 22583), True, 'from rpy2.robjects import r as R\n'), ((22592, 22614), 'rpy2.robjects.r', 'R', (['"""rna <- rna[keep,]"""'], {}), "('rna <- rna[keep,]')\n", (22593, 22614), True, 'from rpy2.robjects import r as R\n'), ((22623, 22645), 'rpy2.robjects.r', 'R', (['"""dna <- dna[keep,]"""'], {}), "('dna <- dna[keep,]')\n", (22624, 22645), True, 'from rpy2.robjects import r as R\n'), ((22655, 22682), 'rpy2.robjects.r', 'R', (['"""rna.ratio <- rna$logFC"""'], {}), "('rna.ratio <- rna$logFC')\n", (22656, 22682), True, 'from rpy2.robjects import r as R\n'), ((22691, 22718), 'rpy2.robjects.r', 'R', (['"""dna.ratio <- dna$logFC"""'], {}), "('dna.ratio <- dna$logFC')\n", (22692, 22718), True, 'from rpy2.robjects import r as R\n'), ((22727, 22754), 'rpy2.robjects.r', 'R', (['"""rna.p <- rna$adj.P.Val"""'], {}), "('rna.p <- rna$adj.P.Val')\n", (22728, 22754), True, 'from rpy2.robjects import r as R\n'), ((22763, 22790), 'rpy2.robjects.r', 'R', (['"""dna.p <- dna$adj.P.Val"""'], {}), "('dna.p <- dna$adj.P.Val')\n", (22764, 22790), True, 'from rpy2.robjects import r as R\n'), ((22804, 23093), 'rpy2.robjects.r', 'R', (['"""ratio <- data.frame(gene = keep, \n dna = dna.ratio, \n rna = rna.ratio, \n pdna = dna.p, \n prna = rna.p, \n ratio = rna.ratio - dna.ratio)"""'], {}), '("""ratio <- data.frame(gene = keep, \n dna = dna.ratio, \n rna = rna.ratio, \n pdna = dna.p, \n prna = rna.p, \n ratio = rna.ratio - dna.ratio)"""\n )\n', (22805, 23093), True, 'from rpy2.robjects import 
r as R\n'), ((23093, 23279), 'rpy2.robjects.r', 'R', (['("""write.table(ratio, \n file = "%s", \n sep = "\t", \n row.names = F, \n quote = F)"""\n % outfile)'], {}), '(\n """write.table(ratio, \n file = "%s", \n sep = "\t", \n row.names = F, \n quote = F)"""\n % outfile)\n', (23094, 23279), True, 'from rpy2.robjects import r as R\n'), ((23855, 23884), 'CGAT.IOTools.openFile', 'IOTools.openFile', (['RNADNARatio'], {}), '(RNADNARatio)\n', (23871, 23884), True, 'import CGAT.IOTools as IOTools\n'), ((23915, 23945), 'CGAT.IOTools.openFile', 'IOTools.openFile', (['outfile', '"""w"""'], {}), "(outfile, 'w')\n", (23931, 23945), True, 'import CGAT.IOTools as IOTools\n'), ((25232, 25253), 'rpy2.robjects.r', 'R', (['"""library(ggplot2)"""'], {}), "('library(ggplot2)')\n", (25233, 25253), True, 'from rpy2.robjects import r as R\n'), ((25282, 25367), 'rpy2.robjects.r', 'R', (['(\'dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' % infile\n )'], {}), '(\'dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' %\n infile)\n', (25283, 25367), True, 'from rpy2.robjects import r as R\n'), ((25398, 25500), 'rpy2.robjects.r', 'R', (['"""cog2gene <- read.csv("goi.tsv", header = F, stringsAsFactors = F, sep = "\t", row.names = 1)"""'], {}), '(\'cog2gene <- read.csv("goi.tsv", header = F, stringsAsFactors = F, sep = "\\t", row.names = 1)\'\n )\n', (25399, 25500), True, 'from rpy2.robjects import r as R\n'), ((25566, 25607), 'rpy2.robjects.r', 'R', (['"""dat$status[dat$status == "NS"] = "z\\""""'], {}), '(\'dat$status[dat$status == "NS"] = "z"\')\n', (25567, 25607), True, 'from rpy2.robjects import r as R\n'), ((25616, 25638), 'rpy2.robjects.r', 'R', (['"""genes <- dat$gene"""'], {}), "('genes <- dat$gene')\n", (25617, 25638), True, 'from rpy2.robjects import r as R\n'), ((25671, 25703), 'rpy2.robjects.r', 'R', (['"""mod1 <- lm(dat$rna~dat$dna)"""'], {}), "('mod1 <- lm(dat$rna~dat$dna)')\n", (25672, 25703), True, 'from rpy2.robjects import r as R\n'), ((25712, 25742), 'rpy2.robjects.r', 'R', (['"""intercept <- mod1[[1]][1]"""'], {}), "('intercept <- mod1[[1]][1]')\n", (25713, 25742), True, 'from rpy2.robjects import r as R\n'), ((25751, 25776), 'rpy2.robjects.r', 'R', (['"""slope = mod1[[1]][2]"""'], {}), "('slope = mod1[[1]][2]')\n", (25752, 25776), True, 'from rpy2.robjects import r as R\n'), ((25785, 25810), 'rpy2.robjects.r', 'R', (['"""print(summary(mod1))"""'], {}), "('print(summary(mod1))')\n", (25786, 25810), True, 'from rpy2.robjects import r as R\n'), ((25847, 25917), 'rpy2.robjects.r', 'R', (['"""pred.ints <- predict(mod1, interval = "prediction", level = 0.95)"""'], {}), '(\'pred.ints <- predict(mod1, interval = "prediction", level = 0.95)\')\n', (25848, 25917), True, 'from rpy2.robjects import r as R\n'), ((25951, 25980), 'rpy2.robjects.r', 'R', (['"""dat$lwr <- pred.ints[,2]"""'], {}), "('dat$lwr <- pred.ints[,2]')\n", (25952, 25980), True, 'from rpy2.robjects import r as R\n'), ((25989, 26018), 'rpy2.robjects.r', 'R', (['"""dat$upr <- pred.ints[,3]"""'], {}), "('dat$upr <- pred.ints[,3]')\n", (25990, 26018), True, 'from rpy2.robjects import r as R\n'), ((26049, 26084), 'rpy2.robjects.r', 'R', (['"""dat$goi <- cog2gene[dat$gene,]"""'], {}), "('dat$goi <- cog2gene[dat$gene,]')\n", (26050, 26084), True, 'from rpy2.robjects import r as R\n'), ((26093, 26147), 'rpy2.robjects.r', 'R', (['"""dat$pointsize <- ifelse(!(is.na(dat$goi)), 10, 1)"""'], {}), "('dat$pointsize <- ifelse(!(is.na(dat$goi)), 10, 1)')\n", (26094, 26147), True, 'from rpy2.robjects import r as R\n'), 
((26168, 26244), 'rpy2.robjects.r', 'R', (['"""plot1 <- ggplot(dat, aes(x = dna, y = rna, alpha = 1, colour = status))"""'], {}), "('plot1 <- ggplot(dat, aes(x = dna, y = rna, alpha = 1, colour = status))')\n", (26169, 26244), True, 'from rpy2.robjects import r as R\n'), ((26254, 26321), 'rpy2.robjects.r', 'R', (['"""plot2 <- plot1 + geom_point(shape = 18, aes(size = pointsize))"""'], {}), "('plot2 <- plot1 + geom_point(shape = 18, aes(size = pointsize))')\n", (26255, 26321), True, 'from rpy2.robjects import r as R\n'), ((26330, 26385), 'rpy2.robjects.r', 'R', (['"""plot3 <- plot2 + scale_size_area() + xlim(c(-5,5))"""'], {}), "('plot3 <- plot2 + scale_size_area() + xlim(c(-5,5))')\n", (26331, 26385), True, 'from rpy2.robjects import r as R\n'), ((26395, 26869), 'rpy2.robjects.r', 'R', (['"""plot4 <- plot3 + scale_colour_manual(values = c("blue", \n "brown",\n "darkGreen", \n "orange", \n "purple", \n "red", \n "grey"))"""'], {}), '("""plot4 <- plot3 + scale_colour_manual(values = c("blue", \n "brown",\n "darkGreen", \n "orange", \n "purple", \n "red", \n "grey"))"""\n )\n', (26396, 26869), True, 'from rpy2.robjects import r as R\n'), ((26869, 26940), 'rpy2.robjects.r', 'R', (['"""plot5 <- plot4 + geom_abline(intercept = intercept, slope = slope)"""'], {}), "('plot5 <- plot4 + geom_abline(intercept = intercept, slope = slope)')\n", (26870, 26940), True, 'from rpy2.robjects import r as R\n'), ((26977, 27075), 'rpy2.robjects.r', 'R', (['"""plot6 <- plot5 + geom_line(aes(x = dna, y = lwr), linetype = "dashed", colour = "black")"""'], {}), '(\'plot6 <- plot5 + geom_line(aes(x = dna, y = lwr), linetype = "dashed", colour = "black")\'\n )\n', (26978, 27075), True, 'from rpy2.robjects import r as R\n'), ((27079, 27177), 'rpy2.robjects.r', 'R', (['"""plot7 <- plot6 + geom_line(aes(x = dna, y = upr), linetype = "dashed", colour = "black")"""'], {}), '(\'plot7 <- plot6 + geom_line(aes(x = dna, y = upr), linetype = "dashed", colour = "black")\'\n )\n', (27080, 27177), True, 'from rpy2.robjects import r as R\n'), ((27181, 27221), 'rpy2.robjects.r', 'R', (['"""plot7 + geom_text(aes(label = goi))"""'], {}), "('plot7 + geom_text(aes(label = goi))')\n", (27182, 27221), True, 'from rpy2.robjects import r as R\n'), ((27230, 27257), 'rpy2.robjects.r', 'R', (['(\'ggsave("%s")\' % outfile)'], {}), '(\'ggsave("%s")\' % outfile)\n', (27231, 27257), True, 'from rpy2.robjects import r as R\n'), ((27629, 27714), 'rpy2.robjects.r', 'R', (['(\'dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' % infile\n )'], {}), '(\'dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' %\n infile)\n', (27630, 27714), True, 'from rpy2.robjects import r as R\n'), ((27781, 27803), 'rpy2.robjects.r', 'R', (['"""genes <- dat$gene"""'], {}), "('genes <- dat$gene')\n", (27782, 27803), True, 'from rpy2.robjects import r as R\n'), ((27836, 27868), 'rpy2.robjects.r', 'R', (['"""mod1 <- lm(dat$rna~dat$dna)"""'], {}), "('mod1 <- lm(dat$rna~dat$dna)')\n", (27837, 27868), True, 'from rpy2.robjects import r as R\n'), ((27905, 27975), 'rpy2.robjects.r', 'R', (['"""pred.ints <- predict(mod1, interval = "prediction", level = 0.95)"""'], {}), '(\'pred.ints <- predict(mod1, interval = "prediction", level = 0.95)\')\n', (27906, 27975), True, 'from rpy2.robjects import r as R\n'), ((28009, 28038), 'rpy2.robjects.r', 'R', (['"""dat$lwr <- pred.ints[,2]"""'], {}), "('dat$lwr <- pred.ints[,2]')\n", (28010, 28038), True, 'from rpy2.robjects import r as R\n'), ((28047, 28076), 'rpy2.robjects.r', 'R', (['"""dat$upr <- 
pred.ints[,3]"""'], {}), "('dat$upr <- pred.ints[,3]')\n", (28048, 28076), True, 'from rpy2.robjects import r as R\n'), ((28168, 28247), 'rpy2.robjects.r', 'R', (['"""dat$pi_status[dat$rna > dat$upr & dat$status == "up.RNA"] <- "diff.up.rna\\""""'], {}), '(\'dat$pi_status[dat$rna > dat$upr & dat$status == "up.RNA"] <- "diff.up.rna"\')\n', (28169, 28247), True, 'from rpy2.robjects import r as R\n'), ((28256, 28344), 'rpy2.robjects.r', 'R', (['"""dat$pi_status[dat$rna > dat$upr & dat$status == "down.DNA"] <- "diff.down.dna\\""""'], {}), '(\'dat$pi_status[dat$rna > dat$upr & dat$status == "down.DNA"] <- "diff.down.dna"\'\n )\n', (28257, 28344), True, 'from rpy2.robjects import r as R\n'), ((28348, 28433), 'rpy2.robjects.r', 'R', (['"""dat$pi_status[dat$rna > dat$upr & dat$status == "up.both"] <- "diff.up.rna\\""""'], {}), '(\'dat$pi_status[dat$rna > dat$upr & dat$status == "up.both"] <- "diff.up.rna"\'\n )\n', (28349, 28433), True, 'from rpy2.robjects import r as R\n'), ((28438, 28526), 'rpy2.robjects.r', 'R', (['"""dat$pi_status[dat$rna < dat$lwr & dat$status == "down.RNA"] <- "diff.down.rna\\""""'], {}), '(\'dat$pi_status[dat$rna < dat$lwr & dat$status == "down.RNA"] <- "diff.down.rna"\'\n )\n', (28439, 28526), True, 'from rpy2.robjects import r as R\n'), ((28530, 28609), 'rpy2.robjects.r', 'R', (['"""dat$pi_status[dat$rna < dat$lwr & dat$status == "up.DNA"] <- "diff.up.dna\\""""'], {}), '(\'dat$pi_status[dat$rna < dat$lwr & dat$status == "up.DNA"] <- "diff.up.dna"\')\n', (28531, 28609), True, 'from rpy2.robjects import r as R\n'), ((28618, 28707), 'rpy2.robjects.r', 'R', (['"""dat$pi_status[dat$rna < dat$lwr & dat$status == "down.both"] <- "diff.down.rna\\""""'], {}), '(\'dat$pi_status[dat$rna < dat$lwr & dat$status == "down.both"] <- "diff.down.rna"\'\n )\n', (28619, 28707), True, 'from rpy2.robjects import r as R\n'), ((28732, 28818), 'rpy2.robjects.r', 'R', (['(\'write.table(dat, file = "%s", sep = "\\t", quote = F, row.names = F)\' %\n outfile)'], {}), '(\'write.table(dat, file = "%s", sep = "\\t", quote = F, row.names = F)\' %\n outfile)\n', (28733, 28818), True, 'from rpy2.robjects import r as R\n'), ((29269, 29293), 'CGAT.IOTools.openFile', 'IOTools.openFile', (['infile'], {}), '(infile)\n', (29285, 29293), True, 'import CGAT.IOTools as IOTools\n'), ((29592, 29616), 'CGAT.IOTools.openFile', 'IOTools.openFile', (['infile'], {}), '(infile)\n', (29608, 29616), True, 'import CGAT.IOTools as IOTools\n'), ((30197, 30221), 'pandas.DataFrame', 'pandas.DataFrame', (['result'], {}), '(result)\n', (30213, 30221), False, 'import pandas\n'), ((30679, 30783), 'rpy2.robjects.r', 'R', (['(\'anno <- read.csv("%s", header=T, stringsAsFactors=F, sep="\\t", row.names=1)\'\n % annotations)'], {}), '(\n \'anno <- read.csv("%s", header=T, stringsAsFactors=F, sep="\\t", row.names=1)\'\n % annotations)\n', (30680, 30783), True, 'from rpy2.robjects import r as R\n'), ((30782, 30828), 'rpy2.robjects.r', 'R', (['"""anno.no.pathways <- anno[,1:ncol(anno)-1]"""'], {}), "('anno.no.pathways <- anno[,1:ncol(anno)-1]')\n", (30783, 30828), True, 'from rpy2.robjects import r as R\n'), ((30837, 30910), 'rpy2.robjects.r', 'R', (['"""anno.p <- sweep(anno.no.pathways, 2, colSums(anno.no.pathways), "/")"""'], {}), '(\'anno.p <- sweep(anno.no.pathways, 2, colSums(anno.no.pathways), "/")\')\n', (30838, 30910), True, 'from rpy2.robjects import r as R\n'), ((30919, 30958), 'rpy2.robjects.r', 'R', (['"""anno.p$average <- rowMeans(anno.p)"""'], {}), "('anno.p$average <- rowMeans(anno.p)')\n", (30920, 30958), True, 'from 
rpy2.robjects import r as R\n'), ((30967, 30999), 'rpy2.robjects.r', 'R', (['"""anno.p$pathway <- anno$taxa"""'], {}), "('anno.p$pathway <- anno$taxa')\n", (30968, 30999), True, 'from rpy2.robjects import r as R\n'), ((31027, 31125), 'rpy2.robjects.r', 'R', (['(\'mat <- read.csv("%s", header=T, stringsAsFactors=F, sep="\\t", row.names=1)\' %\n matrix)'], {}), '(\n \'mat <- read.csv("%s", header=T, stringsAsFactors=F, sep="\\t", row.names=1)\'\n % matrix)\n', (31028, 31125), True, 'from rpy2.robjects import r as R\n'), ((31124, 31154), 'rpy2.robjects.r', 'R', (['"""mat <- data.frame(t(mat))"""'], {}), "('mat <- data.frame(t(mat))')\n", (31125, 31154), True, 'from rpy2.robjects import r as R\n'), ((31163, 31192), 'rpy2.robjects.r', 'R', (['"""mat$ref <- rownames(mat)"""'], {}), "('mat$ref <- rownames(mat)')\n", (31164, 31192), True, 'from rpy2.robjects import r as R\n'), ((31234, 31983), 'rpy2.robjects.r', 'R', (['"""for (pathway in unique(anno.p$pathway)){\n if (pathway == "Function unknown"){next}\n # some weirness with some names\n pw <- gsub("/", "_", pathway)\n outname <- paste("candidate_pathways.dir", paste(pw, "tsv", sep = "."), sep="/")\n outname <- gsub(" ", "_", outname)\n print(outname)\n anno.p2 <- anno.p[anno.p$pathway == pathway,]\n anno.p2 <- anno.p2[order(anno.p2$average, decreasing=T),]\n # top 10\n# anno.p2 <- anno.p2[1:10,]\n # merge with matrix\n mat2 <- mat[rownames(anno.p2),]\n mat2$pathway <- anno.p2$pathway\n write.table(mat2, file=outname, sep="\t", row.names=F)}"""'], {}), '("""for (pathway in unique(anno.p$pathway)){\n if (pathway == "Function unknown"){next}\n # some weirness with some names\n pw <- gsub("/", "_", pathway)\n outname <- paste("candidate_pathways.dir", paste(pw, "tsv", sep = "."), sep="/")\n outname <- gsub(" ", "_", outname)\n print(outname)\n anno.p2 <- anno.p[anno.p$pathway == pathway,]\n anno.p2 <- anno.p2[order(anno.p2$average, decreasing=T),]\n # top 10\n# anno.p2 <- anno.p2[1:10,]\n # merge with matrix\n mat2 <- mat[rownames(anno.p2),]\n mat2$pathway <- anno.p2$pathway\n write.table(mat2, file=outname, sep="\t", row.names=F)}"""\n )\n', (31235, 31983), True, 'from rpy2.robjects import r as R\n'), ((32291, 32313), 'CGATPipelines.Pipeline.getTempFilename', 'P.getTempFilename', (['"""."""'], {}), "('.')\n", (32308, 32313), True, 'import CGATPipelines.Pipeline as P\n'), ((32446, 32453), 'CGATPipelines.Pipeline.run', 'P.run', ([], {}), '()\n', (32451, 32453), True, 'import CGATPipelines.Pipeline as P\n'), ((32458, 32479), 'rpy2.robjects.r', 'R', (['"""library(ggplot2)"""'], {}), "('library(ggplot2)')\n", (32459, 32479), True, 'from rpy2.robjects import r as R\n'), ((32488, 32506), 'rpy2.robjects.r', 'R', (['"""library(plyr)"""'], {}), "('library(plyr)')\n", (32489, 32506), True, 'from rpy2.robjects import r as R\n'), ((32515, 32536), 'rpy2.robjects.r', 'R', (['"""library(reshape)"""'], {}), "('library(reshape)')\n", (32516, 32536), True, 'from rpy2.robjects import r as R\n'), ((32545, 32616), 'rpy2.robjects.r', 'R', (['(\'dat <-read.csv("%s", header=T, stringsAsFactors=F, sep="\\t")\' % tmp)'], {}), '(\'dat <-read.csv("%s", header=T, stringsAsFactors=F, sep="\\t")\' % tmp)\n', (32546, 32616), True, 'from rpy2.robjects import r as R\n'), ((32625, 32644), 'rpy2.robjects.r', 'R', (['"""t <- ncol(dat)"""'], {}), "('t <- ncol(dat)')\n", (32626, 32644), True, 'from rpy2.robjects import r as R\n'), ((32653, 32677), 'rpy2.robjects.r', 'R', (['"""dat <- na.omit(dat)"""'], {}), "('dat <- na.omit(dat)')\n", (32654, 32677), True, 'from rpy2.robjects import r 
as R\n'), ((32686, 32714), 'rpy2.robjects.r', 'R', (['"""pathways <- dat$pathway"""'], {}), "('pathways <- dat$pathway')\n", (32687, 32714), True, 'from rpy2.robjects import r as R\n'), ((32723, 32755), 'rpy2.robjects.r', 'R', (['"""dat2 <- dat[,1:ncol(dat)-1]"""'], {}), "('dat2 <- dat[,1:ncol(dat)-1]')\n", (32724, 32755), True, 'from rpy2.robjects import r as R\n'), ((32764, 32798), 'rpy2.robjects.r', 'R', (['"""dat2 <- dat2[,1:ncol(dat2)-1]"""'], {}), "('dat2 <- dat2[,1:ncol(dat2)-1]')\n", (32765, 32798), True, 'from rpy2.robjects import r as R\n'), ((32873, 32943), 'rpy2.robjects.r', 'R', (['"""col.sums <- data.frame(t(sapply(split(dat2, pathways), colSums)))"""'], {}), "('col.sums <- data.frame(t(sapply(split(dat2, pathways), colSums)))')\n", (32874, 32943), True, 'from rpy2.robjects import r as R\n'), ((32952, 32995), 'rpy2.robjects.r', 'R', (['"""rownames(col.sums) <- unique(pathways)"""'], {}), "('rownames(col.sums) <- unique(pathways)')\n", (32953, 32995), True, 'from rpy2.robjects import r as R\n'), ((33096, 33148), 'rpy2.robjects.r', 'R', (['"""total.taxa <- data.frame(rowSums(col.sums > 0))"""'], {}), "('total.taxa <- data.frame(rowSums(col.sums > 0))')\n", (33097, 33148), True, 'from rpy2.robjects import r as R\n'), ((33157, 33202), 'rpy2.robjects.r', 'R', (['"""total.taxa$pathway <- rownames(col.sums)"""'], {}), "('total.taxa$pathway <- rownames(col.sums)')\n", (33158, 33202), True, 'from rpy2.robjects import r as R\n'), ((33234, 33302), 'rpy2.robjects.r', 'R', (['"""total.taxa <- total.taxa[order(total.taxa[,1], decreasing=T), ]"""'], {}), "('total.taxa <- total.taxa[order(total.taxa[,1], decreasing=T), ]')\n", (33235, 33302), True, 'from rpy2.robjects import r as R\n'), ((33315, 33365), 'rpy2.robjects.r', 'R', (['"""colnames(total.taxa) <- c("value", "pathway")"""'], {}), '(\'colnames(total.taxa) <- c("value", "pathway")\')\n', (33316, 33365), True, 'from rpy2.robjects import r as R\n'), ((33375, 33479), 'rpy2.robjects.r', 'R', (['"""plot1 <- ggplot(total.taxa, aes(x=factor(pathway,levels=pathway), y=value/t, stat="identity"))"""'], {}), '(\'plot1 <- ggplot(total.taxa, aes(x=factor(pathway,levels=pathway), y=value/t, stat="identity"))\'\n )\n', (33376, 33479), True, 'from rpy2.robjects import r as R\n'), ((33483, 33570), 'rpy2.robjects.r', 'R', (['"""plot1 + geom_bar(stat="identity") + theme(axis.text.x=element_text(angle=90))"""'], {}), '(\'plot1 + geom_bar(stat="identity") + theme(axis.text.x=element_text(angle=90))\'\n )\n', (33484, 33570), True, 'from rpy2.robjects import r as R\n'), ((33575, 33602), 'rpy2.robjects.r', 'R', (['(\'ggsave("%s")\' % outfile)'], {}), '(\'ggsave("%s")\' % outfile)\n', (33576, 33602), True, 'from rpy2.robjects import r as R\n'), ((33611, 33625), 'os.unlink', 'os.unlink', (['tmp'], {}), '(tmp)\n', (33620, 33625), False, 'import os\n'), ((33990, 34011), 'rpy2.robjects.r', 'R', (['"""library(ggplot2)"""'], {}), "('library(ggplot2)')\n", (33991, 34011), True, 'from rpy2.robjects import r as R\n'), ((34020, 34040), 'rpy2.robjects.r', 'R', (['"""library(gplots)"""'], {}), "('library(gplots)')\n", (34021, 34040), True, 'from rpy2.robjects import r as R\n'), ((34049, 34071), 'rpy2.robjects.r', 'R', (['"""library(pheatmap)"""'], {}), "('library(pheatmap)')\n", (34050, 34071), True, 'from rpy2.robjects import r as R\n'), ((34080, 34165), 'rpy2.robjects.r', 'R', (['(\'mat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' % matrix\n )'], {}), '(\'mat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' %\n matrix)\n', 
(34081, 34165), True, 'from rpy2.robjects import r as R\n'), ((34170, 34194), 'rpy2.robjects.r', 'R', (['"""mat <- na.omit(mat)"""'], {}), "('mat <- na.omit(mat)')\n", (34171, 34194), True, 'from rpy2.robjects import r as R\n'), ((34203, 34222), 'rpy2.robjects.r', 'R', (['"""print(mat$ref)"""'], {}), "('print(mat$ref)')\n", (34204, 34222), True, 'from rpy2.robjects import r as R\n'), ((34255, 34284), 'rpy2.robjects.r', 'R', (['"""rownames(mat) <- mat$ref"""'], {}), "('rownames(mat) <- mat$ref')\n", (34256, 34284), True, 'from rpy2.robjects import r as R\n'), ((34293, 34325), 'rpy2.robjects.r', 'R', (['"""mat2 <- mat[,1:ncol(mat)-1]"""'], {}), "('mat2 <- mat[,1:ncol(mat)-1]')\n", (34294, 34325), True, 'from rpy2.robjects import r as R\n'), ((34334, 34368), 'rpy2.robjects.r', 'R', (['"""mat2 <- mat2[,1:ncol(mat2)-1]"""'], {}), "('mat2 <- mat2[,1:ncol(mat2)-1]')\n", (34335, 34368), True, 'from rpy2.robjects import r as R\n'), ((34443, 34480), 'rpy2.robjects.r', 'R', (['"""mat2 <- mat2[,colSums(mat2) > 5]"""'], {}), "('mat2 <- mat2[,colSums(mat2) > 5]')\n", (34444, 34480), True, 'from rpy2.robjects import r as R\n'), ((34489, 34542), 'rpy2.robjects.r', 'R', (['"""cols <- colorRampPalette(c("white", "blue"))(75)"""'], {}), '(\'cols <- colorRampPalette(c("white", "blue"))(75)\')\n', (34490, 34542), True, 'from rpy2.robjects import r as R\n'), ((34551, 34575), 'rpy2.robjects.r', 'R', (['(\'pdf("%s")\' % outfile)'], {}), '(\'pdf("%s")\' % outfile)\n', (34552, 34575), True, 'from rpy2.robjects import r as R\n'), ((34584, 34758), 'rpy2.robjects.r', 'R', (['"""pheatmap(mat2, \n color=cols, \n cluster_cols=T, \n cluster_rows=T, \n cluster_method="ward.D2")"""'], {}), '("""pheatmap(mat2, \n color=cols, \n cluster_cols=T, \n cluster_rows=T, \n cluster_method="ward.D2")"""\n )\n', (34585, 34758), True, 'from rpy2.robjects import r as R\n'), ((35087, 35108), 'rpy2.robjects.r', 'R', (['"""library(ggplot2)"""'], {}), "('library(ggplot2)')\n", (35088, 35108), True, 'from rpy2.robjects import r as R\n'), ((35117, 35202), 'rpy2.robjects.r', 'R', (['(\'dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' % matrix\n )'], {}), '(\'dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' %\n matrix)\n', (35118, 35202), True, 'from rpy2.robjects import r as R\n'), ((35207, 35311), 'rpy2.robjects.r', 'R', (['(\'annotations <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\'\n % annotations)'], {}), '(\n \'annotations <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\'\n % annotations)\n', (35208, 35311), True, 'from rpy2.robjects import r as R\n'), ((35311, 35346), 'rpy2.robjects.r', 'R', (['"""maximums <- apply(dat, 2, max)"""'], {}), "('maximums <- apply(dat, 2, max)')\n", (35312, 35346), True, 'from rpy2.robjects import r as R\n'), ((35355, 35419), 'rpy2.robjects.r', 'R', (['"""dat2 <- data.frame("cog" = colnames(dat), "max" = maximums)"""'], {}), '(\'dat2 <- data.frame("cog" = colnames(dat), "max" = maximums)\')\n', (35356, 35419), True, 'from rpy2.robjects import r as R\n'), ((35428, 35494), 'rpy2.robjects.r', 'R', (['"""dat3 <- merge(dat2, annotations, by.x = "cog", by.y = "gene")"""'], {}), '(\'dat3 <- merge(dat2, annotations, by.x = "cog", by.y = "gene")\')\n', (35429, 35494), True, 'from rpy2.robjects import r as R\n'), ((35503, 35575), 'rpy2.robjects.r', 'R', (['"""dat3$pi_status <- ifelse(dat3$status == "NS", "NS", dat3$pi_status)"""'], {}), '(\'dat3$pi_status <- ifelse(dat3$status == "NS", "NS", dat3$pi_status)\')\n', (35504, 35575), True, 
'from rpy2.robjects import r as R\n'), ((35584, 35649), 'rpy2.robjects.r', 'R', (['"""dat3$pi_status[is.na(dat3$pi_status)] <- "other_significant\\""""'], {}), '(\'dat3$pi_status[is.na(dat3$pi_status)] <- "other_significant"\')\n', (35585, 35649), True, 'from rpy2.robjects import r as R\n'), ((35659, 35769), 'rpy2.robjects.r', 'R', (['"""plot1 <- ggplot(dat3, aes(x = as.numeric(as.character(max)), group = pi_status, colour = pi_status))"""'], {}), "('plot1 <- ggplot(dat3, aes(x = as.numeric(as.character(max)), group = pi_status, colour = pi_status))'\n )\n", (35660, 35769), True, 'from rpy2.robjects import r as R\n'), ((35773, 35816), 'rpy2.robjects.r', 'R', (['"""plot2 <- plot1 + stat_ecdf(size = 1.1)"""'], {}), "('plot2 <- plot1 + stat_ecdf(size = 1.1)')\n", (35774, 35816), True, 'from rpy2.robjects import r as R\n'), ((35825, 36193), 'rpy2.robjects.r', 'R', (['"""plot2 + scale_colour_manual(values = c("cyan3", \n "darkorchid", \n "black", \n "darkgoldenrod2", \n "grey", \n "darkBlue"))"""'], {}), '("""plot2 + scale_colour_manual(values = c("cyan3", \n "darkorchid", \n "black", \n "darkgoldenrod2", \n "grey", \n "darkBlue"))"""\n )\n', (35826, 36193), True, 'from rpy2.robjects import r as R\n'), ((36193, 36220), 'rpy2.robjects.r', 'R', (['(\'ggsave("%s")\' % outfile)'], {}), '(\'ggsave("%s")\' % outfile)\n', (36194, 36220), True, 'from rpy2.robjects import r as R\n'), ((36555, 36576), 'rpy2.robjects.r', 'R', (['"""library(ggplot2)"""'], {}), "('library(ggplot2)')\n", (36556, 36576), True, 'from rpy2.robjects import r as R\n'), ((36585, 36670), 'rpy2.robjects.r', 'R', (['(\'dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' % matrix\n )'], {}), '(\'dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' %\n matrix)\n', (36586, 36670), True, 'from rpy2.robjects import r as R\n'), ((36675, 36779), 'rpy2.robjects.r', 'R', (['(\'annotations <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\'\n % annotations)'], {}), '(\n \'annotations <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\'\n % annotations)\n', (36676, 36779), True, 'from rpy2.robjects import r as R\n'), ((36779, 36814), 'rpy2.robjects.r', 'R', (['"""maximums <- apply(dat, 2, max)"""'], {}), "('maximums <- apply(dat, 2, max)')\n", (36780, 36814), True, 'from rpy2.robjects import r as R\n'), ((36823, 36887), 'rpy2.robjects.r', 'R', (['"""dat2 <- data.frame("cog" = colnames(dat), "max" = maximums)"""'], {}), '(\'dat2 <- data.frame("cog" = colnames(dat), "max" = maximums)\')\n', (36824, 36887), True, 'from rpy2.robjects import r as R\n'), ((36896, 36962), 'rpy2.robjects.r', 'R', (['"""dat3 <- merge(dat2, annotations, by.x = "cog", by.y = "gene")"""'], {}), '(\'dat3 <- merge(dat2, annotations, by.x = "cog", by.y = "gene")\')\n', (36897, 36962), True, 'from rpy2.robjects import r as R\n'), ((36971, 37043), 'rpy2.robjects.r', 'R', (['"""dat3$pi_status <- ifelse(dat3$status == "NS", "NS", dat3$pi_status)"""'], {}), '(\'dat3$pi_status <- ifelse(dat3$status == "NS", "NS", dat3$pi_status)\')\n', (36972, 37043), True, 'from rpy2.robjects import r as R\n'), ((37052, 37144), 'rpy2.robjects.r', 'R', (['"""diff.up.rna <- as.numeric(as.character(dat3$max[dat3$pi_status == "diff.up.rna"]))"""'], {}), '(\'diff.up.rna <- as.numeric(as.character(dat3$max[dat3$pi_status == "diff.up.rna"]))\'\n )\n', (37053, 37144), True, 'from rpy2.robjects import r as R\n'), ((37148, 37244), 'rpy2.robjects.r', 'R', (['"""diff.down.rna <- as.numeric(as.character(dat3$max[dat3$pi_status == 
"diff.down.rna"]))"""'], {}), '(\'diff.down.rna <- as.numeric(as.character(dat3$max[dat3$pi_status == "diff.down.rna"]))\'\n )\n', (37149, 37244), True, 'from rpy2.robjects import r as R\n'), ((37248, 37340), 'rpy2.robjects.r', 'R', (['"""diff.up.dna <- as.numeric(as.character(dat3$max[dat3$pi_status == "diff.up.dna"]))"""'], {}), '(\'diff.up.dna <- as.numeric(as.character(dat3$max[dat3$pi_status == "diff.up.dna"]))\'\n )\n', (37249, 37340), True, 'from rpy2.robjects import r as R\n'), ((37344, 37440), 'rpy2.robjects.r', 'R', (['"""diff.down.dna <- as.numeric(as.character(dat3$max[dat3$pi_status == "diff.down.dna"]))"""'], {}), '(\'diff.down.dna <- as.numeric(as.character(dat3$max[dat3$pi_status == "diff.down.dna"]))\'\n )\n', (37345, 37440), True, 'from rpy2.robjects import r as R\n'), ((37444, 37513), 'rpy2.robjects.r', 'R', (['"""ns <- as.numeric(as.character(dat3$max[dat3$pi_status == "NS"]))"""'], {}), '(\'ns <- as.numeric(as.character(dat3$max[dat3$pi_status == "NS"]))\')\n', (37445, 37513), True, 'from rpy2.robjects import r as R\n'), ((37542, 37578), 'rpy2.robjects.r', 'R', (['"""ks1 <- ks.test(diff.up.rna, ns)"""'], {}), "('ks1 <- ks.test(diff.up.rna, ns)')\n", (37543, 37578), True, 'from rpy2.robjects import r as R\n'), ((37587, 37625), 'rpy2.robjects.r', 'R', (['"""ks2 <- ks.test(diff.down.rna, ns)"""'], {}), "('ks2 <- ks.test(diff.down.rna, ns)')\n", (37588, 37625), True, 'from rpy2.robjects import r as R\n'), ((37635, 37671), 'rpy2.robjects.r', 'R', (['"""ks3 <- ks.test(diff.up.dna, ns)"""'], {}), "('ks3 <- ks.test(diff.up.dna, ns)')\n", (37636, 37671), True, 'from rpy2.robjects import r as R\n'), ((37680, 37718), 'rpy2.robjects.r', 'R', (['"""ks4 <- ks.test(diff.down.dna, ns)"""'], {}), "('ks4 <- ks.test(diff.down.dna, ns)')\n", (37681, 37718), True, 'from rpy2.robjects import r as R\n'), ((37728, 38304), 'rpy2.robjects.r', 'R', (['"""res <- data.frame("RNAGreaterThanDNA.up.pvalue" = ks1$p.value,\n "RNAGreaterThanDNA.up.D" = ks1$statistic,\n "RNAGreaterThanDNA.down.pvalue" = ks2$p.value,\n "RNAGreaterThanDNA.down.D" = ks2$statistic,\n "DNAGreaterThanRNA.up.pvalue" = ks3$p.value,\n "DNAGreaterThanRNA.up.D" = ks3$statistic,\n "DNAGreaterThanRNA.down.pvalue" = ks4$p.value,\n "DNAGreaterThanRNA.down.D" = ks4$statistic)"""'], {}), '("""res <- data.frame("RNAGreaterThanDNA.up.pvalue" = ks1$p.value,\n "RNAGreaterThanDNA.up.D" = ks1$statistic,\n "RNAGreaterThanDNA.down.pvalue" = ks2$p.value,\n "RNAGreaterThanDNA.down.D" = ks2$statistic,\n "DNAGreaterThanRNA.up.pvalue" = ks3$p.value,\n "DNAGreaterThanRNA.up.D" = ks3$statistic,\n "DNAGreaterThanRNA.down.pvalue" = ks4$p.value,\n "DNAGreaterThanRNA.down.D" = ks4$statistic)"""\n )\n', (37729, 38304), True, 'from rpy2.robjects import r as R\n'), ((38304, 38390), 'rpy2.robjects.r', 'R', (['(\'write.table(res, file = "%s", sep = "\\t", quote = F, row.names = F)\' %\n outfile)'], {}), '(\'write.table(res, file = "%s", sep = "\\t", quote = F, row.names = F)\' %\n outfile)\n', (38305, 38390), True, 'from rpy2.robjects import r as R\n'), ((38698, 38718), 'rpy2.robjects.r', 'R', (['"""library(gplots)"""'], {}), "('library(gplots)')\n", (38699, 38718), True, 'from rpy2.robjects import r as R\n'), ((38727, 38747), 'rpy2.robjects.r', 'R', (['"""library(gtools)"""'], {}), "('library(gtools)')\n", (38728, 38747), True, 'from rpy2.robjects import r as R\n'), ((38756, 38862), 'rpy2.robjects.r', 'R', (['(\'dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t", row.names = 1)\'\n % matrix)'], {}), '(\n \'dat <- read.csv("%s", header = T, 
stringsAsFactors = F, sep = "\\t", row.names = 1)\'\n % matrix)\n', (38757, 38862), True, 'from rpy2.robjects import r as R\n'), ((38862, 38966), 'rpy2.robjects.r', 'R', (['(\'annotations <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\'\n % annotations)'], {}), '(\n \'annotations <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\'\n % annotations)\n', (38863, 38966), True, 'from rpy2.robjects import r as R\n'), ((38965, 39011), 'rpy2.robjects.r', 'R', (['"""rownames(annotations) <- annotations$gene"""'], {}), "('rownames(annotations) <- annotations$gene')\n", (38966, 39011), True, 'from rpy2.robjects import r as R\n'), ((39129, 39190), 'rpy2.robjects.r', 'R', (['"""genes <- intersect(rownames(annotations), colnames(dat))"""'], {}), "('genes <- intersect(rownames(annotations), colnames(dat))')\n", (39130, 39190), True, 'from rpy2.robjects import r as R\n'), ((39199, 39223), 'rpy2.robjects.r', 'R', (['"""dat <- dat[, genes]"""'], {}), "('dat <- dat[, genes]')\n", (39200, 39223), True, 'from rpy2.robjects import r as R\n'), ((39232, 39295), 'rpy2.robjects.r', 'R', (['"""dat <- dat[grep("unassigned", rownames(dat), invert = T),]"""'], {}), '(\'dat <- dat[grep("unassigned", rownames(dat), invert = T),]\')\n', (39233, 39295), True, 'from rpy2.robjects import r as R\n'), ((39305, 39333), 'rpy2.robjects.r', 'R', (['"""genera <- rownames(dat)"""'], {}), "('genera <- rownames(dat)')\n", (39306, 39333), True, 'from rpy2.robjects import r as R\n'), ((39342, 39370), 'rpy2.robjects.r', 'R', (['"""rownames(dat) <- genera"""'], {}), "('rownames(dat) <- genera')\n", (39343, 39370), True, 'from rpy2.robjects import r as R\n'), ((39379, 39406), 'rpy2.robjects.r', 'R', (['"""colnames(dat) <- genes"""'], {}), "('colnames(dat) <- genes')\n", (39380, 39406), True, 'from rpy2.robjects import r as R\n'), ((39415, 39454), 'rpy2.robjects.r', 'R', (['"""annotations <- annotations[genes,]"""'], {}), "('annotations <- annotations[genes,]')\n", (39416, 39454), True, 'from rpy2.robjects import r as R\n'), ((39464, 39526), 'rpy2.robjects.r', 'R', (['"""annotations <- annotations[order(annotations$pi_status),]"""'], {}), "('annotations <- annotations[order(annotations$pi_status),]')\n", (39465, 39526), True, 'from rpy2.robjects import r as R\n'), ((39607, 39679), 'rpy2.robjects.r', 'R', (['"""annotations <- annotations[annotations$pi_status == "diff.up.rna",]"""'], {}), '(\'annotations <- annotations[annotations$pi_status == "diff.up.rna",]\')\n', (39608, 39679), True, 'from rpy2.robjects import r as R\n'), ((39689, 39729), 'rpy2.robjects.r', 'R', (['"""annotations <- na.omit(annotations)"""'], {}), "('annotations <- na.omit(annotations)')\n", (39690, 39729), True, 'from rpy2.robjects import r as R\n'), ((39738, 39777), 'rpy2.robjects.r', 'R', (['"""dat <- dat[,rownames(annotations)]"""'], {}), "('dat <- dat[,rownames(annotations)]')\n", (39739, 39777), True, 'from rpy2.robjects import r as R\n'), ((39787, 39863), 'rpy2.robjects.r', 'R', (['"""annotation <- data.frame(cluster = as.character(annotations$pi_status))"""'], {}), "('annotation <- data.frame(cluster = as.character(annotations$pi_status))')\n", (39788, 39863), True, 'from rpy2.robjects import r as R\n'), ((39872, 39922), 'rpy2.robjects.r', 'R', (['"""rownames(annotation) <- rownames(annotations)"""'], {}), "('rownames(annotation) <- rownames(annotations)')\n", (39873, 39922), True, 'from rpy2.robjects import r as R\n'), ((39933, 39958), 'rpy2.robjects.r', 'R', (['"""colors1 <- c("grey")"""'], {}), '(\'colors1 <- c("grey")\')\n', 
(39934, 39958), True, 'from rpy2.robjects import r as R\n'), ((39967, 40006), 'rpy2.robjects.r', 'R', (['"""names(colors1) <- c("diff.up.rna")"""'], {}), '(\'names(colors1) <- c("diff.up.rna")\')\n', (39968, 40006), True, 'from rpy2.robjects import r as R\n'), ((40016, 40059), 'rpy2.robjects.r', 'R', (['"""anno_colors <- list(cluster = colors1)"""'], {}), "('anno_colors <- list(cluster = colors1)')\n", (40017, 40059), True, 'from rpy2.robjects import r as R\n'), ((40069, 40127), 'rpy2.robjects.r', 'R', (['"""cols <- colorRampPalette(c("white", "darkBlue"))(150)"""'], {}), '(\'cols <- colorRampPalette(c("white", "darkBlue"))(150)\')\n', (40070, 40127), True, 'from rpy2.robjects import r as R\n'), ((40137, 40177), 'rpy2.robjects.r', 'R', (['"""dat <- dat[,colSums(dat > 50) >= 1]"""'], {}), "('dat <- dat[,colSums(dat > 50) >= 1]')\n", (40138, 40177), True, 'from rpy2.robjects import r as R\n'), ((40186, 40226), 'rpy2.robjects.r', 'R', (['"""dat <- dat[rowSums(dat > 10) >= 1,]"""'], {}), "('dat <- dat[rowSums(dat > 10) >= 1,]')\n", (40187, 40226), True, 'from rpy2.robjects import r as R\n'), ((40279, 40332), 'rpy2.robjects.r', 'R', (['"""dat2 <- data.frame(t(apply(dat, 1, as.numeric)))"""'], {}), "('dat2 <- data.frame(t(apply(dat, 1, as.numeric)))')\n", (40280, 40332), True, 'from rpy2.robjects import r as R\n'), ((40341, 40377), 'rpy2.robjects.r', 'R', (['"""colnames(dat2) <- colnames(dat)"""'], {}), "('colnames(dat2) <- colnames(dat)')\n", (40342, 40377), True, 'from rpy2.robjects import r as R\n'), ((40387, 40436), 'rpy2.robjects.r', 'R', (['(\'pdf("%s", height = 10, width = 15)\' % outfile)'], {}), '(\'pdf("%s", height = 10, width = 15)\' % outfile)\n', (40388, 40436), True, 'from rpy2.robjects import r as R\n'), ((40445, 40467), 'rpy2.robjects.r', 'R', (['"""library(pheatmap)"""'], {}), "('library(pheatmap)')\n", (40446, 40467), True, 'from rpy2.robjects import r as R\n'), ((40476, 40839), 'rpy2.robjects.r', 'R', (['"""pheatmap(dat2, \n clustering_distance_cols = "manhattan",\n clustering_method = "ward",\n annotation = annotation,\n annotation_colors = anno_colors,\n cluster_rows = T,\n cluster_cols = F,\n color = cols,\n fontsize = 8)"""'], {}), '("""pheatmap(dat2, \n clustering_distance_cols = "manhattan",\n clustering_method = "ward",\n annotation = annotation,\n annotation_colors = anno_colors,\n cluster_rows = T,\n cluster_cols = F,\n color = cols,\n fontsize = 8)"""\n )\n', (40477, 40839), True, 'from rpy2.robjects import r as R\n'), ((41348, 41369), 'rpy2.robjects.r', 'R', (['"""library(ggplot2)"""'], {}), "('library(ggplot2)')\n", (41349, 41369), True, 'from rpy2.robjects import r as R\n'), ((41405, 41500), 'rpy2.robjects.r', 'R', (['(\'dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' %\n taxa_cog_dnadiff)'], {}), '(\'dna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' %\n taxa_cog_dnadiff)\n', (41406, 41500), True, 'from rpy2.robjects import r as R\n'), ((41505, 41568), 'rpy2.robjects.r', 'R', (['"""dna <- dna[dna$group2 == "WT" & dna$group1 == "HhaIL10R",]"""'], {}), '(\'dna <- dna[dna$group2 == "WT" & dna$group1 == "HhaIL10R",]\')\n', (41506, 41568), True, 'from rpy2.robjects import r as R\n'), ((41577, 41672), 'rpy2.robjects.r', 'R', (['(\'rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' %\n taxa_cog_rnadiff)'], {}), '(\'rna <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' %\n taxa_cog_rnadiff)\n', (41578, 41672), True, 'from rpy2.robjects import r as R\n'), ((41677, 41740), 'rpy2.robjects.r', 
'R', (['"""rna <- rna[rna$group2 == "WT" & rna$group1 == "HhaIL10R",]"""'], {}), '(\'rna <- rna[rna$group2 == "WT" & rna$group1 == "HhaIL10R",]\')\n', (41678, 41740), True, 'from rpy2.robjects import r as R\n'), ((41775, 41869), 'rpy2.robjects.r', 'R', (['(\'dna.cog <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' %\n cog_dnadiff)'], {}), '(\'dna.cog <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' %\n cog_dnadiff)\n', (41776, 41869), True, 'from rpy2.robjects import r as R\n'), ((41874, 41953), 'rpy2.robjects.r', 'R', (['"""dna.cog <- dna.cog[dna.cog$group2 == "WT" & dna.cog$group1 == "HhaIL10R",]"""'], {}), '(\'dna.cog <- dna.cog[dna.cog$group2 == "WT" & dna.cog$group1 == "HhaIL10R",]\')\n', (41875, 41953), True, 'from rpy2.robjects import r as R\n'), ((41962, 42056), 'rpy2.robjects.r', 'R', (['(\'rna.cog <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' %\n cog_rnadiff)'], {}), '(\'rna.cog <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\\t")\' %\n cog_rnadiff)\n', (41963, 42056), True, 'from rpy2.robjects import r as R\n'), ((42061, 42140), 'rpy2.robjects.r', 'R', (['"""rna.cog <- rna.cog[rna.cog$group2 == "WT" & rna.cog$group1 == "HhaIL10R",]"""'], {}), '(\'rna.cog <- rna.cog[rna.cog$group2 == "WT" & rna.cog$group1 == "HhaIL10R",]\')\n', (42062, 42140), True, 'from rpy2.robjects import r as R\n'), ((42183, 42434), 'rpy2.robjects.r', 'R', (['"""dat <- merge(dna, rna, \n by.x = "taxa", \n by.y = "taxa", \n all.x = T, \n all.y = T, \n suffixes = c(".dna.taxa.cog", ".rna.taxa.cog"))"""'], {}), '("""dat <- merge(dna, rna, \n by.x = "taxa", \n by.y = "taxa", \n all.x = T, \n all.y = T, \n suffixes = c(".dna.taxa.cog", ".rna.taxa.cog"))"""\n )\n', (42184, 42434), True, 'from rpy2.robjects import r as R\n'), ((42454, 42479), 'rpy2.robjects.r', 'R', (['"""dat[is.na(dat)] <- 0"""'], {}), "('dat[is.na(dat)] <- 0')\n", (42455, 42479), True, 'from rpy2.robjects import r as R\n'), ((42565, 42644), 'rpy2.robjects.r', 'R', (['"""cogs <- c("COG0783", "COG2837", "COG0435","COG5520", "COG0508", "COG0852")"""'], {}), '(\'cogs <- c("COG0783", "COG2837", "COG0435","COG5520", "COG0508", "COG0852")\')\n', (42566, 42644), True, 'from rpy2.robjects import r as R\n'), ((42808, 44219), 'rpy2.robjects.r', 'R', (['"""for (cog in cogs){\n dat2 <- dat[grep(cog, dat$taxa),]\n dna.cog2 <- dna.cog[grep(cog, dna.cog$taxa),]\n rna.cog2 <- rna.cog[grep(cog, rna.cog$taxa),]\n\n # add the data for COG fold changes and abundance\n dat3 <- data.frame("genus" = append(dat2$taxa, cog),\n "dna.fold" = append(dat2$logFC.dna.taxa.cog, dna.cog2$logFC),\n "rna.fold" = append(dat2$logFC.rna.taxa.cog, rna.cog2$logFC),\n "abundance" = append(dat2$AveExpr.rna.taxa.cog, rna.cog2$AveExpr))\n \n suffix <- paste(cog, "scatters.pdf", sep = ".")\n outname <- paste("scatterplot_genus_cog_fold.dir", suffix, sep = "/")\n\n plot1 <- ggplot(dat3, aes(x = dna.fold, y = rna.fold, size = log10(abundance), label = genus))\n plot2 <- plot1 + geom_point(shape = 18) \n plot3 <- plot2 + geom_text(hjust = 0.5, vjust = 1) + scale_size(range = c(3,6))\n plot4 <- plot3 + geom_abline(intercept = 0, slope = 1, colour = "blue") \n plot5 <- plot4 + geom_hline(yintercept = c(-1,1), linetype = "dashed")\n plot6 <- plot5 + geom_vline(xintercept = c(-1,1), linetype = "dashed")\n plot7 <- plot6 + geom_hline(yintercept = 0) + geom_vline(xintercept = 0)\n ggsave(outname)\n }"""'], {}), '("""for (cog in cogs){\n dat2 <- dat[grep(cog, dat$taxa),]\n dna.cog2 <- dna.cog[grep(cog, dna.cog$taxa),]\n rna.cog2 <- 
rna.cog[grep(cog, rna.cog$taxa),]\n\n # add the data for COG fold changes and abundance\n dat3 <- data.frame("genus" = append(dat2$taxa, cog),\n "dna.fold" = append(dat2$logFC.dna.taxa.cog, dna.cog2$logFC),\n "rna.fold" = append(dat2$logFC.rna.taxa.cog, rna.cog2$logFC),\n "abundance" = append(dat2$AveExpr.rna.taxa.cog, rna.cog2$AveExpr))\n \n suffix <- paste(cog, "scatters.pdf", sep = ".")\n outname <- paste("scatterplot_genus_cog_fold.dir", suffix, sep = "/")\n\n plot1 <- ggplot(dat3, aes(x = dna.fold, y = rna.fold, size = log10(abundance), label = genus))\n plot2 <- plot1 + geom_point(shape = 18) \n plot3 <- plot2 + geom_text(hjust = 0.5, vjust = 1) + scale_size(range = c(3,6))\n plot4 <- plot3 + geom_abline(intercept = 0, slope = 1, colour = "blue") \n plot5 <- plot4 + geom_hline(yintercept = c(-1,1), linetype = "dashed")\n plot6 <- plot5 + geom_vline(xintercept = c(-1,1), linetype = "dashed")\n plot7 <- plot6 + geom_hline(yintercept = 0) + geom_vline(xintercept = 0)\n ggsave(outname)\n }"""\n )\n', (42809, 44219), True, 'from rpy2.robjects import r as R\n'), ((1048, 1072), 'os.path.basename', 'os.path.basename', (['infile'], {}), '(infile)\n', (1064, 1072), False, 'import os\n'), ((1549, 1575), 'itertools.product', 'itertools.product', (['ps', 'fcs'], {}), '(ps, fcs)\n', (1566, 1575), False, 'import itertools\n'), ((13516, 13547), 'rpy2.robjects.r', 'R', (['"""rna.distinct <- rep(0, 20)"""'], {}), "('rna.distinct <- rep(0, 20)')\n", (13517, 13547), True, 'from rpy2.robjects import r as R\n'), ((13570, 13603), 'rpy2.robjects.r', 'R', (['"""rna.distinct <- rna.distinct"""'], {}), "('rna.distinct <- rna.distinct')\n", (13571, 13603), True, 'from rpy2.robjects import r as R\n'), ((30089, 30104), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (30096, 30104), True, 'import numpy as np\n'), ((15778, 15810), 'CGATPipelines.Pipeline.snip', 'P.snip', (['outfile', '""".loadings.tsv"""'], {}), "(outfile, '.loadings.tsv')\n", (15784, 15810), True, 'import CGATPipelines.Pipeline as P\n')]
|
# Third-party
import astropy.units as u
import numpy as np
import pymc3 as pm
from pymc3.distributions import generate_samples
import aesara_theano_fallback.tensor as tt
import exoplanet.units as xu
__all__ = ['UniformLog', 'FixedCompanionMass']
class UniformLog(pm.Continuous):
def __init__(self, a, b, **kwargs):
"""A distribution over a value, x, that is uniform in log(x) over the
domain :math:`(a, b)`.
"""
self.a = float(a)
self.b = float(b)
assert (self.a > 0) and (self.b > 0)
self._fac = np.log(self.b) - np.log(self.a)
shape = kwargs.get("shape", None)
if shape is None:
testval = 0.5 * (self.a + self.b)
else:
testval = 0.5 * (self.a + self.b) + np.zeros(shape)
kwargs["testval"] = kwargs.pop("testval", testval)
super(UniformLog, self).__init__(**kwargs)
def _random(self, size=None):
uu = np.random.uniform(size=size)
return np.exp(uu * self._fac + np.log(self.a))
def random(self, point=None, size=None):
return generate_samples(
self._random,
dist_shape=self.shape,
broadcast_shape=self.shape,
size=size,
)
def logp(self, value):
return -tt.as_tensor_variable(value) - np.log(self._fac)
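# Illustrative sketch (an assumption for this write-up, not part of the original
# module): drawing samples from UniformLog outside of a model context, e.g. to
# sanity-check the sampler. The bounds (1, 100) are hypothetical.
#
#     d = UniformLog.dist(1.0, 100.0)
#     x = d.random(size=1000)   # values lie in (1, 100)
#     # np.log(x) should look approximately uniform on (log 1, log 100)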
class FixedCompanionMass(pm.Normal):
r"""
A distribution over velocity semi-amplitude, :math:`K`, that, at
fixed primary mass, is a fixed Normal distribution in companion mass. This
has the form:
.. math::
p(K) \propto \mathcal{N}(K \,|\, \mu_K, \sigma_K)
\sigma_K = \sigma_{K, 0} \, \left(\frac{P}{P_0}\right)^{-1/3} \,
        \left(1 - e^2\right)^{-1/2}
where :math:`P` and :math:`e` are period and eccentricity, and
``sigma_K0`` and ``P0`` are parameters of this distribution that must
be specified.
"""
@u.quantity_input(sigma_K0=u.km/u.s, P0=u.day, max_K=u.km/u.s)
def __init__(self, P, e, sigma_K0, P0, mu=0., max_K=500*u.km/u.s,
K_unit=None, **kwargs):
self._sigma_K0 = sigma_K0
self._P0 = P0
self._max_K = max_K
if K_unit is not None:
            self._sigma_K0 = self._sigma_K0.to(K_unit)
self._max_K = self._max_K.to(self._sigma_K0.unit)
if hasattr(P, xu.UNIT_ATTR_NAME):
self._P0 = self._P0.to(getattr(P, xu.UNIT_ATTR_NAME))
sigma_K0 = self._sigma_K0.value
P0 = self._P0.value
sigma = tt.min([self._max_K.value,
sigma_K0 * (P/P0)**(-1/3) / np.sqrt(1-e**2)])
super().__init__(mu=mu, sigma=sigma)
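# Illustrative numbers (hypothetical, for orientation only): the scaling above is
# sigma_K = sigma_K0 * (P / P0)**(-1/3) / sqrt(1 - e**2), capped at max_K.
# With sigma_K0 = 30 km/s, P0 = 365 d, P = 2920 d (= 8 * P0) and e = 0.5:
#   (P / P0)**(-1/3) = 0.5 and 1 / sqrt(1 - 0.25) ~= 1.155, so sigma_K ~= 17.3 km/s.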
class Kipping13Long(pm.Beta):
def __init__(self):
r"""
The inferred long-period eccentricity distribution from Kipping (2013).
"""
super().__init__(1.12, 3.09)
class Kipping13Short(pm.Beta):
def __init__(self):
r"""
The inferred short-period eccentricity distribution from Kipping (2013).
"""
super().__init__(0.697, 3.27)
class Kipping13Global(pm.Beta):
def __init__(self):
r"""
The inferred global eccentricity distribution from Kipping (2013).
"""
super().__init__(0.867, 3.03)
|
[
"numpy.sqrt",
"numpy.log",
"aesara_theano_fallback.tensor.as_tensor_variable",
"numpy.zeros",
"numpy.random.uniform",
"pymc3.distributions.generate_samples",
"astropy.units.quantity_input"
] |
[((1908, 1973), 'astropy.units.quantity_input', 'u.quantity_input', ([], {'sigma_K0': '(u.km / u.s)', 'P0': 'u.day', 'max_K': '(u.km / u.s)'}), '(sigma_K0=u.km / u.s, P0=u.day, max_K=u.km / u.s)\n', (1924, 1973), True, 'import astropy.units as u\n'), ((945, 973), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'size'}), '(size=size)\n', (962, 973), True, 'import numpy as np\n'), ((1090, 1187), 'pymc3.distributions.generate_samples', 'generate_samples', (['self._random'], {'dist_shape': 'self.shape', 'broadcast_shape': 'self.shape', 'size': 'size'}), '(self._random, dist_shape=self.shape, broadcast_shape=self.\n shape, size=size)\n', (1106, 1187), False, 'from pymc3.distributions import generate_samples\n'), ((562, 576), 'numpy.log', 'np.log', (['self.b'], {}), '(self.b)\n', (568, 576), True, 'import numpy as np\n'), ((579, 593), 'numpy.log', 'np.log', (['self.a'], {}), '(self.a)\n', (585, 593), True, 'import numpy as np\n'), ((1317, 1334), 'numpy.log', 'np.log', (['self._fac'], {}), '(self._fac)\n', (1323, 1334), True, 'import numpy as np\n'), ((771, 786), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (779, 786), True, 'import numpy as np\n'), ((1013, 1027), 'numpy.log', 'np.log', (['self.a'], {}), '(self.a)\n', (1019, 1027), True, 'import numpy as np\n'), ((1286, 1314), 'aesara_theano_fallback.tensor.as_tensor_variable', 'tt.as_tensor_variable', (['value'], {}), '(value)\n', (1307, 1314), True, 'import aesara_theano_fallback.tensor as tt\n'), ((2583, 2602), 'numpy.sqrt', 'np.sqrt', (['(1 - e ** 2)'], {}), '(1 - e ** 2)\n', (2590, 2602), True, 'import numpy as np\n')]
|
import cv2
import numpy as np
from PyQt5.QtGui import QIntValidator
from PyQt5.QtWidgets import QDialog
from PyQt5.uic import loadUi
from utils import processing_utils as utils
IMAGE_DESCRIPT_DIALOG_UI = 'coreUI/image_description_dialog.ui'
class ImageDescriptionDialog(QDialog):
"""Image Description Dialog Window"""
def __init__(self, image):
super(ImageDescriptionDialog, self).__init__()
loadUi(IMAGE_DESCRIPT_DIALOG_UI, self)
self.kernel = None
self.kernel_matrix_edits = [
self.m_r1c1,
self.m_r2c1,
self.m_r3c1,
self.m_r1c2,
self.m_r2c2,
self.m_r3c2,
self.m_r1c3,
self.m_r2c3,
self.m_r3c3,
]
self.set_validation()
description_labels = [
self.shapeDataLabel,
self.channelsDataLabel,
self.sizeDataLabel,
self.datatypeDataLabel
]
# Set up initial display labels
utils.display_img(image, self.imageViewLabel)
(h, w, c) = image.shape
self.shapeDataLabel.setText(f"{w} x {h} pixels")
self.channelsDataLabel.setText(f"{c}")
self.sizeDataLabel.setText(f"{image.size} pixels")
self.datatypeDataLabel.setText(f"{image.dtype}")
# Resize all labels to match the max width
max_label_width = 0
for label in description_labels:
if label.width() > max_label_width:
max_label_width = label.width()
for label in description_labels:
# Add padding to the width for size dimensions
label.setFixedWidth(max_label_width + 50)
# CV image that is passed to the dialog menu
# Note: We do not want a dialog without an image to accompany it
self._image = image
# Cache both the original and processed image
self._processed_image = image.copy()
# Connect UI buttons
self.applyButton.clicked.connect(self.on_apply_clicked)
self.resetButton.clicked.connect(self.on_reset_clicked)
self.normalizeButton.clicked.connect(self.on_normalize_clicked)
def on_apply_clicked(self):
"""
Handle when apply button is clicked
Only apply the matrix when it meets all validation criteria
:return: None if verification fails
"""
if self.verify_not_empty():
utils.display_img(self.apply_kernel_transformation(), self.imageViewLabel)
def on_reset_clicked(self):
"""
Handle when the reset button is clicked
:return:
"""
# Display the original image and reset the processed image
utils.display_img(self._image, self.imageViewLabel)
self._processed_image = self._image.copy()
def verify_not_empty(self):
"""
Verify that all text fields are not empty
:return: True only if all fields are not empty
"""
for text_edit in self.kernel_matrix_edits:
if not text_edit.hasAcceptableInput():
return False
return True
def on_normalize_clicked(self):
"""
Normalize the current kernel matrix and display the result
:return: None if the kernel has not been applied
"""
if self.kernel is None:
return
# Get the sum of the matrix and create the new normalized matrix
        kernel_sum = np.sum(self.kernel, dtype=np.int32)
        if kernel_sum == 0:
            return  # a zero-sum kernel cannot be normalized; avoid dividing by zero
        normalized_kernel = self.kernel / kernel_sum
self._processed_image = self._image.copy()
image = cv2.filter2D(self._processed_image, -1, normalized_kernel)
utils.display_img(image, self.imageViewLabel)
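    # Illustrative note (hypothetical values): dividing by the matrix sum keeps the
    # overall image brightness unchanged, e.g. a 3x3 kernel of all ones sums to 9 and
    # becomes the standard box-blur kernel with every entry equal to 1/9.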
def set_validation(self):
"""
Set validators on all matrix text edits (ensure integer input)
:return:
"""
validator = QIntValidator(-9999999999, 9999999999)
        for text_edit in self.kernel_matrix_edits:
            text_edit.setValidator(validator)
def apply_kernel_transformation(self):
def val(text_edit): return int(text_edit.text())
self.kernel = np.array([
[val(self.m_r1c1), val(self.m_r1c2), val(self.m_r1c3)],
[val(self.m_r2c1), val(self.m_r2c2), val(self.m_r2c3)],
[val(self.m_r3c1), val(self.m_r3c2), val(self.m_r3c3)]
])
return cv2.filter2D(self._processed_image, -1, self.kernel)
|
[
"PyQt5.QtGui.QIntValidator",
"PyQt5.uic.loadUi",
"utils.processing_utils.display_img",
"cv2.filter2D",
"numpy.sum"
] |
[((420, 458), 'PyQt5.uic.loadUi', 'loadUi', (['IMAGE_DESCRIPT_DIALOG_UI', 'self'], {}), '(IMAGE_DESCRIPT_DIALOG_UI, self)\n', (426, 458), False, 'from PyQt5.uic import loadUi\n'), ((1016, 1061), 'utils.processing_utils.display_img', 'utils.display_img', (['image', 'self.imageViewLabel'], {}), '(image, self.imageViewLabel)\n', (1033, 1061), True, 'from utils import processing_utils as utils\n'), ((2703, 2754), 'utils.processing_utils.display_img', 'utils.display_img', (['self._image', 'self.imageViewLabel'], {}), '(self._image, self.imageViewLabel)\n', (2720, 2754), True, 'from utils import processing_utils as utils\n'), ((3444, 3479), 'numpy.sum', 'np.sum', (['self.kernel'], {'dtype': 'np.int32'}), '(self.kernel, dtype=np.int32)\n', (3450, 3479), True, 'import numpy as np\n'), ((3594, 3652), 'cv2.filter2D', 'cv2.filter2D', (['self._processed_image', '(-1)', 'normalized_kernel'], {}), '(self._processed_image, -1, normalized_kernel)\n', (3606, 3652), False, 'import cv2\n'), ((3661, 3706), 'utils.processing_utils.display_img', 'utils.display_img', (['image', 'self.imageViewLabel'], {}), '(image, self.imageViewLabel)\n', (3678, 3706), True, 'from utils import processing_utils as utils\n'), ((3870, 3908), 'PyQt5.QtGui.QIntValidator', 'QIntValidator', (['(-9999999999)', '(9999999999)'], {}), '(-9999999999, 9999999999)\n', (3883, 3908), False, 'from PyQt5.QtGui import QIntValidator\n'), ((4372, 4424), 'cv2.filter2D', 'cv2.filter2D', (['self._processed_image', '(-1)', 'self.kernel'], {}), '(self._processed_image, -1, self.kernel)\n', (4384, 4424), False, 'import cv2\n')]
|
# -*- coding: utf-8 -*-
import os
os.environ['DJANGO_SETTINGS_MODULE']='settings'
from logistic import logisticdb
import webapp2 as webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext.webapp import template
import numpy as np
import cgi
import cgitb
cgitb.enable()
def lesliegrow(n_o, T, l_m,S):
n_f=np.zeros(shape=(S,T))
for i in range(0, T):
n=np.dot(l_m, n_o)
n_o=n
n_f[:,i]=n.squeeze()
return n_f.tolist()
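# Illustrative sketch (hypothetical numbers): lesliegrow repeatedly applies the
# Leslie matrix to the stage vector, so column i of n_f is the population after
# i + 1 time steps.  For a 2-stage example:
#   l_m = np.array([[0.0, 2.0],   # fecundities
#                   [0.5, 0.0]])  # survival from stage 1 into stage 2
#   n_o = np.array([[10.0], [10.0]])
#   lesliegrow(n_o, 2, l_m, 2)  ->  [[20.0, 10.0], [5.0, 10.0]]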
class leslieOutputPage(webapp.RequestHandler):
def post(self):
form = cgi.FieldStorage()
#text_file = open('.txt','r')
#x1 = text_file.read()
T = form.getvalue('T')
T = int(T)
S = form.getvalue('S')
S = int(S)
l_m = np.zeros(shape=(S,S))
n_o = np.zeros(shape=(S,1))
for i in range(S):
n_o_temp = form.getvalue('no'+str(i))
n_o[i,] = n_o_temp
for j in range(S):
l_m_temp = form.getvalue('a'+str(i)+str(j))
l_m[i,j] = l_m_temp
x=lesliegrow(n_o, T, l_m,S)
templatepath = os.path.dirname(__file__) + '/../templates/'
html = template.render(templatepath + '01pop_uberheader.html', {'title':'Ubertool'})
html = html + template.render(templatepath + '02pop_uberintroblock_wmodellinks.html', {'model':'leslie','page':'output'})
html = html + template.render(templatepath + '03pop_ubertext_links_left.html', {})
html = html + template.render(templatepath + '04uberoutput_start.html', {
'model':'leslie',
'model_attributes':'Leslie Matrix Outputs'})
html = html + """<table class="out-in out_" width="550" border="1">
<tr>
<th scope="col" width="250"><div align="center">Inputs</div></th>
<th scope="col" width="150"><div align="center">Unit</div></th>
<th scope="col" width="150"><div align="center">Value</div></th>
</tr>
<tr>
<td><div align="center">Simulation duration</div></td>
<td><div align="center">time unit</div></td>
<td><div align="center">%s</div></td>
</tr>
<tr>
<td><div align="center">Modeled stages</div></td>
<td><div align="center"> </div></td>
<td id="MS"><div align="center">%s</div></td>
</tr>
<tr style="display:none">
<td><div align="center">Leslie Matrix</div></td>
<td><div align="center"> </div></td>
<td id="LM"><div align="center">%s</div></td>
</tr>
<tr style="display:none">
<td><div align="center">Initial numbers</div></td>
<td><div align="center"> </div></td>
<td id="IN"><div align="center">%s</div></td>
</tr>
</table>
<p> </p>"""%(T, S, l_m.tolist(), n_o.tolist())
html = html + """<table class="lm out_" border="1">"""
html = html + """<table class="ii out_" border="1">"""
html = html + """<table width="400" border="1" style="display: none">
<tr>
<td>X</td>
<td id="final">%s</td>
</tr>
</table>"""%(x)
html = html + template.render(templatepath + 'leslie-output-jqplot.html', {})
html = html + template.render(templatepath + '04uberoutput_end.html', {})
html = html + template.render(templatepath + 'export.html', {})
html = html + template.render(templatepath + '06pop_uberfooter.html', {'links': ''})
self.response.out.write(html)
app = webapp.WSGIApplication([('/.*', leslieOutputPage)], debug=True)
def main():
run_wsgi_app(app)
if __name__ == '__main__':
main()
|
[
"cgi.FieldStorage",
"os.path.dirname",
"numpy.dot",
"numpy.zeros",
"google.appengine.ext.webapp.util.run_wsgi_app",
"webapp2.WSGIApplication",
"cgitb.enable",
"google.appengine.ext.webapp.template.render"
] |
[((291, 305), 'cgitb.enable', 'cgitb.enable', ([], {}), '()\n', (303, 305), False, 'import cgitb\n'), ((4709, 4772), 'webapp2.WSGIApplication', 'webapp.WSGIApplication', (["[('/.*', leslieOutputPage)]"], {'debug': '(True)'}), "([('/.*', leslieOutputPage)], debug=True)\n", (4731, 4772), True, 'import webapp2 as webapp\n'), ((351, 373), 'numpy.zeros', 'np.zeros', ([], {'shape': '(S, T)'}), '(shape=(S, T))\n', (359, 373), True, 'import numpy as np\n'), ((4799, 4816), 'google.appengine.ext.webapp.util.run_wsgi_app', 'run_wsgi_app', (['app'], {}), '(app)\n', (4811, 4816), False, 'from google.appengine.ext.webapp.util import run_wsgi_app\n'), ((409, 425), 'numpy.dot', 'np.dot', (['l_m', 'n_o'], {}), '(l_m, n_o)\n', (415, 425), True, 'import numpy as np\n'), ((584, 602), 'cgi.FieldStorage', 'cgi.FieldStorage', ([], {}), '()\n', (600, 602), False, 'import cgi\n'), ((788, 810), 'numpy.zeros', 'np.zeros', ([], {'shape': '(S, S)'}), '(shape=(S, S))\n', (796, 810), True, 'import numpy as np\n'), ((824, 846), 'numpy.zeros', 'np.zeros', ([], {'shape': '(S, 1)'}), '(shape=(S, 1))\n', (832, 846), True, 'import numpy as np\n'), ((1239, 1317), 'google.appengine.ext.webapp.template.render', 'template.render', (["(templatepath + '01pop_uberheader.html')", "{'title': 'Ubertool'}"], {}), "(templatepath + '01pop_uberheader.html', {'title': 'Ubertool'})\n", (1254, 1317), False, 'from google.appengine.ext.webapp import template\n'), ((1179, 1204), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1194, 1204), False, 'import os\n'), ((1339, 1454), 'google.appengine.ext.webapp.template.render', 'template.render', (["(templatepath + '02pop_uberintroblock_wmodellinks.html')", "{'model': 'leslie', 'page': 'output'}"], {}), "(templatepath + '02pop_uberintroblock_wmodellinks.html', {\n 'model': 'leslie', 'page': 'output'})\n", (1354, 1454), False, 'from google.appengine.ext.webapp import template\n'), ((1469, 1537), 'google.appengine.ext.webapp.template.render', 'template.render', (["(templatepath + '03pop_ubertext_links_left.html')", '{}'], {}), "(templatepath + '03pop_ubertext_links_left.html', {})\n", (1484, 1537), False, 'from google.appengine.ext.webapp import template\n'), ((1560, 1687), 'google.appengine.ext.webapp.template.render', 'template.render', (["(templatepath + '04uberoutput_start.html')", "{'model': 'leslie', 'model_attributes': 'Leslie Matrix Outputs'}"], {}), "(templatepath + '04uberoutput_start.html', {'model':\n 'leslie', 'model_attributes': 'Leslie Matrix Outputs'})\n", (1575, 1687), False, 'from google.appengine.ext.webapp import template\n'), ((4323, 4386), 'google.appengine.ext.webapp.template.render', 'template.render', (["(templatepath + 'leslie-output-jqplot.html')", '{}'], {}), "(templatepath + 'leslie-output-jqplot.html', {})\n", (4338, 4386), False, 'from google.appengine.ext.webapp import template\n'), ((4434, 4493), 'google.appengine.ext.webapp.template.render', 'template.render', (["(templatepath + '04uberoutput_end.html')", '{}'], {}), "(templatepath + '04uberoutput_end.html', {})\n", (4449, 4493), False, 'from google.appengine.ext.webapp import template\n'), ((4516, 4565), 'google.appengine.ext.webapp.template.render', 'template.render', (["(templatepath + 'export.html')", '{}'], {}), "(templatepath + 'export.html', {})\n", (4531, 4565), False, 'from google.appengine.ext.webapp import template\n'), ((4588, 4658), 'google.appengine.ext.webapp.template.render', 'template.render', (["(templatepath + '06pop_uberfooter.html')", "{'links': ''}"], {}), "(templatepath + '06pop_uberfooter.html', {'links': ''})\n", (4603, 4658), False, 'from google.appengine.ext.webapp import template\n')]
|
import time
import os
import arcade
import argparse
import gym
from gym import spaces
import swarm_env
import numpy as np
import random
import sys
sys.path.insert(0, '..')
from objects import SwarmSimulator
# Running experiment 22 in standalone file.
def experiment_runner(SWARM_SIZE = 15, ARENA_WIDTH = 600, ARENA_HEIGHT = 600, name_of_experiment = time.time(), INPUT_TIME = 300, GRID_X = 40, GRID_Y = 40,
disaster_size = 1, disaster_location = 'random', operator_size = 1, operator_location = 'random', reliability = (100, 101), unreliability_percentage = 0,
moving_disaster = False, communication_noise = 0, alpha = 10, normal_command = None, command_period = 0, constant_repulsion = False,
operator_vision_radius = 150, communication_range = 8, vision_range = 2, velocity_weight_coef = 0.01, boundary_repulsion = 1, aging_factor = 0.9999,
gp = False, gp_step = 50, maze = None, through_walls = True, rl_sim = None):
########### q-learning parameter setup #############
max_steps_per_episode = 10 # Steps allowed in a single episode.
learning_rate = 0.1 # alpha in bellman.
discount_rate = 0.99 # gamma in bellman for discount.
# Epsilon greedy policy vars.
exploration_rate = 1 # To set exploration (1 means 100% exploration)
max_exploration_rate = 1 # How large can exploration be.
min_exploration_rate = 0.01 # How small can exploration be.
exploration_decay_rate = 0.001 # decay rate for exploration.
rewards_all_episodes = [] # Saving all scores in rewards.
gym_swarm_env = gym.make('humanswarm-v0', maze_size=GRID_X) # Creating the environment for swarm learning.
gym_swarm_env.action_space = np.zeros((GRID_X, GRID_Y))
q_table = np.zeros((gym_swarm_env.observation_space.n , gym_swarm_env.action_space.size)) # Creating q-table for measuring score.
action = np.zeros((gym_swarm_env.action_space.size))
print('\n')
print("===== Reinforcement Parameters =====")
print("# Discount rate: " + str(discount_rate))
print("# Learning rate: " + str(learning_rate))
print("# Max steps per iteration: " + str(max_steps_per_episode))
print("# Max exploration rate: " + str(max_exploration_rate))
print("# Min exploration rate: " + str(min_exploration_rate))
print("# Exploration decay rate: " + str(exploration_decay_rate))
print("# Algorithm: " + str(rl_sim))
print("# State space size: " + str(gym_swarm_env.observation_space.n))
print("# Action space size: " + str(gym_swarm_env.action_space.size))
print("# Q-table size: " + str(q_table.shape))
print("====================================")
print('\n')
# Implemeting Q-learning algorithm.
done = False
state = gym_swarm_env.reset()
s_list = []
for step in range(max_steps_per_episode):
print('\n' + "============ start of step " + str(step) + " =============")
"""
In this loop we will set up exploration-exploitation trade-off,
Taking new action,
Updating Q-table,
Setting new state,
Adding new reward.
"""
# Simulation functions
sim = SwarmSimulator(ARENA_WIDTH, ARENA_HEIGHT, name_of_experiment, SWARM_SIZE, INPUT_TIME, GRID_X, GRID_Y, rl_sim)
sim.setup(disaster_size, disaster_location, operator_size, operator_location, reliability[0], reliability[1], unreliability_percentage, moving_disaster, communication_noise,
alpha, normal_command, command_period, constant_repulsion, operator_vision_radius,
communication_range, vision_range, velocity_weight_coef, boundary_repulsion, aging_factor, gp, gp_step, maze, through_walls)
if (not os.path.isdir('../outputs/' + name_of_experiment)):
os.mkdir('../outputs/' + name_of_experiment)
if (not os.path.isdir('../outputs/' + name_of_experiment + '/step_' + str(step))):
os.mkdir('../outputs/' + name_of_experiment + '/step_' + str(step))
if (not os.path.isdir('../outputs/' + name_of_experiment + '/step_' + str(step) + '/data')):
os.mkdir('../outputs/' + name_of_experiment + '/step_' + str(step) + '/data')
if (not os.path.isdir('../outputs/' + name_of_experiment + '/step_' + str(step) + '/data' + '/results')):
os.mkdir('../outputs/' + name_of_experiment + '/step_' + str(step) + '/data' + '/results')
sim.directory = str('../outputs/' + name_of_experiment + '/data/results/'+ str(time.time()))
while os.path.isdir(sim.directory):
sim.directory = str('../outputs/' + name_of_experiment + '/step_'+ str(step) + '/data/results/' + str(time.time()))
sim.directory = str('../outputs/' + name_of_experiment + '/step_'+ str(step) + '/data/results/'+ str(time.time()))
while os.path.isdir(sim.directory):
sim.directory = str('../outputs/' + name_of_experiment + '/step_'+ str(step) + '/data/results/' + str(time.time()))
directory = sim.directory
os.mkdir(directory)
sim.log_setup(directory)
# Adding new RL parameters to log #
with open(directory + "/log_setup.txt", "a") as file:
file.write('\n')
file.write('REINFORCEMENT LEARNING INFO:' + '\n')
file.write(' -- DISCOUNT RATE: ' + str(discount_rate) + '\n')
file.write(' -- LEARNING RATE: ' + str(learning_rate) + '\n')
file.write(' -- MAX STEPS PER ITERATION: ' + str(max_steps_per_episode) + '\n')
file.write(' -- MAX EXPLORATION RATE: ' + str(max_exploration_rate) + '\n')
file.write(' -- MIN EXPLORATION RATE: ' + str(min_exploration_rate) + '\n')
file.write(' -- EXPLORATION DECAY RATE: ' + str(exploration_decay_rate) + '\n')
file.write(' -- ALGORITHM: ' + str(rl_sim) + '\n')
file.write(' -- STATE SPACE SIZE: ' + str(gym_swarm_env.observation_space.n) + '\n')
file.write(' -- ACTION SPACE SIZE: ' + str(gym_swarm_env.action_space.size) + '\n')
file.write(' -- Q-TABLE SIZE: ' + str(q_table.shape) + '\n')
arcade.run()
########################
##### Exploration and explotation block. ####
exploration_rate_threshold = random.uniform(0, 1) # Setting a random number that will be compared to exploration_rate.
if exploration_rate_threshold > exploration_rate:
            i, j = np.unravel_index(np.argmax(q_table[state, :]), (GRID_X, GRID_Y))  # unravel over the action grid, not the full Q-table shape
print ("i ", i, " , j ", j)
#action = (i, j) # Choosing the action that had the highest q-value in q-table.
action = i*GRID_X + j # Choosing the action that had the highest q-value in q-table.
#print (action)
#exit(0)
else:
i = random.randint(0, GRID_X - 1)
j = random.randint(0, GRID_Y - 1)
action = i*GRID_X + j # Sample an action randomly to explore.
##### Exploration and explotation block. ####
##### Taking appropriate action after choosing the action. ####
new_state, reward, done, info, operator_cm = gym_swarm_env.step(action, sim.operator_list[0], GRID_X, GRID_Y) # Returns a tuple contaning the new state, the reward for this action, the end status of action, some additional info.
sim.operator_list[0].confidence_map = operator_cm
# Updating q-table values
q_table[state, action]=q_table[state, action] * (1 - learning_rate) + \
learning_rate * (reward + discount_rate * np.max(q_table[new_state, :]))
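        # The two lines above are the standard tabular Q-learning (Bellman) update,
        #   Q(s, a) <- (1 - alpha) * Q(s, a) + alpha * (r + gamma * max_a' Q(s', a')),
        # with alpha = learning_rate and gamma = discount_rate.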
print('*** State-Action pair in Q-table ***')
print('Q[' + str(state) + ', ' + str(action) + '] = '+ str(q_table[state, action]))
state = new_state
if done == True:
break
##### Taking appropriate action after choosing the action. ####
print("============= End of step " + str(step) + " =============")
"""
# logging q-table
if self.directory == None:
self.q_table.tofile(self.directory + "/q_table" + "_step" + str(step) + "_timer" + str(self.timer) + ".txt", sep=" ", format="%s")
else:
self.q_table.tofile(self.directory + "/q_table" + "_step" + str(step) + "_timer" + str(self.timer) + ".txt", sep=" ", format="%s")
"""
# Decay exploration rate using a formula.
exploration_rate = min_exploration_rate + (max_exploration_rate - min_exploration_rate) * np.exp(-exploration_decay_rate * step)
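        # Illustrative values: this is the usual exponential epsilon decay,
        #   eps(step) = eps_min + (eps_max - eps_min) * exp(-decay_rate * step),
        # e.g. with the rates above, eps(1000) ~= 0.01 + 0.99 * exp(-1) ~= 0.37.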
######## END of q-learning parameter setup #########
if __name__ == '__main__':
experiment_runner(operator_vision_radius=40, INPUT_TIME=0, command_period=200, alpha=10, moving_disaster=False, disaster_location=[(500, 500)], operator_location=[(450, 300)], name_of_experiment='RL model experiment_r40_t200')
|
[
"random.uniform",
"sys.path.insert",
"random.randint",
"numpy.argmax",
"numpy.max",
"numpy.exp",
"numpy.zeros",
"os.path.isdir",
"os.mkdir",
"arcade.run",
"objects.SwarmSimulator",
"time.time",
"gym.make"
] |
[((147, 171), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (162, 171), False, 'import sys\n'), ((352, 363), 'time.time', 'time.time', ([], {}), '()\n', (361, 363), False, 'import time\n'), ((1595, 1638), 'gym.make', 'gym.make', (['"""humanswarm-v0"""'], {'maze_size': 'GRID_X'}), "('humanswarm-v0', maze_size=GRID_X)\n", (1603, 1638), False, 'import gym\n'), ((1719, 1745), 'numpy.zeros', 'np.zeros', (['(GRID_X, GRID_Y)'], {}), '((GRID_X, GRID_Y))\n', (1727, 1745), True, 'import numpy as np\n'), ((1760, 1838), 'numpy.zeros', 'np.zeros', (['(gym_swarm_env.observation_space.n, gym_swarm_env.action_space.size)'], {}), '((gym_swarm_env.observation_space.n, gym_swarm_env.action_space.size))\n', (1768, 1838), True, 'import numpy as np\n'), ((1893, 1934), 'numpy.zeros', 'np.zeros', (['gym_swarm_env.action_space.size'], {}), '(gym_swarm_env.action_space.size)\n', (1901, 1934), True, 'import numpy as np\n'), ((3172, 3285), 'objects.SwarmSimulator', 'SwarmSimulator', (['ARENA_WIDTH', 'ARENA_HEIGHT', 'name_of_experiment', 'SWARM_SIZE', 'INPUT_TIME', 'GRID_X', 'GRID_Y', 'rl_sim'], {}), '(ARENA_WIDTH, ARENA_HEIGHT, name_of_experiment, SWARM_SIZE,\n INPUT_TIME, GRID_X, GRID_Y, rl_sim)\n', (3186, 3285), False, 'from objects import SwarmSimulator\n'), ((4535, 4563), 'os.path.isdir', 'os.path.isdir', (['sim.directory'], {}), '(sim.directory)\n', (4548, 4563), False, 'import os\n'), ((4840, 4868), 'os.path.isdir', 'os.path.isdir', (['sim.directory'], {}), '(sim.directory)\n', (4853, 4868), False, 'import os\n'), ((5054, 5073), 'os.mkdir', 'os.mkdir', (['directory'], {}), '(directory)\n', (5062, 5073), False, 'import os\n'), ((6167, 6179), 'arcade.run', 'arcade.run', ([], {}), '()\n', (6177, 6179), False, 'import arcade\n'), ((6307, 6327), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (6321, 6327), False, 'import random\n'), ((3722, 3771), 'os.path.isdir', 'os.path.isdir', (["('../outputs/' + name_of_experiment)"], {}), "('../outputs/' + name_of_experiment)\n", (3735, 3771), False, 'import os\n'), ((3786, 3830), 'os.mkdir', 'os.mkdir', (["('../outputs/' + name_of_experiment)"], {}), "('../outputs/' + name_of_experiment)\n", (3794, 3830), False, 'import os\n'), ((6845, 6874), 'random.randint', 'random.randint', (['(0)', '(GRID_X - 1)'], {}), '(0, GRID_X - 1)\n', (6859, 6874), False, 'import random\n'), ((6891, 6920), 'random.randint', 'random.randint', (['(0)', '(GRID_Y - 1)'], {}), '(0, GRID_Y - 1)\n', (6905, 6920), False, 'import random\n'), ((6491, 6519), 'numpy.argmax', 'np.argmax', (['q_table[state, :]'], {}), '(q_table[state, :])\n', (6500, 6519), True, 'import numpy as np\n'), ((8585, 8623), 'numpy.exp', 'np.exp', (['(-exploration_decay_rate * step)'], {}), '(-exploration_decay_rate * step)\n', (8591, 8623), True, 'import numpy as np\n'), ((4498, 4509), 'time.time', 'time.time', ([], {}), '()\n', (4507, 4509), False, 'import time\n'), ((4803, 4814), 'time.time', 'time.time', ([], {}), '()\n', (4812, 4814), False, 'import time\n'), ((4679, 4690), 'time.time', 'time.time', ([], {}), '()\n', (4688, 4690), False, 'import time\n'), ((4984, 4995), 'time.time', 'time.time', ([], {}), '()\n', (4993, 4995), False, 'import time\n'), ((7623, 7652), 'numpy.max', 'np.max', (['q_table[new_state, :]'], {}), '(q_table[new_state, :])\n', (7629, 7652), True, 'import numpy as np\n')]
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import numpy as np
class DMatrix:
def __init__(self, data_arr, missing={np.nan, 0}):
"""
        :param data_arr: sample features (labels not included)
        :param missing: the set of missing values; a feature value that falls in this set is treated as missing
        """
        # N: total number of samples (including samples that contain missing values)
        # m: total number of features
        self.N, self.m = np.shape(data_arr)
        # row_index: indices of the sample rows
        self.row_index = list(range(self.N))
        # the sample rows themselves
        self.row_data = data_arr
        # the collection of sorted blocks (pages), one per feature
        self.sorted_pages = []
        # rows that contain missing values for the different features
        # self.missing_value_pages = []
        for i in range(self.m):  # iterate over all features
            feature = data_arr[:, i]  # pull out feature column i, shape: (N,)
            feature_index = []
            for rid in range(self.N):
                if feature[rid] not in missing:  # the feature value is not in the missing-value set
                    feature_index.append((feature[rid], rid))  # (feature value, sample index)
            # sort the (value, row) pairs by feature value
sorted_feature_index = sorted(feature_index, key=lambda t: t[0])
self.sorted_pages.append(sorted_feature_index)
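# Illustrative usage sketch (hypothetical data): each entry of sorted_pages is a
# per-feature list of (value, row_id) pairs, sorted by value with missing entries
# dropped, similar in spirit to the pre-sorted column blocks used by
# gradient-boosting libraries for fast split finding.
#   data = np.array([[3.0, 0.0],
#                    [1.0, 5.0],
#                    [2.0, 4.0]])
#   dm = DMatrix(data)
#   dm.sorted_pages[0]  ->  [(1.0, 1), (2.0, 2), (3.0, 0)]
#   dm.sorted_pages[1]  ->  [(4.0, 2), (5.0, 1)]   # the 0.0 is treated as missing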
|
[
"numpy.shape"
] |
[((326, 344), 'numpy.shape', 'np.shape', (['data_arr'], {}), '(data_arr)\n', (334, 344), True, 'import numpy as np\n')]
|
import os
from glob import glob
import tensorflow as tf
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array, load_img
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Activation, Flatten, Dense, Input, BatchNormalization
from model.baseline import MyModel
import numpy as np
import cv2
from tensorflow.keras.optimizers import SGD
TRAIN_DIR = './dataset/PKLot/custom_dataset/train/'
VALID_DIR = './dataset/PKLot/custom_dataset/valid/'
ROOT_DIR = '../../dataset/Pklot/PKLotSegmented/'
WIDTH, HEIGHT = 39,72
NB_EPOCHS = 5
LR = 1e-4
BATCH_SIZE = 32
# print("Setup model")
#base_model = load_model('./output')
base_model = MobileNetV2(weights='imagenet', include_top=True)
print(base_model.summary())
model = Model(inputs=base_model.input, outputs=base_model.layers[-2].output)
#opt = SGD(lr = LR, momentum=0.9, decay = LR/NB_EPOCHS)
#model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
def extract_features(list_img_path, features_path = './features',label_path_to_save = './'):
ground_truths = []
for img in list_img_path:
img = img.replace("\\","/")
label = img.split("/")[-2]
img_name = img.split("/")[-1]
img_name = img_name.replace(".jpg", ".npy")
if label == "Empty":
ground_truths.append(0)
else:
ground_truths.append(1)
image = cv2.imread(img)
image = cv2.resize(image, (224, 224))
image_x = np.expand_dims(image, axis=0)
image_x = preprocess_input(image_x)
feature = model.predict(image_x)
os.makedirs(os.path.dirname(features_path), exist_ok=True)
np.save(features_path + img_name, feature)
np.save(label_path_to_save,ground_truths)
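# Note (assumption about downstream use, not shown in this file): each saved .npy
# file holds the penultimate-layer MobileNetV2 activation for one image (a (1, 1280)
# global-average-pooled array for the default top), so a feature can presumably be
# reloaded later with something like
#   feat = np.load('./features/PKLot/Train/' + name.replace('.jpg', '.npy'))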
if __name__ == "__main__":
# occupied_val = VALID_DIR + 'Occupied/*.jpg'
# empty_val = VALID_DIR + 'Empty/*.jpg'
# valid_images = list(glob(occupied_val) + glob(empty_val))
occupied_train = TRAIN_DIR + 'Occupied/*.jpg'
empty_train = TRAIN_DIR + 'Empty/*.jpg'
train_images = list(glob(occupied_train) + glob(empty_train))
extract_features(train_images, './features/PKLot/Train/', './features/PKLot/train_label.npy')
occupied_valid = VALID_DIR + 'Occupied/*.jpg'
empty_valid = VALID_DIR + 'Empty/*.jpg'
valid_images = list(glob(occupied_valid) + glob(empty_valid))
extract_features(valid_images, './features/PKLot/Valid/', './features/PKLot/valid_label.npy')
|
[
"tensorflow.keras.applications.mobilenet_v2.MobileNetV2",
"os.path.dirname",
"glob.glob",
"tensorflow.keras.applications.mobilenet_v2.preprocess_input",
"numpy.expand_dims",
"tensorflow.keras.models.Model",
"cv2.resize",
"cv2.imread",
"numpy.save"
] |
[((816, 865), 'tensorflow.keras.applications.mobilenet_v2.MobileNetV2', 'MobileNetV2', ([], {'weights': '"""imagenet"""', 'include_top': '(True)'}), "(weights='imagenet', include_top=True)\n", (827, 865), False, 'from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input\n'), ((906, 974), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'base_model.input', 'outputs': 'base_model.layers[-2].output'}), '(inputs=base_model.input, outputs=base_model.layers[-2].output)\n', (911, 974), False, 'from tensorflow.keras.models import Model, load_model\n'), ((1967, 2009), 'numpy.save', 'np.save', (['label_path_to_save', 'ground_truths'], {}), '(label_path_to_save, ground_truths)\n', (1974, 2009), True, 'import numpy as np\n'), ((1615, 1630), 'cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (1625, 1630), False, 'import cv2\n'), ((1652, 1681), 'cv2.resize', 'cv2.resize', (['image', '(224, 224)'], {}), '(image, (224, 224))\n', (1662, 1681), False, 'import cv2\n'), ((1705, 1734), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (1719, 1734), True, 'import numpy as np\n'), ((1758, 1783), 'tensorflow.keras.applications.mobilenet_v2.preprocess_input', 'preprocess_input', (['image_x'], {}), '(image_x)\n', (1774, 1783), False, 'from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input\n'), ((1915, 1957), 'numpy.save', 'np.save', (['(features_path + img_name)', 'feature'], {}), '(features_path + img_name, feature)\n', (1922, 1957), True, 'import numpy as np\n'), ((1855, 1885), 'os.path.dirname', 'os.path.dirname', (['features_path'], {}), '(features_path)\n', (1870, 1885), False, 'import os\n'), ((2335, 2355), 'glob.glob', 'glob', (['occupied_train'], {}), '(occupied_train)\n', (2339, 2355), False, 'from glob import glob\n'), ((2358, 2375), 'glob.glob', 'glob', (['empty_train'], {}), '(empty_train)\n', (2362, 2375), False, 'from glob import glob\n'), ((2615, 2635), 'glob.glob', 'glob', (['occupied_valid'], {}), '(occupied_valid)\n', (2619, 2635), False, 'from glob import glob\n'), ((2638, 2655), 'glob.glob', 'glob', (['empty_valid'], {}), '(empty_valid)\n', (2642, 2655), False, 'from glob import glob\n')]
|
import argparse
import matplotlib.pyplot as plt
import meshcut
import numpy as np
import pandas
import seaborn as sns
import pandas as pd
import sys, os
import math
#from scipy.stats import norm
SAVE_PATH = os.path.join(os.path.expanduser("~"),'PycharmProjects/Gibson_Exercise/examples/plot_result/')
WAY_PATH = os.path.join(os.path.expanduser("~"),'PycharmProjects/Gibson_Exercise/examples/plot_result/')
def load_obj(fn):
verts = []
faces = []
with open(fn) as f:
for line in f:
if line[:2] == 'v ':
verts.append(list(map(float, line.strip().split()[1:4])))
if line[:2] == 'f ':
face = [int(item.split('/')[0]) for item in line.strip().split()[-3:]]
faces.append(face)
verts = np.array(verts)
faces = np.array(faces) - 1
return verts, faces
def add_arrow(line, position=None, direction='right', size=15, color='dodgerblue'):
xdata = line.get_xdata()
ydata = line.get_ydata()
if position is None:
position = xdata.mean()
start_ind = np.argmin(np.absolute(xdata - position))
if direction == 'right':
end_ind = start_ind + 1
else:
end_ind = start_ind - 1
line.axes.annotate('',
xytext=(xdata[start_ind], ydata[start_ind]),
xy=(xdata[end_ind], ydata[end_ind]),
arrowprops=dict(arrowstyle="->", color=color),
size=size
)
def mesh(model_id="", waypoint=False):
C1 = '\033[91m'
C1END = '\033[0m'
print(C1 + "PLOTTING EPISODE:" + C1END)
plt.style.use('default') # switches back to matplotlib style
fn = os.path.join(os.path.expanduser("~"),
"PycharmProjects/Gibson_Exercise/gibson/assets/dataset/") + str(model_id) + "/mesh_z_up.obj"
verts, faces = load_obj(fn)
z = np.min(verts[:, -1]) + 0.5 # your robot height
cross_section = meshcut.cross_section(verts, faces, plane_orig=(0, 0, z), plane_normal=(0, 0, 1))
plt.figure(figsize=(8,8))
for item in cross_section:
for i in range(len(item) - 1):
plt.plot(item[i:i + 2, 0], item[i:i + 2, 1], 'k')
plt.title('Map of Navigation')
plt.xlabel('X Position'); plt.ylabel('Y Position')
plt.grid(True)
if waypoint:
#df = pandas.read_csv(WAY_PATH + str('aloha_waypoints_sort_test.csv'))
#df = pandas.read_csv(WAY_PATH + str('aloha_waypoints_clipped_sort.csv'))
df = pandas.read_csv(WAY_PATH + str('euharlee_waypoints_sort_test.csv'))
#df = pandas.read_csv(WAY_PATH + str('euharlee_waypoints_clipped_sort.csv'))
points = df.values
length = len(points)
sp = np.zeros((length, 3)); ang = np.zeros((length, 1)); gp = np.zeros((length, 3))
complexity = np.zeros((length, 1))
for r in range(length):
sp[r] = np.array([points[r][2], points[r][3], points[r][4]])
ang[r] = np.array([points[r][5]])
gp[r] = np.array([points[r][6], points[r][7], points[r][8]])
complexity[r] = np.array([points[r][10]/points[r][9]])
for k in range(length):
plt.plot(sp[k][0], sp[k][1], 'r*')
plt.plot(gp[k][0], gp[k][1], 'g*')
line=plt.plot([sp[k][0], gp[k][0]], [sp[k][1], gp[k][1]], color='dodgerblue', linewidth=1)
m1 = (sp[k][0] + gp[k][0]) / 2
m2 = (sp[k][1] + gp[k][1]) / 2
plt.annotate(s='', xy=(gp[k][0],gp[k][1]), xytext=(sp[k][0],sp[k][1]), arrowprops=dict(arrowstyle='->',color='grey'))
#plt.arrow([sp[k][0], gp[k][0]], [sp[k][1], gp[k][1]], [dx],[dy], shape='full', lw=0, length_includes_head=True, head_width=.05)
print("%i Waypoint Navigation Complexity ---> %.3f" % (k+1,complexity[k]))
debug=1
if debug:
plt.savefig(os.path.join(SAVE_PATH + 'waypoints_map_test.png'))
#plt.savefig(os.path.join(SAVE_PATH + 'waypoints_map.png'))
plt.show()
def main(raw_args=None):
"This function shows that analysis of training process"
deb = bool(0)
mesh(model_id=raw_args.model, waypoint=True)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--model', type=str, default="Euharlee")
args = parser.parse_args()
main(args)
|
[
"matplotlib.pyplot.grid",
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"meshcut.cross_section",
"matplotlib.pyplot.xlabel",
"numpy.absolute",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.style.use",
"os.path.join",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.zeros",
"numpy.min",
"matplotlib.pyplot.title",
"os.path.expanduser",
"matplotlib.pyplot.show"
] |
[((221, 244), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (239, 244), False, 'import sys, os\n'), ((326, 349), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (344, 349), False, 'import sys, os\n'), ((705, 720), 'numpy.array', 'np.array', (['verts'], {}), '(verts)\n', (713, 720), True, 'import numpy as np\n'), ((1418, 1442), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""default"""'], {}), "('default')\n", (1431, 1442), True, 'import matplotlib.pyplot as plt\n'), ((1723, 1809), 'meshcut.cross_section', 'meshcut.cross_section', (['verts', 'faces'], {'plane_orig': '(0, 0, z)', 'plane_normal': '(0, 0, 1)'}), '(verts, faces, plane_orig=(0, 0, z), plane_normal=(0, \n 0, 1))\n', (1744, 1809), False, 'import meshcut\n'), ((1807, 1833), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (1817, 1833), True, 'import matplotlib.pyplot as plt\n'), ((1949, 1979), 'matplotlib.pyplot.title', 'plt.title', (['"""Map of Navigation"""'], {}), "('Map of Navigation')\n", (1958, 1979), True, 'import matplotlib.pyplot as plt\n'), ((1981, 2005), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X Position"""'], {}), "('X Position')\n", (1991, 2005), True, 'import matplotlib.pyplot as plt\n'), ((2007, 2031), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y Position"""'], {}), "('Y Position')\n", (2017, 2031), True, 'import matplotlib.pyplot as plt\n'), ((2033, 2047), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2041, 2047), True, 'import matplotlib.pyplot as plt\n'), ((3740, 3819), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), '(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n', (3763, 3819), False, 'import argparse\n'), ((730, 745), 'numpy.array', 'np.array', (['faces'], {}), '(faces)\n', (738, 745), True, 'import numpy as np\n'), ((982, 1011), 'numpy.absolute', 'np.absolute', (['(xdata - position)'], {}), '(xdata - position)\n', (993, 1011), True, 'import numpy as np\n'), ((1658, 1678), 'numpy.min', 'np.min', (['verts[:, -1]'], {}), '(verts[:, -1])\n', (1664, 1678), True, 'import numpy as np\n'), ((2417, 2438), 'numpy.zeros', 'np.zeros', (['(length, 3)'], {}), '((length, 3))\n', (2425, 2438), True, 'import numpy as np\n'), ((2446, 2467), 'numpy.zeros', 'np.zeros', (['(length, 1)'], {}), '((length, 1))\n', (2454, 2467), True, 'import numpy as np\n'), ((2474, 2495), 'numpy.zeros', 'np.zeros', (['(length, 3)'], {}), '((length, 3))\n', (2482, 2495), True, 'import numpy as np\n'), ((2511, 2532), 'numpy.zeros', 'np.zeros', (['(length, 1)'], {}), '((length, 1))\n', (2519, 2532), True, 'import numpy as np\n'), ((3529, 3539), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3537, 3539), True, 'import matplotlib.pyplot as plt\n'), ((1897, 1946), 'matplotlib.pyplot.plot', 'plt.plot', (['item[i:i + 2, 0]', 'item[i:i + 2, 1]', '"""k"""'], {}), "(item[i:i + 2, 0], item[i:i + 2, 1], 'k')\n", (1905, 1946), True, 'import matplotlib.pyplot as plt\n'), ((2570, 2622), 'numpy.array', 'np.array', (['[points[r][2], points[r][3], points[r][4]]'], {}), '([points[r][2], points[r][3], points[r][4]])\n', (2578, 2622), True, 'import numpy as np\n'), ((2635, 2659), 'numpy.array', 'np.array', (['[points[r][5]]'], {}), '([points[r][5]])\n', (2643, 2659), True, 'import numpy as np\n'), ((2671, 2723), 'numpy.array', 'np.array', (['[points[r][6], points[r][7], points[r][8]]'], {}), '([points[r][6], points[r][7], points[r][8]])\n', (2679, 2723), True, 'import numpy as np\n'), ((2743, 2783), 'numpy.array', 'np.array', (['[points[r][10] / points[r][9]]'], {}), '([points[r][10] / points[r][9]])\n', (2751, 2783), True, 'import numpy as np\n'), ((2812, 2846), 'matplotlib.pyplot.plot', 'plt.plot', (['sp[k][0]', 'sp[k][1]', '"""r*"""'], {}), "(sp[k][0], sp[k][1], 'r*')\n", (2820, 2846), True, 'import matplotlib.pyplot as plt\n'), ((2850, 2884), 'matplotlib.pyplot.plot', 'plt.plot', (['gp[k][0]', 'gp[k][1]', '"""g*"""'], {}), "(gp[k][0], gp[k][1], 'g*')\n", (2858, 2884), True, 'import matplotlib.pyplot as plt\n'), ((2893, 2982), 'matplotlib.pyplot.plot', 'plt.plot', (['[sp[k][0], gp[k][0]]', '[sp[k][1], gp[k][1]]'], {'color': '"""dodgerblue"""', 'linewidth': '(1)'}), "([sp[k][0], gp[k][0]], [sp[k][1], gp[k][1]], color='dodgerblue',\n linewidth=1)\n", (2901, 2982), True, 'import matplotlib.pyplot as plt\n'), ((3413, 3463), 'os.path.join', 'os.path.join', (["(SAVE_PATH + 'waypoints_map_test.png')"], {}), "(SAVE_PATH + 'waypoints_map_test.png')\n", (3425, 3463), False, 'import sys, os\n'), ((1499, 1522), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (1517, 1522), False, 'import sys, os\n')]
|
"""A module for the uFJC single-chain model in the isometric ensemble.
This module consist of the class ``uFJCIsometric`` which contains
methods for computing single-chain quantities
in the isometric (constant end-to-end vector) thermodynamic ensemble.
Example:
Import and instantiate the class:
>>> from ufjc.isometric import uFJCIsometric
>>> class_instance = uFJCIsometric()
"""
# Import internal modules
from .monte_carlo import MHMCMC
from .isotensional import uFJCIsotensional
# Import external modules
import numpy as np
import numpy.linalg as la
class uFJCIsometric(uFJCIsotensional):
"""The uFJC single-chain model class for the isometric ensemble.
This class contains methods for computing single-chain quantities
in the isometric (constant end-to-end vector) thermodynamic ensemble.
It inherits all attributes and methods from the ``uFJCIsotensional``
class, which inherits all attributes and methods from the
``BasicUtility`` class.
"""
def __init__(self):
"""Initializes the ``uFJCIsometric`` class.
Initialize and inherit all attributes and methods
from a ``uFJCIsotensional`` class instance.
"""
uFJCIsotensional.__init__(self)
def eta_isometric(self, gamma, **kwargs):
r"""Main function for the isometric :math:`\eta(\gamma)`.
This is the main function utilized to compute the isometric
nondimensional single-chain mechanical response.
Keyword arguments specify and are passed onto the method.
Args:
gamma (array_like): The nondimensional end-to-end length(s).
**kwargs: Arbitrary keyword arguments.
Passed to the chosen method.
Returns:
numpy.ndarray: The nondimensional force(s).
Example:
Compute the nondimensional force for an eight-link Morse-FJC at a
nondimensional end-to-end length of 0.8 in the isometric ensemble,
using the Legendre transformation method from the isotensional
ensemble, and using the reduced asymptotic approach to compute
quantities in the isotensional ensemble:
>>> from ufjc import uFJC
>>> model = uFJC(N_b=8, potential='morse')
>>> model.eta_isometric([0, 0.8], \
... method='legendre', approach='reduced')
array([0. , 4.41715473])
Warning:
            Only the Legendre transformation method is currently available;
            other methods return NaN:
>>> from ufjc import uFJC
>>> uFJC().eta_isometric(0.8, method='exact')
array([nan])
"""
gamma = self.np_array(gamma)
method = kwargs.get('method', 'legendre')
if method == 'legendre':
return self.eta_isometric_legendre(gamma, **kwargs)
else:
return np.nan*gamma
def eta_isometric_legendre(self, gamma, **kwargs):
r"""The Legendre transformation method of approximating
the isometric :math:`\eta(\gamma)`.
This function uses the Legendre transformation method to obtain an
approximate isometric nondimensional single-chain mechanical response.
The result is to simply use the isotensional :math:`\eta(\gamma)`,
and this approximation is asymptotically valid for :math:`N_b\gg 1`
and appreciable loads :cite:`buche2020statistical`.
Args:
gamma (array_like): The nondimensional end-to-end length(s).
**kwargs: Arbitrary keyword arguments.
Passed to ``_eta_isotensional``.
Returns:
numpy.ndarray: The nondimensional force(s).
Example:
Compute the nondimensional force at a large nondimensional
end-to-end length using the Legendre transformation method:
>>> from ufjc import uFJC
>>> model = uFJC()
>>> model.eta_isometric_legendre(1.3)
array([28.71102552])
"""
return self.eta_isotensional(gamma, **kwargs)
def gamma_isometric(self, eta, **kwargs):
r"""Main function for the isometric :math:`\gamma(\eta)`.
This function obtains the isometric nondimensional
single-chain mechanical response :math:`\gamma(\eta)`
by inverting the isometric :math:`\eta(\gamma)`.
Args:
eta (array_like): the nondimensional force(s).
**kwargs: Arbitrary keyword arguments.
Passed to ``_eta_isometric``.
Returns:
numpy.ndarray: The nondimensional end-to-end length(s).
Example:
Check that :math:`\gamma[\eta(\gamma)] = \gamma\,`:
>>> import numpy as np
>>> from ufjc import uFJC
>>> model = uFJC()
>>> def check_eta(gamma):
... eta_fun = lambda gamma: model.eta_isometric(gamma)
... gamma_fun = lambda eta: model.gamma_isometric(eta)
... return np.isclose(gamma_fun(eta_fun(gamma))[0], gamma)
>>> check_eta(np.random.rand())
True
"""
def eta_fun(gamma):
return self.eta_isometric(gamma, **kwargs)
return self.inv_fun_1D(eta, eta_fun)
def vartheta_isometric(self, gamma, **kwargs):
r"""Main function for the isometric :math:`\vartheta(\gamma)`.
This is the main function utilized to compute the nondimensional
Helmholtz free energy per link, an isometric quantity.
Keyword arguments specify and are passed onto the method.
Args:
gamma (array_like): The nondimensional end-to-end length(s).
**kwargs: Arbitrary keyword arguments.
Passed to the chosen method.
Returns:
numpy.ndarray: The nondimensional Helmholtz free energy per link.
Example:
Compute the nondimensional Helmholtz free energy per link
for an eight-link Morse-FJC at a
nondimensional end-to-end length of 0.8 in the isometric ensemble,
using the Legendre transformation method from the isotensional
ensemble, and using the reduced asymptotic approach to compute
quantities in the isotensional ensemble:
>>> from ufjc import uFJC
>>> model = uFJC(N_b=8, potential='morse')
>>> model.vartheta_isometric(0.8, \
... method='legendre', approach='reduced')
array([1.23847534])
Warning:
The exact method is currently unavailable:
>>> from ufjc import uFJC
>>> uFJC().vartheta_isometric(0.8, method='exact')
nan
"""
method = kwargs.get('method', 'legendre')
if method == 'exact':
return np.nan*gamma
elif method == 'legendre':
return self.vartheta_isometric_legendre(gamma, **kwargs)
def vartheta_isometric_legendre(self, gamma, **kwargs):
r"""The Legendre transformation method of approximating
the isometric :math:`\vartheta(\gamma)`.
This function uses the Legendre transformation method to obtain an
approximate isometric Helmholtz free energy per link.
The result is independent of the number of links :math:`N_b`, and
this approximation is asymptotically valid for :math:`N_b\gg 1`
and appreciable loads :cite:`buche2021chain`.
For example, using the reduced asymptotic approach, this is
.. math::
\vartheta(\gamma) \sim
\ln\left\{\frac{
\eta\exp[\eta\mathcal{L}(\eta)]}{\sinh(\eta)}\right\}
+ \beta u[\lambda(\eta)]
,
valid when :math:`\varepsilon\gg 1` and :math:`N_b\gg 1` are
simultaneously true.
Note that :math:`\eta=\eta(\gamma)` is implied, and obtained
through inverting the isotensional :math:`\gamma(\eta)`.
Args:
gamma (array_like): The nondimensional end-to-end length(s).
**kwargs: Arbitrary keyword arguments.
Passed to the chosen method.
Returns:
numpy.ndarray: The nondimensional Helmholtz free energy per link.
Example:
Approximate the nondimensional Helmholtz free energy per link
using the Legendre method and both asymptotic approaches:
>>> from ufjc import uFJC
>>> model = uFJC(potential='log-squared', varepsilon=23)
>>> model.vartheta_isometric_legendre(1.1)
array([1.90431381])
>>> model.vartheta_isometric_legendre(1.1, approach='reduced')
array([2.09238198])
Warning:
            Only the asymptotic approaches are currently available;
            other approaches return NaN:
>>> from ufjc import uFJC
>>> model = uFJC(potential='log-squared', varepsilon=23)
>>> model.vartheta_isometric_legendre(1.1, approach='exact')
nan
"""
# Invert gamma=gamma(eta) for the corresponding eta
eta = self.eta_isotensional(gamma, **kwargs)
# Avoid overflow, important for integrating P_eq
eta[eta > self.maximum_exponent] = self.maximum_exponent
# Find the corresponding bond stretch under direct eta
lambda_ = 1 + self.delta_lambda(eta)
# Need to finish this portion
approach = kwargs.get('approach', 'asymptotic')
if approach == 'asymptotic':
Ln = self.langevin(eta)
coth = self.coth(eta)
return eta*Ln + self.log_over_sinh(eta) + \
self.varepsilon*(self.phi(lambda_) - self.phi(1)) + \
eta**2/self.kappa*(
(1 - Ln*coth)/(self.c + eta/self.kappa*coth)
) - np.log(1 + eta*coth/self.kappa)
elif approach == 'reduced':
return eta*self.langevin(eta) + self.log_over_sinh(eta) + \
self.varepsilon*(self.phi(lambda_) - self.phi(1))
else:
return np.nan*gamma
def beta_U_config(self, config):
r"""The nondimensional potential energy of a configuration.
This function provides the nondimensional potential energy
:math:`\beta U` given the configuration of the chain, i.e. the
vector position of each atom/hinge relative to the first one.
Args:
config (numpy.ndarray): The configuration of the chain,
a :math:`(N_b+1)`-by-3 numpy array.
Returns:
float: The nondimensional potential energy :math:`\beta U`.
Example:
Compute the potential energy of the uniformly-stretched
default initial configuration:
>>> from ufjc import uFJC
>>> model = uFJC(N_b=8, potential='lennard-jones')
>>> model.beta_U_config(1.1*model.init_config)
133.5368021523727
"""
beta_U = 0
for j in range(1, len(config)):
lambda_ = la.norm(config[j, :] - config[j - 1, :])
beta_U += self.beta_u(lambda_)
return beta_U
|
[
"numpy.log",
"numpy.linalg.norm"
] |
[((11454, 11494), 'numpy.linalg.norm', 'la.norm', (['(config[j, :] - config[j - 1, :])'], {}), '(config[j, :] - config[j - 1, :])\n', (11461, 11494), True, 'import numpy.linalg as la\n'), ((10199, 10234), 'numpy.log', 'np.log', (['(1 + eta * coth / self.kappa)'], {}), '(1 + eta * coth / self.kappa)\n', (10205, 10234), True, 'import numpy as np\n')]
|
"""
Plot various visualizations
"""
import sys
import json
from collections import Counter
import numpy as np
import scipy.stats as scstats
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.patches as mp
from zipteedo.util import GzipFileType, load_jsonl, first
from zipteedo.stats import get_correlations, get_data_efficiencies, make_bias_table
from zipteedo.viz import draw_matrix, violinplot
import zipteedo.estimators as zpe
LABELS = {
"rouge-l": "ROUGE-L",
"rouge-1": "ROUGE-1",
"rouge-2": "ROUGE-2",
"ter": "TER",
"sim": "VecSim",
"meteor": "METEOR",
"bleu": "BLEU-2",
"bleu-2": "BLEU-2",
"bleu-4": "BLEU-4",
"gold": "Upper bound",
"hter": "Edit",
"lqual": "CNN/DailyMail",
"msmarco": "MSMARCO",
"fastqa": "fastqa",
"fastqa_ext": "fastqa_ext",
"snet.single": "snet",
"snet.ensemble": "snet.ens",
"*": "Combined"
}
SYSTEMS = {
"lqual": ["seq2seq", "pointer", "ml", "ml+rl"],
"msmarco": ["fastqa", "fastqa_ext", "snet.single", "snet.ensemble"],
}
PROMPTS = {
"lqual": ["hter", "overall", "redundancy", "grammar"],
"msmarco": ["AnyCorrect", "AvgCorrect"],
}
def do_correlation_table(args):
with open(args.input) as f:
data = load_jsonl(f)
data = get_correlations(data)
data = data[args.data_prompt]
prompt = args.data_prompt
metrics = sorted(data.keys())
task = first(key for key, values in PROMPTS.items() if prompt in values)
systems = SYSTEMS[task] + ["*"]
X = np.array([[data[metric][system] for system in systems] for metric in metrics])
plt.rc("font", size=16)
plt.rc("text", usetex=False)
#plt.rc("figure", figsize=(10,10))
draw_matrix(X, with_values=True,
x_labels=[LABELS.get(s, s) for s in systems],
y_labels=[LABELS.get(m, m) for m in metrics],)
plt.colorbar(label=r"Pearson ρ")
plt.xlabel("Systems")
plt.ylabel("Metrics")
if args.with_title:
task = first(key for key, values in PROMPTS.items() if prompt in values)
plt.title(r"Correlations on {} using the {} prompt".format(
LABELS.get(task, task),
LABELS.get(prompt, prompt),
), fontsize=14)
plt.tight_layout()
plt.savefig(args.output)
def do_trajectory(args):
data = [json.loads(line) for line in open(args.input, "rt")]
data = {(obj["system"], obj["metric"], obj["prompt"], obj["estimator"]): obj for obj in data}
if args.input_gold:
data_gold = [json.loads(line) for line in open(args.input_gold, "rt")]
data_gold = {(obj["system"], obj["metric"], obj["prompt"], obj["estimator"]): obj for obj in data_gold}
else:
data_gold = None
colors = cm.tab10.colors
system = args.data_system
metric = args.data_metric
prompt = args.data_prompt
baseline = np.array(data[system, metric, prompt, "simple"]["summary"])
model = np.array(data[system, metric, prompt, "model_variate"]["summary"])
if data_gold:
model_gold = np.array(data_gold[system, metric, prompt, "model_variate"]["summary"])
gold = np.array(data[system, "gold", prompt, "model_variate"]["summary"])
plt.rc("font", size=16)
plt.rc("text", usetex=False)
#plt.rc("figure", figsize=(10,10))
plt.xlabel("Number of samples")
plt.ylabel(r"80% confidence interval")
plt.plot(baseline.T[2] - baseline.T[1], color=colors[0], label="Humans")
plt.plot(model.T[2] - model.T[1], color=colors[1], label="Humans + {}".format(LABELS.get(metric,metric)))
if data_gold:
plt.plot(model_gold.T[2] - model_gold.T[1], ':', color=colors[2], label="Noiseless humans + {}".format(LABELS.get(metric,metric)))
plt.plot(gold.T[2] - gold.T[1], ':', color=colors[4], label="Humans + perfect metric")
plt.xlim([0, 500])
plt.ylim([0.05, 0.2])
plt.legend()
if args.with_title:
task = first(key for key, values in PROMPTS.items() if prompt in values)
plt.title(r"{} on {} using the {} prompt".format(
LABELS.get(system, system),
LABELS.get(task, task),
LABELS.get(prompt, prompt),
), fontsize=14)
plt.tight_layout()
plt.savefig(args.output)
def do_data_efficiency_table(args):
data = [json.loads(line) for line in open(args.input, "rt")]
data = get_data_efficiencies(data)
prompt = args.data_prompt
metrics = sorted(data.keys())
task = first(key for key, values in PROMPTS.items() if prompt in values)
systems = SYSTEMS[task]
X = np.array([[data[metric][prompt][system]**2 for system in systems] for metric in metrics])
plt.rc("font", size=16)
plt.rc("text", usetex=False)
draw_matrix(X, with_values=True,
x_labels=[LABELS.get(s, s) for s in systems],
y_labels=[LABELS.get(m, m) for m in metrics],
vmin=0.9, vmax=1.3)
plt.colorbar(label="Data efficiency")
plt.xlabel("Systems")
plt.ylabel("Metrics")
if args.with_title:
plt.title(r"Data efficiencies on {} using the {} prompt".format(
LABELS.get(task, task),
LABELS.get(prompt, prompt),
), fontsize=14)
plt.tight_layout()
plt.savefig(args.output)
def do_system_correlation(args):
data = [json.loads(line) for line in open(args.input)]
prompt, metric = args.data_prompt, args.data_metric
task = first(key for key, values in PROMPTS.items() if prompt in values)
systems = SYSTEMS[task]
# Group by data by system.
data = make_bias_table(data, prompt, metric, ["lr", "ur"])
plt.rc("font", size=16)
plt.rc("text", usetex=False)
plt.rc("figure", figsize=(8,6))
colors = cm.Dark2.colors[:len(systems)]
def _thresh(y):
return max(min(y, 1), -1)
# 0. Plot the xy correlation curve.
xy = np.array([[x, _thresh(y)] for system in systems for (x, *_), (y, *_) in [data[system]["default"]]])
xlim = np.array([xy.T[0].min(), xy.T[0].max()])
coeffs = np.polyfit(xy.T[0], xy.T[1], 1)
plt.plot(xlim, xlim * coeffs[0] + coeffs[1], linestyle='--', linewidth=2, zorder=-1)
# 1. Plot actual data points with error bars.
xy = np.array([[x, y] for system in systems for (x, *_), (y, *_) in data[system].values()])
xy_l = np.array([[x, y] for system in systems for (_, x, _), (_, y, _) in data[system].values()])
xy_u = np.array([[x, y] for system in systems for (_, _, x), (_, _, y) in data[system].values()])
plt.errorbar(xy.T[0], xy.T[1],
xerr=[(xy - xy_l).T[0], (xy_u - xy).T[0]],
yerr=[(xy - xy_l).T[1], (xy_u - xy).T[1]],
capsize=2, alpha=0.5, linestyle='', marker="", zorder=-1)
# 2. Plot markers.
xy = np.array([[x, y] for system in systems for (x, *_), (y, *_) in [data[system]["default"]]])
xy_lr = np.array([[x, y] for system in systems for (x, *_), (y, *_) in [data[system]["lr"]]])
xy_ur = np.array([[x, y] for system in systems for (x, *_), (y, *_) in [data[system]["ur"]]])
plt.scatter(xy_lr.T[0], xy_lr.T[1], color=colors, marker=">")
plt.scatter(xy_ur.T[0], xy_ur.T[1], color=colors, marker="^")
plt.scatter(xy.T[0], xy.T[1], 100, c=colors, marker="o")
plt.xlabel(r"Human judgement ({})".format(LABELS.get(prompt, prompt)))
plt.ylabel(LABELS.get(metric, metric))
if args.with_title:
task = first(key for key, values in PROMPTS.items() if prompt in values)
plt.title(r"System-level correlation on {}".format(
LABELS.get(task, task),
), fontsize=14)
plt.tight_layout()
plt.legend(handles=[mp.Patch(color=colors[i], label=LABELS.get(system, system)) for i, system in enumerate(systems)])
plt.savefig(args.output)
def _snap(vs, points):
ret = []
for x, y in vs:
ret.append((first(x_ for x_ in points if x_ >= x), y))
return np.array(ret)
def do_instance_correlation(args):
data = [json.loads(line) for line in open(args.input)]
prompt, metric = args.data_prompt, args.data_metric
task = first(key for key, values in PROMPTS.items() if prompt in values)
systems = SYSTEMS[task]
    # Group the data by system.
plt.rc("font", size=16)
plt.rc("text", usetex=False)
plt.rc("figure", figsize=(6,8))
colors = cm.Dark2.colors[:len(systems)]
# 1. How many distinct Y values exist?
fig, axs = plt.subplots(4, 1, sharex=True, sharey=True)
def _thresh(y):
return max(min(y, 1), -1)
xy = {system: np.array([[_thresh(datum["prompts"][prompt]["gold"]), datum["prompts"][prompt][metric]] for datum in data if system in datum["system"].split(";")])
for system in systems}
if args.bins:
y = np.array([_thresh(datum["prompts"][prompt]["gold"]) for datum in data])
distinct_values = np.linspace(y.min(), y.max(), args.bins)
plt.xticks(distinct_values)
for system in systems:
xy[system] = _snap(xy[system], distinct_values)
# 2. Make violin plots.
for i, system in enumerate(systems):
violinplot(axs[i], xy[system], distinct_values, colors[i])
for i, system in enumerate(systems):
x, y = xy[system].T[0], xy[system].T[1]
axs[i].scatter(x, y, alpha=0.3, marker='.', color=colors[i])
for i, system in enumerate(systems):
x, y = xy[system].T[0], xy[system].T[1]
coeffs = np.polyfit(x, y, 1)
xlim = np.array([x.min(), x.max()])
axs[i].plot(xlim, xlim * coeffs[0] + coeffs[1], linestyle='--', linewidth=1, zorder=-1, color=colors[i])
for i, system in enumerate(systems):
axs[i].text(1.2, 0.5, LABELS.get(system, system), va='center', rotation='vertical')
plt.xlabel(r"Human judgement ({})".format(LABELS.get(prompt, prompt)))
#plt.text(-1, 0, LABELS.get(metric, metric), va="center")
fig.text(0.01, 0.5, LABELS.get(metric, metric), va='center', rotation='vertical')
if args.with_title:
task = first(key for key, values in PROMPTS.items() if prompt in values)
axs[0].set_title(r"Instance-level correlation on {}".format(
LABELS.get(task, task),
), fontsize=14)
plt.subplots_adjust(wspace=0, hspace=0.05)
#plt.tight_layout()
#plt.legend(handles=[mp.Patch(color=colors[i], label=LABELS.get(system, system)) for i, system in enumerate(systems)])
plt.savefig(args.output)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='')
parser.set_defaults(func=None)
subparsers = parser.add_subparsers()
    command_parser = subparsers.add_parser('correlation-table', help="Plot the system-wide correlation of a model's output with truth")
    command_parser.add_argument('-i', '--input', type=str, default="lqual_correlation.jsonl", help="Bias data")
    command_parser.add_argument('-Dp', '--data-prompt', type=str, default="hter", help="Which human evaluation prompt to use")
    command_parser.add_argument('-o', '--output', type=str, default="correlations.pdf", help="Where to save plot")
    command_parser.add_argument('-wt', '--with-title', action="store_true", help="Add a title to the plot")
command_parser.set_defaults(func=do_correlation_table)
command_parser = subparsers.add_parser('data-efficiency-table', help='Plot data efficiencies for different systems and automatic metrics')
    command_parser.add_argument('-i', '--input', type=str, default="lqual_trajectories.jsonl", help="Trajectory data")
    command_parser.add_argument('-Dp', '--data-prompt', type=str, default="hter", help="Which human evaluation prompt to use")
    command_parser.add_argument('-o', '--output', type=str, default="data_efficiencies.pdf", help="Where to save plot")
    command_parser.add_argument('-wt', '--with-title', action="store_true", help="Add a title to the plot")
command_parser.set_defaults(func=do_data_efficiency_table)
command_parser = subparsers.add_parser('trajectory', help='Plot a trajectory for an estimator')
    command_parser.add_argument('-i', '--input', type=str, default="lqual/lqual_trajectories.json", help="Trajectory data")
    command_parser.add_argument('-ig', '--input-gold', type=str, help="Trajectory data computed with noiseless human judgments")
    command_parser.add_argument('-o', '--output', type=str, default="trajectory.pdf", help="Where to save plot")
    command_parser.add_argument('-Dp', '--data-prompt', type=str, default="hter", help="Which human evaluation prompt to use")
    command_parser.add_argument('-Dm', '--data-metric', type=str, default="sim", help="Which automatic metric to use")
    command_parser.add_argument('-Ds', '--data-system', type=str, default="seq2seq", help="Which system to plot")
    command_parser.add_argument('-wt', '--with-title', action="store_true", help="Add a title to the plot")
command_parser.set_defaults(func=do_trajectory)
    command_parser = subparsers.add_parser('system-correlation', help="Plot the system-wide correlation of a model's output with truth")
    command_parser.add_argument('-i', '--input', type=str, default="lqual.json", help="Bias data")
    command_parser.add_argument('-Dp', '--data-prompt', type=str, default="overall", help="Which human evaluation prompt to use")
    command_parser.add_argument('-Dm', '--data-metric', type=str, default="sim", help="Which automatic metric to use")
    command_parser.add_argument('-o', '--output', type=str, default="system_correlation.pdf", help="Where to save plot")
    command_parser.add_argument('-wt', '--with-title', action="store_true", help="Add a title to the plot")
command_parser.set_defaults(func=do_system_correlation)
    command_parser = subparsers.add_parser('instance-correlation', help="Plot the instance-level correlation of a model's output with truth")
    command_parser.add_argument('-i', '--input', type=str, default="lqual.json", help="Bias data")
    command_parser.add_argument('-Dp', '--data-prompt', type=str, default="overall", help="Which human evaluation prompt to use")
    command_parser.add_argument('-Dm', '--data-metric', type=str, default="sim", help="Which automatic metric to use")
    command_parser.add_argument('-o', '--output', type=str, default="instance_correlation.pdf", help="Where to save plot")
    command_parser.add_argument('-wt', '--with-title', action="store_true", help="Add a title to the plot")
    command_parser.add_argument('-b', '--bins', type=int, help="Number of bins for the human judgment axis")
command_parser.set_defaults(func=do_instance_correlation)
ARGS = parser.parse_args()
if ARGS.func is None:
parser.print_help()
sys.exit(1)
else:
ARGS.func(ARGS)
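# Example invocations (a sketch only: the script name "figures.py" is a placeholder, and the
# input files are just the argparse defaults above):
#   python figures.py correlation-table -i lqual_correlation.jsonl -o correlations.pdf
#   python figures.py trajectory -i lqual/lqual_trajectories.json -Dp hter -Dm sim -Ds seq2seq -o trajectory.pdf
#   python figures.py system-correlation -i lqual.json -Dp overall -Dm sim -o system_correlation.pdf --with-title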
|
[
"zipteedo.stats.make_bias_table",
"matplotlib.pyplot.ylabel",
"numpy.polyfit",
"zipteedo.util.first",
"numpy.array",
"zipteedo.stats.get_correlations",
"matplotlib.pyplot.errorbar",
"sys.exit",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"zipteedo.viz.violinplot",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"json.loads",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.use",
"zipteedo.util.load_jsonl",
"zipteedo.stats.get_data_efficiencies",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots"
] |
[((160, 181), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (174, 181), False, 'import matplotlib\n'), ((1343, 1365), 'zipteedo.stats.get_correlations', 'get_correlations', (['data'], {}), '(data)\n', (1359, 1365), False, 'from zipteedo.stats import get_correlations, get_data_efficiencies, make_bias_table\n'), ((1587, 1665), 'numpy.array', 'np.array', (['[[data[metric][system] for system in systems] for metric in metrics]'], {}), '([[data[metric][system] for system in systems] for metric in metrics])\n', (1595, 1665), True, 'import numpy as np\n'), ((1671, 1694), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(16)'}), "('font', size=16)\n", (1677, 1694), True, 'import matplotlib.pyplot as plt\n'), ((1699, 1727), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(False)'}), "('text', usetex=False)\n", (1705, 1727), True, 'import matplotlib.pyplot as plt\n'), ((1935, 1966), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'label': '"""Pearson ρ"""'}), "(label='Pearson ρ')\n", (1947, 1966), True, 'import matplotlib.pyplot as plt\n'), ((1972, 1993), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Systems"""'], {}), "('Systems')\n", (1982, 1993), True, 'import matplotlib.pyplot as plt\n'), ((1998, 2019), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Metrics"""'], {}), "('Metrics')\n", (2008, 2019), True, 'import matplotlib.pyplot as plt\n'), ((2303, 2321), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2319, 2321), True, 'import matplotlib.pyplot as plt\n'), ((2326, 2350), 'matplotlib.pyplot.savefig', 'plt.savefig', (['args.output'], {}), '(args.output)\n', (2337, 2350), True, 'import matplotlib.pyplot as plt\n'), ((2929, 2988), 'numpy.array', 'np.array', (["data[system, metric, prompt, 'simple']['summary']"], {}), "(data[system, metric, prompt, 'simple']['summary'])\n", (2937, 2988), True, 'import numpy as np\n'), ((3004, 3070), 'numpy.array', 'np.array', (["data[system, metric, prompt, 'model_variate']['summary']"], {}), "(data[system, metric, prompt, 'model_variate']['summary'])\n", (3012, 3070), True, 'import numpy as np\n'), ((3197, 3263), 'numpy.array', 'np.array', (["data[system, 'gold', prompt, 'model_variate']['summary']"], {}), "(data[system, 'gold', prompt, 'model_variate']['summary'])\n", (3205, 3263), True, 'import numpy as np\n'), ((3269, 3292), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(16)'}), "('font', size=16)\n", (3275, 3292), True, 'import matplotlib.pyplot as plt\n'), ((3297, 3325), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(False)'}), "('text', usetex=False)\n", (3303, 3325), True, 'import matplotlib.pyplot as plt\n'), ((3370, 3401), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of samples"""'], {}), "('Number of samples')\n", (3380, 3401), True, 'import matplotlib.pyplot as plt\n'), ((3406, 3443), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""80% confidence interval"""'], {}), "('80% confidence interval')\n", (3416, 3443), True, 'import matplotlib.pyplot as plt\n'), ((3449, 3521), 'matplotlib.pyplot.plot', 'plt.plot', (['(baseline.T[2] - baseline.T[1])'], {'color': 'colors[0]', 'label': '"""Humans"""'}), "(baseline.T[2] - baseline.T[1], color=colors[0], label='Humans')\n", (3457, 3521), True, 'import matplotlib.pyplot as plt\n'), ((3793, 3884), 'matplotlib.pyplot.plot', 'plt.plot', (['(gold.T[2] - gold.T[1])', '""":"""'], {'color': 'colors[4]', 'label': '"""Humans + perfect metric"""'}), "(gold.T[2] - gold.T[1], ':', color=colors[4], 
label=\n 'Humans + perfect metric')\n", (3801, 3884), True, 'import matplotlib.pyplot as plt\n'), ((3885, 3903), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 500]'], {}), '([0, 500])\n', (3893, 3903), True, 'import matplotlib.pyplot as plt\n'), ((3908, 3929), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.05, 0.2]'], {}), '([0.05, 0.2])\n', (3916, 3929), True, 'import matplotlib.pyplot as plt\n'), ((3935, 3947), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3945, 3947), True, 'import matplotlib.pyplot as plt\n'), ((4261, 4279), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4277, 4279), True, 'import matplotlib.pyplot as plt\n'), ((4284, 4308), 'matplotlib.pyplot.savefig', 'plt.savefig', (['args.output'], {}), '(args.output)\n', (4295, 4308), True, 'import matplotlib.pyplot as plt\n'), ((4423, 4450), 'zipteedo.stats.get_data_efficiencies', 'get_data_efficiencies', (['data'], {}), '(data)\n', (4444, 4450), False, 'from zipteedo.stats import get_correlations, get_data_efficiencies, make_bias_table\n'), ((4630, 4727), 'numpy.array', 'np.array', (['[[(data[metric][prompt][system] ** 2) for system in systems] for metric in\n metrics]'], {}), '([[(data[metric][prompt][system] ** 2) for system in systems] for\n metric in metrics])\n', (4638, 4727), True, 'import numpy as np\n'), ((4725, 4748), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(16)'}), "('font', size=16)\n", (4731, 4748), True, 'import matplotlib.pyplot as plt\n'), ((4753, 4781), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(False)'}), "('text', usetex=False)\n", (4759, 4781), True, 'import matplotlib.pyplot as plt\n'), ((4985, 5022), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'label': '"""Data efficiency"""'}), "(label='Data efficiency')\n", (4997, 5022), True, 'import matplotlib.pyplot as plt\n'), ((5027, 5048), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Systems"""'], {}), "('Systems')\n", (5037, 5048), True, 'import matplotlib.pyplot as plt\n'), ((5053, 5074), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Metrics"""'], {}), "('Metrics')\n", (5063, 5074), True, 'import matplotlib.pyplot as plt\n'), ((5283, 5301), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5299, 5301), True, 'import matplotlib.pyplot as plt\n'), ((5306, 5330), 'matplotlib.pyplot.savefig', 'plt.savefig', (['args.output'], {}), '(args.output)\n', (5317, 5330), True, 'import matplotlib.pyplot as plt\n'), ((5628, 5679), 'zipteedo.stats.make_bias_table', 'make_bias_table', (['data', 'prompt', 'metric', "['lr', 'ur']"], {}), "(data, prompt, metric, ['lr', 'ur'])\n", (5643, 5679), False, 'from zipteedo.stats import get_correlations, get_data_efficiencies, make_bias_table\n'), ((5685, 5708), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(16)'}), "('font', size=16)\n", (5691, 5708), True, 'import matplotlib.pyplot as plt\n'), ((5713, 5741), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(False)'}), "('text', usetex=False)\n", (5719, 5741), True, 'import matplotlib.pyplot as plt\n'), ((5746, 5778), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'figsize': '(8, 6)'}), "('figure', figsize=(8, 6))\n", (5752, 5778), True, 'import matplotlib.pyplot as plt\n'), ((6092, 6123), 'numpy.polyfit', 'np.polyfit', (['xy.T[0]', 'xy.T[1]', '(1)'], {}), '(xy.T[0], xy.T[1], 1)\n', (6102, 6123), True, 'import numpy as np\n'), ((6128, 6216), 'matplotlib.pyplot.plot', 'plt.plot', (['xlim', '(xlim * coeffs[0] + coeffs[1])'], {'linestyle': 
'"""--"""', 'linewidth': '(2)', 'zorder': '(-1)'}), "(xlim, xlim * coeffs[0] + coeffs[1], linestyle='--', linewidth=2,\n zorder=-1)\n", (6136, 6216), True, 'import matplotlib.pyplot as plt\n'), ((6568, 6750), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['xy.T[0]', 'xy.T[1]'], {'xerr': '[(xy - xy_l).T[0], (xy_u - xy).T[0]]', 'yerr': '[(xy - xy_l).T[1], (xy_u - xy).T[1]]', 'capsize': '(2)', 'alpha': '(0.5)', 'linestyle': '""""""', 'marker': '""""""', 'zorder': '(-1)'}), "(xy.T[0], xy.T[1], xerr=[(xy - xy_l).T[0], (xy_u - xy).T[0]],\n yerr=[(xy - xy_l).T[1], (xy_u - xy).T[1]], capsize=2, alpha=0.5,\n linestyle='', marker='', zorder=-1)\n", (6580, 6750), True, 'import matplotlib.pyplot as plt\n'), ((6827, 6922), 'numpy.array', 'np.array', (["[[x, y] for system in systems for (x, *_), (y, *_) in [data[system]['default']]\n ]"], {}), "([[x, y] for system in systems for (x, *_), (y, *_) in [data[system\n ]['default']]])\n", (6835, 6922), True, 'import numpy as np\n'), ((6930, 7020), 'numpy.array', 'np.array', (["[[x, y] for system in systems for (x, *_), (y, *_) in [data[system]['lr']]]"], {}), "([[x, y] for system in systems for (x, *_), (y, *_) in [data[system\n ]['lr']]])\n", (6938, 7020), True, 'import numpy as np\n'), ((7028, 7118), 'numpy.array', 'np.array', (["[[x, y] for system in systems for (x, *_), (y, *_) in [data[system]['ur']]]"], {}), "([[x, y] for system in systems for (x, *_), (y, *_) in [data[system\n ]['ur']]])\n", (7036, 7118), True, 'import numpy as np\n'), ((7119, 7180), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xy_lr.T[0]', 'xy_lr.T[1]'], {'color': 'colors', 'marker': '""">"""'}), "(xy_lr.T[0], xy_lr.T[1], color=colors, marker='>')\n", (7130, 7180), True, 'import matplotlib.pyplot as plt\n'), ((7185, 7246), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xy_ur.T[0]', 'xy_ur.T[1]'], {'color': 'colors', 'marker': '"""^"""'}), "(xy_ur.T[0], xy_ur.T[1], color=colors, marker='^')\n", (7196, 7246), True, 'import matplotlib.pyplot as plt\n'), ((7251, 7307), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xy.T[0]', 'xy.T[1]', '(100)'], {'c': 'colors', 'marker': '"""o"""'}), "(xy.T[0], xy.T[1], 100, c=colors, marker='o')\n", (7262, 7307), True, 'import matplotlib.pyplot as plt\n'), ((7661, 7679), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7677, 7679), True, 'import matplotlib.pyplot as plt\n'), ((7808, 7832), 'matplotlib.pyplot.savefig', 'plt.savefig', (['args.output'], {}), '(args.output)\n', (7819, 7832), True, 'import matplotlib.pyplot as plt\n'), ((7965, 7978), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (7973, 7978), True, 'import numpy as np\n'), ((8272, 8295), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(16)'}), "('font', size=16)\n", (8278, 8295), True, 'import matplotlib.pyplot as plt\n'), ((8300, 8328), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(False)'}), "('text', usetex=False)\n", (8306, 8328), True, 'import matplotlib.pyplot as plt\n'), ((8333, 8365), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'figsize': '(6, 8)'}), "('figure', figsize=(6, 8))\n", (8339, 8365), True, 'import matplotlib.pyplot as plt\n'), ((8468, 8512), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(4)', '(1)'], {'sharex': '(True)', 'sharey': '(True)'}), '(4, 1, sharex=True, sharey=True)\n', (8480, 8512), True, 'import matplotlib.pyplot as plt\n'), ((10262, 10304), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0)', 'hspace': '(0.05)'}), '(wspace=0, hspace=0.05)\n', (10281, 
10304), True, 'import matplotlib.pyplot as plt\n'), ((10458, 10482), 'matplotlib.pyplot.savefig', 'plt.savefig', (['args.output'], {}), '(args.output)\n', (10469, 10482), True, 'import matplotlib.pyplot as plt\n'), ((10545, 10584), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (10568, 10584), False, 'import argparse\n'), ((1318, 1331), 'zipteedo.util.load_jsonl', 'load_jsonl', (['f'], {}), '(f)\n', (1328, 1331), False, 'from zipteedo.util import GzipFileType, load_jsonl, first\n'), ((2390, 2406), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2400, 2406), False, 'import json\n'), ((3110, 3181), 'numpy.array', 'np.array', (["data_gold[system, metric, prompt, 'model_variate']['summary']"], {}), "(data_gold[system, metric, prompt, 'model_variate']['summary'])\n", (3118, 3181), True, 'import numpy as np\n'), ((4359, 4375), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (4369, 4375), False, 'import json\n'), ((5377, 5393), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (5387, 5393), False, 'import json\n'), ((8028, 8044), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (8038, 8044), False, 'import json\n'), ((8948, 8975), 'matplotlib.pyplot.xticks', 'plt.xticks', (['distinct_values'], {}), '(distinct_values)\n', (8958, 8975), True, 'import matplotlib.pyplot as plt\n'), ((9483, 9502), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (9493, 9502), True, 'import numpy as np\n'), ((14745, 14756), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (14753, 14756), False, 'import sys\n'), ((2587, 2603), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (2597, 2603), False, 'import json\n'), ((9158, 9216), 'zipteedo.viz.violinplot', 'violinplot', (['axs[i]', 'xy[system]', 'distinct_values', 'colors[i]'], {}), '(axs[i], xy[system], distinct_values, colors[i])\n', (9168, 9216), False, 'from zipteedo.viz import draw_matrix, violinplot\n'), ((7911, 7948), 'zipteedo.util.first', 'first', (['(x_ for x_ in points if x_ >= x)'], {}), '(x_ for x_ in points if x_ >= x)\n', (7916, 7948), False, 'from zipteedo.util import GzipFileType, load_jsonl, first\n')]
|
#!/usr/bin/env python3
import re
import statistics
import os
import glob
from tkinter import filedialog
from tkinter import * # noqa
import pandas as pd
from eventcodes import eventcodes_dictionary
from natsort import natsorted, ns
import matplotlib.pyplot as plt
import numpy as np
import datetime
__all__ = ["loop_over_days", "load_file", "concat_lickometer_files",
"extract_info_from_file", "DNAMIC_extract_info_from_file",
"DNAMIC_loop_over_days", "get_events_indices", "reward_retrieval", "cue_iti_responding",
"cue_iti_responding_PavCA", "binned_responding",
"cue_responding_duration", "lever_pressing", "lever_press_latency", "lever_press_latency_PavCA",
"total_head_pokes",
"num_successful_go_nogo_trials", "count_go_nogo_trials", "num_switch_trials", "bin_by_time",
"lever_press_lat_gng", "RVI_gng_weird", "RVI_nogo_latency", "lever_press_latency_Switch",
"response_rate_across_cue_iti", "duration_across_cue_iti"]
def date_sort_key(date_as_string, date_fmt='%b_%d_%y', date_grep_fmt=r'\w{3}_\d{1,2}_\d{2}'):
'''
:param date_as_string: a string containing a date, typically from a file or directory name
:param date_fmt: The formatting to use with datetime to extract the raw date information.
A default is provided for ease of use.
:param date_grep_fmt: A pattern used to pull the date itself out of date_as_string.
Default matches that of date_fmt.
:return: A tuple containing date information in Month, Day, Year order to be used by
a sort function, such as sorted, as a key for sorting a list of dates.
'''
# Ensure separator between year, month, and day is underscore.
date_as_string = date_as_string.replace('-', '_')
try:
sanitized_string_date = re.search(date_grep_fmt, date_as_string).group(0)
date_info = datetime.datetime.strptime(sanitized_string_date, date_fmt)
except (AttributeError, ValueError) as e:
print(e)
# If the desired string is not matched, re.search will return NoneType and
# group(0) will yield an AttributeError.
        print(f'The date is {date_as_string}\n'
              f'The regex pattern is {date_grep_fmt}\n'
              f'The datetime format is {date_fmt}.')
date_grep_fmt = input('Enter desired regex pattern to match date string: ')
date_fmt = input('Enter desired date format string for strptime: ')
# and then try it again.
sanitized_string_date = re.search(date_grep_fmt, date_as_string).group(0)
date_info = datetime.datetime.strptime(sanitized_string_date, date_fmt)
return date_info.month, date_info.day, date_info.year
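# Hedged usage sketch (the folder names are made up): passing date_sort_key as a sort key
# orders single-day folders by date within a year instead of alphabetically.
def _demo_date_sort_key():
    folders = ['Oct_2_19', 'Feb_11_19', 'Oct_12_19']
    return sorted(folders, key=date_sort_key)  # -> ['Feb_11_19', 'Oct_2_19', 'Oct_12_19']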
def loop_over_days(column_list, behavioral_test_function, master_data_folder=''):
"""
:param column_list: list of strings/column titles for analysis that will be output in a table
:param behavioral_test_function: function that contains all the analysis functions to run on each file
:param master_data_folder: A directory which contains all single-day directories of interest. Used instead
of GUI selection with Tk. Path provided by sys.argv in executing script.
:return: one concatenated data table of analysis for each animal for each day specified
"""
# If a data folder is passed, skip user input.
if master_data_folder == '':
days = int(input("How many days would you like to analyze?"))
gui=True
else:
data_folders = glob.glob(os.path.join(master_data_folder, '*'))
data_folders = natsorted(data_folders, key=date_sort_key)
print('I found {}'.format(data_folders))
continue_script = input('Are these in the right order (y/n)? ')
if continue_script =='y':
pass
elif continue_script=='n':
date_fmt = input('Enter desired date format string for strptime: ')
regex_fmt = input('Enter desired regex pattern to match date string: ')
            data_folders = natsorted(data_folders, key=lambda name: date_sort_key(name, date_fmt=date_fmt, date_grep_fmt=regex_fmt))
days = len(data_folders)
gui=False
df = pd.DataFrame(columns=column_list)
for i in range(days):
# Ask user to specify data folder if necessary.
if gui:
root = Tk() # noqa
root.withdraw()
folder_selected = filedialog.askdirectory()
else:
folder_selected = data_folders[i]
# Iterate over single animal datafiles in current folder.
file_pattern = os.path.join(folder_selected, '*')
for file in sorted(glob.glob(file_pattern)):
loaded_file = load_file(file)
df2 = behavioral_test_function(loaded_file, i)
df = df.append(df2, ignore_index=True)
return days, df
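# Illustrative call (the column names and analysis function are hypothetical): when a master
# data folder is supplied, the Tk dialogs are skipped and each day folder is analyzed in turn.
#   days, df = loop_over_days(['Subject', 'Day', 'Dippers', 'Latency'],
#                              my_pavlovian_analysis,
#                              master_data_folder='/path/to/experiment')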
def loop_over_days_lickometer(column_list, behavioral_test_function):
"""
:param column_list: list of strings/column titles for analysis that will be output in a table
:param behavioral_test_function: function that contains all the analysis functions to run on each file
:return: one concatenated data table of analysis for each animal for each day specified
"""
days = int(input("How many days would you like to analyze?"))
df = pd.DataFrame(columns=column_list)
for i in range(days):
root = Tk() # noqa
root.withdraw()
folder_selected = filedialog.askdirectory()
file_pattern = os.path.join(folder_selected, '*')
for file in sorted(glob.glob(file_pattern)):
loaded_file = load_file(file)
df2 = behavioral_test_function(loaded_file, i)
df = df.append(df2, ignore_index=True)
return days, df
def load_file(filename):
"""
:param filename: string that refers to single operant file location, file is txt
:return: dictionary of all the fields and their values contained in the file (like subject, group, or w array)
"""
with open(filename, "r") as fileref:
filelines = fileref.readlines()
fields_dictionary = {}
for line in filelines:
if line[0] != ' ' and line[0] != '\n':
name = line.split(':')[0]
fields_dictionary[name] = line.replace(name + ':', '')
fields_dictionary[name] = fields_dictionary[name].replace('\n', '')
fields_dictionary[name] = fields_dictionary[name].replace(' ', '')
elif line[0] == ' ':
fields_dictionary[name] += line
fields_dictionary[name] = fields_dictionary[name].replace('\n', '')
group_identities = fields_dictionary['Group'].split('/')
fields_dictionary['Group'] = group_identities.pop(0)
for remaining in group_identities:
if ':' in remaining:
next_group = remaining.split(':')
fields_dictionary[next_group[0]] = next_group[1]
return fields_dictionary
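# Illustrative call (the file path is hypothetical): load_file returns a dict keyed by the
# field names found in the data file, e.g. fields['Subject'], fields['Group'], or the raw
# 'W' array string that extract_info_from_file consumes.
#   fields = load_file('/path/to/Oct_2_19/subject_42.txt')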
def concat_lickometer_files():
"""
:return: data frame for lickometer analysis
"""
files_list = []
    root = Tk()  # noqa
root.withdraw()
home = os.path.expanduser('~') # returns the home directory on any OS --> ex) /Users/jhl
selected_folder = filedialog.askdirectory(initialdir=home)
file_pattern = os.path.join(selected_folder, '*.txt')
data_dict = {}
for fname in natsorted(glob.glob(file_pattern), alg=ns.IGNORECASE): # loop through all the txt files
with open(fname, "r") as file:
filelines = file.readlines() # read the lines in each file
            subject_line = filelines[5]  # Animal ID is always on the 6th line (index 5)
subject = subject_line.split(",")[-1].strip() # subject will be the last element, strip any whitespaces!
values = filelines[-1].strip().split(",") # Need to split by delimiter in order to make the list!
data_dict[subject] = values
lick_df = pd.DataFrame.from_dict(data_dict, orient='index')
lick_final = lick_df.T
# Delete row at index position 0 & 1
lick_final = lick_final.drop([lick_final.index[0]]) # to get rid of row of ones at top
lick_final.reset_index(inplace=True)
for c in lick_final.columns:
lick_final[c] = pd.to_numeric(lick_final[c], errors='coerce')
lick_final = lick_final.drop(lick_final.columns[[0]], axis=1)
    lick_final.fillna(value=np.nan, inplace=True)
    lick_final = lick_final.rename(columns=lick_final.iloc[0]).drop(lick_final.index[0])
lick_final.to_excel("output.xlsx")
return lick_final
def extract_info_from_file(dictionary_from_file, time_conversion):
"""
:param dictionary_from_file: dictionary of all the fields and their values contained in the file (like subject, group, or w array)
:param time_conversion: conversion number the timecode needs to be divided by to get seconds
:return: timecode and eventcode lists derived from the w array
"""
time_event_codes = dictionary_from_file["W"].split()
    # Drop field-name tokens (anything containing ':') and normalise the remaining
    # timestamp/event strings, without mutating the list while iterating over it.
    time_event_codes = [num for num in time_event_codes if ':' not in num]
    time_event_codes = [str(int(float(num))) for num in time_event_codes]
timecode = []
eventcode = []
first_timecode = (float(time_event_codes[0][:-4]) / time_conversion)
for num in time_event_codes:
if num == time_event_codes[0]:
timecode += [0.0]
else:
timecode += [round((float(num[:-4]) / time_conversion) - first_timecode, 2)]
eventcode += [eventcodes_dictionary[int(num[-4:])]]
return timecode, eventcode
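# Illustrative call (the path and the time_conversion value are assumptions; use whatever
# resolution your boxes log, e.g. 100 if timestamps are in hundredths of a second):
#   fields = load_file('/path/to/Oct_2_19/subject_42.txt')
#   timecode, eventcode = extract_info_from_file(fields, 100)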
def DNAMIC_loop_over_days(column_list, behavioral_test_function):
"""
:param column_list: list of strings/column titles for analysis that will be output in a table
:param behavioral_test_function: function that contains all the analysis functions to run on each file
:return: one concatenated data table of analysis for each animal for each day specified
"""
days = int(input("How many days would you like to analyze?"))
df = pd.DataFrame(columns=column_list)
for i in range(days):
root = Tk() # noqa
root.withdraw()
folder_selected = filedialog.askdirectory()
file_pattern = os.path.join(folder_selected, '*')
for file in sorted(glob.glob(file_pattern)):
(eventcode, timecode, fields_dictionary) = DNAMIC_extract_info_from_file(file)
df2 = behavioral_test_function(eventcode, timecode, fields_dictionary, i)
df = df.append(df2, ignore_index=True)
return days, df
def DNAMIC_extract_info_from_file(filename):
df = pd.read_csv(filename, sep=':', names=['event', 'timestamp'])
df['timestamp'] = df['timestamp'].str.strip()
# 0, 0, 0 appears after successful initialization --> serves as a cutoff mark
end_of_init_idx = df.loc[df['timestamp'] == '0'].index[-1]
body_start_idx = end_of_init_idx + 1
keys = df[:body_start_idx]['event'].tolist()
values = df[:body_start_idx]['timestamp'].tolist()
fields_dictionary = dict(zip(keys, values))
df_body = df[body_start_idx:-2]
eventcode = df_body['event'].tolist()
eventcode = [eventcodes_dictionary[int(i)] for i in eventcode]
timecode = df_body['timestamp'].tolist()
timecode = [int(i) / 1000 for i in timecode]
return eventcode, timecode, fields_dictionary
def get_events_indices(eventcode, eventtypes):
"""
:param eventcode: list of event codes from operant conditioning file
:param eventtypes: list of event types to index
:return: list of indices of target events
"""
return [i for i, event in enumerate(eventcode) if event in eventtypes]
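# Hedged usage sketch with made-up event names: returns the list positions of the requested
# events so later analyses can slice the session between them.
def _demo_get_events_indices():
    events = ['StartSession', 'PokeOn1', 'PokeOff1', 'EndSession']
    return get_events_indices(events, ['PokeOn1', 'PokeOff1'])  # -> [1, 2]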
def reward_retrieval(timecode, eventcode):
"""
:param timecode: list of time codes from operant conditioning file
:param eventcode: list of event codes from operant conditioning file
:return: number of reinforcers (dippers) presented, number retrieved, and latency to retrieve as floats
"""
dip_on = get_events_indices(eventcode, ['DipOn'])
dip_off = get_events_indices(eventcode, ['DipOff', 'EndSession'])
poke_on = get_events_indices(eventcode, ['PokeOn1'])
poke_off = get_events_indices(eventcode, ['PokeOff1'])
dips_retrieved = 0
latency_dip_retrieval = []
for i in range(len(dip_on)):
for x in range(len(poke_off)):
dip_on_idx = dip_on[i]
dip_off_idx = dip_off[i]
if poke_on[x] < dip_on_idx < poke_off[x]:
dips_retrieved += 1
latency_dip_retrieval += [0]
break
elif 'PokeOn1' in eventcode[dip_on_idx:dip_off_idx]:
dips_retrieved += 1
poke_during_dip_idx = eventcode[dip_on_idx:dip_off_idx].index('PokeOn1')
latency_dip_retrieval += [round(timecode[poke_during_dip_idx + dip_on_idx] - timecode[dip_on_idx], 2)]
break
if dips_retrieved == 0:
return len(dip_on), dips_retrieved, 0
else:
return len(dip_on), dips_retrieved, round(statistics.mean(latency_dip_retrieval), 3)
def cue_iti_responding(timecode, eventcode, code_on, code_off, counted_behavior):
"""
:param timecode: list of time codes from operant conditioning file
:param eventcode: list of event codes from operant conditioning file
:param code_on: event code for the beginning of a cue
:param code_off: event code for the end of a cue
:param counted_behavior: event code for counted behavior
:return: mean rpm of head pokes during cue and mean rpm of head pokes during equivalent ITI preceding cue
"""
cue_on = get_events_indices(eventcode, [code_on])
cue_off = get_events_indices(eventcode, [code_off])
if len(cue_on) != len(cue_off):
cue_off += get_events_indices(eventcode, ['EndSession'])
iti_on = get_events_indices(eventcode, [code_off, 'StartSession'])
all_poke_rpm = []
all_poke_iti_rpm = []
for i in range(len(cue_on)):
cue_on_idx = cue_on[i]
cue_off_idx = cue_off[i]
iti_on_idx = iti_on[i]
cue_length_sec = (timecode[cue_off_idx] - timecode[cue_on_idx])
if cue_length_sec > 0:
poke_rpm = ((eventcode[cue_on_idx:cue_off_idx].count(counted_behavior)) / (cue_length_sec / 60))
else:
poke_rpm = 0
all_poke_rpm += [poke_rpm]
iti_poke = 0
for x in range(iti_on_idx, cue_on_idx):
if eventcode[x] == counted_behavior and timecode[x] >= (timecode[cue_on_idx] - cue_length_sec):
iti_poke += 1
if cue_length_sec > 0:
iti_poke_rpm = iti_poke / (cue_length_sec / 60)
else:
iti_poke_rpm = 0
all_poke_iti_rpm += [iti_poke_rpm]
return round(statistics.mean(all_poke_rpm), 3), round(statistics.mean(all_poke_iti_rpm), 3)
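# Hedged usage sketch (the cue/poke event names and times are made up): one 20 s cue with two
# pokes versus one poke in the preceding 20 s window gives 6 vs. 3 responses per minute.
def _demo_cue_iti_responding():
    events = ['StartSession', 'PokeOn1', 'CueOn', 'PokeOn1', 'PokeOn1', 'CueOff', 'EndSession']
    times = [0.0, 10.0, 20.0, 25.0, 30.0, 40.0, 60.0]
    return cue_iti_responding(times, events, 'CueOn', 'CueOff', 'PokeOn1')  # -> (6.0, 3.0)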
def cue_iti_responding_PavCA(timecode, eventcode, code_on, code_off, counted_behavior):
"""
:param timecode: list of time codes from operant conditioning file
:param eventcode: list of event codes from operant conditioning file
:param code_on: event code for the beginning of a cue
:param code_off: event code for the end of a cue
:param counted_behavior: event code for counted behavior
    :return: mean rpm of head pokes during cue, mean rpm of head pokes during equivalent ITI preceding cue, and the number of cues with at least one response
"""
cue_on = get_events_indices(eventcode, [code_on])
cue_off = get_events_indices(eventcode, [code_off])
if len(cue_on) != len(cue_off):
cue_off += get_events_indices(eventcode, ['EndSession'])
iti_on = get_events_indices(eventcode, [code_off, 'StartSession'])
all_poke_rpm = []
all_poke_iti_rpm = []
for i in range(len(cue_on)):
cue_on_idx = cue_on[i]
cue_off_idx = cue_off[i]
iti_on_idx = iti_on[i]
cue_length_sec = (timecode[cue_off_idx] - timecode[cue_on_idx])
if cue_length_sec > 0:
poke_rpm = ((eventcode[cue_on_idx:cue_off_idx].count(counted_behavior)) / (cue_length_sec / 60))
else:
poke_rpm = 0
all_poke_rpm += [poke_rpm]
iti_poke = 0
for x in range(iti_on_idx, cue_on_idx):
if eventcode[x] == counted_behavior and timecode[x] >= (timecode[cue_on_idx] - cue_length_sec):
iti_poke += 1
if cue_length_sec > 0:
iti_poke_rpm = iti_poke / (cue_length_sec / 60)
else:
iti_poke_rpm = 0
all_poke_iti_rpm += [iti_poke_rpm]
return round(statistics.mean(all_poke_rpm), 3), round(statistics.mean(all_poke_iti_rpm), 3), len(
[j for j in all_poke_rpm if j > 0])
def binned_responding(timecode, eventcode, code_on, code_off, counted_behavior, trial_count):
"""
:param timecode: list of time codes from operant conditioning file
:param eventcode: list of event codes from operant conditioning file
:param code_on: event code for the beginning of a cue
:param code_off: event code for the end of a cue
:param counted_behavior: event code for behavior you want counted
:param trial_count: number of bins
:return: mean rpm of head pokes during cue and mean rpm of head pokes during equivalent ITI preceding cue
"""
cue_on = get_events_indices(eventcode, [code_on])
cue_off = get_events_indices(eventcode, [code_off])
iti_on = get_events_indices(eventcode, [code_off, 'StartSession'])
all_poke_rpm = []
all_poke_iti_rpm = []
for i in range(trial_count):
cue_on_idx = cue_on[i]
cue_off_idx = cue_off[i]
iti_on_idx = iti_on[i]
cue_length_sec = (timecode[cue_off_idx] - timecode[cue_on_idx])
poke_rpm = ((eventcode[cue_on_idx:cue_off_idx].count(counted_behavior)) / (cue_length_sec / 60))
all_poke_rpm += [poke_rpm]
iti_poke = 0
for x in range(iti_on_idx, cue_on_idx):
if eventcode[x] == counted_behavior and timecode[x] >= (timecode[cue_on_idx] - cue_length_sec):
iti_poke += 1
iti_poke_rpm = iti_poke / (cue_length_sec / 60)
all_poke_iti_rpm += [iti_poke_rpm]
return round(statistics.mean(all_poke_rpm), 3), round(statistics.mean(all_poke_iti_rpm), 3)
def cue_responding_duration(timecode, eventcode, code_on, code_off, counted_behavior_on, counted_behavior_off):
"""
:param timecode: list of time codes from operant conditioning file
:param eventcode: list of event codes from operant conditioning file
:param code_on: event code for the beginning of a cue
:param code_off: event code for the end of a cue
    :param counted_behavior_on: event code for the beginning of the target behavior
    :param counted_behavior_off: event code for the end of the target behavior
    :return: mean duration of individual head pokes during cue, mean total duration of head poking during cue, and the same measures for the equivalent ITI preceding the cue
"""
cue_on = get_events_indices(eventcode, [code_on])
cue_off = get_events_indices(eventcode, [code_off])
if len(cue_on) != len(cue_off):
cue_off += get_events_indices(eventcode, ['EndSession'])
iti_on = get_events_indices(eventcode, [code_off, 'StartSession'])
all_poke_dur = []
all_iti_poke_dur = []
all_cue_duration = []
all_iti_duration = []
for i in range(len(cue_on)):
cue_on_idx = cue_on[i]
cue_off_idx = cue_off[i]
iti_on_idx = iti_on[i]
cue_length_sec = (timecode[cue_off_idx] - timecode[cue_on_idx])
in_cue_duration = 0
iti_cue_duration = 0
for x in range(cue_on_idx, cue_off_idx):
if eventcode[x - 1] == code_on and eventcode[x] == counted_behavior_off:
poke_dur = timecode[x] - timecode[x - 1]
all_poke_dur += [poke_dur]
in_cue_duration += poke_dur
elif eventcode[x] == code_off and eventcode[x - 1] == code_on and eventcode[x + 1] == counted_behavior_off:
poke_dur = timecode[x] - timecode[x - 1]
all_poke_dur += [poke_dur]
in_cue_duration += poke_dur
elif eventcode[x] == counted_behavior_on and (
eventcode[x + 1] == counted_behavior_off or eventcode[x + 1] == code_off):
poke_dur = timecode[x + 1] - timecode[x]
all_poke_dur += [poke_dur]
in_cue_duration += poke_dur
all_cue_duration += [in_cue_duration]
for x in range(iti_on_idx, cue_on_idx):
if eventcode[x] == counted_behavior_on and timecode[x] >= (timecode[cue_on_idx] - cue_length_sec):
if eventcode[x - 1] == code_on and eventcode[x] == counted_behavior_off:
poke_dur = timecode[x] - timecode[x - 1]
all_iti_poke_dur += [poke_dur]
iti_cue_duration += poke_dur
elif eventcode[x] == code_off and eventcode[x - 1] == code_on and eventcode[
x + 1] == counted_behavior_off:
poke_dur = timecode[x] - timecode[x - 1]
all_iti_poke_dur += [poke_dur]
iti_cue_duration += poke_dur
elif eventcode[x] == counted_behavior_on and (
eventcode[x + 1] == counted_behavior_off or eventcode[x + 1] == code_off):
poke_dur = timecode[x + 1] - timecode[x]
all_iti_poke_dur += [poke_dur]
iti_cue_duration += poke_dur
all_iti_duration += [iti_cue_duration]
if not all_cue_duration:
all_cue_duration += [0]
if not all_poke_dur:
all_poke_dur += [0]
if not all_iti_duration:
all_iti_duration += [0]
if not all_iti_poke_dur:
all_iti_poke_dur += [0]
return round(statistics.mean(all_poke_dur), 3), round(statistics.mean(all_cue_duration), 3), \
round(statistics.mean(all_iti_poke_dur), 3), round(statistics.mean(all_iti_duration), 3)
def lever_pressing(eventcode, lever1, lever2=False):
"""
:param eventcode: list of event codes from operant conditioning file
    :param lever1: eventcode for the first (or only) lever press
:param lever2: optional parameter for second lever eventcode if two levers are used
:return: count of first lever presses, second lever presses, and total lever presses, as int
"""
lever1_presses = eventcode.count(lever1)
if lever2:
lever2_presses = eventcode.count(lever2)
else:
lever2_presses = 0
total_lever_presses = lever1_presses + lever2_presses
return lever1_presses, lever2_presses, total_lever_presses
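# Hedged usage sketch with made-up press events: counts for each lever plus the total.
def _demo_lever_pressing():
    events = ['RPressOn', 'RPressOn', 'LPressOn', 'PokeOn1']
    return lever_pressing(events, 'RPressOn', 'LPressOn')  # -> (2, 1, 3)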
def lever_press_latency(timecode, eventcode, lever_on, lever_press):
"""
:param timecode: list of times (in seconds) when events occurred
:param eventcode: list of events that happened in a session
:param lever_on: event name for lever presentation
:param lever_press: event name for lever press
:return: the mean latency to press the lever in seconds
"""
lever_on = get_events_indices(eventcode, [lever_on, 'EndSession'])
press_latency = []
for i in range(len(lever_on) - 1):
lever_on_idx = lever_on[i]
if lever_press in eventcode[lever_on_idx:lever_on[i + 1]]:
lever_press_idx = eventcode[lever_on_idx:lever_on[i + 1]].index(lever_press)
press_latency += [round(timecode[lever_on_idx + lever_press_idx] - timecode[lever_on_idx], 2)]
else:
pass
if len(press_latency) > 0:
return round(statistics.mean(press_latency), 3)
else:
return 0
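# Hedged usage sketch with made-up lever events: the first presentation is pressed 1.5 s after
# the lever comes out and the second is never pressed, so the mean latency is 1.5 s.
def _demo_lever_press_latency():
    events = ['LLeverOn', 'LPressOn', 'LLeverOn', 'EndSession']
    times = [0.0, 1.5, 10.0, 20.0]
    return lever_press_latency(times, events, 'LLeverOn', 'LPressOn')  # -> 1.5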
def lever_press_latency_PavCA(timecode, eventcode, lever_on, lever_press, pres_len):
"""
:param timecode: list of times (in seconds) when events occurred
:param eventcode: list of events that happened in a session
:param lever_on: event name for lever presentation
    :param lever_press: event name for lever press
    :param pres_len: length of the lever presentation in seconds; trials not pressed within pres_len are scored as a 10 s latency
    :return: the mean latency to press the lever in seconds
"""
lever_on = get_events_indices(eventcode, [lever_on, 'EndSession'])
press_latency = []
for i in range(len(lever_on) - 1):
lever_on_idx = lever_on[i]
if lever_press in eventcode[lever_on_idx:lever_on[i + 1]]:
lever_press_idx = eventcode[lever_on_idx:lever_on[i + 1]].index(lever_press)
if round(timecode[lever_on_idx + lever_press_idx] - timecode[lever_on_idx], 2) <= pres_len:
press_latency += [round(timecode[lever_on_idx + lever_press_idx] - timecode[lever_on_idx], 2)]
else:
press_latency += [10]
else:
press_latency += [10]
if len(press_latency) > 0:
return round(statistics.mean(press_latency), 3)
else:
return 0
def total_head_pokes(eventcode):
"""
:param eventcode: list of event codes from operant conditioning file
:return: total number of times animal poked head into reward receptacle
"""
return eventcode.count('PokeOn1')
def num_successful_go_nogo_trials(eventcode):
"""
:param eventcode: list of event codes from operant conditioning file
:return: number of successful go and no go trials in the go/no go tasks
"""
return eventcode.count('SuccessfulGoTrial'), eventcode.count('SuccessfulNoGoTrial')
def count_go_nogo_trials(eventcode):
"""
:param eventcode: list of event codes from operant conditioning file
:return: number of go and no go trials in the go/no go tasks
"""
lever_on = get_events_indices(eventcode, ['RLeverOn', 'LLeverOn'])
(go_trials, nogo_trials) = (0, 0)
for lever in lever_on:
if eventcode[lever + 1] in ('LightOn1', 'LightOn2'):
nogo_trials += 1
else:
go_trials += 1
return go_trials, nogo_trials
def num_switch_trials(eventcode):
"""
:param eventcode: list of event codes from operant conditioning file
:return: number of large and small rewards in the switch task
"""
return eventcode.count('LargeReward'), eventcode.count('SmallReward')
def bin_by_time(timecode, eventcode, bin_length, counted_event):
"""
:param timecode: list of time codes from operant conditioning file
:param eventcode: list of event codes from operant conditioning file
:param bin_length: length of time in seconds to split the session into
:param counted_event: event that is counted in each bin, in list format
:return: a list of counts of specified event for each bin
"""
event_on_list = get_events_indices(eventcode, counted_event)
if timecode[-1] % bin_length != 0:
num_bins = int(timecode[-1] // bin_length) + 1
elif timecode[-1] % bin_length == 0:
num_bins = int(timecode[-1] // bin_length)
counts_for_each_bin = [0] * num_bins
for i in range(num_bins):
for event_on in event_on_list:
if (i + 1) != num_bins and (i + 1) * bin_length > timecode[event_on] >= i * bin_length:
counts_for_each_bin[i] += 1
elif (i + 1) == num_bins and timecode[event_on] >= i * bin_length:
counts_for_each_bin[i] += 1
return counts_for_each_bin
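# Hedged usage sketch with made-up events: a 90 s session binned into 60 s windows gives one
# poke in the first bin and one in the final, partial bin.
def _demo_bin_by_time():
    times = [0.0, 30.0, 65.0, 90.0]
    events = ['StartSession', 'PokeOn1', 'PokeOn1', 'EndSession']
    return bin_by_time(times, events, 60, ['PokeOn1'])  # -> [1, 1]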
def lever_press_lat_gng(timecode, eventcode, lever_on, lever_press):
"""
:param timecode: list of times (in seconds) when events occurred
:param eventcode: list of events that happened in a session
:param lever_on: event name for lever presentation
:param lever_press: event name for lever press
:return: the mean latency to press the lever in seconds
"""
lever_on = get_events_indices(eventcode, [lever_on, 'EndSession'])
press_latency = []
for i in range(len(lever_on) - 1):
lever_on_idx = lever_on[i]
if lever_press in eventcode[lever_on_idx:lever_on[i + 1]]:
lever_press_idx = eventcode[lever_on_idx:lever_on[i + 1]].index(lever_press)
press_latency += [round(timecode[lever_on_idx + lever_press_idx] - timecode[lever_on_idx], 2)]
else:
pass
if len(press_latency) > 0:
return round(statistics.mean(press_latency), 3)
else:
return 0
def RVI_gng_weird(timecode, eventcode, lever_on, lever_press, cue_length):
"""
:param timecode: list of times (in seconds) when events occurred
:param eventcode: list of events that happened in a session
:param lever_on: event name for lever presentation
    :param lever_press: event name for lever press
    :param cue_length: length of the cue in seconds, used as the latency for trials with no press
    :return: the mean latency to press the lever in seconds and the number of trials with a latency longer than cue_length
"""
lever_on = get_events_indices(eventcode, [lever_on, 'EndSession'])
press_latency = []
incorrect_trials = 0
for i in range(len(lever_on) - 1):
lever_on_idx = lever_on[i]
if lever_press in eventcode[lever_on_idx:lever_on[i + 1]]:
lever_press_idx = eventcode[lever_on_idx:lever_on[i + 1]].index(lever_press)
press_latency += [round(timecode[lever_on_idx + lever_press_idx] - timecode[lever_on_idx], 2)]
else:
press_latency += [cue_length]
final_press_latency = []
for x in press_latency:
if x > cue_length:
incorrect_trials += 1
else:
final_press_latency += [x]
if len(final_press_latency) > 0:
return round(statistics.mean(final_press_latency), 3), incorrect_trials
else:
return 0, incorrect_trials
def RVI_nogo_latency(timecode, eventcode, lever_on, cue_length):
"""
:param timecode: list of times (in seconds) when events occurred
:param eventcode: list of events that happened in a session
:param lever_on: event name or list for lever presentation
    :param cue_length: length of the cue in seconds, used as the latency for trials with no press
:return: the mean latency to press the lever in seconds
"""
lever_on = get_events_indices(eventcode, [lever_on, 'EndSession'])
press_latency = []
for i in range(len(lever_on) - 1):
lever_on_idx = lever_on[i]
if 'LPressOn' in eventcode[lever_on_idx:lever_on[i + 1]]:
lever_press_idx = eventcode[lever_on_idx:lever_on[i + 1]].index('LPressOn')
if timecode[lever_on_idx + lever_press_idx] - timecode[lever_on_idx] < cue_length:
press_latency += [round(timecode[lever_on_idx + lever_press_idx] - timecode[lever_on_idx], 2)]
elif 'RPressOn' in eventcode[lever_on_idx:lever_on[i + 1]]:
lever_press_idx = eventcode[lever_on_idx:lever_on[i + 1]].index('RPressOn')
if timecode[lever_on_idx + lever_press_idx] - timecode[lever_on_idx] < cue_length:
press_latency += [round(timecode[lever_on_idx + lever_press_idx] - timecode[lever_on_idx], 2)]
else:
press_latency += [cue_length]
if len(press_latency) > 0:
return round(statistics.mean(press_latency), 3)
else:
return 0
def lever_press_latency_Switch(timecode, eventcode):
"""
:param timecode: list of times (in seconds) when events occurred
:param eventcode: list of events that happened in a session
    :return: the mean latency (in seconds) to press either lever, computed from the first 10 responded trials
"""
lever_on = get_events_indices(eventcode, ['LLeverOn', 'RLeverOn', 'EndSession'])
press_latency = []
for i in range(len(lever_on) - 1):
lever_on_idx = lever_on[i]
if len(press_latency) < 10:
if 'LPressOn' in eventcode[lever_on_idx:lever_on[i + 1]]:
lever_press_idx = eventcode[lever_on_idx:lever_on[i + 1]].index('LPressOn')
press_latency += [round(timecode[lever_on_idx + lever_press_idx] - timecode[lever_on_idx], 2)]
elif 'RPressOn' in eventcode[lever_on_idx:lever_on[i + 1]]:
lever_press_idx = eventcode[lever_on_idx:lever_on[i + 1]].index('RPressOn')
press_latency += [round(timecode[lever_on_idx + lever_press_idx] - timecode[lever_on_idx], 2)]
else:
pass
if len(press_latency) > 0:
return round(statistics.mean(press_latency), 3)
else:
return 0
def response_rate_across_cue_iti(timecode, eventcode, code_on, code_off, counted_behavior):
"""
:param timecode: list of times (in seconds) when events occurred
:param eventcode: list of events that happened in a session
    :param code_on: event code for the beginning of a cue
    :param code_off: event code for the end of a cue
    :param counted_behavior: event code for the counted behavior
    :return: three lists (cue, ITI, and cue minus ITI) of per-second response counts averaged across trials
"""
cue_on = get_events_indices(eventcode, [code_on])
cue_off = get_events_indices(eventcode, [code_off])
if len(cue_on) != len(cue_off):
cue_off += get_events_indices(eventcode, ['EndSession'])
iti_on = get_events_indices(eventcode, [code_off, 'StartSession'])
cue_length_sec = int(timecode[cue_off[6]] - timecode[cue_on[6]])
all_cue_length_poke_rates = [0] * cue_length_sec
all_iti_length_poke_rates = [0] * cue_length_sec
for i in range(len(cue_on)):
cue_on_idx = cue_on[i]
cue_off_idx = cue_off[i]
iti_on_idx = iti_on[i]
cue_length_poke_rates = []
iti_length_poke_rates = []
for y in range(int(cue_length_sec)):
pokes = 0
iti_pokes = 0
for x in range(cue_on_idx, cue_off_idx):
if eventcode[x] == counted_behavior and (timecode[cue_on_idx] + y) <= timecode[x] < (
timecode[cue_on_idx] + y + 1):
pokes += 1
else:
pokes += 0
cue_length_poke_rates += [pokes]
for t in range(iti_on_idx, cue_on_idx):
if eventcode[t] == counted_behavior and (timecode[cue_on_idx] - (cue_length_sec - y)) \
<= timecode[t] < (timecode[cue_on_idx] - (cue_length_sec - (y + 1))):
iti_pokes += 1
else:
iti_pokes += 0
iti_length_poke_rates += [iti_pokes]
all_cue_length_poke_rates = [cue_length_poke_rates[i] + all_cue_length_poke_rates[i] for i in
range(len(all_cue_length_poke_rates))]
all_iti_length_poke_rates = [iti_length_poke_rates[i] + all_iti_length_poke_rates[i] for i in
range(len(all_iti_length_poke_rates))]
all_cue_length_poke_rates = [all_cue_length_poke_rates[i] / len(cue_on) for i in
range(len(all_cue_length_poke_rates))]
all_iti_length_poke_rates = [all_iti_length_poke_rates[i] / len(cue_on) for i in
range(len(all_iti_length_poke_rates))]
subtracted_poke_rates = [all_cue_length_poke_rates[i] - all_iti_length_poke_rates[i] for i in
range(len(all_cue_length_poke_rates))]
return all_cue_length_poke_rates, all_iti_length_poke_rates, subtracted_poke_rates
def duration_across_cue_iti(timecode, eventcode, code_on, code_off, counted_behavior_on, counted_behavior_off):
"""
:param timecode: list of times (in seconds) when events occurred
:param eventcode: list of events that happened in a session
    :param code_on: event code for the beginning of a cue
    :param code_off: event code for the end of a cue
    :param counted_behavior_on: event code for the beginning of the target behavior
    :param counted_behavior_off: event code for the end of the target behavior
    :return: three lists (cue, ITI, and cue minus ITI) of per-second response durations averaged across trials
"""
cue_on = get_events_indices(eventcode, [code_on])
cue_off = get_events_indices(eventcode, [code_off])
poke_on = get_events_indices(eventcode, [counted_behavior_on])
poke_off = get_events_indices(eventcode, [counted_behavior_off])
if len(cue_on) != len(cue_off):
cue_off += get_events_indices(eventcode, ['EndSession'])
cue_length_sec = int(timecode[cue_off[6]] - timecode[cue_on[6]])
all_cue_length_poke_dur = [0] * int(cue_length_sec)
all_iti_length_poke_dur = [0] * int(cue_length_sec)
for i in range(len(cue_on)):
cue_on_idx = cue_on[i]
cue_off_idx = cue_off[i]
cue_length_poke_dur = []
iti_length_poke_dur = []
for y in range(int(cue_length_sec)):
poke_dur = 0
iti_poke_dur = 0
for c in range(len(poke_off)):
# pokes that span whole seconds
if timecode[poke_on[c]] < (timecode[cue_on_idx] + y) and timecode[poke_off[c]] > \
(timecode[cue_on_idx] + y + 1):
poke_dur += 1
break
# pokes contained within a second
elif (timecode[cue_on_idx] + y) <= timecode[poke_on[c]] < timecode[poke_off[c]] \
< (timecode[cue_on_idx] + y + 1):
poke_dur += timecode[poke_off[c]] - timecode[poke_on[c]]
# pokes that start in a second of a cue
elif (timecode[cue_on_idx] + y) <= timecode[poke_on[c]] < (timecode[cue_on_idx] + y + 1) \
< timecode[poke_off[c]]:
poke_dur += ((timecode[cue_on_idx] + y + 1) - timecode[poke_on[c]])
# pokes that end in a second of a cue
elif timecode[poke_on[c]] < (timecode[cue_on_idx] + y) <= timecode[poke_off[c]] \
< (timecode[cue_on_idx] + y + 1):
poke_dur += (timecode[poke_off[c]] - (timecode[cue_on_idx] + y))
# pokes not occurring in the cue
else:
poke_dur += 0
cue_length_poke_dur += [round(poke_dur, 3)]
for d in range(len(poke_off)):
# pokes that span whole seconds
if timecode[poke_on[d]] < (timecode[cue_on_idx] - (cue_length_sec - y)) and timecode[poke_off[d]] \
> (timecode[cue_on_idx] - (cue_length_sec - (y + 1))):
iti_poke_dur += 1
break
# pokes contained within a second
elif (timecode[cue_on_idx] - (cue_length_sec - y)) <= timecode[poke_on[d]] < timecode[poke_off[d]] \
< (timecode[cue_on_idx] - (cue_length_sec - (y + 1))):
iti_poke_dur += (timecode[poke_off[d]] - timecode[poke_on[d]])
# pokes that start in a second of an ITI
elif (timecode[cue_on_idx] - (cue_length_sec - y)) <= timecode[poke_on[d]] \
< (timecode[cue_on_idx] - (cue_length_sec - (y + 1))) < timecode[poke_off[d]]:
iti_poke_dur += ((timecode[cue_on_idx] - (cue_length_sec - (y + 1))) - timecode[poke_on[d]])
# pokes that end in a second of an ITI
elif timecode[poke_on[d]] < (timecode[cue_on_idx] - (cue_length_sec - y)) <= timecode[poke_off[d]] \
< (timecode[cue_on_idx] - (cue_length_sec - (y + 1))):
iti_poke_dur += (timecode[poke_off[d]] - (timecode[cue_on_idx] - (cue_length_sec - y)))
# pokes not occurring in the ITI
else:
iti_poke_dur += 0
iti_length_poke_dur += [round(iti_poke_dur, 3)]
all_cue_length_poke_dur = [cue_length_poke_dur[i] + all_cue_length_poke_dur[i] for i in
range(len(all_cue_length_poke_dur))]
all_iti_length_poke_dur = [iti_length_poke_dur[i] + all_iti_length_poke_dur[i] for i in
range(len(all_iti_length_poke_dur))]
all_cue_length_poke_dur = [all_cue_length_poke_dur[i] / len(cue_on) for i in
range(len(all_iti_length_poke_dur))]
all_iti_length_poke_dur = [all_iti_length_poke_dur[i] / len(cue_on) for i in
range(len(all_iti_length_poke_dur))]
subtracted_poke_dur = [all_cue_length_poke_dur[i] - all_iti_length_poke_dur[i] for i in
range(len(all_cue_length_poke_dur))]
return all_cue_length_poke_dur, all_iti_length_poke_dur, subtracted_poke_dur
def display_line_graph(data_frame, event_name):
"""
:param data_frame: a long-form DataFrame containing data of interest.
:param event_name: Column from data_frame to be graphed.
:return: Creates a plot object that can be displayed with plt.show()
"""
# Begin by compiling and sorting a list of subject IDs.
try:
subject_ids = set(data_frame.loc[:, 'Subject'])
subject_column_name='Subject'
    except KeyError:
# If subject IDs aren't where expected, ask the user
subject_column_name = input('Unable to find column "Subject". What is the column name? ')
subject_ids = set(data_frame.loc[:, subject_column_name])
subject_ids = sorted(subject_ids)
# Next, do the EXACT same for days. (Copied for ease of variable naming.)
try:
run_days = set(data_frame.loc[:, 'Day'])
day_column_name='Day'
    except KeyError:
day_column_name = input('Unable to find column "Day". What is the column name? ')
run_days = set(data_frame.loc[:, day_column_name])
run_days = sorted(run_days)
# Use the verified column names to ensure that data_frame is sorted by Day in ascending order.
data_frame.sort_values(by=[day_column_name], ascending=True, inplace=True)
# Then create and populate the short-form DataFrame
short_form_DF = pd.DataFrame(index=subject_ids, columns=run_days)
plt.figure(event_name)
for mouse in subject_ids:
mouse_idx = data_frame[data_frame[subject_column_name]==mouse].index
raw_mouse_data = data_frame.loc[mouse_idx, event_name].values
try:
short_form_DF.loc[mouse, run_days] = raw_mouse_data
# In the rare case that an animal is run twice in a day (e.g. during trough training)
# There will be a value error. Just drop a data point for ease of graphing here.
except ValueError:
print(f'{mouse} has {len(raw_mouse_data)} datapoints, but there are only {len(run_days)} run days.')
print(data_frame.loc[mouse_idx])
day_to_drop = int(input('Which datapoint would you like to drop (enter 0-ordered index)? '))
raw_mouse_data = np.delete(raw_mouse_data, day_to_drop)
plt.plot(range(1, len(raw_mouse_data)+1), raw_mouse_data, marker='o')
plt.title(event_name)
plt.xlabel('Days')
# The below is taken from https://stackoverflow.com/a/47166787
# Someday, it would be nice to try to adapt this to display the
# subject ID upon mousing over each line.
# x = np.sort(np.random.rand(15))
# y = np.sort(np.random.rand(15))
# names = np.array(list("ABCDEFGHIJKLMNO"))
# norm = plt.Normalize(1,4)
# cmap = plt.cm.RdYlGn
# fig,ax = plt.subplots()
# line, = plt.plot(x,y, marker="o")
# annot = ax.annotate("", xy=(0,0), xytext=(-20,20),textcoords="offset points",
# bbox=dict(boxstyle="round", fc="w"),
# arrowprops=dict(arrowstyle="->"))
# annot.set_visible(False)
# def update_annot(ind):
# x,y = line.get_data()
# annot.xy = (x[ind["ind"][0]], y[ind["ind"][0]])
# text = "{}, {}".format(" ".join(list(map(str,ind["ind"]))),
# " ".join([names[n] for n in ind["ind"]]))
# annot.set_text(text)
# annot.get_bbox_patch().set_alpha(0.4)
# def hover(event):
# vis = annot.get_visible()
# if event.inaxes == ax:
# cont, ind = line.contains(event)
# if cont:
# update_annot(ind)
# annot.set_visible(True)
# fig.canvas.draw_idle()
# else:
# if vis:
# annot.set_visible(False)
# fig.canvas.draw_idle()
# fig.canvas.mpl_connect("motion_notify_event", hover)
# plt.show()
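# Usage sketch: a minimal, hypothetical example of calling display_line_graph.
# It assumes a long-form DataFrame with 'Subject', 'Day', and one numeric event
# column; the column name 'Pokes' and the values below are illustrative only.
# import pandas as pd
# import matplotlib.pyplot as plt
# example_df = pd.DataFrame({
#     'Subject': ['m1', 'm1', 'm2', 'm2'],
#     'Day': [1, 2, 1, 2],
#     'Pokes': [10, 15, 8, 12],
# })
# display_line_graph(example_df, 'Pokes')
# plt.show()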
|
[
"statistics.mean",
"tkinter.filedialog.askdirectory",
"pandas.read_csv",
"datetime.datetime.strptime",
"numpy.delete",
"matplotlib.pyplot.xlabel",
"os.path.join",
"pandas.DataFrame.from_dict",
"matplotlib.pyplot.figure",
"glob.glob",
"natsort.natsorted",
"pandas.to_numeric",
"pandas.DataFrame",
"matplotlib.pyplot.title",
"os.path.expanduser"
] |
[((4274, 4307), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'column_list'}), '(columns=column_list)\n', (4286, 4307), True, 'import pandas as pd\n'), ((5395, 5428), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'column_list'}), '(columns=column_list)\n', (5407, 5428), True, 'import pandas as pd\n'), ((7180, 7203), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (7198, 7203), False, 'import os\n'), ((7285, 7325), 'tkinter.filedialog.askdirectory', 'filedialog.askdirectory', ([], {'initialdir': 'home'}), '(initialdir=home)\n', (7308, 7325), False, 'from tkinter import filedialog\n'), ((7345, 7383), 'os.path.join', 'os.path.join', (['selected_folder', '"""*.txt"""'], {}), "(selected_folder, '*.txt')\n", (7357, 7383), False, 'import os\n'), ((7994, 8043), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data_dict'], {'orient': '"""index"""'}), "(data_dict, orient='index')\n", (8016, 8043), True, 'import pandas as pd\n'), ((10123, 10156), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'column_list'}), '(columns=column_list)\n', (10135, 10156), True, 'import pandas as pd\n'), ((10704, 10764), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': '""":"""', 'names': "['event', 'timestamp']"}), "(filename, sep=':', names=['event', 'timestamp'])\n", (10715, 10764), True, 'import pandas as pd\n'), ((41480, 41529), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'subject_ids', 'columns': 'run_days'}), '(index=subject_ids, columns=run_days)\n', (41492, 41529), True, 'import pandas as pd\n'), ((41535, 41557), 'matplotlib.pyplot.figure', 'plt.figure', (['event_name'], {}), '(event_name)\n', (41545, 41557), True, 'import matplotlib.pyplot as plt\n'), ((42442, 42463), 'matplotlib.pyplot.title', 'plt.title', (['event_name'], {}), '(event_name)\n', (42451, 42463), True, 'import matplotlib.pyplot as plt\n'), ((42468, 42486), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Days"""'], {}), "('Days')\n", (42478, 42486), True, 'import matplotlib.pyplot as plt\n'), ((1942, 2001), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['sanitized_string_date', 'date_fmt'], {}), '(sanitized_string_date, date_fmt)\n', (1968, 2001), False, 'import datetime\n'), ((3676, 3718), 'natsort.natsorted', 'natsorted', (['data_folders'], {'key': 'date_sort_key'}), '(data_folders, key=date_sort_key)\n', (3685, 3718), False, 'from natsort import natsorted, ns\n'), ((4674, 4708), 'os.path.join', 'os.path.join', (['folder_selected', '"""*"""'], {}), "(folder_selected, '*')\n", (4686, 4708), False, 'import os\n'), ((5534, 5559), 'tkinter.filedialog.askdirectory', 'filedialog.askdirectory', ([], {}), '()\n', (5557, 5559), False, 'from tkinter import filedialog\n'), ((5583, 5617), 'os.path.join', 'os.path.join', (['folder_selected', '"""*"""'], {}), "(folder_selected, '*')\n", (5595, 5617), False, 'import os\n'), ((7430, 7453), 'glob.glob', 'glob.glob', (['file_pattern'], {}), '(file_pattern)\n', (7439, 7453), False, 'import glob\n'), ((8304, 8349), 'pandas.to_numeric', 'pd.to_numeric', (['lick_final[c]'], {'errors': '"""coerce"""'}), "(lick_final[c], errors='coerce')\n", (8317, 8349), True, 'import pandas as pd\n'), ((10262, 10287), 'tkinter.filedialog.askdirectory', 'filedialog.askdirectory', ([], {}), '()\n', (10285, 10287), False, 'from tkinter import filedialog\n'), ((10311, 10345), 'os.path.join', 'os.path.join', (['folder_selected', '"""*"""'], {}), "(folder_selected, '*')\n", (10323, 10345), False, 'import os\n'), ((2659, 2718), 
'datetime.datetime.strptime', 'datetime.datetime.strptime', (['sanitized_string_date', 'date_fmt'], {}), '(sanitized_string_date, date_fmt)\n', (2685, 2718), False, 'import datetime\n'), ((3614, 3651), 'os.path.join', 'os.path.join', (['master_data_folder', '"""*"""'], {}), "(master_data_folder, '*')\n", (3626, 3651), False, 'import os\n'), ((4498, 4523), 'tkinter.filedialog.askdirectory', 'filedialog.askdirectory', ([], {}), '()\n', (4521, 4523), False, 'from tkinter import filedialog\n'), ((4736, 4759), 'glob.glob', 'glob.glob', (['file_pattern'], {}), '(file_pattern)\n', (4745, 4759), False, 'import glob\n'), ((5645, 5668), 'glob.glob', 'glob.glob', (['file_pattern'], {}), '(file_pattern)\n', (5654, 5668), False, 'import glob\n'), ((10373, 10396), 'glob.glob', 'glob.glob', (['file_pattern'], {}), '(file_pattern)\n', (10382, 10396), False, 'import glob\n'), ((14850, 14879), 'statistics.mean', 'statistics.mean', (['all_poke_rpm'], {}), '(all_poke_rpm)\n', (14865, 14879), False, 'import statistics\n'), ((14891, 14924), 'statistics.mean', 'statistics.mean', (['all_poke_iti_rpm'], {}), '(all_poke_iti_rpm)\n', (14906, 14924), False, 'import statistics\n'), ((16608, 16637), 'statistics.mean', 'statistics.mean', (['all_poke_rpm'], {}), '(all_poke_rpm)\n', (16623, 16637), False, 'import statistics\n'), ((16649, 16682), 'statistics.mean', 'statistics.mean', (['all_poke_iti_rpm'], {}), '(all_poke_iti_rpm)\n', (16664, 16682), False, 'import statistics\n'), ((18241, 18270), 'statistics.mean', 'statistics.mean', (['all_poke_rpm'], {}), '(all_poke_rpm)\n', (18256, 18270), False, 'import statistics\n'), ((18282, 18315), 'statistics.mean', 'statistics.mean', (['all_poke_iti_rpm'], {}), '(all_poke_iti_rpm)\n', (18297, 18315), False, 'import statistics\n'), ((21873, 21902), 'statistics.mean', 'statistics.mean', (['all_poke_dur'], {}), '(all_poke_dur)\n', (21888, 21902), False, 'import statistics\n'), ((21914, 21947), 'statistics.mean', 'statistics.mean', (['all_cue_duration'], {}), '(all_cue_duration)\n', (21929, 21947), False, 'import statistics\n'), ((21972, 22005), 'statistics.mean', 'statistics.mean', (['all_iti_poke_dur'], {}), '(all_iti_poke_dur)\n', (21987, 22005), False, 'import statistics\n'), ((22017, 22050), 'statistics.mean', 'statistics.mean', (['all_iti_duration'], {}), '(all_iti_duration)\n', (22032, 22050), False, 'import statistics\n'), ((23607, 23637), 'statistics.mean', 'statistics.mean', (['press_latency'], {}), '(press_latency)\n', (23622, 23637), False, 'import statistics\n'), ((24768, 24798), 'statistics.mean', 'statistics.mean', (['press_latency'], {}), '(press_latency)\n', (24783, 24798), False, 'import statistics\n'), ((28135, 28165), 'statistics.mean', 'statistics.mean', (['press_latency'], {}), '(press_latency)\n', (28150, 28165), False, 'import statistics\n'), ((30837, 30867), 'statistics.mean', 'statistics.mean', (['press_latency'], {}), '(press_latency)\n', (30852, 30867), False, 'import statistics\n'), ((32128, 32158), 'statistics.mean', 'statistics.mean', (['press_latency'], {}), '(press_latency)\n', (32143, 32158), False, 'import statistics\n'), ((13134, 13172), 'statistics.mean', 'statistics.mean', (['latency_dip_retrieval'], {}), '(latency_dip_retrieval)\n', (13149, 13172), False, 'import statistics\n'), ((29334, 29370), 'statistics.mean', 'statistics.mean', (['final_press_latency'], {}), '(final_press_latency)\n', (29349, 29370), False, 'import statistics\n'), ((42319, 42357), 'numpy.delete', 'np.delete', (['raw_mouse_data', 'day_to_drop'], {}), '(raw_mouse_data, 
day_to_drop)\n', (42328, 42357), True, 'import numpy as np\n')]
|