code (string, lengths 31–1.05M) | apis (list) | extract_api (string, lengths 97–1.91M)
---|---|---
import numpy as np
import matplotlib.pyplot as plt
import random
def mutation(pop, number_of_individuals, F):
index1 = np.random.randint(number_of_individuals)
index2 = np.random.randint(number_of_individuals)
index3 = np.random.randint(number_of_individuals)
# print("1: ", index1)
# print("2: ", index2)
# print("3: ", index3)
mut_vector = (pop[index1] - pop[index2]*F + pop[index3])
# print("mut: ", mut_vector)
return mut_vector
def crossover(father, mut_vector, number_of_variables):
child = [father[i] if np.random.rand() < 0.8 else mut_vector[i] for i in range(number_of_variables)]
# print("child: ", child)
return child
def evaluation(variables):
return -np.sum(np.square(variables))
def ackley(var):
length = len(var)
tmp1 = 20. - 20. * np.exp(-0.2 * np.sqrt(1. / length * np.sum(np.square(var))))
tmp2 = np.e - np.exp(1. / length * np.sum(np.cos(np.array(var) * 2. * np.pi)))
return -tmp1-tmp2
def salomon(var):
var = np.array(var)
return -(1.0 - np.cos(2.0 * np.pi * np.sqrt(sum(var ** 2.0))) + 0.1 * np.sqrt(sum(var ** 2.0)))
class DE:
def __init__(self, number_of_variables, number_of_individuals, F, evaluation):
self.number_of_variables = number_of_variables
self.number_of_individuals = number_of_individuals
self.pop = np.random.rand(number_of_individuals, number_of_variables)
self.F = F
self.evaluation = evaluation
self.generations = 1000
def optimize(self):
graph = []
for gen in range(self.generations):
pop_eval = []
# print("gen: ", gen)
for index, individual in enumerate(self.pop):
# print("index: ", index)
# print("indiv: ", individual)
# print("pop: ", self.pop)
mut_vector = mutation(self.pop, self.number_of_individuals, self.F)
child = crossover(individual, mut_vector, self.number_of_variables)
if self.evaluation(child) > self.evaluation(individual):
self.pop[index] = child
pop_eval.append(self.evaluation(self.pop[index]))
avg_evaluation = np.mean(pop_eval)
print(avg_evaluation)
final_de = avg_evaluation
graph.append(avg_evaluation)
plt.plot(graph)
plt.draw()
plt.pause(0.00001)
plt.clf()
plt.plot(graph)
plt.show()
def exercise_4(inputs): # DO NOT CHANGE THIS LINE
"""
This function receives the input in the parameter 'inputs'.
Change the code so that the output is the square of the given input.
Output should be the name of the class.
"""
F = 1.0
number_of_variables = 2
number_of_individuals = 100
de = DE(number_of_variables, number_of_individuals, F, ackley)
de.optimize()
output = inputs
return output # DO NOT CHANGE THIS LINE
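# --- Editor's sketch (not part of the original exercise) ---
# The mutation() above combines three random individuals as x1 - F*x2 + x3; the
# textbook DE/rand/1 scheme instead scales the *difference* of two individuals,
# v = x_r1 + F * (x_r2 - x_r3), followed by binomial crossover with rate CR.
# A minimal, hedged sketch of that standard variant (function names are made up):
def de_rand_1_mutation(pop, F=0.8):
    # pick three distinct individuals and build the donor vector
    r1, r2, r3 = np.random.choice(len(pop), 3, replace=False)
    return pop[r1] + F * (pop[r2] - pop[r3])

def binomial_crossover(parent, mutant, CR=0.8):
    # take each gene from the mutant with probability CR, forcing at least one mutant gene
    mask = np.random.rand(len(parent)) < CR
    mask[np.random.randint(len(parent))] = True
    return np.where(mask, mutant, parent)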
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"numpy.square",
"matplotlib.pyplot.draw",
"numpy.random.randint",
"numpy.array",
"numpy.mean",
"numpy.random.rand",
"matplotlib.pyplot.pause"
] |
[((125, 165), 'numpy.random.randint', 'np.random.randint', (['number_of_individuals'], {}), '(number_of_individuals)\n', (142, 165), True, 'import numpy as np\n'), ((179, 219), 'numpy.random.randint', 'np.random.randint', (['number_of_individuals'], {}), '(number_of_individuals)\n', (196, 219), True, 'import numpy as np\n'), ((233, 273), 'numpy.random.randint', 'np.random.randint', (['number_of_individuals'], {}), '(number_of_individuals)\n', (250, 273), True, 'import numpy as np\n'), ((1011, 1024), 'numpy.array', 'np.array', (['var'], {}), '(var)\n', (1019, 1024), True, 'import numpy as np\n'), ((1354, 1412), 'numpy.random.rand', 'np.random.rand', (['number_of_individuals', 'number_of_variables'], {}), '(number_of_individuals, number_of_variables)\n', (1368, 1412), True, 'import numpy as np\n'), ((2466, 2481), 'matplotlib.pyplot.plot', 'plt.plot', (['graph'], {}), '(graph)\n', (2474, 2481), True, 'import matplotlib.pyplot as plt\n'), ((2490, 2500), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2498, 2500), True, 'import matplotlib.pyplot as plt\n'), ((729, 749), 'numpy.square', 'np.square', (['variables'], {}), '(variables)\n', (738, 749), True, 'import numpy as np\n'), ((2221, 2238), 'numpy.mean', 'np.mean', (['pop_eval'], {}), '(pop_eval)\n', (2228, 2238), True, 'import numpy as np\n'), ((2364, 2379), 'matplotlib.pyplot.plot', 'plt.plot', (['graph'], {}), '(graph)\n', (2372, 2379), True, 'import matplotlib.pyplot as plt\n'), ((2392, 2402), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (2400, 2402), True, 'import matplotlib.pyplot as plt\n'), ((2415, 2431), 'matplotlib.pyplot.pause', 'plt.pause', (['(1e-05)'], {}), '(1e-05)\n', (2424, 2431), True, 'import matplotlib.pyplot as plt\n'), ((2446, 2455), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2453, 2455), True, 'import matplotlib.pyplot as plt\n'), ((555, 571), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (569, 571), True, 'import numpy as np\n'), ((858, 872), 'numpy.square', 'np.square', (['var'], {}), '(var)\n', (867, 872), True, 'import numpy as np\n'), ((929, 942), 'numpy.array', 'np.array', (['var'], {}), '(var)\n', (937, 942), True, 'import numpy as np\n')]
|
# Copyright 2017. <NAME>. All rights reserved
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
import h5py
import pandas as pd
import numpy as np
MAGIC_ATTR = 'magic'
MAGIC_VAL = 0x0A7A
VERSION_ATTR = 'version'
VERSION_NA = 'NA'
VERSION_CURRENT = '0.1'
try:
ver_split = VERSION_CURRENT.split('.')
VERSION_MAJOR = ver_split[0]
VERSION_MINOR = ver_split[1]
except (IndexError, AttributeError) as err:
VERSION_MAJOR = 0
VERSION_MINOR = 1
def listify(files):
# TODO: change this to include any iterable datastructures (sets, panda sequences, etc)
if not isinstance(files, (list, tuple)):
return [files]
else:
return files
def load_h5(h5file, mode='r'):
# TODO: Allow for h5py.Group also
if isinstance(h5file, h5py.File):
return h5file
return h5py.File(h5file, mode)
def load_csv(csvfile):
# TODO: make the separator more flexible
if isinstance(csvfile, pd.DataFrame):
return csvfile
# TODO: check if it is csv object and convert to a pd dataframe
return pd.read_csv(csvfile, sep=' ', na_values='NONE')
def get_attribute_h5(h5obj, attribut_name, default=None):
val = h5obj.attrs.get(attribut_name, default)
if using_py3 and isinstance(val, bytes):
# There is a bug with h5py returning unicode/str-based attributes as bytes
val = val.decode()
return val
def check_magic(hdf5_file):
"""Check the magic attribute exists according to the sonata format"""
h5_file_obj = load_h5(hdf5_file)
if MAGIC_ATTR not in h5_file_obj.attrs:
raise Exception('File {} missing top-level \"{}\" attribute.'.format(h5_file_obj.filename, MAGIC_ATTR))
elif np.uint32(get_attribute_h5(h5_file_obj, MAGIC_ATTR)) != MAGIC_VAL:
raise Exception('File {} has unexpected magic value (expected {})'.format(h5_file_obj.filename, MAGIC_VAL))
return True
def get_version(hdf5_file):
h5_file_obj = load_h5(hdf5_file)
if VERSION_ATTR not in h5_file_obj.attrs:
return VERSION_NA
else:
version_val = get_attribute_h5(h5_file_obj, VERSION_ATTR)
version_str = str(version_val[0])
for ver_sub in version_val[1:]:
version_str += '.{}'.format(ver_sub)
return version_str
def add_hdf5_magic(hdf5_handle):
hdf5_handle['/'].attrs['magic'] = np.uint32(0x0A7A)
def add_hdf5_version(hdf5_handle):
hdf5_handle['/'].attrs['version'] = [np.uint32(VERSION_MAJOR), np.uint32(VERSION_MINOR)]
def get_node_ids(nodes_path, population):
# Used by PoissonSpikesGenerator
with h5py.File(nodes_path, 'r') as h5:
node_ids = h5['/nodes'][population]['node_id'][()]
return node_ids
if sys.version_info[0] == 3:
using_py3 = True
range_itr = range
else:
using_py3 = False
range_itr = xrange
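# --- Editor's usage sketch (hedged) ---
# Minimal round trip through the helpers above; the file name 'example_nodes.h5'
# is made up for illustration.
if __name__ == '__main__':
    with h5py.File('example_nodes.h5', 'w') as h5:
        add_hdf5_magic(h5)        # writes the 0x0A7A magic attribute
        add_hdf5_version(h5)      # writes [major, minor] = [0, 1]
    with h5py.File('example_nodes.h5', 'r') as h5:
        check_magic(h5)           # raises if the magic attribute is missing or wrong
        print(get_version(h5))    # prints '0.1'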
|
[
"pandas.read_csv",
"h5py.File",
"numpy.uint32"
] |
[((2252, 2275), 'h5py.File', 'h5py.File', (['h5file', 'mode'], {}), '(h5file, mode)\n', (2261, 2275), False, 'import h5py\n'), ((2491, 2538), 'pandas.read_csv', 'pd.read_csv', (['csvfile'], {'sep': '""" """', 'na_values': '"""NONE"""'}), "(csvfile, sep=' ', na_values='NONE')\n", (2502, 2538), True, 'import pandas as pd\n'), ((3772, 3787), 'numpy.uint32', 'np.uint32', (['(2682)'], {}), '(2682)\n', (3781, 3787), True, 'import numpy as np\n'), ((3868, 3892), 'numpy.uint32', 'np.uint32', (['VERSION_MAJOR'], {}), '(VERSION_MAJOR)\n', (3877, 3892), True, 'import numpy as np\n'), ((3894, 3918), 'numpy.uint32', 'np.uint32', (['VERSION_MINOR'], {}), '(VERSION_MINOR)\n', (3903, 3918), True, 'import numpy as np\n'), ((4010, 4036), 'h5py.File', 'h5py.File', (['nodes_path', '"""r"""'], {}), "(nodes_path, 'r')\n", (4019, 4036), False, 'import h5py\n')]
|
from nltk.corpus import stopwords
from nltk.stem.lancaster import LancasterStemmer
from nltk.stem import SnowballStemmer, PorterStemmer
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from . import __path__ as ROOT_PATH
from nltk.tokenize import word_tokenize, sent_tokenize
from copy import copy
from glob import glob
import numpy as np
import string
import re
def mytokenizer(text, remove_stops=True, stop_words='nltk', stemming='snowball', drop_punc=True,
drop_digits=True, stem=True, otherpunc='', keep_puncs='', tokensonly=True):
'''
This is a custom tokenizer. We make tokens in this order:
1) Removing URLs (this is unchangeable),
2) Removing punctuation,
3) Lowercasing,
4) Removing stopwords (see the list in the data folder), ... it assumes the English language,
5) Applying a stemmer.
* If using Word2Vec, do not remove stopwords. According to a Kaggle site 'To train Word2Vec it is better not to
remove stop words because the algorithm relies on the broader context of the sentence in order to produce
high-quality word vectors'. You can try both -- see what happens.
:param text: str, This is the text string to be cleaned and tokenized.
:param remove_stops: bool, Just decide if you want to remove stop words or not. Default is True
:param stop_words: str, in ('nltk', 'google', 'ranksnl'). The 'nltk' stopwords are the default NLTK ones,
and the others come from www.ranks.nl (google, by way of). The Google list is by far the longest.
:param stemming: str, in ('snowball', 'porter', or 'lancaster')
:param drop_punc: bool, Self-explanatory. (drop the punctuation)
:param drop_digits: bool, Self-explanatory. (drop the digits)
:param stem: bool, Self-explanatory.
:param keep_puncs: (str), A string of punctuation characters you want to keep in the text.
:return: (tuple), [0] the cleaned text on its own and [1] the tokenized string values. Note, the clean text will
only have cleaned up the numbers and punctuation.
'''
# with open(f'{ROOT_PATH[0]}/data/punc.txt', 'r') as f:
# otherpunc = f.readlines()
#
# otherpunc = ''.join(set([x.strip() for x in otherpunc]))
punctuation = otherpunc + string.punctuation
punctuation = ''.join([x for x in punctuation if x not in list(keep_puncs)])
urlpat = r'(https?:\/\/)(\s)*(www\.)?(\s)*(\w+\.)*([\w\-\s]+\/)*([\w\-]+)\/?(\??\w+\s*=\w+\&?)*'  # raw string avoids invalid-escape warnings
# Define substitution methods
clean_text = copy(text)
clean_text = re.sub(urlpat, '', clean_text)
remove_punct = str.maketrans('', '', punctuation)
if drop_punc:
clean_text = clean_text.replace('--', ' ')
clean_text = clean_text.translate(remove_punct)
if drop_digits:
remove_digit = str.maketrans('', '', string.digits)
clean_text = clean_text.translate(remove_digit)
clean_text = clean_text.lower()
clean_tokens = word_tokenize(clean_text)
if remove_stops:
if stop_words == 'google':
with open(f'{ROOT_PATH[0]}/data/sw_google.txt', 'r') as f:
stop_words = f.readlines()
stop_words = [word.strip() for word in stop_words]
elif stop_words == 'ranksnl':
with open(f'{ROOT_PATH[0]}/data/sw_ranksnl.txt', 'r') as f:
stop_words = f.readlines()
stop_words = [word.strip() for word in stop_words]
else:
stop_words = stopwords.words('english')
# Some of the lists of stopwords haven't removed the punctuation. :| (neutral face)
stop_words = [word.translate(remove_punct) for word in stop_words]
stop_words = set(stop_words)
clean_tokens = [word for word in clean_tokens if word not in stop_words]
stemmer = SnowballStemmer('english', ignore_stopwords=True)
if stemming == 'porter':
stemmer = PorterStemmer()
if stemming == 'lancaster':
stemmer = LancasterStemmer()
if stem:
clean_tokens = [stemmer.stem(y) for y in clean_tokens]
if tokensonly:
return clean_tokens
else:
return clean_text, clean_tokens
def vectorize(X, tokenizer=mytokenizer, use_tfidf=True, Tfidf_args=None, CountV_args=None):
'''
!RETURNS A TUPLE, DUDE!
This is a custom vectorizer which does a TF-IDF transformation by default. It returns a scipy sparse matrix to be
used with sklearn machine learning functions, and the vectorizer that was trained on it (using the parameters
defined).
:param X: array, These are the data to be transformed.
:param tokenizer: func, Tokenizer to use. Defaults to the default of mytokenizer
:param use_tfidf: bool, Run the TF-IDF transformation, or not.
:param Tfidf_args: dict, Parameters and values to pass to the TfidfVectorizer function
:param CountV_args: dict, Parameters and values to pass to the CountVectorizer function
:return: tuple, [0] scipy.sparse.csr.csr_matrix, the transformed matrix in sparse format, [1] The vectorizer.
'''
if not Tfidf_args:
Tfidf_args = dict()
if not CountV_args:
CountV_args = dict()
vec = TfidfVectorizer(tokenizer=tokenizer, **Tfidf_args)
if not use_tfidf:
vec = CountVectorizer(tokenizer=tokenizer, **CountV_args)
X_vec = vec.fit_transform(X)
return X_vec, vec
def doc2sent(X_docs, remove_stops=False, stop_words='nltk', stemming='snowball', drop_puncnums=True, stem=True):
'''
This is mostly to prepare a list of documents for building a Word2Vec model. If you have data where each record
is a piece of text (i.e., a document), use this function to output a new array where each record is a list of
words of a sentence.
:param X_docs: (np.array, pd.DataFrame) This is the original data, with each record a document text.
:param remove_stops: (bool, option) Do you want to remove stop words? In some cases, it may work out best to keep
this as True, but for our purposes, we kept it as False.
:param stop_words: (str, optional) In ('nltk', 'google', 'ranksnl'). The 'nltk' stopwords are the default
NLTK ones, and the others come from www.ranks.nl (google, by way of).
:param stemming: (str, optional) In ('snowball', 'porter', or 'lancaster')
:param drop_puncnums: (bool, optional) Drop the punctuation and the numbers
:param stem: (bool, optional) Self-explanatory.
:return: (list) A list of all the sentences for each document in tokenized format.
'''
X_sent = X_docs.apply(sent_tokenize)
# Take list of lists, compile the union.
X_sent = [item for sublist in X_sent for item in sublist]
# Word2Vec wants sentences as lists of words
X_sent_tokens = [mytokenizer(text, remove_stops=remove_stops,
stop_words=stop_words, stemming=stemming,
drop_punc=drop_puncnums, drop_digits=drop_puncnums, stem=stem) for text in X_sent]
return X_sent_tokens
def generator(data, lookback, delay, min_index=0, max_index=None, shuffle=False, batch_size=128, step=6):
'''
This is adapted from <NAME> in his guide to Keras.
:param data:
:param lookback:
:param delay:
:param min_index:
:param max_index:
:param shuffle:
:param batch_size:
:param step:
:return:
'''
if max_index is None:
max_index = len(data) - delay - 1
i = min_index + lookback
while 1:
if shuffle:
rows = np.random.randint(
min_index + lookback, max_index, size=batch_size)
else:
if i + batch_size >= max_index:
i = min_index + lookback
rows = np.arange(i, min(i + batch_size, max_index))
i += len(rows)
samples = np.zeros((len(rows), lookback // step, data.shape[-1]))
targets = np.zeros((len(rows),))
for j, row in enumerate(rows):
indices = range(rows[j] - lookback, rows[j], step)
samples[j] = data[indices]
targets[j] = data[rows[j] + delay][1]
yield samples, targets
def sequence(tokens, tokens_unique=None, token_indices=None, maxlen=10, step=1):
'''
This is adapted from the Keras code provided by <NAME> in his guide to Keras. Originally,
it was meant to be character-based, but this is adapted to work with either characters or words.
:param maxlen: (int) This is the maximum length of the X sequences in 'kind' (e.g., if kind='words',
then this is the maximum value for "pre-sequences" like ['hello', 'im'] --> 'leon' would have maxlen of 2 if
we're using kind='words')
:param step: (int) Overlap. For each of the "pre-sequences", how much should the next overlap with the
previous. In other words, how many of the first words of the previous should *not* be in the next?
:return: None. This sets up x and y for the model to train on
'''
sentences = []
next_tokens = []
maxlen = maxlen
if not token_indices:
token_indices = dict((t, i) for i, t in enumerate(tokens_unique))
# Here we make our sequences which correspond to one another
for i in range(0, len(tokens) - maxlen, step):
sentences.append(tokens[i: i + maxlen])
next_tokens.append(tokens[i + maxlen])
# Basically, we want to have our input data (x) in tensor format:
# sentence_1: [ [token_1_vector] [token_2_vector] ... [token_t_vector] ... [token_k_vector] ]
# sentence_2: [ [token_1_vector] [token_2_vector] ... [token_t_vector] ... [token_k_vector] ]
# ...
# sentence_i: [ [token_1_vector] [token_2_vector] ... [token_t_vector] ... [token_k_vector] ]
# ...
# sentence_n: [ [token_1_vector] [token_2_vector] ... [token_t_vector] ... [token_k_vector] ]
# NOTE: sentence_t is just the sentence that starts with the `step`th token of sentence_t-1 of length k.
# Where each sentence i has k = maxlen tokens, and [token_t_vector] is a one hot vector of dimension
# len(tokens_unique), with a 1 (True) if the token_t is that token, and 0 (False) otherwise. We want
# our data like this because in the end, we want a probability distribution among the possible token values.
x = np.zeros((len(sentences), maxlen, len(tokens_unique)), dtype=bool)
y = np.zeros((len(sentences), len(tokens_unique)), dtype=bool)
for i, sentence in enumerate(sentences):
for t, token in enumerate(sentence):
x[i, t, token_indices[token]] = 1
y[i, token_indices[next_tokens[i]]] = 1
return x, y
class IterSentences(object):
def __init__(self, dirname='./blah', min_sent_char=10, maxtexts=None):
'''
This makes an iterator which goes through all of the *.txt files in a directory, removes punctuation (except
for periods, exclamation marks, and question marks), lowercases, and yields a list of the words in each
sentence of each text. For example, if there are two files in a directory, and a subdirectory with another
text file in it, this will yield lists of the words in the first file, then the next file, the next file,
and then the words in the file in the subdirectory. I.e., this function works recursively.
:param dirname: (str) The directory with all the texts in it.
:param min_sent_char: (int) The minimum number of characters in a 'sentence'. At the time of making this,
we have a few 'sentences' which end up being just periods or question marks, because of the way the data was
gathered.
:param maxtexts: (int) The maximum number of texts to go through. By default, this will go through all of the
texts in the directory. It's best only to use this if you're diagnosing.
'''
self.globs = glob(dirname + '/**/*.txt', recursive=True)
if maxtexts:
self.maxtexts = maxtexts
else:
self.maxtexts = len(self.globs)
self.min_sent_char = min_sent_char
def __iter__(self):
for fname in self.globs[:self.maxtexts]:
with open(fname, encoding='utf-8', errors='ignore') as f:
text = f.read()
text_clean, _ = mytokenizer(text, remove_stops=False, stem=False, keep_puncs='!?.', tokensonly=False)
sentences = [sent for sent in sent_tokenize(text_clean) if len(sent) > self.min_sent_char]
for sentence in sentences:
yield [word for word in sentence.split() if word not in ['!', '?', '.']]
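# --- Editor's usage sketch (hedged) ---
# How mytokenizer() and vectorize() compose. Assumes the module is imported from
# inside its package (because of the relative ROOT_PATH import) and that the
# NLTK 'punkt' and 'stopwords' corpora are installed; the sample texts are made up.
def _demo():
    docs = [
        'Check out https://example.com for 100 great NLP tips!',
        'Stemming and stopword removal change the tokens quite a bit.',
    ]
    print(mytokenizer(docs[0]))          # cleaned, stemmed tokens only
    X_vec, vec = vectorize(docs)         # TF-IDF sparse matrix + fitted vectorizer
    print(X_vec.shape, len(vec.vocabulary_))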
|
[
"sklearn.feature_extraction.text.CountVectorizer",
"nltk.stem.PorterStemmer",
"sklearn.feature_extraction.text.TfidfVectorizer",
"nltk.stem.SnowballStemmer",
"copy.copy",
"nltk.stem.lancaster.LancasterStemmer",
"nltk.tokenize.sent_tokenize",
"numpy.random.randint",
"nltk.corpus.stopwords.words",
"glob.glob",
"re.sub",
"nltk.tokenize.word_tokenize"
] |
[((2495, 2505), 'copy.copy', 'copy', (['text'], {}), '(text)\n', (2499, 2505), False, 'from copy import copy\n'), ((2523, 2553), 're.sub', 're.sub', (['urlpat', '""""""', 'clean_text'], {}), "(urlpat, '', clean_text)\n", (2529, 2553), False, 'import re\n'), ((2915, 2940), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['clean_text'], {}), '(clean_text)\n', (2928, 2940), False, 'from nltk.tokenize import word_tokenize, sent_tokenize\n'), ((3768, 3817), 'nltk.stem.SnowballStemmer', 'SnowballStemmer', (['"""english"""'], {'ignore_stopwords': '(True)'}), "('english', ignore_stopwords=True)\n", (3783, 3817), False, 'from nltk.stem import SnowballStemmer, PorterStemmer\n'), ((5136, 5186), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'tokenizer': 'tokenizer'}), '(tokenizer=tokenizer, **Tfidf_args)\n', (5151, 5186), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((3866, 3881), 'nltk.stem.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (3879, 3881), False, 'from nltk.stem import SnowballStemmer, PorterStemmer\n'), ((3933, 3951), 'nltk.stem.lancaster.LancasterStemmer', 'LancasterStemmer', ([], {}), '()\n', (3949, 3951), False, 'from nltk.stem.lancaster import LancasterStemmer\n'), ((5224, 5275), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'tokenizer': 'tokenizer'}), '(tokenizer=tokenizer, **CountV_args)\n', (5239, 5275), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((11794, 11837), 'glob.glob', 'glob', (["(dirname + '/**/*.txt')"], {'recursive': '(True)'}), "(dirname + '/**/*.txt', recursive=True)\n", (11798, 11837), False, 'from glob import glob\n'), ((7498, 7565), 'numpy.random.randint', 'np.random.randint', (['(min_index + lookback)', 'max_index'], {'size': 'batch_size'}), '(min_index + lookback, max_index, size=batch_size)\n', (7515, 7565), True, 'import numpy as np\n'), ((3440, 3466), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (3455, 3466), False, 'from nltk.corpus import stopwords\n'), ((12329, 12354), 'nltk.tokenize.sent_tokenize', 'sent_tokenize', (['text_clean'], {}), '(text_clean)\n', (12342, 12354), False, 'from nltk.tokenize import word_tokenize, sent_tokenize\n')]
|
# script for generating user-user projection.
# copied from anthony's notebook mostly, with modifications to split users by timestamp
from snap_import_user_projection import UnimodalUserProjection
from pyspark.sql import SparkSession, functions as F
spark = SparkSession.builder.getOrCreate()
input_path = "src/data/processed/enwiki-meta-compact"
# use this as a smaller sample to test
#input_path = "src/data/processed/enwiki-meta-compact/part-00000-b9d9476b-cc88-44c4-8b82-f39efb715f54-c000.snappy.parquet"
model = UnimodalUserProjection(spark).extract(input_path).transform()
# modify to append quarter and last digit of year to user id
# note that conversion process is quite slow if full year is included (100x more nodes to not relabel)
block_list = spark.sql("""
with block_list as (
select
article_id,
concat(year(edit_date), '-', quarter(edit_date)) as edit_date,
collect_set(cast(concat(quarter(edit_date), mod(year(edit_date),10), user_id) AS bigint)) as user_set
from bipartite
group by 1,2
)
select
article_id,
edit_date,
size(user_set) as n_users,
user_set
from block_list
""")
block_list.cache()
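# --- Editor's note (hedged) ---
# The query above prefixes each user id with the quarter and the last digit of
# the year, so the same user editing in different quarters becomes a distinct
# node. Plain-Python illustration of that encoding (helper name is made up):
def _encode_user(user_id, year, quarter):
    return int(f"{quarter}{year % 10}{user_id}")
# _encode_user(12345, 2008, 3) -> 3812345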
# calculate markov bounds for cliques of size 1-n based on variables
from scipy.optimize import fsolve
import numpy as np
from pyspark.sql import types as T
from itertools import combinations
from random import random
n = 1732
epsilon = 0.01
# loop over all n using previous value as seed
any_bound = {}
all_bound = {}
p_one = 1
p_all = 1
for k in range(2,n+1):
func_one = lambda p: ((1-p) ** (k-1)) / epsilon - 1
func_any = lambda p: (1 - ((1- ((1-p) ** (k-1))) ** k)) / epsilon - 1
p_one = fsolve(func_one,p_one)[0]
p_all = fsolve(func_any,p_all)[0]
any_bound[k] = p_one
all_bound[k] = p_all
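# --- Editor's sanity check (hedged, not called anywhere) ---
# At the solved probabilities the defining residuals should vanish, i.e.
# (1 - any_bound[k]) ** (k - 1) ~ epsilon and
# 1 - (1 - (1 - all_bound[k]) ** (k - 1)) ** k ~ epsilon.
def _check_bounds(k=50):
    print(k, (1 - any_bound[k]) ** (k - 1))
    print(k, 1 - (1 - (1 - all_bound[k]) ** (k - 1)) ** k)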
@F.udf(T.ArrayType(T.ArrayType(T.LongType())))
def all_edges(user_set):
return list(combinations(sorted(user_set), 2))
#block_list.selectExpr("n_users*(n_users-1)/2 as n_edges").selectExpr("sum(n_edges)").show()
bounds = any_bound
# returns n=ceil(k(k-1)/2 * p) edges
def get_n_edges(user_set, k, p):
edge_set = set()
edge_list = []
n = int(np.ceil(k*(k-1)/2 * p))
while len(edge_set) < n:
edge = np.sort(np.random.choice(k,2,replace=False))
es = str(edge)
if es not in edge_set:
edge_set.add(es)
edge_list.append(edge)
return np.array(edge_list)
@F.udf(T.ArrayType(T.ArrayType(T.LongType())))
def sample_edges(user_set):
k = len(user_set)
if k < 2:
return []
p = bounds[k]
return [c for c in combinations(sorted(user_set), 2) if random() < p]
edgelist = (
block_list
.select(F.explode(sample_edges("user_set")).alias("edges"))
.select(F.col("edges").getItem(0).alias("e1"), F.col("edges").getItem(1).alias("e2"))
.groupby("e1", "e2")
.agg(F.expr("count(*) as weight"))
)
edgelist.write.option("sep"," ").csv("src/data/processed/user-network-full")
|
[
"numpy.ceil",
"pyspark.sql.functions.expr",
"pyspark.sql.SparkSession.builder.getOrCreate",
"scipy.optimize.fsolve",
"pyspark.sql.types.LongType",
"random.random",
"numpy.array",
"snap_import_user_projection.UnimodalUserProjection",
"pyspark.sql.functions.col",
"numpy.random.choice"
] |
[((259, 293), 'pyspark.sql.SparkSession.builder.getOrCreate', 'SparkSession.builder.getOrCreate', ([], {}), '()\n', (291, 293), False, 'from pyspark.sql import SparkSession, functions as F\n'), ((2391, 2410), 'numpy.array', 'np.array', (['edge_list'], {}), '(edge_list)\n', (2399, 2410), True, 'import numpy as np\n'), ((2850, 2878), 'pyspark.sql.functions.expr', 'F.expr', (['"""count(*) as weight"""'], {}), "('count(*) as weight')\n", (2856, 2878), True, 'from pyspark.sql import SparkSession, functions as F\n'), ((1674, 1697), 'scipy.optimize.fsolve', 'fsolve', (['func_one', 'p_one'], {}), '(func_one, p_one)\n', (1680, 1697), False, 'from scipy.optimize import fsolve\n'), ((1712, 1735), 'scipy.optimize.fsolve', 'fsolve', (['func_any', 'p_all'], {}), '(func_any, p_all)\n', (1718, 1735), False, 'from scipy.optimize import fsolve\n'), ((2149, 2177), 'numpy.ceil', 'np.ceil', (['(k * (k - 1) / 2 * p)'], {}), '(k * (k - 1) / 2 * p)\n', (2156, 2177), True, 'import numpy as np\n'), ((1820, 1832), 'pyspark.sql.types.LongType', 'T.LongType', ([], {}), '()\n', (1830, 1832), True, 'from pyspark.sql import types as T\n'), ((2225, 2262), 'numpy.random.choice', 'np.random.choice', (['k', '(2)'], {'replace': '(False)'}), '(k, 2, replace=False)\n', (2241, 2262), True, 'import numpy as np\n'), ((2443, 2455), 'pyspark.sql.types.LongType', 'T.LongType', ([], {}), '()\n', (2453, 2455), True, 'from pyspark.sql import types as T\n'), ((519, 548), 'snap_import_user_projection.UnimodalUserProjection', 'UnimodalUserProjection', (['spark'], {}), '(spark)\n', (541, 548), False, 'from snap_import_user_projection import UnimodalUserProjection\n'), ((2619, 2627), 'random.random', 'random', ([], {}), '()\n', (2625, 2627), False, 'from random import random\n'), ((2738, 2752), 'pyspark.sql.functions.col', 'F.col', (['"""edges"""'], {}), "('edges')\n", (2743, 2752), True, 'from pyspark.sql import SparkSession, functions as F\n'), ((2777, 2791), 'pyspark.sql.functions.col', 'F.col', (['"""edges"""'], {}), "('edges')\n", (2782, 2791), True, 'from pyspark.sql import SparkSession, functions as F\n')]
|
"""
Class for walker with approximate average-patterns, in particular the
approximate MFPT from root to target.
"""
import pattern_walker as rw
import numpy as np
import networkx as nx
__all__ = [
'MF_patternWalker', 'overlap_MF_patternWalker'
]
class MF_patternWalker(rw.fullProbPatternWalker):
"""
Have mean-field computation for pattern_walker all in one place. args and
kwargs are the relevant parameters for the pattern_walker.
"""
def __init__(self,c=4,h=3,*args,**params):
self.c=c
self.h=h
super(MF_patternWalker,self).__init__(*args,**params)
self.set_weights()
self.set_coordinates()
def Q_power(self,k,aj=None):
if aj is None:
aj=self.a_high
Gamma=self.Gamma
if aj>0:
out= np.array(
[[1-aj*(1-(1-Gamma)**k),aj*(1-(1-Gamma)**k)],
[(1-aj)*(1-(1-Gamma)**k),1-(1-aj)*(1-(1-Gamma)**k)]]
)
elif aj==0:
out=np.linalg.matrix_power(np.array( [[1,0],[Gamma,1-Gamma]] ),k)
return out
def R_up(self,aj=None,a=None,Gammap=None):
if aj is None:
aj=self.a_high
if a is None:
a=self.a_root
if Gammap is None:
Gammap=self.Gamma_root
if aj>0 and aj<1:
out= np.array(
[[(1-a)*(1-Gammap)/(1-aj),1-(1-a)*(1-Gammap)/(1-aj)],
[(1-a)/aj*Gammap,1-(1-a)/aj*Gammap]]
)
elif aj==0:
out=np.array(
[[(1-a)*(1-Gammap),1-(1-a)*(1-Gammap)],
[0.,1.]]
)
elif aj==1:
out=np.array(
[[0.,1.],
[0.,1.]]
)
return out
def R_down(self,aj=None,a=None,Gammap=None):
if aj is None:
aj=self.a_high
if a is None:
a=self.a_root
if Gammap is None:
Gammap=self.Gamma_root
if a>0:
return np.array(
[[1-Gammap,Gammap],
[1-(aj-(1-a)*Gammap)/a,(aj-(1-a)*Gammap)/a]]
)
elif a==0:
out=np.array(
[[1-Gammap,Gammap],
[0.,1.]]
)
return out
#for full overlap, the pattern distance rates at graph distance k from the target. Cases separated depending
#on whether the root needs to be involved
#k includes the upward root link if applicable, m includes the downwards root link, if applicable
#so the sum of k and m has to be the graph distance
def f(self,k,up=0,down=0,m=0,mu=0,ajl=None,ajr=None):
if ajl is None:
ajl=self.a_high
if ajr is None:
ajr=self.a_high
a=self.a_root
Gamma=self.Gamma
Gammap=self.Gamma_root
out=0.
if k>0 and up+down+m==0:
# NOTE: seems correct
out=2*ajl*(1-ajl)*(1-(1-Gamma)**k)
#out=self.Q_power(k,aj=ajl)
#out = ajl*out[1,0]+(1-ajl)*out[0,1]
elif up==1 and down+m==0:
# NOTE: seems okay
if a:
out=a+ajl-2*a*ajl+2*(1-a)*(1-Gamma)**k*(Gammap-ajl)
else:
out=Gammap
# out=self.Q_power(k,aj=ajl).dot(self.R_up(aj=ajl))
# out = ajl*out[1,0]+(1-ajl)*out[0,1]
elif up==down==1:
# NOTE: seems correct
#out=2/a*(1-Gamma)**(k+m)*((1-a)*(Gammap*(ajl+ajr)-Gammap**2)-ajl*ajr)+ajl+ajr+2*ajl*ajr*(1-(1-Gamma)**(k+m))
if a:
out=2/a*(1-Gamma)**(k+m)*((1-a)*Gammap*(ajl+ajr-Gammap)-ajl*ajr)+ajl+ajr-2*ajl*ajr*(1-(1-Gamma)**(k+m))
else:
# out=(1-Gammap)*(2*Gammap+a*(1-Gamma)**m*(1-2*Gammap))
out=2*(1-Gammap)*(Gammap-a*(1-Gamma)**m)+a*(1-Gammap)*(1-Gamma)**m
# out=self.Q_power(k,aj=ajl).dot(self.R_up(aj=ajl)).dot(self.R_down(aj=ajr)).dot(self.Q_power(m,aj=ajr))
# out = ajl*out[1,0]+(1-ajl)*out[0,1]
elif up==0 and down==1:
#out=a+ajr-2*a*ajr+2*(1-a)*(1-Gamma)**m*(Gammap-ajr)
# out=self.R_down(aj=ajr).dot(self.Q_power(m,aj=ajr))
# out = a*out[1,0]+(1-a)*out[0,1]
if a:
# NOTE: seems alright (special case to be tested)
out=2*a*(1-a)*(1-(1-Gamma)**m)+2*(1-a)*(1-Gamma)**m*Gammap
else:
out=Gammap
elif down==0:
# NOTE: should be alright
# out=self.Q_power(m,aj=ajr)
# out = ajr*out[1,0]+(1-ajr)*out[0,1]
out=2*ajr*(1-ajr)*(1-(1-Gamma)**m)
return out
def epsilon(self,f2,fk,fk2):
# NOTE: in the notation of the paper this is epsilon-1, to avoid
# too many distinct cases
L=self.pattern_len
out=0.
if fk==0:
if f2==0:
out=0.
else:
out=f2*self.pattern_len
else:
# f2=f2/fk2
dk1_inv_exp=(1-(1-fk)**(self.pattern_len+1))/((self.pattern_len+1)*fk)
out=-1+(1+L*f2*fk2/(1-fk)-fk2*(1-fk-f2)/(fk*(1-fk)))*dk1_inv_exp+fk2*(1-fk-f2)/(fk*(1-fk))
return out
def root_cluster_eq_ratio(self):
#eq prob of cluster divided by eq prob of articulation pt, here the root itself
bias_list=[
1+self.epsilon(
self.f(0,1,1,0),\
self.f(self.h-1,0,0,0),\
self.f(self.h-1,1,1,0)
),\
1+self.epsilon(
self.f(0,0,1,1),\
self.f(self.h-1,1,0,0),\
self.f(self.h-1,1,1,1))
]+[
1+self.epsilon(
self.f(0,0,0,2),\
self.f(self.h-1,1,1,m),\
self.f(self.h-1,1,1,m+2) )
for m in range(self.h-1)
]
out=1+(self.c-1)/(self.c-1+bias_list[0])*\
(
np.sum([
self.c**l*(self.c+bias_list[l+1])/\
np.prod( [bias_list[k+1] for k in range(l+1)])
for l in range(self.h-1)
])+\
self.c**(self.h-1)/np.prod( [bias_list[l+1] for l in range(self.h-1)])
)
return out
def sub_root_cluster_eq_ratio(self):
#eq prob of cluster divided by eq prob of articulation pt, here the node under the root
#just under the root things are a bit messy, hence the following epsilons
e_r=1+self.epsilon(self.f(2,0,0,0),self.f(self.h-2,0,0,0),self.f(self.h,0,0,0))
e_u=1+self.epsilon(self.f(1,1,0,0),self.f(self.h-2,0,0,0),self.f(self.h-1,1,0,0))
bias_list=[ e_r,e_u ]+[ 1+self.epsilon( self.f(2,0,0,0),self.f(self.h-1+l,0,0,0),self.f(self.h+1+l,0,0,0) ) for l in range(self.h-2) ]
out=1+(self.c-1)*e_u/( (self.c-1)*e_u+e_r+e_r*e_u )*\
(
np.sum([
self.c**l*(self.c+bias_list[l+2])/np.prod( [bias_list[m+2] for m in range(l+1)])
for l in range(self.h-2)
])+\
self.c**(self.h-2)/np.prod( [bias_list[l+2] for l in range(self.h-2)])
)
return out
def eq_ratio(self,k):
#eq prob of cluster divided by eq prob of articulation pt at height k over target
if k==0:
return 1.
else:
bias_list=[ 1+self.epsilon( self.f(2,0,0,0),self.f(k-1+l,0,0,0),self.f(k+1+l,0,0,0) ) for l in range(k) ]
out=1+ (self.c-1)/(self.c+bias_list[0])*\
(
np.sum([
self.c**l*(self.c+bias_list[l+1])/np.prod( [ bias_list[m+1] for m in range(l+1)])
for l in range(k-1)
])+\
self.c**(k-1)/np.prod( [ bias_list[l+1] for l in range(k-1)])
)
return out
def MF_mfpt(self,ajl=None,ajr=None,a=None,Gamma=None,Gammap=None,**kwargs):
#just under the root things are a bit messy, hence the following epsilons
e_r=1+self.epsilon(self.f(2,0,0,0),self.f(self.h-2,0,0,0),self.f(self.h,0,0,0))
e_u=1+self.epsilon(self.f(1,1,0,0),self.f(self.h-2,0,0,0),self.f(self.h-1,1,0,0))
cord_weight_list =\
[
1+self.epsilon(
self.f(0,1,1,0),\
self.f(self.h-1,0,0,0),\
self.f(self.h-1,1,1,0)
)
]+\
[ e_u ]+\
[
1+self.epsilon(
self.f(2,0,0,0),\
self.f(self.h-k-1,0,0,0),\
self.f(self.h-k+1,0,0,0)
)
for k in range(2,self.h+1)
]
numerators=[ self.c-1+cord_weight_list[0] ]+[(e_u*(self.c-1)+e_r+e_r*e_u)/e_r]+[self.c+cord_weight_list[k] for k in range(2,self.h+1)]
eq_ratio_list = [self.root_cluster_eq_ratio(), self.sub_root_cluster_eq_ratio()]+[ self.eq_ratio(self.h-k) for k in range(2,self.h+1) ]
out = np.sum(np.sum( [[eq_ratio_list[k]*numerators[k]/np.prod(cord_weight_list[k:i]) for k in range(i)] for i in range(self.h+1)] ))
return out
def MTM(self, number_samples: int,nodelist: list=None) ->np.array:
#return the average transition matrix, sampled over number_samples interations.
W=np.zeros( (len(self),len(self)) )
if nodelist is None:
nodelist=[self.root]+list( self.nodes -set([self.root,self.target_node]) )+[self.target_node]
for _ in range(number_samples):
self.reset_patterns()
W_temp=nx.to_numpy_array(self,nodelist=nodelist)
if (np.sum(W_temp,axis=-1)!=1).any():
W_temp=np.diag(1/np.sum(W_temp,axis=-1)).dot(W_temp)
W+=W_temp
W/=number_samples
return W
def MTM_mfpt(self, number_samples: int=0, nodelist: list=None) -> float:
"""
Calculates MFPT based on mean transition matrix, MTM. If number_samples=0,
the approximate function approx_MTM is used. Else we sample the MTM.
"""
out=0
if nodelist is None:
nodelist=[self.root]+list( self.nodes -set([self.root,self.target_node]) )+[self.target_node]
if number_samples:
out=self.MTM(number_samples,nodelist=nodelist)
else:
_,out=self.approx_MTM(nodelist=nodelist)
out = np.sum( np.linalg.inv( np.eye(len(self)-1) - out[:-1,:-1] ),axis=-1 )[0]
return out
def approx_MTM(self,nodelist=None):
if nodelist is None:
nodelist=[self.root]+list( self.nodes -set([self.root,self.target_node]) )+[self.target_node]
out=nx.DiGraph()
out.add_nodes_from(self.nodes)
for node in self.nodes:
weights=self.mean_out_weights(node)
out.add_weighted_edges_from(weights)
return out,nx.to_numpy_array(out,nodelist=nodelist)
def mean_out_weights(self,node):
neigh=list(self.neighbors(node))
out=[]
try:
toward_target=nx.shortest_path(self,node,self.target_node)[1]
except IndexError:
pass
if len(neigh)== 1:
out=[ (node,*neigh,1.) ]
elif node==self.root:
#the mu's aren't strictly required here, but we need them for
#the overlap case
weights=[(1+\
self.epsilon(
self.f(0,1,1,0,mu),\
self.f(self.h-1,0,0,0,mu),\
self.f(self.h-1,1,1,0,mu))
)
for mu in range(2,self.c+1)
]
e_0=np.prod(weights)
normalisation=1/(e_0+\
np.sum([ e_0/weight for weight in weights ])
)
#counter-clockwise. First towards target, then the other parts in order
out.append((node,toward_target,e_0*normalisation))
for neighbor in set(neigh)-set([toward_target]):
part=self.nodes[neighbor]['coordinates'][4]
out.append((node,neighbor,e_0*normalisation/weights[part-2]))
elif self.root in neigh and self.nodes[node]['coordinates'][2]==0:
e_r=1+self.epsilon(self.f(2,0,0,0),self.f(self.h-2,0,0,0),self.f(self.h,0,0,0))
e_u=1+self.epsilon(self.f(1,1,0,0),self.f(self.h-2,0,0,0),self.f(self.h-1,1,0,0))
normalisation=1/(e_u*(self.c-1)+e_r+e_r*e_u)
for neighbor in neigh:
#counter-clockwise, starting from the target, then other children,
#finally root
if neighbor ==toward_target:
out.append( (node,neighbor,e_u*e_r*normalisation ) )
elif not neighbor==self.root:
out.append( (node,neighbor,e_u*normalisation ) )
else:
out.append( (node,neighbor,e_r*normalisation ) )
elif self.root in neigh:
coordinates=list(self.nodes[node]['coordinates'])
coordinates[2]=0
e=1+self.epsilon(self.f(0,0,1,1,coordinates[4]),self.f(self.h-1,1,0,0,coordinates[4]),self.f(self.h-1,1,1,1,coordinates[4]))
for neighbor in neigh:
#counter-clockwise. first root (=targetwards), then children
if neighbor==self.root:
out.append( (node,neighbor, e/(self.c+e) ) )
else:
out.append( ( node,neighbor,1/(self.c+e) ) )
else:
coordinates=list(self.nodes[node]['coordinates'])
short_path=[2,0,0,0]
if coordinates[3]>0:
coordinates[3]-=1
short_path=[0,0,0,2]
else:
coordinates[0]-=1
e=1+self.epsilon(self.f(*short_path,coordinates[-1]),self.f( *coordinates ),self.f( *[coordinates[i]+short_path[i] for i in range(4)] ))
# TODO: following line with tightness 0/1 suitable for unit test
#print('e:',e,self.nodes[node]['coordinates'],self.f(*coordinates))
for neighbor in neigh:
#counter-clockwise. first targetwards, then children
if neighbor==toward_target:
out.append(( node,neighbor,e/(self.c+e)))
else:
out.append(( node,neighbor,1/(self.c+e) ))
return out
def diffusive_mfpt(self):
#Return MFPT for diffusive walker on the same graph
h=self.h
c=self.c
return h*(2*c**(h+1)/(c-1)-1)-2*c*(c**h-1)/(c-1)**2
class overlap_MF_patternWalker(MF_patternWalker):
#does all of the above with the correct parameters as given by G
def __init__(self,c=4,h=3,*args,**params):
self.c=c
self.h=h
super(overlap_MF_patternWalker,self).__init__(c,h,*args,**params)
if self.overlap>self.pattern_len*(self.c-1)/(2*self.c):
self.overlap=(self.pattern_len-int(self.pattern_len/self.c))/2
self.O_list=np.array([
max(0,int(self.pattern_len/self.c)*(2-i)+2*self.overlap)+\
max(0,i*int(self.pattern_len/self.c)-self.pattern_len+2*self.overlap)
for i in range(2,c+1)])
self.U_list=np.array([
max(0,-int(self.pattern_len/self.c)*(2-i)-2*self.overlap)+\
max(0,-i*int(self.pattern_len/self.c)+self.pattern_len-2*self.overlap)
for i in range(2,c+1)])
self.O_hh=np.sum(self.O_list)/(self.pattern_len*(self.c-1))
self.O_ll=np.sum(self.U_list)/(self.pattern_len*(self.c-1))
self.O_hl=(1-self.O_hh-self.O_ll)/2
self.O_lh=self.O_hl
def f(self,k,up,down,m,mu=2,**kwargs):
a_h=self.a_high
a_l=self.a_low
a=self.a_root
Gamma=self.Gamma
Gammap=self.Gamma_root
out=0.
coordinates=(k,up,down,m)
if up==0 and down+m==0:
#target branch
f_h=MF_patternWalker.f(self,*coordinates,ajl=a_h,ajr=a_h)
f_l=MF_patternWalker.f(self,*coordinates,ajl=a_l,ajr=a_l)
out=f_h*(self.pattern_len/self.c+2*self.overlap)/self.pattern_len+\
f_l*(
self.pattern_len-self.pattern_len/self.c-2*self.overlap
)/self.pattern_len
elif up==1 and down==0:
#target branch up to root
f_h=MF_patternWalker.f(self,*coordinates,ajl=a_h,ajr=a)
f_l=MF_patternWalker.f(self,*coordinates,ajl=a_l,ajr=a)
out=f_h*(self.pattern_len/self.c+2*self.overlap)/self.pattern_len+\
f_l*(
self.pattern_len-self.pattern_len/self.c-2*self.overlap
)/self.pattern_len
elif up==0 and down==1:
#root down to non-target branch
f_h=MF_patternWalker.f(self,*coordinates,ajl=a,ajr=a_h)
f_l=MF_patternWalker.f(self,*coordinates,ajl=a,ajr=a_l)
out=f_h*(self.pattern_len/self.c+2*self.overlap)/self.pattern_len+\
f_l*(
self.pattern_len-self.pattern_len/self.c-2*self.overlap
)/self.pattern_len
elif up+k==0 and down==0:
#non-target branch
f_h=MF_patternWalker.f(self,*coordinates,ajl=a_h,ajr=a_h)
f_l=MF_patternWalker.f(self,*coordinates,ajl=a_l,ajr=a_l)
out=f_h*(self.pattern_len/self.c+2*self.overlap)/self.pattern_len+\
f_l*(
self.pattern_len-self.pattern_len/self.c-2*self.overlap
)/self.pattern_len
else:
#from target branch over root to non-target branch
f_hh=MF_patternWalker.f(self,*coordinates,ajl=a_h,ajr=a_h)
f_hl=MF_patternWalker.f(self,*coordinates,ajl=a_h,ajr=a_l)
f_lh=MF_patternWalker.f(self,*coordinates,ajl=a_l,ajr=a_h)
f_ll=MF_patternWalker.f(self,*coordinates,ajl=a_l,ajr=a_l)
if mu==0:
#is this the case where we don't care?
out=self.O_hh*f_hh+self.O_lh*f_lh+self.O_hl*f_hl+self.O_ll*f_ll
else:
out=self.O_list[mu-2]/self.pattern_len*f_hh+\
(self.pattern_len-self.U_list[mu-2]-self.O_list[mu-2])/self.pattern_len*\
(f_hl+f_lh)/2+self.U_list[mu-2]/self.pattern_len*f_ll
return out
def root_cluster_eq_ratio(self):
#eq prob of cluster divided by eq prob of articulation pt, here the root itself
# NOTE: This could be done using the mean weight function directly,
#but doing it this way, we see if the cancellations in our formula are right
branch_weights=[(1+\
self.epsilon(
self.f(0,1,1,0,mu),\
self.f(self.h-1,0,0,0,mu),\
self.f(self.h-1,1,1,0,mu))
)
for mu in range(2,self.c+1)
]
e_0=np.prod(branch_weights)
e_r_list=[e_0/weight for weight in branch_weights]
branch_normalisation=1/(e_0+np.sum(e_r_list))
bias_dict={
mu: [e_0,e_r_list[mu-2], 1+self.epsilon(self.f(0,0,1,1,mu),\
self.f(self.h-1,1,0,0,mu),self.f(self.h-1,1,1,1,mu))]\
+[
1+self.epsilon(
self.f(0,0,0,2,mu),\
self.f(self.h-1,1,1,l,mu),\
self.f(self.h-1,1,1,l+2,mu)
)
for l in range(self.h-2)
]
for mu in range(2,self.c+1)
}
out=1+np.sum([branch_normalisation*bias_dict[mu][1]*\
(
np.sum([
self.c**l*(self.c+bias_dict[mu][l+2])/\
np.prod([bias_dict[mu][k+2] for k in range(l+1)])
for l in range(self.h-1)
])+\
self.c**(self.h-1)/\
np.prod([bias_dict[mu][l+2] for l in range(self.h-1)])
)
for mu in range(2,self.c+1)
])
return out
def MF_mfpt(self,ajl=None,ajr=None,a=None,Gamma=None,Gammap=None,**kwargs):
#just under the root things are a bit messy, hence the following epsilons
branch_weights=[(1+\
self.epsilon(
self.f(0,1,1,0,mu),\
self.f(self.h-1,0,0,0,mu),\
self.f(self.h-1,1,1,0,mu))
)
for mu in range(2,self.c+1)
]
e_root=np.prod(branch_weights)
e_root_norm=e_root+np.sum([
e_root/weight for weight in branch_weights])
e_r=1+self.epsilon(
self.f(2,0,0,0),\
self.f(self.h-2,0,0,0),\
self.f(self.h,0,0,0)
)
e_u=1+self.epsilon(
self.f(1,1,0,0),\
self.f(self.h-2,0,0,0),\
self.f(self.h-1,1,0,0)
)
cord_weight_list = \
[ e_root ]+\
[ e_u ]+\
[
(1+self.epsilon(
self.f(2,0,0,0),\
self.f(self.h-k-1,0,0,0),\
self.f(self.h-k+1,0,0,0))
)
for k in range(2,self.h)
]
numerators=[e_root_norm]+[((self.c-1)*e_u+e_r*e_u+e_r)/e_r]+[self.c+cord_weight_list[k] for k in range(2,self.h)]
eq_ratio_list = [ self.root_cluster_eq_ratio() ] + [self.sub_root_cluster_eq_ratio()] +[ self.eq_ratio(self.h-k) for k in range(2,self.h) ]
out = np.sum(np.sum( [[eq_ratio_list[k]*numerators[k]/np.prod(cord_weight_list[k:i]) for k in range(i)] for i in range(1,self.h+1)] ))
return out
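# --- Editor's sketch (hedged) ---
# MTM_mfpt() above extracts the MFPT with the standard absorbing-chain identity:
# order the nodes so the target comes last, let Q = W[:-1, :-1] be the transition
# matrix among the transient states, and read the expected hitting times off the
# row sums of the fundamental matrix N = (I - Q)^(-1). Self-contained toy example
# on a 3-state chain (0 -> 1 -> 2, state 2 absorbing); the numbers are made up:
def _toy_mfpt():
    W = np.array([[0.5, 0.5, 0.0],
                  [0.25, 0.5, 0.25],
                  [0.0, 0.0, 1.0]])
    Q = W[:-1, :-1]
    N = np.linalg.inv(np.eye(2) - Q)
    return N.sum(axis=-1)[0]   # expected number of steps from state 0 to state 2 (= 8.0)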
|
[
"numpy.sum",
"numpy.float",
"networkx.shortest_path",
"numpy.array",
"networkx.to_numpy_array",
"networkx.DiGraph",
"numpy.prod"
] |
[((10013, 10023), 'numpy.float', 'np.float', ([], {}), '()\n', (10021, 10023), True, 'import numpy as np\n'), ((10809, 10821), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (10819, 10821), True, 'import networkx as nx\n'), ((18976, 18999), 'numpy.prod', 'np.prod', (['branch_weights'], {}), '(branch_weights)\n', (18983, 18999), True, 'import numpy as np\n'), ((20545, 20568), 'numpy.prod', 'np.prod', (['branch_weights'], {}), '(branch_weights)\n', (20552, 20568), True, 'import numpy as np\n'), ((808, 963), 'numpy.array', 'np.array', (['[[1 - aj * (1 - (1 - Gamma) ** k), aj * (1 - (1 - Gamma) ** k)], [(1 - aj) *\n (1 - (1 - Gamma) ** k), 1 - (1 - aj) * (1 - (1 - Gamma) ** k)]]'], {}), '([[1 - aj * (1 - (1 - Gamma) ** k), aj * (1 - (1 - Gamma) ** k)], [\n (1 - aj) * (1 - (1 - Gamma) ** k), 1 - (1 - aj) * (1 - (1 - Gamma) ** k)]])\n', (816, 963), True, 'import numpy as np\n'), ((1358, 1500), 'numpy.array', 'np.array', (['[[(1 - a) * (1 - Gammap) / (1 - aj), 1 - (1 - a) * (1 - Gammap) / (1 - aj)],\n [(1 - a) / aj * Gammap, 1 - (1 - a) / aj * Gammap]]'], {}), '([[(1 - a) * (1 - Gammap) / (1 - aj), 1 - (1 - a) * (1 - Gammap) /\n (1 - aj)], [(1 - a) / aj * Gammap, 1 - (1 - a) / aj * Gammap]])\n', (1366, 1500), True, 'import numpy as np\n'), ((2073, 2173), 'numpy.array', 'np.array', (['[[1 - Gammap, Gammap], [1 - (aj - (1 - a) * Gammap) / a, (aj - (1 - a) *\n Gammap) / a]]'], {}), '([[1 - Gammap, Gammap], [1 - (aj - (1 - a) * Gammap) / a, (aj - (1 -\n a) * Gammap) / a]])\n', (2081, 2173), True, 'import numpy as np\n'), ((9718, 9760), 'networkx.to_numpy_array', 'nx.to_numpy_array', (['self'], {'nodelist': 'nodelist'}), '(self, nodelist=nodelist)\n', (9735, 9760), True, 'import networkx as nx\n'), ((11009, 11050), 'networkx.to_numpy_array', 'nx.to_numpy_array', (['out'], {'nodelist': 'nodelist'}), '(out, nodelist=nodelist)\n', (11026, 11050), True, 'import networkx as nx\n'), ((15549, 15568), 'numpy.sum', 'np.sum', (['self.O_list'], {}), '(self.O_list)\n', (15555, 15568), True, 'import numpy as np\n'), ((15617, 15636), 'numpy.sum', 'np.sum', (['self.U_list'], {}), '(self.U_list)\n', (15623, 15636), True, 'import numpy as np\n'), ((20597, 20653), 'numpy.sum', 'np.sum', (['[(e_root / weight) for weight in branch_weights]'], {}), '([(e_root / weight) for weight in branch_weights])\n', (20603, 20653), True, 'import numpy as np\n'), ((1558, 1634), 'numpy.array', 'np.array', (['[[(1 - a) * (1 - Gammap), 1 - (1 - a) * (1 - Gammap)], [0.0, 1.0]]'], {}), '([[(1 - a) * (1 - Gammap), 1 - (1 - a) * (1 - Gammap)], [0.0, 1.0]])\n', (1566, 1634), True, 'import numpy as np\n'), ((2246, 2290), 'numpy.array', 'np.array', (['[[1 - Gammap, Gammap], [0.0, 1.0]]'], {}), '([[1 - Gammap, Gammap], [0.0, 1.0]])\n', (2254, 2290), True, 'import numpy as np\n'), ((11183, 11229), 'networkx.shortest_path', 'nx.shortest_path', (['self', 'node', 'self.target_node'], {}), '(self, node, self.target_node)\n', (11199, 11229), True, 'import networkx as nx\n'), ((11763, 11779), 'numpy.prod', 'np.prod', (['weights'], {}), '(weights)\n', (11770, 11779), True, 'import numpy as np\n'), ((19095, 19111), 'numpy.sum', 'np.sum', (['e_r_list'], {}), '(e_r_list)\n', (19101, 19111), True, 'import numpy as np\n'), ((1047, 1085), 'numpy.array', 'np.array', (['[[1, 0], [Gamma, 1 - Gamma]]'], {}), '([[1, 0], [Gamma, 1 - Gamma]])\n', (1055, 1085), True, 'import numpy as np\n'), ((1716, 1750), 'numpy.array', 'np.array', (['[[0.0, 1.0], [0.0, 1.0]]'], {}), '([[0.0, 1.0], [0.0, 1.0]])\n', (1724, 1750), True, 'import numpy as np\n'), ((9776, 9799), 
'numpy.sum', 'np.sum', (['W_temp'], {'axis': '(-1)'}), '(W_temp, axis=-1)\n', (9782, 9799), True, 'import numpy as np\n'), ((11831, 11877), 'numpy.sum', 'np.sum', (['[(e_0 / weight) for weight in weights]'], {}), '([(e_0 / weight) for weight in weights])\n', (11837, 11877), True, 'import numpy as np\n'), ((9186, 9216), 'numpy.prod', 'np.prod', (['cord_weight_list[k:i]'], {}), '(cord_weight_list[k:i])\n', (9193, 9216), True, 'import numpy as np\n'), ((21629, 21659), 'numpy.prod', 'np.prod', (['cord_weight_list[k:i]'], {}), '(cord_weight_list[k:i])\n', (21636, 21659), True, 'import numpy as np\n'), ((9841, 9864), 'numpy.sum', 'np.sum', (['W_temp'], {'axis': '(-1)'}), '(W_temp, axis=-1)\n', (9847, 9864), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
X = np.linspace(-np.pi, np.pi, 256)
C = np.cos(X)
S = np.sin(X)
plt.plot(X, C)
plt.plot(X, S)
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.sin",
"numpy.cos",
"numpy.linspace"
] |
[((59, 90), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', '(256)'], {}), '(-np.pi, np.pi, 256)\n', (70, 90), True, 'import numpy as np\n'), ((96, 105), 'numpy.cos', 'np.cos', (['X'], {}), '(X)\n', (102, 105), True, 'import numpy as np\n'), ((111, 120), 'numpy.sin', 'np.sin', (['X'], {}), '(X)\n', (117, 120), True, 'import numpy as np\n'), ((124, 138), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'C'], {}), '(X, C)\n', (132, 138), True, 'import matplotlib.pyplot as plt\n'), ((140, 154), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'S'], {}), '(X, S)\n', (148, 154), True, 'import matplotlib.pyplot as plt\n'), ((158, 168), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (166, 168), True, 'import matplotlib.pyplot as plt\n')]
|
'''
Author: jianzhnie
Date: 2022-01-19 17:15:05
LastEditTime: 2022-03-04 18:30:51
LastEditors: jianzhnie
Description:
'''
import json
import os
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm.auto import tqdm
from nlptoolkit.data.utils.utils import PAD_TOKEN, get_loader
from nlptoolkit.data.vocab import save_vocab
from nlptoolkit.datasets.elmodataset import BiLMDataset, load_corpus
from nlptoolkit.models.elmo.elmo_model import BiLM
sys.path.append('../../')
configs = {
'max_tok_len':
50,
'train_file':
'/home/robin/jianzh/nlp-toolkit/examples/data/fra-eng/english.txt', # path to your training file, line-by-line and tokenized
'model_path':
'/home/robin/jianzh/nlp-toolkit/work_dir/elmo',
'char_embedding_dim':
50,
'char_conv_filters': [[1, 32], [2, 32], [3, 32], [4, 32], [5, 32], [6, 32],
[7, 32]],
'num_highways':
2,
'projection_dim':
512,
'hidden_dim':
1024,
'num_layers':
2,
'batch_size':
32,
'dropout_prob':
0.1,
'learning_rate':
0.0004,
'clip_grad':
5,
'num_epoch':
10
}
if __name__ == '__main__':
corpus_w, corpus_c, vocab_w, vocab_c = load_corpus(configs['train_file'])
train_data = BiLMDataset(corpus_w, corpus_c, vocab_w, vocab_c)
train_loader = get_loader(train_data, configs['batch_size'])
criterion = nn.CrossEntropyLoss(ignore_index=vocab_w[PAD_TOKEN],
reduction='sum')
print('Building BiLM model')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = BiLM(configs, vocab_w, vocab_c)
model.to(device)
optimizer = optim.Adam(filter(lambda x: x.requires_grad,
model.parameters()),
lr=configs['learning_rate'])
model.train()
for epoch in range(configs['num_epoch']):
total_loss = 0
total_tags = 0 # number of valid predictions
for batch in tqdm(train_loader, desc=f'Training Epoch {epoch}'):
batch = [x.to(device) for x in batch]
inputs_w, inputs_c, seq_lens, targets_fw, targets_bw = batch
optimizer.zero_grad()
outputs_fw, outputs_bw = model(inputs_c, seq_lens)
loss_fw = criterion(outputs_fw.view(-1, outputs_fw.shape[-1]),
targets_fw.view(-1))
loss_bw = criterion(outputs_bw.view(-1, outputs_bw.shape[-1]),
targets_bw.view(-1))
loss = (loss_fw + loss_bw) / 2.0
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(),
configs['clip_grad'])
optimizer.step()
total_loss += loss_fw.item()
total_tags += seq_lens.sum().item()
train_ppl = np.exp(total_loss / total_tags)
print(f'Train PPL: {train_ppl:.2f}')
# save BiLM encoders
model.save_pretrained(configs['model_path'])
# save configs
json.dump(configs,
open(os.path.join(configs['model_path'], 'configs.json'), 'w'))
# save vocabularies
save_vocab(vocab_w, os.path.join(configs['model_path'], 'word.dic'))
save_vocab(vocab_c, os.path.join(configs['model_path'], 'char.dic'))
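# --- Editor's sketch (hedged) ---
# The reported train PPL is exp(summed token cross-entropy / number of tokens).
# Tiny standalone illustration with made-up logits, using the same 'sum' reduction:
def _toy_perplexity(vocab_size=100, n_tokens=7):
    logits = torch.randn(n_tokens, vocab_size)
    targets = torch.randint(0, vocab_size, (n_tokens,))
    ce = nn.CrossEntropyLoss(reduction='sum')
    return float(np.exp(ce(logits, targets).item() / n_tokens))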
|
[
"sys.path.append",
"nlptoolkit.models.elmo.elmo_model.BiLM",
"nlptoolkit.data.utils.utils.get_loader",
"torch.nn.CrossEntropyLoss",
"tqdm.auto.tqdm",
"torch.cuda.is_available",
"nlptoolkit.datasets.elmodataset.BiLMDataset",
"numpy.exp",
"nlptoolkit.datasets.elmodataset.load_corpus",
"os.path.join"
] |
[((496, 521), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (511, 521), False, 'import sys\n'), ((1250, 1284), 'nlptoolkit.datasets.elmodataset.load_corpus', 'load_corpus', (["configs['train_file']"], {}), "(configs['train_file'])\n", (1261, 1284), False, 'from nlptoolkit.datasets.elmodataset import BiLMDataset, load_corpus\n'), ((1302, 1351), 'nlptoolkit.datasets.elmodataset.BiLMDataset', 'BiLMDataset', (['corpus_w', 'corpus_c', 'vocab_w', 'vocab_c'], {}), '(corpus_w, corpus_c, vocab_w, vocab_c)\n', (1313, 1351), False, 'from nlptoolkit.datasets.elmodataset import BiLMDataset, load_corpus\n'), ((1371, 1416), 'nlptoolkit.data.utils.utils.get_loader', 'get_loader', (['train_data', "configs['batch_size']"], {}), "(train_data, configs['batch_size'])\n", (1381, 1416), False, 'from nlptoolkit.data.utils.utils import PAD_TOKEN, get_loader\n'), ((1434, 1503), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'ignore_index': 'vocab_w[PAD_TOKEN]', 'reduction': '"""sum"""'}), "(ignore_index=vocab_w[PAD_TOKEN], reduction='sum')\n", (1453, 1503), True, 'import torch.nn as nn\n'), ((1659, 1690), 'nlptoolkit.models.elmo.elmo_model.BiLM', 'BiLM', (['configs', 'vocab_w', 'vocab_c'], {}), '(configs, vocab_w, vocab_c)\n', (1663, 1690), False, 'from nlptoolkit.models.elmo.elmo_model import BiLM\n'), ((2048, 2098), 'tqdm.auto.tqdm', 'tqdm', (['train_loader'], {'desc': 'f"""Training Epoch {epoch}"""'}), "(train_loader, desc=f'Training Epoch {epoch}')\n", (2052, 2098), False, 'from tqdm.auto import tqdm\n'), ((2919, 2950), 'numpy.exp', 'np.exp', (['(total_loss / total_tags)'], {}), '(total_loss / total_tags)\n', (2925, 2950), True, 'import numpy as np\n'), ((3239, 3286), 'os.path.join', 'os.path.join', (["configs['model_path']", '"""word.dic"""'], {}), "(configs['model_path'], 'word.dic')\n", (3251, 3286), False, 'import os\n'), ((3312, 3359), 'os.path.join', 'os.path.join', (["configs['model_path']", '"""char.dic"""'], {}), "(configs['model_path'], 'char.dic')\n", (3324, 3359), False, 'import os\n'), ((1609, 1634), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1632, 1634), False, 'import torch\n'), ((3132, 3183), 'os.path.join', 'os.path.join', (["configs['model_path']", '"""configs.json"""'], {}), "(configs['model_path'], 'configs.json')\n", (3144, 3183), False, 'import os\n')]
|
import os
import re
import pandas as pd
#%matplotlib inline
from datetime import datetime
from PIL import Image
import numpy as np
import sys
import shutil
from distutils import dir_util
import re
import glob
import chainer
import chainer.links as L
import chainer.functions as F
import chainer.cuda as cuda
from chainer.dataset import convert
import yama_resnet50_revised as yama_net
import cupy
import argparse
from util import load_data, make_learning_image, make_accuracy_image
from cupy_augmentation import chw_cupy_random_rotate, chw_cupy_random_flip, cupy_augmentation
ordinal_dict={"1":"1st","2":"2nd","3":"3rd","4":"4th","5":"5th"}
class NNModel():
def __init__(self, model_type,optm_type,prefix, dataset, gpu, flag_train, epoch, batchsize,alpha,
lr, dr, n_out, save_dir, save_interval, entropy_weight,patience_limit,resume_path=None):
self.lr = lr
self.alpha = alpha
self.gpu = gpu
self.prefix = prefix
self.epoch = epoch
self.save_dir = save_dir
self.batchsize = batchsize
self.flag_train = flag_train
self.save_interval = save_interval
self.entropy_weight = entropy_weight
self.patience_limit = patience_limit
self.n_out = n_out
if model_type == 0: # Alex
# NOTE: `net` (model definitions such as Alex/GoogLeNetBN/Resnet50) is assumed to be imported elsewhere in the project
self.model = net.Alex(dr=dr, n_out=n_out,entropy_weight=entropy_weight)
elif model_type == 1: # Google
self.model = net.GoogLeNetBN(n_out=n_out,entropy_weight=entropy_weight)
elif model_type == 2: # ResNet50
self.model = net.Resnet50(n_out=n_out,entropy_weight=entropy_weight)
elif model_type == 3: # ResNet152_transfer
self.model = net.ResNet152_transfer(n_out=n_out,entropy_weight=entropy_weight)
#self.model.predictor.base.disable_update()
elif model_type == 4:
self.model = yama_net.ResNet50Layers_transfer(n_out=n_out,entropy_weight=entropy_weight)
else:
print('wrong model type!')
exit()
# gpu check
if gpu >= 0:
#print('hoge')
#chainer.backends.cuda.get_device_from_id(gpu).use()
#print('hogehoge')
chainer.cuda.get_device_from_id(gpu).use()
#print(chainer.cuda.get_device_from_id(1))
self.model.to_gpu()
#self.model.to_gpu(gpu)
# to train and valid (usually yes)
if self.flag_train:
if optm_type == 'adam': # 'ADAM'
                self.optimizer = chainer.optimizers.Adam(alpha=self.alpha)
                self.optimizer.setup(self.model)
            elif optm_type == 'momentum': # 'MomentumSGD'
                self.optimizer = chainer.optimizers.MomentumSGD(lr=self.lr, momentum=0.9)
self.optimizer.setup(self.model)
else:
print('no optimizer set!')
exit()
# to resume from serialized model
if resume_path is not None:
try:
chainer.serializers.load_npz(resume_path + '.model', self.model)
chainer.serializers.load_npz(resume_path + '.state', self.optimizer)
print('successfully resume model')
except:
print('WARN: cannot resume model')
# prepare dataset
self.train, self.valid = dataset[0], dataset[1]
#print(len(dataset[0]))
        self.N_train, self.N_valid = len(self.train), len(self.valid)
#print(self.N_train,self.N_valid)
# data iterator
'''
self.train_iter = chainer.iterators.SerialIterator(self.train, self.batchsize, repeat=True, shuffle=True)
self.valid_iter = chainer.iterators.SerialIterator(self.valid, self.batchsize, repeat=False, shuffle=False)
'''
        self.train_iter = chainer.iterators.\
            MultithreadIterator(self.train, self.batchsize, repeat=True, shuffle=True)
        self.valid_iter = chainer.iterators.\
            MultithreadIterator(self.valid, self.batchsize, repeat=False, shuffle=False)
'''
self.train_iter = chainer.iterators.\
MultiprocessIterator(train, self.batchsize, repeat=True, shuffle=True,
n_processes=4, n_prefetch=8)
self.valid_iter = chainer.iterators.\
MultiprocessIterator(valid, self.batchsize, repeat=False, shuffle=False,
n_processes=4, n_prefetch=8)
'''
def run(self):
train_losses, valid_losses, train_accs, valid_accs = [], [], [], []
#train_f1_scores, valid_f1_scores = [], []
train_precision_scores, valid_precision_scores = [], []
train_recall_scores, valid_recall_scores = [], []
train_f1_scores, valid_f1_scores = [], []
for j in range(self.n_out):
train_precision_scores.append([])
train_recall_scores.append([])
train_f1_scores.append([])
valid_precision_scores.append([])
valid_recall_scores.append([])
valid_f1_scores.append([])
sum_train_loss, sum_train_accuracy = 0, 0
all_train_t, all_valid_t=cupy.empty((0),cupy.int32),cupy.empty((0),cupy.int32)
all_train_y, all_valid_y=cupy.empty((0,args.n_out),cupy.float32),cupy.empty((0,args.n_out),cupy.float32)
best_valid_loss = np.inf
best_valid_acc = np.inf
#early stopping counter
patience_counter=0
while self.train_iter.epoch < self.epoch:
# train phase
batch = self.train_iter.next()
if self.flag_train:
# step by step update
x_array, t_array = convert.concat_examples(batch, self.gpu)
#print('x_array',x_array.shape,type(x_array))
#print('t_array',t_array.shape,type(t_array))
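                # accumulate this epoch's targets (and, below, the raw logits) so that
                # per-class precision / recall / F1 can be computed once the epoch ends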
all_train_t=cupy.hstack([all_train_t,t_array])
x, t = chainer.Variable(x_array), chainer.Variable(t_array)
x=cupy_augmentation(x)#added at 2019/09/10
self.model.cleargrads()
y, loss, accuracy ,_ = self.model(x, t)
#y_for_f1=cupy.argmax(y.data,axis=1)
all_train_y=cupy.vstack([all_train_y,y.data])
#print(all_train_y.shape
loss.backward()
self.optimizer.update()
sum_train_loss += float(loss.data) * len(t.data)
sum_train_accuracy += float(accuracy.data) * len(t.data)
#train_f1_score=F.classification_s
# valid phase
if self.train_iter.is_new_epoch:
# Return objects Loss
mean_train_loss = sum_train_loss / self.N_train
train_losses.append(mean_train_loss)
# Return objects Acc
mean_train_acc = sum_train_accuracy / self.N_train
train_accs.append(mean_train_acc)
# Return objects f1_score
#train_f1_score=F.classification_summary(all_train_y,all_train_t)[2][1]
#print(train_f1_score)
#train_f1_scores.append(train_f1_score)
for j in range(self.n_out):
train_precision_score=F.classification_summary(all_train_y,all_train_t)[0][j]
train_precision_scores[j].append(train_precision_score)
train_recall_score=F.classification_summary(all_train_y,all_train_t)[1][j]
train_recall_scores[j].append(train_recall_score)
train_f1_score=F.classification_summary(all_train_y,all_train_t)[2][j]
train_f1_scores[j].append(train_f1_score)
sum_valid_accuracy, sum_valid_loss = 0, 0
all_train_t, all_valid_t=cupy.empty((0),cupy.int32),cupy.empty((0),cupy.int32)
all_train_y, all_valid_y=cupy.empty((0,args.n_out),cupy.float32),cupy.empty((0,args.n_out),cupy.float32)
for batch in self.valid_iter:
x_array, t_array = convert.concat_examples(batch, self.gpu)
all_valid_t=cupy.hstack([all_valid_t,t_array])
#t_for_f1=np.argmax(t_array,axis=1)
x, t = chainer.Variable(x_array), chainer.Variable(t_array)
with chainer.using_config('train', False), chainer.no_backprop_mode():
y, loss, accuracy,f1_score = self.model(x, t)
#y_for_f1=cupy.argmax(y.data,axis=1)
#print('y_for_f1',y_for_f1.shape)
sum_valid_loss += float(loss.data) * len(t.data)
sum_valid_accuracy += float(accuracy.data) * len(t.data)
all_valid_y=cupy.vstack([all_valid_y,y.data])
# Return objects Loss
mean_valid_loss = sum_valid_loss / self.N_valid
valid_losses.append(mean_valid_loss)
# Return objects valid
mean_valid_acc = sum_valid_accuracy / self.N_valid
valid_accs.append(mean_valid_acc)
# Return objects f1_score
#print(all_valid_y.dtype,all_valid_t.dtype)
#print(all_valid_y.shape,all_valid_t.shape)
#print(np.max(all_valid_t))
#print(F.classification_summary(all_valid_y,all_valid_t))
for j in range(self.n_out):
valid_precision_score=F.classification_summary(all_valid_y,all_valid_t)[0][j]
valid_precision_scores[j].append(valid_precision_score)
valid_recall_score=F.classification_summary(all_valid_y,all_valid_t)[1][j]
valid_recall_scores[j].append(valid_recall_score)
valid_f1_score=F.classification_summary(all_valid_y,all_valid_t)[2][j]
valid_f1_scores[j].append(valid_f1_score)
self.valid_iter.reset()
if mean_valid_loss < best_valid_loss:
# update best
best_valid_loss = mean_valid_loss
best_valid_acc = mean_valid_acc
#print(train_f1_score.data)
print("e %d/%d, train_loss %f, valid_loss(Best) %f, train_accuracy %f, valid_accuracy %f ,train_f1 %f , valid_f1 %f" % (
self.train_iter.epoch, args.epoch, mean_train_loss, best_valid_loss,
mean_train_acc, mean_valid_acc,train_f1_score.data,valid_f1_score.data))
save_flag = 1
patience_counter = 0#Important! reset the patience_counter
else:
patience_counter = patience_counter+1
                    print('validation loss did not improve; patience_counter is now ' + str(patience_counter))
save_flag = 0
print("e %d/%d, train_loss %f, valid_loss %f, train_accuracy %f, valid_accuracy %f ,train_f1 %f, valid_f1 %f" % (
self.train_iter.epoch, args.epoch, mean_train_loss, mean_valid_loss,
mean_train_acc, mean_valid_acc, train_f1_score.data, valid_f1_score.data))
sum_train_loss, sum_train_accuracy = 0, 0
all_train_t, all_valid_t=cupy.empty((0),cupy.int32),cupy.empty((0),cupy.int32)
all_train_y, all_valid_y=cupy.empty((0,args.n_out),cupy.float32),cupy.empty((0,args.n_out),cupy.float32)
if self.save_interval > 0 :
if self.train_iter.epoch % self.save_interval == 0 or self.train_iter.epoch == self.epoch or save_flag == 1:
try:
                        chainer.serializers.save_npz(self.save_dir + '/' + self.prefix + "_e" + str(self.train_iter.epoch) + '.model', self.model)
                        chainer.serializers.save_npz(self.save_dir + '/' + self.prefix + "_e" + str(self.train_iter.epoch) + '.state', self.optimizer)
print('Successfully saved model')
except:
print('WARN: saving model ignored')
# early stopping
            if patience_counter >= self.patience_limit:
break
return train_losses, valid_losses, train_accs, valid_accs, best_valid_loss ,train_f1_scores,valid_f1_scores,train_precision_scores,valid_precision_scores,train_recall_scores,valid_recall_scores,best_valid_acc
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Chainer v4.0.0')
parser.add_argument('--train', '-t', type=int, default=1, help='If negative, skip training')
parser.add_argument('--n_fold', '-nf', type=int, default=5, help='The number of K-fold')
parser.add_argument('--batchsize', '-b', type=int, default=100, help='Number of images in each mini-batch')
parser.add_argument('--epoch', '-e', type=int, default=3000, help='Number of sweeps over the dataset to train')
parser.add_argument('--gpu', '-g', type=int, default=-1, help='Main GPU ID')
parser.add_argument('--save_interval', '-s', type=int, default=500, help='interval for saving model')
parser.add_argument('--lr', '-lr', type=float, default=0.01, help='Learning Rate')
parser.add_argument('--alpha', '-alpha', type=float, default=0.001, help='Alpha of Adam')
parser.add_argument('--dr', '-dr', type=float, default=0.0, help='Dropout Rate')
parser.add_argument('--n_out', '-no', type=int, default=4, help='Number of output class')
    parser.add_argument('--model_type', '-modeltype', type=int, default=0, help='0:Alex, 1:GoogLe, 2:ResNet50, 3:ResNet152_transfer, 4:ResNet50Layers_transfer')
parser.add_argument('--optm_type', '-optmtype', type=str, default='no optm is set', help='adam:ADAM, momentum:momentumSGD')
#parser.add_argument('--save_dir', '-save_dir', type=str, default='empty_folder', help='save_dir')
parser.add_argument('--remark', '-remark', type=str, default='hoge', help='remarks:e.g meshyper')
parser.add_argument('--weighted_loss', '-weighted_loss', type=int, default=1, help='use weighted cross entropy if 1')
#parser.add_argument('--entropy_weight', '-entropy_weight', type=float, default=1, help='entropy_weight',nargs='*')
parser.add_argument('--patience_limit', '-patience_limit', type=int, default=100, help='early stop counter,how many times new valid loss is over best valid loss')
args = parser.parse_args()
# remark name
npy_filename = args.remark
#print(npy_filename)
n_fold = args.n_fold
# path to data
data_dir = './dataset/folded_npy/'
npy_filename = args.remark
patience_limit= args.patience_limit
# check saving directory
save_dir=os.getcwd() + '/result/' + npy_filename + '/resnet50_transfer/'
if not os.path.exists(save_dir):
print("no save dir", save_dir)
exit()
# check resume model if use
resume_path = None
if resume_path is not None:
if not os.path.exists(resume_path):
print("no resume model", resume_path)
exit()
# misc
flag_train = False if args.train < 0 else True
n_epoch = args.epoch if flag_train == True else 1
# print status
print('GPU: {}'.format(args.gpu))
print('# epoch: {}'.format(args.epoch))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# dropout: {}'.format(args.dr))
print('# learning rate: {}'.format(args.lr))
# for figure
all_train_losses, all_valid_losses = [], []
all_train_accs, all_valid_accs = [], []
all_best_valid_losses = []
all_train_precision_scores,all_valid_precision_scores= [], []
all_train_recall_scores,all_valid_recall_scores= [], []
all_train_f1_scores,all_valid_f1_scores= [], []
all_best_valid_accs = []
entropy_weight=[]
weight_sheet=pd.read_csv('./dataset/weight_of_remark_new.csv',index_col=0)
weight_sheet=weight_sheet.rename(columns={'0': 'remark'})
location = (np.where(weight_sheet['remark'] == npy_filename)[0])
print(weight_sheet.iloc[location+0,:])
print(weight_sheet.iloc[location+1,:])
print(weight_sheet.iloc[location+2,:])
print(weight_sheet.iloc[location+3,:])
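    # the per-class weights for the weighted cross entropy are read from the row three lines
    # below this remark's entry in the sheet; uniform weights of 1.0 are used otherwise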
if args.weighted_loss == 1:
for j in range(args.n_out):
weight_jth = weight_sheet.iloc[location+3,j]
weight_jth = float(weight_jth)
entropy_weight.append(weight_jth)
else:
for j in range(args.n_out):
weight_jth = 1
weight_jth = float(weight_jth)
entropy_weight.append(weight_jth)
# time watch
print("start:", datetime.now().strftime("%Y/%m/%d %H:%M:%S"))
#print(args.n_out)
# start eval
for ncv in range(0, n_fold):
print(str(ncv+1)+'folding_calculation_start')
train_path = data_dir + 'train_' + ordinal_dict[str(ncv+1)]+'/'
valid_path = data_dir + 'valid_' + ordinal_dict[str(ncv+1)]+'/'
print("load train data from " + train_path)
x_train=np.load(train_path + 'train_' + ordinal_dict[str(ncv+1)] + '_images.npy')
y_train=np.load(train_path + 'train_' + ordinal_dict[str(ncv+1)] + '_' + npy_filename + '_label.npy')
print("load valid data from " + valid_path)
x_valid=np.load(valid_path + 'valid_' + ordinal_dict[str(ncv+1)] + '_images.npy')
y_valid=np.load(valid_path + 'valid_' + ordinal_dict[str(ncv+1)] + '_' + npy_filename + '_label.npy')
train = chainer.datasets.tuple_dataset.TupleDataset(x_train, y_train)
valid = chainer.datasets.tuple_dataset.TupleDataset(x_valid, y_valid)
prefix = str(args.model_type) + "_" + str(ncv+1) + 'thFold_'+npy_filename + "_b" + str(args.batchsize) + "_optm_"+str(args.optm_type)+"_alpha" + str(args.alpha) + "_lr" + str(args.lr)
my_network = NNModel(model_type=args.model_type, optm_type=args.optm_type,prefix=prefix, dataset=[train, valid], gpu=args.gpu,
flag_train=flag_train, epoch=args.epoch, batchsize=args.batchsize,alpha=args.alpha,
lr=args.lr, dr=args.dr, n_out=args.n_out, save_dir=save_dir,entropy_weight=entropy_weight,
save_interval=args.save_interval, resume_path=resume_path,patience_limit=patience_limit)
train_losses, valid_losses, train_accs, valid_accs, best_valid_loss ,train_f1_scores,valid_f1_scores,train_precision_scores,valid_precision_scores,train_recall_scores,valid_recall_scores,best_valid_acc= my_network.run()
all_train_losses.append(train_losses)
all_valid_losses.append(valid_losses)
all_train_accs.append(train_accs)
all_valid_accs.append(valid_accs)
all_best_valid_losses.append(best_valid_loss)
all_train_precision_scores.append(train_precision_scores)
all_valid_precision_scores.append(valid_precision_scores)
all_train_recall_scores.append(train_recall_scores)
all_valid_recall_scores.append(valid_recall_scores)
all_train_f1_scores.append(train_f1_scores)
all_valid_f1_scores.append(valid_f1_scores)
all_best_valid_accs.append(best_valid_acc)
print ("making figure")
# early stoppingを実装した場合、fold毎に終了epochが違うため、epoch数を揃える必要がある。
# for rangeループの中に含まれるものはarray集合のリストであり、長さが不揃いになっているためエポック数を揃える。
#print(all_train_losses.dtype,all_train_losses.shape,'all_train')
#print(all_valid_losses.dtype,all_valid_losses.shape,'all_valid')
    if patience_limit >= 0:
        print('early stopping is enabled, so adjusting the number of epochs for each fold.')
        # folds can stop at different epochs, so truncate every learning curve to the shortest fold
        min_index = min(len(losses) for losses in all_valid_losses)
for j in range(n_fold):
all_train_losses[j] = (all_train_losses[j])[0:min_index]
all_valid_losses[j] = (all_valid_losses[j])[0:min_index]
all_train_accs[j] = (all_train_accs[j])[0:min_index]
all_valid_accs[j] = (all_valid_accs[j])[0:min_index]
# figure
mean_best_valid_losses = np.array(all_best_valid_losses).mean(axis=0)
mean_best_valid_accs = np.array(all_best_valid_accs).mean(axis=0)
mean_train_losses, mean_valid_losses = np.array(all_train_losses).mean(axis=0), np.array(all_valid_losses).mean(axis=0)
mean_train_accs, mean_valid_accs = np.array(all_train_accs).mean(axis=0), np.array(all_valid_accs).mean(axis=0)
# loss
filename_prefix = str(args.model_type) + "_remark_" + npy_filename + "_b" + str(args.batchsize) + "_alpha" + str(args.alpha)
fig_path = os.getcwd() + '/result/' + npy_filename + '/' + filename_prefix + "_best_" + str(mean_best_valid_losses) + "_loss.png"
make_learning_image(fig_path=fig_path, mean_train_losses=mean_train_losses, mean_valid_losses=mean_valid_losses)
# acc
fig_path = os.getcwd() + '/result/' + npy_filename + '/' + filename_prefix + "_best_" + str(mean_best_valid_accs) + "_acc.png"
make_accuracy_image(fig_path=fig_path, mean_train_accs=mean_train_accs, mean_valid_accs=mean_valid_accs)
if args.save_interval > 0:
try:
np_save_point = os.getcwd() + '/result/' + npy_filename + '/'
np.save(np_save_point+'all_train_f1_scores',all_train_f1_scores)
np.save(np_save_point+'all_train_precision_scores',all_train_precision_scores)
np.save(np_save_point+'all_train_recall_scores',all_train_recall_scores)
np.save(np_save_point+'all_valid_f1_scores',all_valid_f1_scores)
np.save(np_save_point+'all_valid_precision_scores',all_valid_precision_scores)
np.save(np_save_point+'all_valid_recall_scores',all_valid_recall_scores)
except:
pass
# time watch
print("finish:", datetime.now().strftime("%Y/%m/%d %H:%M:%S"))
|
[
"argparse.ArgumentParser",
"cupy.empty",
"pandas.read_csv",
"cupy.hstack",
"chainer.no_backprop_mode",
"chainer.iterators.MultithreadIterator",
"chainer.serializers.load_npz",
"os.path.exists",
"cupy_augmentation.cupy_augmentation",
"util.make_accuracy_image",
"datetime.datetime.now",
"chainer.dataset.convert.concat_examples",
"numpy.save",
"chainer.functions.classification_summary",
"chainer.datasets.tuple_dataset.TupleDataset",
"yama_resnet50_revised.ResNet50Layers_transfer",
"chainer.Variable",
"chainer.optimizers.Adam",
"cupy.vstack",
"os.getcwd",
"chainer.optimizers.MomentumSGD",
"numpy.where",
"numpy.array",
"chainer.using_config",
"chainer.cuda.get_device_from_id",
"util.make_learning_image"
] |
[((12467, 12520), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Chainer v4.0.0"""'}), "(description='Chainer v4.0.0')\n", (12490, 12520), False, 'import argparse\n'), ((15758, 15820), 'pandas.read_csv', 'pd.read_csv', (['"""./dataset/weight_of_remark_new.csv"""'], {'index_col': '(0)'}), "('./dataset/weight_of_remark_new.csv', index_col=0)\n", (15769, 15820), True, 'import pandas as pd\n'), ((20738, 20854), 'util.make_learning_image', 'make_learning_image', ([], {'fig_path': 'fig_path', 'mean_train_losses': 'mean_train_losses', 'mean_valid_losses': 'mean_valid_losses'}), '(fig_path=fig_path, mean_train_losses=mean_train_losses,\n mean_valid_losses=mean_valid_losses)\n', (20757, 20854), False, 'from util import load_data, make_learning_image, make_accuracy_image\n'), ((20997, 21105), 'util.make_accuracy_image', 'make_accuracy_image', ([], {'fig_path': 'fig_path', 'mean_train_accs': 'mean_train_accs', 'mean_valid_accs': 'mean_valid_accs'}), '(fig_path=fig_path, mean_train_accs=mean_train_accs,\n mean_valid_accs=mean_valid_accs)\n', (21016, 21105), False, 'from util import load_data, make_learning_image, make_accuracy_image\n'), ((3800, 3891), 'chainer.iterators.MultithreadIterator', 'chainer.iterators.MultithreadIterator', (['train', 'self.batchsize'], {'repeat': '(True)', 'shuffle': '(True)'}), '(train, self.batchsize, repeat=True,\n shuffle=True)\n', (3837, 3891), False, 'import chainer\n'), ((3928, 4021), 'chainer.iterators.MultithreadIterator', 'chainer.iterators.MultithreadIterator', (['valid', 'self.batchsize'], {'repeat': '(False)', 'shuffle': '(False)'}), '(valid, self.batchsize, repeat=False,\n shuffle=False)\n', (3965, 4021), False, 'import chainer\n'), ((14720, 14744), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (14734, 14744), False, 'import os\n'), ((15898, 15946), 'numpy.where', 'np.where', (["(weight_sheet['remark'] == npy_filename)"], {}), "(weight_sheet['remark'] == npy_filename)\n", (15906, 15946), True, 'import numpy as np\n'), ((17377, 17438), 'chainer.datasets.tuple_dataset.TupleDataset', 'chainer.datasets.tuple_dataset.TupleDataset', (['x_train', 'y_train'], {}), '(x_train, y_train)\n', (17420, 17438), False, 'import chainer\n'), ((17455, 17516), 'chainer.datasets.tuple_dataset.TupleDataset', 'chainer.datasets.tuple_dataset.TupleDataset', (['x_valid', 'y_valid'], {}), '(x_valid, y_valid)\n', (17498, 17516), False, 'import chainer\n'), ((5136, 5161), 'cupy.empty', 'cupy.empty', (['(0)', 'cupy.int32'], {}), '(0, cupy.int32)\n', (5146, 5161), False, 'import cupy\n'), ((5163, 5188), 'cupy.empty', 'cupy.empty', (['(0)', 'cupy.int32'], {}), '(0, cupy.int32)\n', (5173, 5188), False, 'import cupy\n'), ((5223, 5264), 'cupy.empty', 'cupy.empty', (['(0, args.n_out)', 'cupy.float32'], {}), '((0, args.n_out), cupy.float32)\n', (5233, 5264), False, 'import cupy\n'), ((5263, 5304), 'cupy.empty', 'cupy.empty', (['(0, args.n_out)', 'cupy.float32'], {}), '((0, args.n_out), cupy.float32)\n', (5273, 5304), False, 'import cupy\n'), ((14904, 14931), 'os.path.exists', 'os.path.exists', (['resume_path'], {}), '(resume_path)\n', (14918, 14931), False, 'import os\n'), ((20103, 20134), 'numpy.array', 'np.array', (['all_best_valid_losses'], {}), '(all_best_valid_losses)\n', (20111, 20134), True, 'import numpy as np\n'), ((20176, 20205), 'numpy.array', 'np.array', (['all_best_valid_accs'], {}), '(all_best_valid_accs)\n', (20184, 20205), True, 'import numpy as np\n'), ((21234, 21301), 'numpy.save', 'np.save', (["(np_save_point + 
'all_train_f1_scores')", 'all_train_f1_scores'], {}), "(np_save_point + 'all_train_f1_scores', all_train_f1_scores)\n", (21241, 21301), True, 'import numpy as np\n'), ((21311, 21396), 'numpy.save', 'np.save', (["(np_save_point + 'all_train_precision_scores')", 'all_train_precision_scores'], {}), "(np_save_point + 'all_train_precision_scores',\n all_train_precision_scores)\n", (21318, 21396), True, 'import numpy as np\n'), ((21402, 21477), 'numpy.save', 'np.save', (["(np_save_point + 'all_train_recall_scores')", 'all_train_recall_scores'], {}), "(np_save_point + 'all_train_recall_scores', all_train_recall_scores)\n", (21409, 21477), True, 'import numpy as np\n'), ((21487, 21554), 'numpy.save', 'np.save', (["(np_save_point + 'all_valid_f1_scores')", 'all_valid_f1_scores'], {}), "(np_save_point + 'all_valid_f1_scores', all_valid_f1_scores)\n", (21494, 21554), True, 'import numpy as np\n'), ((21564, 21649), 'numpy.save', 'np.save', (["(np_save_point + 'all_valid_precision_scores')", 'all_valid_precision_scores'], {}), "(np_save_point + 'all_valid_precision_scores',\n all_valid_precision_scores)\n", (21571, 21649), True, 'import numpy as np\n'), ((21655, 21730), 'numpy.save', 'np.save', (["(np_save_point + 'all_valid_recall_scores')", 'all_valid_recall_scores'], {}), "(np_save_point + 'all_valid_recall_scores', all_valid_recall_scores)\n", (21662, 21730), True, 'import numpy as np\n'), ((2522, 2563), 'chainer.optimizers.Adam', 'chainer.optimizers.Adam', ([], {'alpha': 'args.alpha'}), '(alpha=args.alpha)\n', (2545, 2563), False, 'import chainer\n'), ((3006, 3070), 'chainer.serializers.load_npz', 'chainer.serializers.load_npz', (["(resume_path + '.model')", 'self.model'], {}), "(resume_path + '.model', self.model)\n", (3034, 3070), False, 'import chainer\n'), ((3087, 3155), 'chainer.serializers.load_npz', 'chainer.serializers.load_npz', (["(resume_path + '.state')", 'self.optimizer'], {}), "(resume_path + '.state', self.optimizer)\n", (3115, 3155), False, 'import chainer\n'), ((5656, 5696), 'chainer.dataset.convert.concat_examples', 'convert.concat_examples', (['batch', 'self.gpu'], {}), '(batch, self.gpu)\n', (5679, 5696), False, 'from chainer.dataset import convert\n'), ((5849, 5884), 'cupy.hstack', 'cupy.hstack', (['[all_train_t, t_array]'], {}), '([all_train_t, t_array])\n', (5860, 5884), False, 'import cupy\n'), ((5979, 5999), 'cupy_augmentation.cupy_augmentation', 'cupy_augmentation', (['x'], {}), '(x)\n', (5996, 5999), False, 'from cupy_augmentation import chw_cupy_random_rotate, chw_cupy_random_flip, cupy_augmentation\n'), ((6198, 6232), 'cupy.vstack', 'cupy.vstack', (['[all_train_y, y.data]'], {}), '([all_train_y, y.data])\n', (6209, 6232), False, 'import cupy\n'), ((14645, 14656), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (14654, 14656), False, 'import os\n'), ((16537, 16551), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16549, 16551), False, 'from datetime import datetime\n'), ((20262, 20288), 'numpy.array', 'np.array', (['all_train_losses'], {}), '(all_train_losses)\n', (20270, 20288), True, 'import numpy as np\n'), ((20303, 20329), 'numpy.array', 'np.array', (['all_valid_losses'], {}), '(all_valid_losses)\n', (20311, 20329), True, 'import numpy as np\n'), ((20382, 20406), 'numpy.array', 'np.array', (['all_train_accs'], {}), '(all_train_accs)\n', (20390, 20406), True, 'import numpy as np\n'), ((20421, 20445), 'numpy.array', 'np.array', (['all_valid_accs'], {}), '(all_valid_accs)\n', (20429, 20445), True, 'import numpy as np\n'), ((21800, 21814), 
'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (21812, 21814), False, 'from datetime import datetime\n'), ((2206, 2242), 'chainer.cuda.get_device_from_id', 'chainer.cuda.get_device_from_id', (['gpu'], {}), '(gpu)\n', (2237, 2242), False, 'import chainer\n'), ((2704, 2760), 'chainer.optimizers.MomentumSGD', 'chainer.optimizers.MomentumSGD', ([], {'lr': 'args.lr', 'momentum': '(0.9)'}), '(lr=args.lr, momentum=0.9)\n', (2734, 2760), False, 'import chainer\n'), ((5908, 5933), 'chainer.Variable', 'chainer.Variable', (['x_array'], {}), '(x_array)\n', (5924, 5933), False, 'import chainer\n'), ((5935, 5960), 'chainer.Variable', 'chainer.Variable', (['t_array'], {}), '(t_array)\n', (5951, 5960), False, 'import chainer\n'), ((7778, 7803), 'cupy.empty', 'cupy.empty', (['(0)', 'cupy.int32'], {}), '(0, cupy.int32)\n', (7788, 7803), False, 'import cupy\n'), ((7805, 7830), 'cupy.empty', 'cupy.empty', (['(0)', 'cupy.int32'], {}), '(0, cupy.int32)\n', (7815, 7830), False, 'import cupy\n'), ((7873, 7914), 'cupy.empty', 'cupy.empty', (['(0, args.n_out)', 'cupy.float32'], {}), '((0, args.n_out), cupy.float32)\n', (7883, 7914), False, 'import cupy\n'), ((7913, 7954), 'cupy.empty', 'cupy.empty', (['(0, args.n_out)', 'cupy.float32'], {}), '((0, args.n_out), cupy.float32)\n', (7923, 7954), False, 'import cupy\n'), ((8039, 8079), 'chainer.dataset.convert.concat_examples', 'convert.concat_examples', (['batch', 'self.gpu'], {}), '(batch, self.gpu)\n', (8062, 8079), False, 'from chainer.dataset import convert\n'), ((8112, 8147), 'cupy.hstack', 'cupy.hstack', (['[all_valid_t, t_array]'], {}), '([all_valid_t, t_array])\n', (8123, 8147), False, 'import cupy\n'), ((8743, 8777), 'cupy.vstack', 'cupy.vstack', (['[all_valid_y, y.data]'], {}), '([all_valid_y, y.data])\n', (8754, 8777), False, 'import cupy\n'), ((11273, 11298), 'cupy.empty', 'cupy.empty', (['(0)', 'cupy.int32'], {}), '(0, cupy.int32)\n', (11283, 11298), False, 'import cupy\n'), ((11300, 11325), 'cupy.empty', 'cupy.empty', (['(0)', 'cupy.int32'], {}), '(0, cupy.int32)\n', (11310, 11325), False, 'import cupy\n'), ((11368, 11409), 'cupy.empty', 'cupy.empty', (['(0, args.n_out)', 'cupy.float32'], {}), '((0, args.n_out), cupy.float32)\n', (11378, 11409), False, 'import cupy\n'), ((11408, 11449), 'cupy.empty', 'cupy.empty', (['(0, args.n_out)', 'cupy.float32'], {}), '((0, args.n_out), cupy.float32)\n', (11418, 11449), False, 'import cupy\n'), ((8230, 8255), 'chainer.Variable', 'chainer.Variable', (['x_array'], {}), '(x_array)\n', (8246, 8255), False, 'import chainer\n'), ((8257, 8282), 'chainer.Variable', 'chainer.Variable', (['t_array'], {}), '(t_array)\n', (8273, 8282), False, 'import chainer\n'), ((8309, 8345), 'chainer.using_config', 'chainer.using_config', (['"""train"""', '(False)'], {}), "('train', False)\n", (8329, 8345), False, 'import chainer\n'), ((8347, 8373), 'chainer.no_backprop_mode', 'chainer.no_backprop_mode', ([], {}), '()\n', (8371, 8373), False, 'import chainer\n'), ((21176, 21187), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (21185, 21187), False, 'import os\n'), ((1880, 1956), 'yama_resnet50_revised.ResNet50Layers_transfer', 'yama_net.ResNet50Layers_transfer', ([], {'n_out': 'n_out', 'entropy_weight': 'entropy_weight'}), '(n_out=n_out, entropy_weight=entropy_weight)\n', (1912, 1956), True, 'import yama_resnet50_revised as yama_net\n'), ((7227, 7277), 'chainer.functions.classification_summary', 'F.classification_summary', (['all_train_y', 'all_train_t'], {}), '(all_train_y, all_train_t)\n', (7251, 7277), True, 'import 
chainer.functions as F\n'), ((7398, 7448), 'chainer.functions.classification_summary', 'F.classification_summary', (['all_train_y', 'all_train_t'], {}), '(all_train_y, all_train_t)\n', (7422, 7448), True, 'import chainer.functions as F\n'), ((7559, 7609), 'chainer.functions.classification_summary', 'F.classification_summary', (['all_train_y', 'all_train_t'], {}), '(all_train_y, all_train_t)\n', (7583, 7609), True, 'import chainer.functions as F\n'), ((9457, 9507), 'chainer.functions.classification_summary', 'F.classification_summary', (['all_valid_y', 'all_valid_t'], {}), '(all_valid_y, all_valid_t)\n', (9481, 9507), True, 'import chainer.functions as F\n'), ((9628, 9678), 'chainer.functions.classification_summary', 'F.classification_summary', (['all_valid_y', 'all_valid_t'], {}), '(all_valid_y, all_valid_t)\n', (9652, 9678), True, 'import chainer.functions as F\n'), ((9789, 9839), 'chainer.functions.classification_summary', 'F.classification_summary', (['all_valid_y', 'all_valid_t'], {}), '(all_valid_y, all_valid_t)\n', (9813, 9839), True, 'import chainer.functions as F\n'), ((20615, 20626), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (20624, 20626), False, 'import os\n'), ((20877, 20888), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (20886, 20888), False, 'import os\n')]
|
import os
import numpy as np
from Simulation import Simulation
from utils import is_empty, erase_files
if __name__ == "__main__":
# step to begin and step to end for all the simulations
step_to_begin = 0
step_to_end = 5000
    # list of numbers of boids; must be the same length as the list_directories variable
list_num_boids = [[200] * 4, [500] * 4, [1000] * 4]
# number of simulation to generate for each population
num_run = 10
# data of simulation number :i: with a population of :num_boids:
# will be stored in /data/simulation_data_:num_boids:_Boids_:i:
for num_boids in list_num_boids:
for run in range(num_run):
directory = "simulation_data_" + str(num_boids) + "_Boids_" + str(run) + "/"
app = Simulation(list_num_boids=num_boids,
repository=directory,
step=step_to_begin)
if os.path.exists("data/" + directory):
if not is_empty("data/" + directory):
erase_files("data/" + directory)
else:
print("local directory " + "data/" + directory + " does not exist")
print("creating new directory")
os.mkdir("data/" + directory)
# set init step
app.step = step_to_begin
for i in np.arange(step_to_begin, step_to_end, 1):
app.animate(i)
|
[
"os.mkdir",
"utils.is_empty",
"Simulation.Simulation",
"os.path.exists",
"numpy.arange",
"utils.erase_files"
] |
[((768, 846), 'Simulation.Simulation', 'Simulation', ([], {'list_num_boids': 'num_boids', 'repository': 'directory', 'step': 'step_to_begin'}), '(list_num_boids=num_boids, repository=directory, step=step_to_begin)\n', (778, 846), False, 'from Simulation import Simulation\n'), ((921, 956), 'os.path.exists', 'os.path.exists', (["('data/' + directory)"], {}), "('data/' + directory)\n", (935, 956), False, 'import os\n'), ((1350, 1390), 'numpy.arange', 'np.arange', (['step_to_begin', 'step_to_end', '(1)'], {}), '(step_to_begin, step_to_end, 1)\n', (1359, 1390), True, 'import numpy as np\n'), ((1232, 1261), 'os.mkdir', 'os.mkdir', (["('data/' + directory)"], {}), "('data/' + directory)\n", (1240, 1261), False, 'import os\n'), ((982, 1011), 'utils.is_empty', 'is_empty', (["('data/' + directory)"], {}), "('data/' + directory)\n", (990, 1011), False, 'from utils import is_empty, erase_files\n'), ((1033, 1065), 'utils.erase_files', 'erase_files', (["('data/' + directory)"], {}), "('data/' + directory)\n", (1044, 1065), False, 'from utils import is_empty, erase_files\n')]
|
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from config import cfg
# TODO: argscope for detailed setting in fpn and rpn
def create_anchors(feats, stride, scales, aspect_ratios=[0.5, 1, 2], base_size=16):
feat_size = cfg.image_size / stride
num_ratios = len(aspect_ratios)
num_scales = len(scales)
ctr = 0.5*base_size
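    # base anchors: keep the area fixed at base_size**2 while varying the aspect ratio,
    # then multiply by every scale, giving len(scales)*len(aspect_ratios) boxes per cell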
aspr = np.array(aspect_ratios)
fixed_area = base_size**2
ratio_wh = np.zeros((num_ratios, 2))
ratio_wh[:,0] = np.round(np.sqrt(fixed_area/aspr))
ratio_wh[:,1] = np.round(np.sqrt(fixed_area*aspr))
scs = np.array(scales).reshape(-1, 1, 1)
scale_wh = scs * ratio_wh[np.newaxis, :, :]
scale_wh = scale_wh.reshape(-1, 2)
base_anchors = np.hstack((ctr-0.5*scale_wh, ctr+0.5*scale_wh))
anchors = np.zeros((int(feat_size), int(feat_size), num_ratios*num_scales, 4), np.float32)
anchors += base_anchors.reshape(1,1,-1,4)
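    # offset the shared base anchors to every feature-map cell (stride pixels apart),
    # then clip the resulting boxes to the image boundary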
anchors[:,:,:,[0,2]] += np.arange(feat_size).reshape(-1,1,1,1) * stride
anchors[:,:,:,[1,3]] += np.arange(feat_size).reshape(-1,1,1,1) * stride
anchors = np.minimum(cfg.image_size-1, np.maximum(0.0, anchors))
print('anchors\n', anchors.reshape(-1,4))
print('base anchors\n', base_anchors)
anchors = tf.convert_to_tensor(anchors, dtype=tf.float32)
return anchors
def rpn_logits(feats, ratios):
out_anchors = []
out_loc = []
out_cls = []
for i, feat in enumerate(feats):
ratio = ratios[i]
# create anchors
anchors = create_anchors(feat, 2**ratio, [2**(ratio-2), 2**(ratio-1), 2**ratio])
num_anchors = anchors.get_shape().as_list()[-2]
# predict cls, coordinate
initializer = tf.truncated_normal_initializer(stddev=0.001)
conv_feat = slim.conv2d(feat, 512, 3,
weights_initializer=initializer)
loc = slim.conv2d(conv_feat, num_anchors*4, 1, activation_fn = None,
weights_initializer=initializer)
cls = slim.conv2d(conv_feat, num_anchors*2, 1, activation_fn = None,
weights_initializer=initializer)
        # flatten spatial and anchor dims: anchors -> (H*W*A, 4), loc -> (N, H*W*A, 4), cls -> (N, H*W*A, 2)
out_anchors.append(tf.reshape(anchors, (-1, 4))) # shape: [H*W*N_anchor, 4]
out_loc.append(tf.reshape(loc, (cfg.batch_size, -1, 4))) # shape: [N, H*W*num_anchor, 4]
out_cls.append(tf.reshape(cls, (cfg.batch_size, -1, 2))) # shape: [N, H*W*num_anchor]
out_anchors = tf.concat(out_anchors, axis=0)
out_loc = tf.concat(out_loc, axis=1)
out_cls = tf.concat(out_cls, axis=1)
return out_anchors, out_loc, out_cls
def decode_roi(anchors, loc, cls):
'''
Inputs
- anchors: anchor boxes, a tensor of shape [H*W*N_anchor, 4]
- loc: the location rpn logits, a tensor of shape [N, H*W*N_anchor, 4]
- cls: the class rpn logits, a tensor of shape [N, H*W*N_anchor, 2]
Ouputs
- boxes: the bbox coordinates (xmin, ymin, xmax, ymax),
a tensor of shape [N, H*W*N_anchor, 4], the
decoded [xmin, ymin, xmax, ymax] for each box
- probs: probability of object, a tensor of shape [N, H*W*N_anchor]
'''
H, W = 1.*cfg.image_size, 1.*cfg.image_size
anchors = tf.expand_dims(anchors, 0)
anc_widths = anchors[:,:,2] - anchors[:,:, 0]
anc_heights = anchors[:,:,3] - anchors[:,:,1]
anc_ctrx = anchors[:,:,0] + 0.5 * anc_widths
anc_ctry = anchors[:,:,1] + 0.5 * anc_heights
loc = loc * cfg.bbox_stddev.reshape(1,1,4) + cfg.bbox_mean.reshape(1,1,4)
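    # standard R-CNN box decoding: (dx, dy) shift the anchor centre in units of the anchor
    # size, while (dw, dh) scale its width and height through an exponential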
box_ctrx = loc[:,:,0] * (anc_widths+cfg.eps) + anc_ctrx
box_ctry = loc[:,:,1] * (anc_heights+cfg.eps) + anc_ctry
box_w = (anc_widths+cfg.eps) * (tf.exp(loc[:,:,2])-cfg.log_eps)
box_h = (anc_heights+cfg.eps) * (tf.exp(loc[:,:,3])-cfg.log_eps)
box_minx = box_ctrx - 0.5 * box_w
box_miny = box_ctry - 0.5 * box_h
box_maxx = box_ctrx + 0.5 * box_w
box_maxy = box_ctry + 0.5 * box_h
boxes = tf.stack([box_minx, box_miny, box_maxx, box_maxy], axis=2)
probs = tf.nn.softmax(cls)
probs = probs[:,:,1]
rois = {'anchor': anchors, 'box': boxes, 'prob': probs}
return rois
def refine_roi(boxes, probs, pre_nms_topn):
image_size = cfg.image_size
min_size = cfg.min_size
# filter with scores
_, order = tf.nn.top_k(probs, tf.minimum(pre_nms_topn, tf.size(probs)))
boxes = tf.gather(boxes, order)
probs = tf.gather(probs, order)
# filter too small boxes
widths = boxes[:,2] - boxes[:,0]
heights = boxes[:,3] - boxes[:,1]
keep = tf.logical_and(widths >= min_size, heights >= min_size)
boxes = tf.boolean_mask(boxes, keep)
probs = tf.boolean_mask(probs, keep)
return boxes, probs
def refine_rois(rois, training):
image_size = cfg.image_size
min_size = cfg.min_size
nms_thresh = cfg.rpn_nms_thresh
proposal_count = cfg.proposal_count_infer
batch_size = 1
box_mean = cfg.bbox_mean.reshape(1, 1, 4)
box_stddev = cfg.bbox_stddev.reshape(1, 1, 4)
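    # keep more top-scoring candidates before NMS during training than at inference time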
pre_nms_topn = 12000
if not training:
pre_nms_topn = 6000
boxes, probs = rois['box'], rois['prob']
boxes = boxes * box_stddev + box_mean
N = boxes.shape[0]
roi_batch = []
for i in range(batch_size):
box, prob = boxes[i], probs[i]
box, prob = tf.reshape(box, [-1, 4]), tf.reshape(prob, [-1])
nonms_box, nonms_probs = refine_roi(box, prob, pre_nms_topn)
indices = tf.image.non_max_suppression(nonms_box, nonms_probs, proposal_count, nms_thresh)
proposals = tf.gather(nonms_box, indices)
padding = proposal_count-tf.shape(proposals)[0]
proposals = tf.reshape(tf.pad(proposals, [[0, padding], [0,0]]), [proposal_count, 4])
roi_batch.append(proposals)
final_proposal = tf.stack(roi_batch, axis=0)
return final_proposal
def crop_proposals(feats, crop_size, boxes, training):
crop_channel = feats[0].shape[-1]
image_size = cfg.image_size
proposal_count = cfg.rois_per_img if training else cfg.proposal_count_infer
x1, y1, x2, y2 = tf.split(boxes, 4, axis=2)
x1, y1, x2, y2 = x1[:,:,0], y1[:,:,0], x2[:,:,0], y2[:,:,0]
w = x2 - x1
h = y2 - y1
if not cfg.use_fpn:
output = tf.image.crop_and_resize(feats[0], tf.reshape(boxes, (-1,4)),
tf.range(cfg.batch_size*proposal_count)//proposal_count,
[crop_size, crop_size])
else:
# adaptive features in fpn
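        # map each RoI to a pyramid level: k ~ 4 + log2(sqrt(w*h) / image_size), clipped to
        # [2, 5], so small boxes pool from the finer maps and large boxes from the coarser ones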
ks = tf.log(tf.sqrt(w*h)/(image_size+cfg.eps)+cfg.log_eps) / tf.log(tf.constant(2.0))
ks = 4 + tf.cast(tf.round(ks), tf.int32)
ks = tf.minimum(5, tf.maximum(2, ks))
# crop and resize
outputs = []
original_ind = []
for i, curk in enumerate(range(2, 6)):
filtered_ind = tf.where(tf.equal(ks, curk))
cur_boxes = tf.gather_nd(boxes, filtered_ind)
batch_ind = tf.cast(filtered_ind[:, 0], tf.int32)
original_ind.append(batch_ind)
out = tf.image.crop_and_resize(feats[i], cur_boxes/cfg.image_size, batch_ind, [crop_size, crop_size])
#out = tf.stop_gradient(out)
outputs.append(out)
# encapsulate
out = tf.concat(outputs, axis=0)
original_ind = tf.concat(original_ind, axis=0)
# re-arrange
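        # the level-wise crops were concatenated out of order; sorting by the composite key
        # (batch index * total boxes + running position) groups the crops back by batch element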
num_total_box = tf.shape(original_ind)[0]
ind_total_box = tf.range(num_total_box)
sort_ind = original_ind * num_total_box + ind_total_box
ind = tf.nn.top_k(sort_ind, k=num_total_box).indices[::-1]
output = tf.gather(out, ind)
output = tf.reshape(output, [-1, crop_size, crop_size, crop_channel])
return output
|
[
"numpy.maximum",
"config.cfg.bbox_mean.reshape",
"tensorflow.maximum",
"tensorflow.gather_nd",
"tensorflow.reshape",
"tensorflow.image.crop_and_resize",
"numpy.arange",
"tensorflow.sqrt",
"tensorflow.split",
"tensorflow.nn.softmax",
"tensorflow.contrib.slim.conv2d",
"tensorflow.logical_and",
"tensorflow.size",
"tensorflow.gather",
"tensorflow.nn.top_k",
"tensorflow.pad",
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.cast",
"tensorflow.exp",
"tensorflow.truncated_normal_initializer",
"tensorflow.equal",
"tensorflow.range",
"tensorflow.constant",
"numpy.hstack",
"tensorflow.round",
"tensorflow.expand_dims",
"tensorflow.image.non_max_suppression",
"config.cfg.bbox_stddev.reshape",
"tensorflow.convert_to_tensor",
"numpy.zeros",
"tensorflow.shape",
"numpy.array",
"tensorflow.boolean_mask",
"numpy.sqrt"
] |
[((384, 407), 'numpy.array', 'np.array', (['aspect_ratios'], {}), '(aspect_ratios)\n', (392, 407), True, 'import numpy as np\n'), ((453, 478), 'numpy.zeros', 'np.zeros', (['(num_ratios, 2)'], {}), '((num_ratios, 2))\n', (461, 478), True, 'import numpy as np\n'), ((746, 801), 'numpy.hstack', 'np.hstack', (['(ctr - 0.5 * scale_wh, ctr + 0.5 * scale_wh)'], {}), '((ctr - 0.5 * scale_wh, ctr + 0.5 * scale_wh))\n', (755, 801), True, 'import numpy as np\n'), ((1263, 1310), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['anchors'], {'dtype': 'tf.float32'}), '(anchors, dtype=tf.float32)\n', (1283, 1310), True, 'import tensorflow as tf\n'), ((2459, 2489), 'tensorflow.concat', 'tf.concat', (['out_anchors'], {'axis': '(0)'}), '(out_anchors, axis=0)\n', (2468, 2489), True, 'import tensorflow as tf\n'), ((2504, 2530), 'tensorflow.concat', 'tf.concat', (['out_loc'], {'axis': '(1)'}), '(out_loc, axis=1)\n', (2513, 2530), True, 'import tensorflow as tf\n'), ((2545, 2571), 'tensorflow.concat', 'tf.concat', (['out_cls'], {'axis': '(1)'}), '(out_cls, axis=1)\n', (2554, 2571), True, 'import tensorflow as tf\n'), ((3223, 3249), 'tensorflow.expand_dims', 'tf.expand_dims', (['anchors', '(0)'], {}), '(anchors, 0)\n', (3237, 3249), True, 'import tensorflow as tf\n'), ((3952, 4010), 'tensorflow.stack', 'tf.stack', (['[box_minx, box_miny, box_maxx, box_maxy]'], {'axis': '(2)'}), '([box_minx, box_miny, box_maxx, box_maxy], axis=2)\n', (3960, 4010), True, 'import tensorflow as tf\n'), ((4024, 4042), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['cls'], {}), '(cls)\n', (4037, 4042), True, 'import tensorflow as tf\n'), ((4364, 4387), 'tensorflow.gather', 'tf.gather', (['boxes', 'order'], {}), '(boxes, order)\n', (4373, 4387), True, 'import tensorflow as tf\n'), ((4400, 4423), 'tensorflow.gather', 'tf.gather', (['probs', 'order'], {}), '(probs, order)\n', (4409, 4423), True, 'import tensorflow as tf\n'), ((4541, 4596), 'tensorflow.logical_and', 'tf.logical_and', (['(widths >= min_size)', '(heights >= min_size)'], {}), '(widths >= min_size, heights >= min_size)\n', (4555, 4596), True, 'import tensorflow as tf\n'), ((4609, 4637), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['boxes', 'keep'], {}), '(boxes, keep)\n', (4624, 4637), True, 'import tensorflow as tf\n'), ((4650, 4678), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['probs', 'keep'], {}), '(probs, keep)\n', (4665, 4678), True, 'import tensorflow as tf\n'), ((4913, 4943), 'config.cfg.bbox_mean.reshape', 'cfg.bbox_mean.reshape', (['(1)', '(1)', '(4)'], {}), '(1, 1, 4)\n', (4934, 4943), False, 'from config import cfg\n'), ((4961, 4993), 'config.cfg.bbox_stddev.reshape', 'cfg.bbox_stddev.reshape', (['(1)', '(1)', '(4)'], {}), '(1, 1, 4)\n', (4984, 4993), False, 'from config import cfg\n'), ((5771, 5798), 'tensorflow.stack', 'tf.stack', (['roi_batch'], {'axis': '(0)'}), '(roi_batch, axis=0)\n', (5779, 5798), True, 'import tensorflow as tf\n'), ((6052, 6078), 'tensorflow.split', 'tf.split', (['boxes', '(4)'], {'axis': '(2)'}), '(boxes, 4, axis=2)\n', (6060, 6078), True, 'import tensorflow as tf\n'), ((7637, 7697), 'tensorflow.reshape', 'tf.reshape', (['output', '[-1, crop_size, crop_size, crop_channel]'], {}), '(output, [-1, crop_size, crop_size, crop_channel])\n', (7647, 7697), True, 'import tensorflow as tf\n'), ((508, 534), 'numpy.sqrt', 'np.sqrt', (['(fixed_area / aspr)'], {}), '(fixed_area / aspr)\n', (515, 534), True, 'import numpy as np\n'), ((563, 589), 'numpy.sqrt', 'np.sqrt', (['(fixed_area * aspr)'], {}), '(fixed_area * aspr)\n', (570, 589), True, 
'import numpy as np\n'), ((1135, 1159), 'numpy.maximum', 'np.maximum', (['(0.0)', 'anchors'], {}), '(0.0, anchors)\n', (1145, 1159), True, 'import numpy as np\n'), ((1724, 1769), 'tensorflow.truncated_normal_initializer', 'tf.truncated_normal_initializer', ([], {'stddev': '(0.001)'}), '(stddev=0.001)\n', (1755, 1769), True, 'import tensorflow as tf\n'), ((1790, 1848), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['feat', '(512)', '(3)'], {'weights_initializer': 'initializer'}), '(feat, 512, 3, weights_initializer=initializer)\n', (1801, 1848), True, 'import tensorflow.contrib.slim as slim\n'), ((1883, 1982), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['conv_feat', '(num_anchors * 4)', '(1)'], {'activation_fn': 'None', 'weights_initializer': 'initializer'}), '(conv_feat, num_anchors * 4, 1, activation_fn=None,\n weights_initializer=initializer)\n', (1894, 1982), True, 'import tensorflow.contrib.slim as slim\n'), ((2013, 2112), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['conv_feat', '(num_anchors * 2)', '(1)'], {'activation_fn': 'None', 'weights_initializer': 'initializer'}), '(conv_feat, num_anchors * 2, 1, activation_fn=None,\n weights_initializer=initializer)\n', (2024, 2112), True, 'import tensorflow.contrib.slim as slim\n'), ((3500, 3530), 'config.cfg.bbox_mean.reshape', 'cfg.bbox_mean.reshape', (['(1)', '(1)', '(4)'], {}), '(1, 1, 4)\n', (3521, 3530), False, 'from config import cfg\n'), ((5432, 5517), 'tensorflow.image.non_max_suppression', 'tf.image.non_max_suppression', (['nonms_box', 'nonms_probs', 'proposal_count', 'nms_thresh'], {}), '(nonms_box, nonms_probs, proposal_count, nms_thresh\n )\n', (5460, 5517), True, 'import tensorflow as tf\n'), ((5534, 5563), 'tensorflow.gather', 'tf.gather', (['nonms_box', 'indices'], {}), '(nonms_box, indices)\n', (5543, 5563), True, 'import tensorflow as tf\n'), ((7254, 7280), 'tensorflow.concat', 'tf.concat', (['outputs'], {'axis': '(0)'}), '(outputs, axis=0)\n', (7263, 7280), True, 'import tensorflow as tf\n'), ((7304, 7335), 'tensorflow.concat', 'tf.concat', (['original_ind'], {'axis': '(0)'}), '(original_ind, axis=0)\n', (7313, 7335), True, 'import tensorflow as tf\n'), ((7432, 7455), 'tensorflow.range', 'tf.range', (['num_total_box'], {}), '(num_total_box)\n', (7440, 7455), True, 'import tensorflow as tf\n'), ((7604, 7623), 'tensorflow.gather', 'tf.gather', (['out', 'ind'], {}), '(out, ind)\n', (7613, 7623), True, 'import tensorflow as tf\n'), ((600, 616), 'numpy.array', 'np.array', (['scales'], {}), '(scales)\n', (608, 616), True, 'import numpy as np\n'), ((2193, 2221), 'tensorflow.reshape', 'tf.reshape', (['anchors', '(-1, 4)'], {}), '(anchors, (-1, 4))\n', (2203, 2221), True, 'import tensorflow as tf\n'), ((2273, 2313), 'tensorflow.reshape', 'tf.reshape', (['loc', '(cfg.batch_size, -1, 4)'], {}), '(loc, (cfg.batch_size, -1, 4))\n', (2283, 2313), True, 'import tensorflow as tf\n'), ((2370, 2410), 'tensorflow.reshape', 'tf.reshape', (['cls', '(cfg.batch_size, -1, 2)'], {}), '(cls, (cfg.batch_size, -1, 2))\n', (2380, 2410), True, 'import tensorflow as tf\n'), ((3467, 3499), 'config.cfg.bbox_stddev.reshape', 'cfg.bbox_stddev.reshape', (['(1)', '(1)', '(4)'], {}), '(1, 1, 4)\n', (3490, 3499), False, 'from config import cfg\n'), ((3686, 3706), 'tensorflow.exp', 'tf.exp', (['loc[:, :, 2]'], {}), '(loc[:, :, 2])\n', (3692, 3706), True, 'import tensorflow as tf\n'), ((3755, 3775), 'tensorflow.exp', 'tf.exp', (['loc[:, :, 3]'], {}), '(loc[:, :, 3])\n', (3761, 3775), True, 'import tensorflow as tf\n'), ((4335, 4349), 
'tensorflow.size', 'tf.size', (['probs'], {}), '(probs)\n', (4342, 4349), True, 'import tensorflow as tf\n'), ((5295, 5319), 'tensorflow.reshape', 'tf.reshape', (['box', '[-1, 4]'], {}), '(box, [-1, 4])\n', (5305, 5319), True, 'import tensorflow as tf\n'), ((5321, 5343), 'tensorflow.reshape', 'tf.reshape', (['prob', '[-1]'], {}), '(prob, [-1])\n', (5331, 5343), True, 'import tensorflow as tf\n'), ((5651, 5692), 'tensorflow.pad', 'tf.pad', (['proposals', '[[0, padding], [0, 0]]'], {}), '(proposals, [[0, padding], [0, 0]])\n', (5657, 5692), True, 'import tensorflow as tf\n'), ((6252, 6278), 'tensorflow.reshape', 'tf.reshape', (['boxes', '(-1, 4)'], {}), '(boxes, (-1, 4))\n', (6262, 6278), True, 'import tensorflow as tf\n'), ((6660, 6677), 'tensorflow.maximum', 'tf.maximum', (['(2)', 'ks'], {}), '(2, ks)\n', (6670, 6677), True, 'import tensorflow as tf\n'), ((6880, 6913), 'tensorflow.gather_nd', 'tf.gather_nd', (['boxes', 'filtered_ind'], {}), '(boxes, filtered_ind)\n', (6892, 6913), True, 'import tensorflow as tf\n'), ((6938, 6975), 'tensorflow.cast', 'tf.cast', (['filtered_ind[:, 0]', 'tf.int32'], {}), '(filtered_ind[:, 0], tf.int32)\n', (6945, 6975), True, 'import tensorflow as tf\n'), ((7048, 7150), 'tensorflow.image.crop_and_resize', 'tf.image.crop_and_resize', (['feats[i]', '(cur_boxes / cfg.image_size)', 'batch_ind', '[crop_size, crop_size]'], {}), '(feats[i], cur_boxes / cfg.image_size, batch_ind, [\n crop_size, crop_size])\n', (7072, 7150), True, 'import tensorflow as tf\n'), ((7382, 7404), 'tensorflow.shape', 'tf.shape', (['original_ind'], {}), '(original_ind)\n', (7390, 7404), True, 'import tensorflow as tf\n'), ((968, 988), 'numpy.arange', 'np.arange', (['feat_size'], {}), '(feat_size)\n', (977, 988), True, 'import numpy as np\n'), ((1044, 1064), 'numpy.arange', 'np.arange', (['feat_size'], {}), '(feat_size)\n', (1053, 1064), True, 'import numpy as np\n'), ((5597, 5616), 'tensorflow.shape', 'tf.shape', (['proposals'], {}), '(proposals)\n', (5605, 5616), True, 'import tensorflow as tf\n'), ((6322, 6363), 'tensorflow.range', 'tf.range', (['(cfg.batch_size * proposal_count)'], {}), '(cfg.batch_size * proposal_count)\n', (6330, 6363), True, 'import tensorflow as tf\n'), ((6566, 6582), 'tensorflow.constant', 'tf.constant', (['(2.0)'], {}), '(2.0)\n', (6577, 6582), True, 'import tensorflow as tf\n'), ((6609, 6621), 'tensorflow.round', 'tf.round', (['ks'], {}), '(ks)\n', (6617, 6621), True, 'import tensorflow as tf\n'), ((6836, 6854), 'tensorflow.equal', 'tf.equal', (['ks', 'curk'], {}), '(ks, curk)\n', (6844, 6854), True, 'import tensorflow as tf\n'), ((7534, 7572), 'tensorflow.nn.top_k', 'tf.nn.top_k', (['sort_ind'], {'k': 'num_total_box'}), '(sort_ind, k=num_total_box)\n', (7545, 7572), True, 'import tensorflow as tf\n'), ((6510, 6524), 'tensorflow.sqrt', 'tf.sqrt', (['(w * h)'], {}), '(w * h)\n', (6517, 6524), True, 'import tensorflow as tf\n')]
|
"""
Here we do inference on a DICOM volume: we construct the volume from a series of DICOM files,
run the model on it, and send the resulting report to the clinical archive.
This code will do the following:
1. Identify the series to run HippoCrop.AI algorithm on from a folder containing multiple studies
2. Construct a NumPy volume from a set of DICOM files
3. Run inference on the constructed volume
4. Create report from the inference
5. Call a shell script to push report to the storage archive
"""
import os
import sys
import datetime
import time
import shutil
import subprocess
import numpy as np
import pydicom
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from inference.UNetInferenceAgent import UNetInferenceAgent
def load_dicom_volume_as_numpy_from_list(dcmlist):
"""Loads a list of PyDicom objects a Numpy array.
Assumes that only one series is in the array
Arguments:
dcmlist {list of PyDicom objects} -- path to directory
Returns:
tuple of (3D volume, header of the 1st image)
"""
slices = [np.flip(dcm.pixel_array).T for dcm in sorted(dcmlist, key=lambda dcm: dcm.InstanceNumber)]
hdr = dcmlist[0]
# zero-out Pixel Data since the users of this function are only interested in metadata
hdr.PixelData = None
return (np.stack(slices, 2), hdr)
def get_predicted_volumes(pred):
"""Gets volumes of two hippocampal structures from the predicted array
Arguments:
pred {Numpy array} -- array with labels. Assuming 0 is bg, 1 is anterior, 2 is posterior
Returns:
A dictionary with respective volumes
"""
volume_ant = np.sum(pred == 1)
volume_post = np.sum(pred == 2)
total_volume = np.sum(pred > 0)
return {"anterior": volume_ant, "posterior": volume_post, "total": total_volume}
def create_report(inference, header, orig_vol, pred_vol):
"""Generates an image with inference report
Arguments:
inference {Dictionary} -- dict containing anterior, posterior and full volume values
header {PyDicom Dataset} -- DICOM header
orig_vol {Numpy array} -- original volume
pred_vol {Numpy array} -- predicted label
Returns:
PIL image
"""
# The code below uses PIL image library to compose an RGB image that will go into the report
# A standard way of storing measurement data in DICOM archives is creating such report and
# sending them on as Secondary Capture IODs (http://dicom.nema.org/medical/dicom/current/output/chtml/part03/sect_A.8.html)
# Essentially, the report is just a standard RGB image, with some metadata, packed into
# DICOM format.
pimg = Image.new("RGB", (1000, 1000))
draw = ImageDraw.Draw(pimg)
header_font = ImageFont.truetype("assets/Roboto-Regular.ttf", size=40)
main_font = ImageFont.truetype("assets/Roboto-Regular.ttf", size=20)
# slice_nums = [orig_vol.shape[2]//3, orig_vol.shape[2]//2, orig_vol.shape[2]*3//4]
# Create the report and show information relevant to clinicians.
draw.text((50, 50), "HippoVolume.AI", (255, 255, 255), font=header_font)
draw.multiline_text((50, 140),
f"Patient ID: {header.PatientID} \n \
Study Description : {header.StudyDescription}\n \
Series Description: {header.SeriesDescription}\n \
Modality: {header.Modality}\n \
Image Type: {header.ImageType}\n \
Anterior Volume: {inference['anterior']}\n \
Posterior Volume: {inference['posterior']}\n \
Total Volume: {inference['total']}\n",
(255, 255, 255), font=main_font)
# Create a PIL image from array:
# Numpy array needs to flipped, transposed and normalized to a matrix of values in the range of [0..255]
nd_orig = np.flip((orig_vol[0, :, :]/np.max(orig_vol[0, :, :]))*0xff).T.astype(np.uint8)
# create a PIL image from numpy array
pil_orig = Image.fromarray(nd_orig, mode="L").convert("RGBA").resize((400, 400))
# paste the PIL image into the main report image object (pimg)
pimg.paste(pil_orig, box=(50, 500))
nd_pred = np.flip((pred_vol[0, :, :]/np.max(pred_vol[0, :, :]))*0xff).T.astype(np.uint8)
# create a PIL image from numpy array
pil_pred = Image.fromarray(nd_pred, mode="L").convert("RGBA").resize((400, 400))
# paste the PIL image into the main report image object (pimg)
pimg.paste(pil_pred, box=(550, 500))
return pimg
def save_report_as_dcm(header, report, path):
"""Writes the supplied image as a DICOM Secondary Capture file
Arguments:
header {PyDicom Dataset} -- original DICOM file header
report {PIL image} -- image representing the report
        path {string} -- where to save the report
Returns:
N/A
"""
# create a DICOM Secondary Capture instance that will be correctly interpreted by most imaging viewers including OHIF
# Set up DICOM metadata fields. Most of them will be the same as original file header
out = pydicom.Dataset(header)
out.file_meta = pydicom.Dataset()
out.file_meta.TransferSyntaxUID = pydicom.uid.ExplicitVRLittleEndian
out.is_little_endian = True
out.is_implicit_VR = False
# change class to Secondary Capture
out.SOPClassUID = "1.2.840.10008.5.1.4.1.1.7"
out.file_meta.MediaStorageSOPClassUID = out.SOPClassUID
# The report is a separate image series of one image
out.SeriesInstanceUID = pydicom.uid.generate_uid()
out.SOPInstanceUID = pydicom.uid.generate_uid()
out.file_meta.MediaStorageSOPInstanceUID = out.SOPInstanceUID
out.Modality = "OT"
out.SeriesDescription = "HippoVolume.AI"
out.Rows = report.height
out.Columns = report.width
out.ImageType = r"DERIVED\PRIMARY\AXIAL" # deriving this image from patient data
out.SamplesPerPixel = 3 # building an RGB image.
out.PhotometricInterpretation = "RGB"
out.PlanarConfiguration = 0 # bytes encode pixels as R1G1B1R2G2B2... as opposed to R1R2R3...G1G2G3...
out.BitsAllocated = 8 # using 8 bits/pixel
out.BitsStored = 8
out.HighBit = 7
out.PixelRepresentation = 0
# Set time and date
dt = datetime.date.today().strftime("%Y%m%d")
tm = datetime.datetime.now().strftime("%H%M%S")
out.StudyDate = dt
out.StudyTime = tm
out.SeriesDate = dt
out.SeriesTime = tm
out.ImagesInAcquisition = 1
# empty these since most viewers will then default to auto W/L
out.WindowCenter = ""
out.WindowWidth = ""
# Data imprinted directly into image pixels is called "burned in annotation"
out.BurnedInAnnotation = "YES"
out.PixelData = report.tobytes()
pydicom.filewriter.dcmwrite(path, out, write_like_original=False)
# path = '../TestVolumes'
def get_series_for_inference(path):
"""Reads multiple series from one folder and picks the one
to run inference on.
Arguments:
path {string} -- location of the DICOM files
Returns:
Numpy array representing the series
"""
series_path = [dir for dir, subdirs, files in os.walk(path) if 'HCropVolume' in dir]
chosen_path = np.random.choice(series_path)
series_for_inference = [pydicom.dcmread(os.path.join(chosen_path, f)) for f in os.listdir(chosen_path)]
# Check if there are more than one series (using set comprehension).
if len({f.SeriesInstanceUID for f in series_for_inference}) != 1:
print("Error: can not figure out what series to run inference on")
return []
return series_for_inference
def os_command(command):
# Comment this if running under Windows
sp = subprocess.Popen(["/bin/bash", "-i", "-c", command])
sp.communicate()
# Uncomment this if running under Windows
# os.system(command)
if __name__ == "__main__":
# This code expects a single command line argument with link to the directory containing
# routed studies
if len(sys.argv) != 2:
print("You should supply one command line argument pointing to the routing folder. Exiting.")
sys.exit()
# Find all subdirectories within the supplied directory. We assume that
# one subdirectory contains a full study
subdirs = [os.path.join(sys.argv[1], d) for d in os.listdir(sys.argv[1]) if
os.path.isdir(os.path.join(sys.argv[1], d))]
# Get the latest directory
study_dir = sorted(subdirs, key=lambda dir: os.stat(dir).st_mtime, reverse=True)[0]
print(f"Looking for series to run inference on in directory {study_dir}...")
volume, header = load_dicom_volume_as_numpy_from_list(get_series_for_inference(study_dir))
print(f"Found series of {volume.shape[2]} axial slices")
print("HippoVolume.AI: Running inference...")
# Use the UNetInferenceAgent class and model parameter file from the previous section
inference_agent = UNetInferenceAgent(
device="cpu",
parameter_file_path=r"")
# Run inference
pred_label = inference_agent.single_volume_inference_unpadded(np.array(volume), 64)
pred_volumes = get_predicted_volumes(pred_label)
# Create and save the report
print("Creating and pushing report...")
report_save_path = r"../out/report.dcm"
report_img = create_report(pred_volumes, header, volume, pred_label)
save_report_as_dcm(header, report_img, report_save_path)
# Send report to the storage archive
os_command("sudo storescu localhost 4242 -v -aec HIPPOAI +r +sd ../out/report.dcm")
# remove the study dir if run as root user
# sleep to let the StoreSCP server process the report
# the main archive routes everything that is sent to it, including the freshly generated report
# I want to give it time to save before cleaning it up
time.sleep(2)
shutil.rmtree(study_dir, onerror=lambda f, p, e: print(f"Error deleting: {e[1]}"))
print(f"Inference successful on {header['SOPInstanceUID'].value}, out: {pred_label.shape}",
f"volume ant: {pred_volumes['anterior']}, ",
f"volume post: {pred_volumes['posterior']}, total volume: {pred_volumes['total']}")
|
[
"PIL.Image.new",
"numpy.sum",
"os.walk",
"pydicom.Dataset",
"os.path.join",
"numpy.max",
"pydicom.filewriter.dcmwrite",
"numpy.random.choice",
"PIL.ImageDraw.Draw",
"datetime.datetime.now",
"numpy.stack",
"subprocess.Popen",
"os.stat",
"datetime.date.today",
"time.sleep",
"os.listdir",
"sys.exit",
"inference.UNetInferenceAgent.UNetInferenceAgent",
"numpy.flip",
"PIL.ImageFont.truetype",
"numpy.array",
"PIL.Image.fromarray",
"pydicom.uid.generate_uid"
] |
[((1621, 1638), 'numpy.sum', 'np.sum', (['(pred == 1)'], {}), '(pred == 1)\n', (1627, 1638), True, 'import numpy as np\n'), ((1657, 1674), 'numpy.sum', 'np.sum', (['(pred == 2)'], {}), '(pred == 2)\n', (1663, 1674), True, 'import numpy as np\n'), ((1694, 1710), 'numpy.sum', 'np.sum', (['(pred > 0)'], {}), '(pred > 0)\n', (1700, 1710), True, 'import numpy as np\n'), ((2648, 2678), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(1000, 1000)'], {}), "('RGB', (1000, 1000))\n", (2657, 2678), False, 'from PIL import Image\n'), ((2690, 2710), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['pimg'], {}), '(pimg)\n', (2704, 2710), False, 'from PIL import ImageDraw\n'), ((2730, 2786), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""assets/Roboto-Regular.ttf"""'], {'size': '(40)'}), "('assets/Roboto-Regular.ttf', size=40)\n", (2748, 2786), False, 'from PIL import ImageFont\n'), ((2803, 2859), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""assets/Roboto-Regular.ttf"""'], {'size': '(20)'}), "('assets/Roboto-Regular.ttf', size=20)\n", (2821, 2859), False, 'from PIL import ImageFont\n'), ((5131, 5154), 'pydicom.Dataset', 'pydicom.Dataset', (['header'], {}), '(header)\n', (5146, 5154), False, 'import pydicom\n'), ((5176, 5193), 'pydicom.Dataset', 'pydicom.Dataset', ([], {}), '()\n', (5191, 5193), False, 'import pydicom\n'), ((5568, 5594), 'pydicom.uid.generate_uid', 'pydicom.uid.generate_uid', ([], {}), '()\n', (5592, 5594), False, 'import pydicom\n'), ((5620, 5646), 'pydicom.uid.generate_uid', 'pydicom.uid.generate_uid', ([], {}), '()\n', (5644, 5646), False, 'import pydicom\n'), ((6786, 6851), 'pydicom.filewriter.dcmwrite', 'pydicom.filewriter.dcmwrite', (['path', 'out'], {'write_like_original': '(False)'}), '(path, out, write_like_original=False)\n', (6813, 6851), False, 'import pydicom\n'), ((7250, 7279), 'numpy.random.choice', 'np.random.choice', (['series_path'], {}), '(series_path)\n', (7266, 7279), True, 'import numpy as np\n'), ((7739, 7791), 'subprocess.Popen', 'subprocess.Popen', (["['/bin/bash', '-i', '-c', command]"], {}), "(['/bin/bash', '-i', '-c', command])\n", (7755, 7791), False, 'import subprocess\n'), ((8961, 9017), 'inference.UNetInferenceAgent.UNetInferenceAgent', 'UNetInferenceAgent', ([], {'device': '"""cpu"""', 'parameter_file_path': '""""""'}), "(device='cpu', parameter_file_path='')\n", (8979, 9017), False, 'from inference.UNetInferenceAgent import UNetInferenceAgent\n'), ((9856, 9869), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (9866, 9869), False, 'import time\n'), ((1289, 1308), 'numpy.stack', 'np.stack', (['slices', '(2)'], {}), '(slices, 2)\n', (1297, 1308), True, 'import numpy as np\n'), ((8164, 8174), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8172, 8174), False, 'import sys\n'), ((8313, 8341), 'os.path.join', 'os.path.join', (['sys.argv[1]', 'd'], {}), '(sys.argv[1], d)\n', (8325, 8341), False, 'import os\n'), ((9123, 9139), 'numpy.array', 'np.array', (['volume'], {}), '(volume)\n', (9131, 9139), True, 'import numpy as np\n'), ((1047, 1071), 'numpy.flip', 'np.flip', (['dcm.pixel_array'], {}), '(dcm.pixel_array)\n', (1054, 1071), True, 'import numpy as np\n'), ((6287, 6308), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (6306, 6308), False, 'import datetime\n'), ((6337, 6360), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6358, 6360), False, 'import datetime\n'), ((7193, 7206), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (7200, 7206), False, 'import os\n'), ((7326, 7354), 'os.path.join', 'os.path.join', 
(['chosen_path', 'f'], {}), '(chosen_path, f)\n', (7338, 7354), False, 'import os\n'), ((7365, 7388), 'os.listdir', 'os.listdir', (['chosen_path'], {}), '(chosen_path)\n', (7375, 7388), False, 'import os\n'), ((8351, 8374), 'os.listdir', 'os.listdir', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (8361, 8374), False, 'import os\n'), ((8408, 8436), 'os.path.join', 'os.path.join', (['sys.argv[1]', 'd'], {}), '(sys.argv[1], d)\n', (8420, 8436), False, 'import os\n'), ((4054, 4088), 'PIL.Image.fromarray', 'Image.fromarray', (['nd_orig'], {'mode': '"""L"""'}), "(nd_orig, mode='L')\n", (4069, 4088), False, 'from PIL import Image\n'), ((4386, 4420), 'PIL.Image.fromarray', 'Image.fromarray', (['nd_pred'], {'mode': '"""L"""'}), "(nd_pred, mode='L')\n", (4401, 4420), False, 'from PIL import Image\n'), ((8519, 8531), 'os.stat', 'os.stat', (['dir'], {}), '(dir)\n', (8526, 8531), False, 'import os\n'), ((3945, 3970), 'numpy.max', 'np.max', (['orig_vol[0, :, :]'], {}), '(orig_vol[0, :, :])\n', (3951, 3970), True, 'import numpy as np\n'), ((4277, 4302), 'numpy.max', 'np.max', (['pred_vol[0, :, :]'], {}), '(pred_vol[0, :, :])\n', (4283, 4302), True, 'import numpy as np\n')]
|
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
import dragon
import warnings
from dragon.core.tensor import Tensor
from dragon.core.tensor_utils import FromShape
from dragon.operators.rnn.rnn_param import RNNParamSet
class RNNBase(object):
"""A simple class wrapping general RNN ops."""
def __init__(self,
mode, input_size, hidden_size, num_layers=1,
bidirectional=False, dropout=0, name=None):
eligible_rnn_modes = ('rnn_tanh', 'rnn_relu', 'lstm', 'gru')
if mode.lower() not in eligible_rnn_modes:
raise ValueError('Unknown rnn mode: {}.'
'\n<RecurrentOp> supports the following rnn modes: {{\n{}\n}}'.format(
mode, ',\n'.join([' * ' + emode for emode in eligible_rnn_modes])))
if dropout > 0 and num_layers == 1:
warnings.warn("Add dropout to single-layer RNN is meaningless.")
self.mode = mode.lower()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.dropout = dropout if dropout > 0 else None
self.bidirectional = bidirectional
self.num_directions = 2 if bidirectional else 1
self.name = name
self._init_params = False
self._plan_params()
def _plan_params(self):
if self.mode == 'lstm': gate_size = 4 * self.hidden_size
elif self.mode == 'gru': gate_size = 3 * self.hidden_size
else: gate_size = self.hidden_size
# 1. Plan weights
self._matrix_shape, self._bias_shape = [], []
for layer in range(self.num_layers):
for direction in range(self.num_directions):
layer_input_size = self.input_size if layer == 0 \
else self.hidden_size * self.num_directions
w_ih_shape = [gate_size, layer_input_size]
w_hh_shape = [gate_size, self.hidden_size]
b_ih_shape, b_hh_shape = [gate_size], [gate_size]
# W (0 ~ 3), R (4 ~ 7)
self._matrix_shape.extend([w_ih_shape, w_hh_shape])
# Bw (0 ~ 3), Br (4 ~ 7)
self._bias_shape.extend([b_ih_shape, b_hh_shape])
# 2. Compute total number of parameters
self._weights_count = 0
for shape in self._matrix_shape + self._bias_shape:
self._weights_count += numpy.prod(shape)
# 3. Register the packed weights
self.weights = FromShape(shape=[self._weights_count],
name=self.name + '/weights' if self.name else None)
# 4. Create the initialization grids
if self.mode == 'lstm': num_params_per_layer = 8
elif self.mode == 'gru': num_params_per_layer = 6
else: num_params_per_layer = 2
self._matrix_init_grids = [
[['orthogonal' for _ in range(num_params_per_layer)]
for _ in range(self.num_directions)]
for _ in range(self.num_layers)
]
self._bias_init_grids = [
[['zero' for _ in range(num_params_per_layer)]
for _ in range(self.num_directions)]
for _ in range(self.num_layers)
]
##############################################
# #
# INITIALIZER #
# #
##############################################
def _uniform_init(self, shape, dtype='float32'):
stdv = 1.0 / numpy.sqrt(self.hidden_size)
return numpy.random.uniform(-stdv, stdv, shape).astype(dtype)
def _orthogonal_init(self, shape, gain=1, dtype='float32'):
num_rows = 1
for dim in shape[:-1]: num_rows *= dim
num_cols = shape[-1]
flat_shape = (num_cols, num_rows) if num_rows < num_cols \
else (num_rows, num_cols)
W = numpy.random.randn(*flat_shape)
q, r = numpy.linalg.qr(W)
# Make Q uniform
d = numpy.diag(r)
q *= numpy.sign(d)
if num_rows < num_cols: q = q.T
return gain * q.reshape(shape).astype(dtype)
def _zero_init(self, shape, dtype='float32'):
return numpy.zeros(shape, dtype=dtype)
##############################################
# #
# PARAMETERS #
# #
##############################################
def set_param(self, layer=0, direction=0, param_id=0,
type='matrix', initializer=None):
if type == 'matrix':
self._matrix_init_grids[layer][direction][param_id] = initializer
elif type == 'bias':
self._bias_init_grids[layer][direction][param_id] = initializer
else:
raise ValueError('Unknown param type: ' + type)
def _set_param(self, layer_id, param_id, param_type, param):
if isinstance(param, numpy.ndarray):
param_temp = dragon.Tensor.Ref('/tmp/rnn_param')
param_temp.set_value(param)
param = param_temp
else: raise ValueError('Expected a numpy array.')
self.weights.expressions = dict() # Clear cached expressions
outputs = RNNParamSet([self.weights, param], layer_id, param_id, param_type,
rnn_mode=self.mode, input_size=self.input_size, hidden_size=self.hidden_size,
num_layers=self.num_layers, num_directions=self.num_directions)
for k, v in outputs.expressions.items(): dragon.workspace.RunOperator(v)
def _reset_params(self):
numpy.random.seed(dragon.config.GetRandomSeed())
if self.mode == 'lstm': num_gates = 4
elif self.mode == 'gru': num_gates = 3
else: num_gates = 1
weights_states = self.weights.expressions.copy()
for layer in range(len(self._matrix_init_grids)):
for direction in range(len(self._matrix_init_grids[0])):
for param_id in range(len(self._matrix_init_grids[0][0])):
matrix_init = self._matrix_init_grids[layer][direction][param_id]
bias_init = self._bias_init_grids[layer][direction][param_id]
if isinstance(matrix_init, str):
matrix_init = getattr(self, '_{}_init'.format(matrix_init))
if isinstance(bias_init, str):
bias_init = getattr(self, '_{}_init'.format(bias_init))
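# Flatten (layer, direction) into a pseudo layer id; each pseudo layer packs two
# shapes per kind ((W_ih, W_hh) and (b_ih, b_hh)), and param_id // num_gates selects
# whether this parameter belongs to the input-hidden or the hidden-hidden block.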
pseudo_layer_id = layer * self.num_directions + direction
packed_id = pseudo_layer_id * 2 + int(param_id / num_gates)
matrix_shape = self._matrix_shape[packed_id][:]
bias_shape = self._bias_shape[packed_id][:]
matrix_shape[0] = bias_shape[0] = int(matrix_shape[0] / num_gates)
self._set_param(layer_id=pseudo_layer_id, param_id=param_id,
param_type='matrix', param=matrix_init(matrix_shape))
self._set_param(layer_id=pseudo_layer_id, param_id=param_id,
param_type='bias', param=bias_init(bias_shape))
self.weights.expressions = weights_states
self._init_params = True
def create(self, x, hx=None, cx=None,
required_hidden=True, required_cell=False):
"""Return outputs of this rnn.
Parameters
----------
x : Tensor
The input tensor.
hx : Tensor, optional
The h(0) state.
cx : Tensor, optional
The c(0) state.
required_hidden : bool, optional
Return ``y`` and ``hidden`` if ``True``.
required_cell : bool, optional
Return ``y``, ``hidden``, ``cell`` if ``True``.
"""
if hx and not isinstance(hx, Tensor):
raise TypeError('Expected hx as a Tensor, got {}.'.format(type(hx)))
if cx and not isinstance(cx, Tensor):
raise TypeError('Expected cx as a Tensor, got {}.'.format(type(cx)))
if not self._init_params: self._reset_params()
arguments = {
'op_type': 'Recurrent',
'inputs': [x, self.weights] +
([hx] if hx else []) +
([cx] if cx else []),
'hidden_size': self.hidden_size,
'num_layers': self.num_layers,
'bidirectional': self.bidirectional,
'rnn_mode': self.mode,
'rnn_input_mode': 'linear',
'dropout_ratio': self.dropout,
}
if required_cell: num_outputs = 3
elif required_hidden: num_outputs = 2
else: num_outputs = 1
return Tensor.CreateOperator(num_outputs=num_outputs, **arguments)
def __call__(self, *args, **kwargs):
return self.create(*args, **kwargs)
|
[
"numpy.random.uniform",
"numpy.random.randn",
"dragon.Tensor.Ref",
"numpy.linalg.qr",
"numpy.zeros",
"numpy.prod",
"dragon.config.GetRandomSeed",
"dragon.operators.rnn.rnn_param.RNNParamSet",
"dragon.workspace.RunOperator",
"numpy.sign",
"numpy.diag",
"numpy.sqrt",
"warnings.warn",
"dragon.core.tensor_utils.FromShape",
"dragon.core.tensor.Tensor.CreateOperator"
] |
[((2918, 3013), 'dragon.core.tensor_utils.FromShape', 'FromShape', ([], {'shape': '[self._weights_count]', 'name': "(self.name + '/weights' if self.name else None)"}), "(shape=[self._weights_count], name=self.name + '/weights' if self.\n name else None)\n", (2927, 3013), False, 'from dragon.core.tensor_utils import FromShape\n'), ((4355, 4386), 'numpy.random.randn', 'numpy.random.randn', (['*flat_shape'], {}), '(*flat_shape)\n', (4373, 4386), False, 'import numpy\n'), ((4402, 4420), 'numpy.linalg.qr', 'numpy.linalg.qr', (['W'], {}), '(W)\n', (4417, 4420), False, 'import numpy\n'), ((4458, 4471), 'numpy.diag', 'numpy.diag', (['r'], {}), '(r)\n', (4468, 4471), False, 'import numpy\n'), ((4485, 4498), 'numpy.sign', 'numpy.sign', (['d'], {}), '(d)\n', (4495, 4498), False, 'import numpy\n'), ((4658, 4689), 'numpy.zeros', 'numpy.zeros', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (4669, 4689), False, 'import numpy\n'), ((5731, 5948), 'dragon.operators.rnn.rnn_param.RNNParamSet', 'RNNParamSet', (['[self.weights, param]', 'layer_id', 'param_id', 'param_type'], {'rnn_mode': 'self.mode', 'input_size': 'self.input_size', 'hidden_size': 'self.hidden_size', 'num_layers': 'self.num_layers', 'num_directions': 'self.num_directions'}), '([self.weights, param], layer_id, param_id, param_type, rnn_mode\n =self.mode, input_size=self.input_size, hidden_size=self.hidden_size,\n num_layers=self.num_layers, num_directions=self.num_directions)\n', (5742, 5948), False, 'from dragon.operators.rnn.rnn_param import RNNParamSet\n'), ((9182, 9241), 'dragon.core.tensor.Tensor.CreateOperator', 'Tensor.CreateOperator', ([], {'num_outputs': 'num_outputs'}), '(num_outputs=num_outputs, **arguments)\n', (9203, 9241), False, 'from dragon.core.tensor import Tensor\n'), ((1292, 1356), 'warnings.warn', 'warnings.warn', (['"""Add dropout to single-layer RNN is meaningless."""'], {}), "('Add dropout to single-layer RNN is meaningless.')\n", (1305, 1356), False, 'import warnings\n'), ((2835, 2852), 'numpy.prod', 'numpy.prod', (['shape'], {}), '(shape)\n', (2845, 2852), False, 'import numpy\n'), ((3976, 4004), 'numpy.sqrt', 'numpy.sqrt', (['self.hidden_size'], {}), '(self.hidden_size)\n', (3986, 4004), False, 'import numpy\n'), ((5479, 5514), 'dragon.Tensor.Ref', 'dragon.Tensor.Ref', (['"""/tmp/rnn_param"""'], {}), "('/tmp/rnn_param')\n", (5496, 5514), False, 'import dragon\n'), ((6013, 6044), 'dragon.workspace.RunOperator', 'dragon.workspace.RunOperator', (['v'], {}), '(v)\n', (6041, 6044), False, 'import dragon\n'), ((6101, 6130), 'dragon.config.GetRandomSeed', 'dragon.config.GetRandomSeed', ([], {}), '()\n', (6128, 6130), False, 'import dragon\n'), ((4020, 4060), 'numpy.random.uniform', 'numpy.random.uniform', (['(-stdv)', 'stdv', 'shape'], {}), '(-stdv, stdv, shape)\n', (4040, 4060), False, 'import numpy\n')]
|
"""
Routines for plotting time-dependent vertical profiles.
"""
import numpy
import matplotlib.pyplot as plt
import cf_units
import matplotlib
import os
import iris
from . import utility
import matplotlib.dates as mdates
__all__ = [
'plot_timeprofile',
'make_timeprofile_plot',
'save_timeprofile_figure',
]
log_scale_vars = [
'specific_turbulent_kinetic_energy_of_sea_water',
'specific_turbulent_kinetic_energy_dissipation_in_sea_water',
'ocean_vertical_heat_diffusivity',
'ocean_vertical_momentum_diffusivity',
]
symmetric_vars = [
'sea_water_x_velocity',
'sea_water_y_velocity',
'upward_sea_water_velocity',
]
var_short_name = {
'specific_turbulent_kinetic_energy_of_sea_water': 'tke',
'specific_turbulent_kinetic_energy_dissipation_in_sea_water': 'tke dissipation rate',
'ocean_vertical_heat_diffusivity': 'eddy diffusivity',
'ocean_vertical_momentum_diffusivity': 'eddy viscosity',
'sea_water_absolute_salinity': 'absolute salinity',
}
def get_grid(cube, coordname):
coord = cube.coord(coordname)
if not coord.has_bounds():
coord.guess_bounds()
x = numpy.hstack((coord.bounds[:, 0], coord.bounds[[-1], 1]))
return x
def get_plot_time(cube):
epoch_time_units = cf_units.Unit(
'seconds since 1970-01-01 00:00:00-00', calendar='gregorian')
time_coord = cube.coord('time')
time_coord.convert_units(epoch_time_units)
t_epoch = get_grid(cube, 'time')
t = matplotlib.dates.epoch2num(t_epoch)
return t
def plot_timeprofile(cube, ax, title=None,
start_time=None, end_time=None,
log_scale=False, symmetric_scale=False,
label_alias=None,
norm=None,
cmap=None, vmin=None, vmax=None, colorbar=True):
"""
Plot a single cube in the given axes.
"""
fig = ax.figure
if start_time is not None or end_time is not None:
_cube = utility.constrain_cube_time(cube,
start_time=start_time,
end_time=end_time)
else:
_cube = cube
_cube = utility.crop_invalid_depths(_cube)
z = -get_grid(_cube, 'depth')
t = get_plot_time(_cube)
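# t and z hold cell-edge coordinates (length N+1 for N cells), as pcolormesh expects.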
_log_scale = log_scale or _cube.standard_name in log_scale_vars
_symmetric_scale = symmetric_scale or _cube.standard_name in symmetric_vars
if vmin is None:
vmin = _cube.data.min()
if vmax is None:
vmax = _cube.data.max()
if _log_scale:
vmin = max(vmin, 1e-12)
vmax = max(vmax, 1e-12)
if _symmetric_scale:
abs_lim = max(abs(vmin), abs(vmax))
vmin = -abs_lim
vmax = abs_lim
if _log_scale and norm is None:
norm = matplotlib.colors.LogNorm(vmin=vmin, vmax=vmax)
if cube.attributes['dataset_id'][:5] == 'diff:':
# this must be a diff field
cmap = plt.get_cmap('RdBu_r')
val_max = numpy.nanmax(numpy.abs(cube.data))
vmin = -val_max
vmax = val_max
norm = None
p = ax.pcolormesh(t, z, _cube.data.T, vmin=vmin, vmax=vmax, cmap=cmap,
norm=norm)
xlim = ax.get_xlim()
range_days = xlim[1] - xlim[0]
if range_days < 15:
major_locator = mdates.DayLocator()
minor_locator = mdates.HourLocator(interval=6)
elif range_days < 30:
major_locator = mdates.DayLocator([1, 5, 10, 15, 20, 25])
minor_locator = mdates.DayLocator()
elif range_days < 80:
major_locator = mdates.DayLocator([1, 10, 20])
minor_locator = mdates.DayLocator()
elif range_days < 200:
major_locator = mdates.DayLocator([1, 15])
minor_locator = mdates.DayLocator()
elif range_days < 370:
major_locator = mdates.MonthLocator()
minor_locator = mdates.DayLocator([1, 5, 10, 15, 20, 25])
else:
major_locator = mdates.AutoDateLocator(minticks=7, maxticks=16,
interval_multiples=False)
minor_locator = mdates.MonthLocator(bymonthday=[1, 15], interval=1)
ax.xaxis.set_major_locator(major_locator)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
ax.xaxis.set_minor_locator(minor_locator)
ax.tick_params(axis='x', which='major', length=7)
ax.grid(which='major', linewidth=0.7, color='0.7')
ax.grid(which='minor', linestyle='dashed', linewidth=0.3, color='0.7')
ax.autoscale(enable=True, axis='x', tight=True)
# loc = matplotlib.dates.AutoDateLocator()
# fmt = matplotlib.dates.AutoDateFormatter(loc)
# ax.xaxis.set_major_locator(loc)
# ax.xaxis.set_major_formatter(fmt)
# ax.autoscale(enable=True, axis='x', tight=True)
fig.autofmt_xdate()
depth_coord = _cube.coord('depth')
ylabel = '{:} [{:}]'.format(depth_coord.name().capitalize(),
depth_coord.units)
if title is None:
loc = _cube.attributes['location_name']
data_id = _cube.attributes['dataset_id']
if label_alias is not None:
data_id = label_alias.get(data_id, data_id)
title = ' '.join([loc, data_id])
ax.set_title(title)
ax.set_ylabel(ylabel)
if colorbar:
# create colorbar
pad = 0.015
width = 0.02
pos = ax.get_position().bounds
x = pos[0] + pos[2] + pad
cax = fig.add_axes([x, pos[1], width, pos[3]])
cb = plt.colorbar(p, cax=cax)
var_name = _cube.name()
var_name = var_short_name.get(var_name, var_name)
var_name = var_name.replace('_', ' ').capitalize()
label = '{:} [{:}]'.format(var_name, _cube.units)
cb.set_label(label)
def make_timeprofile_plot(cube_list, **kwargs):
_cube_list = list(cube_list)
if 'vmin' not in kwargs or kwargs['vmin'] is None:
kwargs['vmin'] = numpy.min([numpy.nanmin(c.data) for c in _cube_list])
if 'vmax' not in kwargs or kwargs['vmax'] is None:
kwargs['vmax'] = numpy.max([numpy.nanmax(c.data) for c in _cube_list])
plot_diff = kwargs.pop('plot_diff', False)
if plot_diff:
# compute difference between first 2 cubes
[c.data for c in _cube_list] # force real data (looks like iris bug)
# first cube is the observation
a = _cube_list[0].copy()
b = _cube_list[1].copy()
if not a.is_compatible(b):
loc = a.attributes['location_name']
a_id = a.attributes['dataset_id']
b_id = b.attributes['dataset_id']
print(f'diff: {loc} interpolating {b_id} data on {a_id} grid')
# interpolate on common grid
a.remove_coord('latitude')
a.remove_coord('longitude')
# second cube is the model
b.remove_coord('latitude')
b.remove_coord('longitude')
# interpolate depth
obs_depth = a.coord('depth').points
b = b.interpolate([('depth', obs_depth)], iris.analysis.Nearest())
# interpolate time
b.coord('time').convert_units(a.coord('time').units)
obs_time = a.coord('time').points
b = b.interpolate([('time', obs_time)], iris.analysis.Linear())
# make sure time metadata is exactly the same
b.remove_coord('time')
b.add_dim_coord(a.coord('time'), 0)
diff = b - a
assert numpy.abs(diff.data).max() < 1e10, 'Bad values in diff field'
location_name = a.attributes['location_name']
diff.attributes['location_name'] = location_name
id_list = [c.attributes['dataset_id'] for c in [b, a]]
diff.attributes['dataset_id'] = 'diff:{:}-{:}'.format(*id_list)
diff.standard_name = a.standard_name
diff.long_name = 'diff:' + a.standard_name
diff.units = a.units
_cube_list.append(diff)
ncubes = len(_cube_list)
plot_height = 3.5
fig = plt.figure(figsize=(12, ncubes * plot_height))
sharey = kwargs.pop('share_y_axis', True)
ax_list = fig.subplots(ncubes, 1, sharex=True, sharey=sharey)
if ncubes == 1:
ax_list = [ax_list]
for cube, ax in zip(_cube_list, ax_list):
plot_timeprofile(cube, ax, **kwargs)
return fig, ax_list
def save_timeprofile_figure(cube_list, output_dir=None, plot_root_dir=None, **kwargs):
"""
Makes a default time profile plot and saves it to disk.
"""
time_extent = kwargs.pop('time_extent', None)
start_time = kwargs.pop('start_time', None)
end_time = kwargs.pop('end_time', None)
imgfile = kwargs.pop('filename', None)
if start_time is None and end_time is None and time_extent is not None:
start_time, end_time = utility.get_common_time_overlap(cube_list,
time_extent)
fig, ax_list = make_timeprofile_plot(
cube_list, start_time=start_time, end_time=end_time, **kwargs)
if imgfile is None:
imgfile = utility.generate_img_filename(cube_list,
output_dir=output_dir,
root_dir=plot_root_dir,
start_time=start_time,
end_time=end_time)
dir, filename = os.path.split(imgfile)
utility.create_directory(dir)
print('Saving image {:}'.format(imgfile))
fig.savefig(imgfile, dpi=200, bbox_inches='tight')
plt.close(fig)
|
[
"matplotlib.dates.MonthLocator",
"numpy.abs",
"matplotlib.dates.epoch2num",
"iris.analysis.Nearest",
"matplotlib.pyplot.figure",
"matplotlib.colors.LogNorm",
"matplotlib.dates.HourLocator",
"cf_units.Unit",
"matplotlib.pyplot.close",
"matplotlib.pyplot.colorbar",
"matplotlib.dates.DateFormatter",
"iris.analysis.Linear",
"matplotlib.pyplot.get_cmap",
"numpy.hstack",
"numpy.nanmax",
"matplotlib.dates.DayLocator",
"numpy.nanmin",
"matplotlib.dates.AutoDateLocator",
"os.path.split"
] |
[((1139, 1196), 'numpy.hstack', 'numpy.hstack', (['(coord.bounds[:, 0], coord.bounds[[-1], 1])'], {}), '((coord.bounds[:, 0], coord.bounds[[-1], 1]))\n', (1151, 1196), False, 'import numpy\n'), ((1260, 1335), 'cf_units.Unit', 'cf_units.Unit', (['"""seconds since 1970-01-01 00:00:00-00"""'], {'calendar': '"""gregorian"""'}), "('seconds since 1970-01-01 00:00:00-00', calendar='gregorian')\n", (1273, 1335), False, 'import cf_units\n'), ((1473, 1508), 'matplotlib.dates.epoch2num', 'matplotlib.dates.epoch2num', (['t_epoch'], {}), '(t_epoch)\n', (1499, 1508), False, 'import matplotlib\n'), ((7953, 7999), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, ncubes * plot_height)'}), '(figsize=(12, ncubes * plot_height))\n', (7963, 7999), True, 'import matplotlib.pyplot as plt\n'), ((9351, 9373), 'os.path.split', 'os.path.split', (['imgfile'], {}), '(imgfile)\n', (9364, 9373), False, 'import os\n'), ((9514, 9528), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (9523, 9528), True, 'import matplotlib.pyplot as plt\n'), ((2786, 2833), 'matplotlib.colors.LogNorm', 'matplotlib.colors.LogNorm', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (2811, 2833), False, 'import matplotlib\n'), ((2939, 2961), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""RdBu_r"""'], {}), "('RdBu_r')\n", (2951, 2961), True, 'import matplotlib.pyplot as plt\n'), ((3300, 3319), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {}), '()\n', (3317, 3319), True, 'import matplotlib.dates as mdates\n'), ((3344, 3374), 'matplotlib.dates.HourLocator', 'mdates.HourLocator', ([], {'interval': '(6)'}), '(interval=6)\n', (3362, 3374), True, 'import matplotlib.dates as mdates\n'), ((4208, 4240), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%Y-%m-%d"""'], {}), "('%Y-%m-%d')\n", (4228, 4240), True, 'import matplotlib.dates as mdates\n'), ((5470, 5494), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['p'], {'cax': 'cax'}), '(p, cax=cax)\n', (5482, 5494), True, 'import matplotlib.pyplot as plt\n'), ((2993, 3013), 'numpy.abs', 'numpy.abs', (['cube.data'], {}), '(cube.data)\n', (3002, 3013), False, 'import numpy\n'), ((3425, 3466), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', (['[1, 5, 10, 15, 20, 25]'], {}), '([1, 5, 10, 15, 20, 25])\n', (3442, 3466), True, 'import matplotlib.dates as mdates\n'), ((3491, 3510), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {}), '()\n', (3508, 3510), True, 'import matplotlib.dates as mdates\n'), ((3561, 3591), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', (['[1, 10, 20]'], {}), '([1, 10, 20])\n', (3578, 3591), True, 'import matplotlib.dates as mdates\n'), ((3616, 3635), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {}), '()\n', (3633, 3635), True, 'import matplotlib.dates as mdates\n'), ((5905, 5925), 'numpy.nanmin', 'numpy.nanmin', (['c.data'], {}), '(c.data)\n', (5917, 5925), False, 'import numpy\n'), ((6039, 6059), 'numpy.nanmax', 'numpy.nanmax', (['c.data'], {}), '(c.data)\n', (6051, 6059), False, 'import numpy\n'), ((7006, 7029), 'iris.analysis.Nearest', 'iris.analysis.Nearest', ([], {}), '()\n', (7027, 7029), False, 'import iris\n'), ((7225, 7247), 'iris.analysis.Linear', 'iris.analysis.Linear', ([], {}), '()\n', (7245, 7247), False, 'import iris\n'), ((3687, 3713), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', (['[1, 15]'], {}), '([1, 15])\n', (3704, 3713), True, 'import matplotlib.dates as mdates\n'), ((3738, 3757), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {}), '()\n', 
(3755, 3757), True, 'import matplotlib.dates as mdates\n'), ((7426, 7446), 'numpy.abs', 'numpy.abs', (['diff.data'], {}), '(diff.data)\n', (7435, 7446), False, 'import numpy\n'), ((3809, 3830), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {}), '()\n', (3828, 3830), True, 'import matplotlib.dates as mdates\n'), ((3855, 3896), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', (['[1, 5, 10, 15, 20, 25]'], {}), '([1, 5, 10, 15, 20, 25])\n', (3872, 3896), True, 'import matplotlib.dates as mdates\n'), ((3931, 4004), 'matplotlib.dates.AutoDateLocator', 'mdates.AutoDateLocator', ([], {'minticks': '(7)', 'maxticks': '(16)', 'interval_multiples': '(False)'}), '(minticks=7, maxticks=16, interval_multiples=False)\n', (3953, 4004), True, 'import matplotlib.dates as mdates\n'), ((4076, 4127), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {'bymonthday': '[1, 15]', 'interval': '(1)'}), '(bymonthday=[1, 15], interval=1)\n', (4095, 4127), True, 'import matplotlib.dates as mdates\n')]
|
import sys
import numpy
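# Memoized DP over (remaining money, garment index): solve() returns the minimum money
# left over after buying one model of each remaining garment (sys.maxsize if impossible),
# so M - solve(M, 0, 0, ...) is the maximum amount that can be spent.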
def solve(M, call, value, end, garments, mat):
aux = []
M -= value
if M < 0:
return sys.maxsize
if call == end:
return M
if(mat[M][call] != -1):
return mat[M][call]
aux = [int(solve(M, call+1, a, end, garments, mat)) for a in garments[call]]
mat[M][call] = min(aux)
return mat[M][call]
def main(argv):
n = int(input())
for _ in range(n):
garments = []
(M, ngarments) = [int(a) for a in input().split(' ')]
mat = numpy.zeros(shape=(201, 21))
numpy.place(mat, mat == 0, -1)
for i in range(ngarments):
garments.append([int(a) for a in input().split(' ')][1:])
ans = solve(int(M), 0, 0, ngarments, garments, mat)
if(M - ans < 0):
print('no solution')
else:
print(int(M - ans))
if __name__ == "__main__":
main(sys.argv)
|
[
"numpy.zeros",
"numpy.place"
] |
[((535, 563), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(201, 21)'}), '(shape=(201, 21))\n', (546, 563), False, 'import numpy\n'), ((572, 602), 'numpy.place', 'numpy.place', (['mat', '(mat == 0)', '(-1)'], {}), '(mat, mat == 0, -1)\n', (583, 602), False, 'import numpy\n')]
|
import random
import unittest
import numpy as np
from scipy.stats import norm
from ..StoneModel import StoneModel, ReqFuncSolver, logpdf_sum, StoneMod
def get_random_vars():
kai = random.random()
kx = random.random()
vi = random.randint(1, 30)
R = random.random()
Li = random.random()
return (kai, kx, vi, R, Li)
class TestStoneMethods(unittest.TestCase):
def setUp(self):
self.M = StoneModel()
self.Mold = StoneModel(False)
def test_reqFuncSolver(self):
kai, kx, vi, R, Li = get_random_vars()
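# Residual of the multivalent binding equilibrium: Req (= 10**x) should satisfy
# R = Req * (1 + v * L * Ka * (1 + Kx * Req)**(v - 1)).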
diffFunAnon = lambda x: R-(10**x)*(1+vi*Li*kai*(1+kx*(10**x))**(vi-1))
output = ReqFuncSolver(R, kai, Li, vi, kx)
self.assertTrue(abs(diffFunAnon(np.log10(output))) < 1E-8)
self.assertTrue(np.isnan(ReqFuncSolver(R, kai, Li, -10, kx)))
def test_StoneMod(self):
# This test should check that the model output satisfies Rbound = Rtot - Req
kai, kx, vi, R, Li = get_random_vars()
StoneRet = StoneMod(np.log10(R),kai,vi,kx,Li,fullOutput = True)
Req = ReqFuncSolver(R,kai,Li,vi,kx)
self.assertAlmostEqual(R, Req + StoneRet[1], delta = R/1000)
# Test that monovalent ligand follows the binding curve
def test_StoneModTwo(self):
# logR,Ka,v,logKx,L0
# Sweep across ligand concentration
for i in range(100):
L = i / 100.0
StoneRet = StoneMod(0.0, 1.0, 1, 3.0, L)
self.assertAlmostEqual(StoneRet[0], L / (1 + L), delta = 0.0001)
def test_dataImport_kaBruhns(self):
self.assertTrue(self.M.kaBruhns.shape == (6,4))
def test_dataImport_tnpbsa(self):
self.assertTrue(self.M.tnpbsa.shape == (2,))
def test_dataImport_Rquant(self):
self.assertTrue(len(self.M.Rquant) == 6)
def test_Stone_names(self):
self.assertEqual(len(self.M.pNames), self.M.start.shape[0])
self.assertEqual(len(self.Mold.pNames), self.Mold.start.shape[0])
def test_dataImport_mfiAdjMean(self):
self.assertTrue(self.M.mfiAdjMean.shape == (24, 8))
self.assertTrue(self.Mold.mfiAdjMean.shape == (24, 8))
def test_NormalErrorCoef(self):
retVal = self.M.NormalErrorCoef(self.M.start)
self.assertFalse(np.isnan(retVal))
self.assertFalse(np.isinf(retVal))
# Test that our hand-coded logpdf matches the results of SciPy
def test_logpdf(self):
vecIn = np.array([0.01, 0.2, 0.3, 0.4])
self.assertAlmostEqual(norm.logpdf(vecIn, 0.2, 1).sum(), logpdf_sum(vecIn, 0.2, 1), 0.000001)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"random.randint",
"scipy.stats.norm.logpdf",
"numpy.isinf",
"numpy.isnan",
"random.random",
"numpy.array",
"numpy.log10"
] |
[((185, 200), 'random.random', 'random.random', ([], {}), '()\n', (198, 200), False, 'import random\n'), ((210, 225), 'random.random', 'random.random', ([], {}), '()\n', (223, 225), False, 'import random\n'), ((235, 256), 'random.randint', 'random.randint', (['(1)', '(30)'], {}), '(1, 30)\n', (249, 256), False, 'import random\n'), ((265, 280), 'random.random', 'random.random', ([], {}), '()\n', (278, 280), False, 'import random\n'), ((290, 305), 'random.random', 'random.random', ([], {}), '()\n', (303, 305), False, 'import random\n'), ((2603, 2618), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2616, 2618), False, 'import unittest\n'), ((2435, 2466), 'numpy.array', 'np.array', (['[0.01, 0.2, 0.3, 0.4]'], {}), '([0.01, 0.2, 0.3, 0.4])\n', (2443, 2466), True, 'import numpy as np\n'), ((1016, 1027), 'numpy.log10', 'np.log10', (['R'], {}), '(R)\n', (1024, 1027), True, 'import numpy as np\n'), ((2263, 2279), 'numpy.isnan', 'np.isnan', (['retVal'], {}), '(retVal)\n', (2271, 2279), True, 'import numpy as np\n'), ((2306, 2322), 'numpy.isinf', 'np.isinf', (['retVal'], {}), '(retVal)\n', (2314, 2322), True, 'import numpy as np\n'), ((2499, 2525), 'scipy.stats.norm.logpdf', 'norm.logpdf', (['vecIn', '(0.2)', '(1)'], {}), '(vecIn, 0.2, 1)\n', (2510, 2525), False, 'from scipy.stats import norm\n'), ((727, 743), 'numpy.log10', 'np.log10', (['output'], {}), '(output)\n', (735, 743), True, 'import numpy as np\n')]
|
import os
import glob
import torch
import random
import logging
import argparse
import zipfile
import numpy as np
from tqdm import tqdm, trange
from torch.utils.data import DataLoader
from transformers import (BertConfig, BertTokenizer)
from modeling import MonoBERT
from dataset import RelevantDataset, get_collate_function
logger = logging.getLogger(__name__)
logging.basicConfig(format = '%(asctime)s-%(levelname)s-%(name)s- %(message)s',
datefmt = '%d %H:%M:%S',
level = logging.INFO)
def evaluate(args, model, tokenizer):
eval_output_dir = args.output_dir
if not os.path.exists(eval_output_dir):
os.makedirs(eval_output_dir)
eval_dataset = RelevantDataset(tokenizer, "dev.small", args.msmarco_dir,
args.collection_memmap_dir, args.tokenize_dir,
args.max_query_length, args.max_seq_length)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_dataloader = DataLoader(eval_dataset, batch_size=args.eval_batch_size,
collate_fn=get_collate_function())
# multi-gpu eval
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
all_scores, all_ids = [], []
for batch, qids, pids in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = {k:v.to(args.device) for k, v in batch.items()}
with torch.no_grad():
attentions = model(**batch)[1]
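# With output_attentions=True the forward pass returns the attention maps as its second
# output: one tensor per layer, typically shaped (batch, num_heads, seq_len, seq_len).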
for layer_id, layer_attentions in enumerate(attentions):
attention_dir = os.path.join(eval_output_dir, "layer_{}".format(layer_id+1))
if not os.path.exists(attention_dir):
os.makedirs(attention_dir)
for idx, attention in enumerate(layer_attentions):
length = torch.sum(batch['attention_mask'][idx]).detach().cpu().item()
query_id, para_id = qids[idx], pids[idx]
attention = attention[:, :length, :length].detach().cpu().numpy()
file_path = os.path.join(attention_dir, "{}-{}.npy".format(query_id, para_id))
np.save(file_path, np.array(attention, dtype=np.float16))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--msmarco_dir", type=str, default="./data/msmarco-passage")
parser.add_argument("--collection_memmap_dir", type=str, default="./data/collection_memmap")
parser.add_argument("--tokenize_dir", type=str, default="./data/tokenize")
parser.add_argument("--output_dir", type=str, default="./data/attention")
parser.add_argument("--max_query_length", type=int, default=64)
parser.add_argument("--max_seq_length", type=int, default=256)
parser.add_argument("--model_path", type=str, default="./data/BERT_Base_trained_on_MSMARCO")
## Other parameters
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
args = parser.parse_args()
# Setup CUDA, GPU
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
args.device = device
# Setup logging
logger.warning("Device: %s, n_gpu: %s", device, args.n_gpu)
config = BertConfig.from_pretrained(f"{args.model_path}/bert_config.json")
config.output_attentions = True
model = MonoBERT.from_pretrained(f"{args.model_path}/model.ckpt-100000",
from_tf=True, config=config)
tokenizer = BertTokenizer.from_pretrained(args.model_path)
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Evaluation
evaluate(args, model, tokenizer)
|
[
"dataset.get_collate_function",
"tqdm.tqdm",
"dataset.RelevantDataset",
"argparse.ArgumentParser",
"logging.basicConfig",
"os.makedirs",
"os.path.exists",
"torch.cuda.device_count",
"transformers.BertTokenizer.from_pretrained",
"torch.cuda.is_available",
"numpy.array",
"transformers.BertConfig.from_pretrained",
"modeling.MonoBERT.from_pretrained",
"torch.nn.DataParallel",
"torch.no_grad",
"torch.sum",
"logging.getLogger"
] |
[((336, 363), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (353, 363), False, 'import logging\n'), ((364, 494), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s-%(levelname)s-%(name)s- %(message)s"""', 'datefmt': '"""%d %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s-%(levelname)s-%(name)s- %(message)s', datefmt=\n '%d %H:%M:%S', level=logging.INFO)\n", (383, 494), False, 'import logging\n'), ((718, 876), 'dataset.RelevantDataset', 'RelevantDataset', (['tokenizer', '"""dev.small"""', 'args.msmarco_dir', 'args.collection_memmap_dir', 'args.tokenize_dir', 'args.max_query_length', 'args.max_seq_length'], {}), "(tokenizer, 'dev.small', args.msmarco_dir, args.\n collection_memmap_dir, args.tokenize_dir, args.max_query_length, args.\n max_seq_length)\n", (733, 876), False, 'from dataset import RelevantDataset, get_collate_function\n'), ((1474, 1514), 'tqdm.tqdm', 'tqdm', (['eval_dataloader'], {'desc': '"""Evaluating"""'}), "(eval_dataloader, desc='Evaluating')\n", (1478, 1514), False, 'from tqdm import tqdm, trange\n'), ((2467, 2492), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2490, 2492), False, 'import argparse\n'), ((3556, 3581), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3579, 3581), False, 'import torch\n'), ((3707, 3772), 'transformers.BertConfig.from_pretrained', 'BertConfig.from_pretrained', (['f"""{args.model_path}/bert_config.json"""'], {}), "(f'{args.model_path}/bert_config.json')\n", (3733, 3772), False, 'from transformers import BertConfig, BertTokenizer\n'), ((3821, 3919), 'modeling.MonoBERT.from_pretrained', 'MonoBERT.from_pretrained', (['f"""{args.model_path}/model.ckpt-100000"""'], {'from_tf': '(True)', 'config': 'config'}), "(f'{args.model_path}/model.ckpt-100000', from_tf=\n True, config=config)\n", (3845, 3919), False, 'from modeling import MonoBERT\n'), ((3940, 3986), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['args.model_path'], {}), '(args.model_path)\n', (3969, 3986), False, 'from transformers import BertConfig, BertTokenizer\n'), ((628, 659), 'os.path.exists', 'os.path.exists', (['eval_output_dir'], {}), '(eval_output_dir)\n', (642, 659), False, 'import os\n'), ((669, 697), 'os.makedirs', 'os.makedirs', (['eval_output_dir'], {}), '(eval_output_dir)\n', (680, 697), False, 'import os\n'), ((1203, 1231), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (1224, 1231), False, 'import torch\n'), ((1118, 1140), 'dataset.get_collate_function', 'get_collate_function', ([], {}), '()\n', (1138, 1140), False, 'from dataset import RelevantDataset, get_collate_function\n'), ((1615, 1630), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1628, 1630), False, 'import torch\n'), ((3480, 3505), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3503, 3505), False, 'import torch\n'), ((1861, 1890), 'os.path.exists', 'os.path.exists', (['attention_dir'], {}), '(attention_dir)\n', (1875, 1890), False, 'import os\n'), ((1912, 1938), 'os.makedirs', 'os.makedirs', (['attention_dir'], {}), '(attention_dir)\n', (1923, 1938), False, 'import os\n'), ((2382, 2419), 'numpy.array', 'np.array', (['attention'], {'dtype': 'np.float16'}), '(attention, dtype=np.float16)\n', (2390, 2419), True, 'import numpy as np\n'), ((2035, 2074), 'torch.sum', 'torch.sum', (["batch['attention_mask'][idx]"], {}), "(batch['attention_mask'][idx])\n", (2044, 2074), False, 'import torch\n')]
|
#!/usr/bin/env python3.5
# -*- coding: utf-8 -*-
import re
import numpy as np
__author__ = '<NAME>'
kb = 8.617e-5 # unit eV / K
class ReadInput(object):
def __init__(self, filename='formation energy input.txt'):
with open(filename, 'r') as fp:
lines = fp.readlines()
for line in lines:
line = line.strip()
if line == '' or line.startswith('#'):
continue
else:
key, value = line.split('=')
if re.search('host', key.lower()):
self.host = float(value)
elif re.search('vbm', key.lower()):
self.vbm = float(value)
elif re.search('cbm', key.lower()):
self.cbm = float(value)
elif re.search('temperature', key.lower()):
self.temperature = float(value)
elif re.search('potential', key.lower()):
tmp = value.strip().split()
self.potential = np.array([float(x) for x in tmp])
else:
print('\nERROR: {0} tag is not found\n'.format(key))
exit(1)
class Data(object):
def __init__(self, filename='defects data.txt'):
raw_data = np.loadtxt(filename, comments='N')
with open(filename, 'r') as fp:
self.header = fp.readline().strip().split()
num = len(self.header)
if num < 7:
print('\nERROR: The information for defect calculation is not complete.\n')
exit(1)
self.no = raw_data[:, 0].T
self.charge = raw_data[:, 1].T
self.etot = raw_data[:, 2].T
self.weight = raw_data[:, 3].T
self.iic = raw_data[:, 4].T
self.apv = raw_data[:, 5].T
self.delta_n = raw_data[:, 6:num].T
if __name__ == "__main__":
order = ReadInput()
data = Data()
points = 100
defects = [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12, 90, 91], [13, 14, 15, 92, 93], [16, 17, 18, 19],
[20, 21, 22, 23], [24, 25, 26, 27], [28, 29, 30, 31], [40, 41, 42, 43], [44, 45, 46, 47],
[52, 53, 54, 55], [94, 56, 57, 58, 59], [95, 60, 61, 62, 63], [86, 87, 88]]
fermi_level = np.linspace(order.vbm, order.cbm, num=points)
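# Formation energy before the Fermi-level term (added below):
# E_f = (etot + iic) - E_host - sum_i(mu_i * delta_n_i) + q * apv
# where 'iic' and 'apv' are presumably the image-interaction correction and the
# potential-alignment term from the input file; the q * E_F dependence is added next.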
formation_energy = (data.etot+data.iic)-order.host-np.dot(order.potential, data.delta_n)+data.charge*data.apv
formation_energy = formation_energy.reshape(formation_energy.shape[0], 1) + 0 * fermi_level
charge = data.charge
charge = charge.reshape(charge.shape[0], 1)
x = charge * fermi_level
formation_energy += charge * fermi_level
min_formation_energy = []
for defect in defects:
tmp = []
for index in defect:
tmp.append(formation_energy[index])
tmp = np.array(tmp)
min_formation_energy.append(tmp.min(axis=0))
result = np.vstack((fermi_level, min_formation_energy))
result = result.T
np.savetxt('formation energy.txt', result)
|
[
"numpy.savetxt",
"numpy.array",
"numpy.loadtxt",
"numpy.linspace",
"numpy.dot",
"numpy.vstack"
] |
[((2320, 2365), 'numpy.linspace', 'np.linspace', (['order.vbm', 'order.cbm'], {'num': 'points'}), '(order.vbm, order.cbm, num=points)\n', (2331, 2365), True, 'import numpy as np\n'), ((2982, 3028), 'numpy.vstack', 'np.vstack', (['(fermi_level, min_formation_energy)'], {}), '((fermi_level, min_formation_energy))\n', (2991, 3028), True, 'import numpy as np\n'), ((3057, 3099), 'numpy.savetxt', 'np.savetxt', (['"""formation energy.txt"""', 'result'], {}), "('formation energy.txt', result)\n", (3067, 3099), True, 'import numpy as np\n'), ((1326, 1360), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'comments': '"""N"""'}), "(filename, comments='N')\n", (1336, 1360), True, 'import numpy as np\n'), ((2900, 2913), 'numpy.array', 'np.array', (['tmp'], {}), '(tmp)\n', (2908, 2913), True, 'import numpy as np\n'), ((2422, 2459), 'numpy.dot', 'np.dot', (['order.potential', 'data.delta_n'], {}), '(order.potential, data.delta_n)\n', (2428, 2459), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 12 15:01:44 2017
@author:
"""
import sys
reload(sys)
sys.setdefaultencoding('cp932')
tes = sys.getdefaultencoding()
import os
import cv2
import numpy as np
import pyws as m
import winxpgui
from PIL import ImageGrab
from PyQt4 import QtGui, QtCore
from datetime import datetime
import ConfigParser
class ControllerBoxWidget(QtGui.QWidget):
def __init__(self, config, parent=None):
QtGui.QWidget.__init__(self, parent=parent)
self.setup(config)
def setup(self,config):
self.start = QtGui.QPushButton('Start', parent=self)
self.stop = QtGui.QPushButton('Stop', parent=self)
self.quit = QtGui.QPushButton('Quit', parent=self)
self.intervalLavel = QtGui.QLabel('Interval(msec)',parent=self)
self.intervalLavel.setAlignment(QtCore.Qt.AlignRight)
self.interval = QtGui.QLineEdit(parent=self)
self.interval.setValidator(QtGui.QIntValidator())
self.interval.setText(config.get('config', 'interval'))
self.windowTitleLavel = QtGui.QLabel('Window Title',parent=self)
self.windowTitleLavel.setAlignment(QtCore.Qt.AlignRight)
self.windowTitle = QtGui.QLineEdit(parent=self)
self.windowTitle.setText(config.get('config', 'title'))
layout = QtGui.QGridLayout()
layout.addWidget(self.start, 0,0)
layout.addWidget(self.stop, 0,1)
layout.addWidget(self.intervalLavel, 1,0)
layout.addWidget(self.interval, 1,1)
layout.addWidget(self.windowTitleLavel, 2,0)
layout.addWidget(self.windowTitle, 2,1)
layout.addWidget(self.quit, 3,1)
self.setLayout(layout)
def locInput(self):
self.interval.setReadOnly(True)
self.windowTitle.setReadOnly(True)
def releaseInput(self):
self.interval.setReadOnly(False)
self.windowTitle.setReadOnly(False)
class CaptureWidget(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent=parent)
self.setup()
def setup(self):
self.interval = 60000
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.getCapture)
self.message = QtGui.QLabel('Setup')
layout = QtGui.QVBoxLayout()
layout.addWidget(self.message)
self.setLayout(layout)
def getCapture(self):
directory = 'capture'
if not os.path.isdir(directory):
os.makedirs(directory)
try :
handle = m.getid(self.windowTitle)
rect = winxpgui.GetWindowRect(handle)
except IndexError as e:
self.setMessage(str(e))
return
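# Grab the target window's on-screen region; PIL returns RGB, so convert to BGR for
# OpenCV before writing a timestamped JPEG into ./capture.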
cpimg = ImageGrab.grab(rect)
cpimg = np.asarray(cpimg)
cpimg = cv2.cvtColor(cpimg, cv2.COLOR_RGB2BGR)
cv2.imwrite(directory + '/' + datetime.now().strftime("%Y%m%d%H%M%S") + '.jpg', cpimg)
def start(self, windowTitle, interval):
try:
self.setInterval(interval)
self.setWindowTitle(windowTitle)
self.timer.start()
self.setMessage('Running...')
except ValueError as e:
self.setMessage(str(e))
def stop(self):
self.timer.stop()
self.setMessage('Stopped')
def setInterval(self, interval):
self.interval = int(interval)
self.timer.setInterval(self.interval)
def setWindowTitle(self, string):
self.windowTitle = str(string)
def setMessage(self, str):
self.message.setText(str)
def saveConfig(config, controller):
config.set('config', 'interval', controller.interval.text())
config.set('config', 'title', controller.windowTitle.text().toStdString())
with open('config.ini', 'wb') as configfile:
config.write(configfile)
pass
## main
config = ConfigParser.RawConfigParser({'interval': '1000', 'title':''})
config.read('config.ini')
if not config.has_section('config'):
config.add_section('config')
app = QtGui.QApplication(sys.argv)
panel = QtGui.QWidget()
panel_layout = QtGui.QVBoxLayout()
capture = CaptureWidget(panel)
controller = ControllerBoxWidget(config, panel)
controller.start.clicked.connect(controller.locInput)
controller.start.clicked.connect(lambda: capture.start(controller.windowTitle.text(), controller.interval.text()))
controller.stop.clicked.connect(capture.stop)
controller.stop.clicked.connect(controller.releaseInput)
controller.quit.clicked.connect(sys.exit)
panel_layout.addWidget(capture)
panel_layout.addWidget(controller)
panel.setLayout(panel_layout)
mw = QtGui.QMainWindow()
mw.setWindowTitle('App Capture')
mw.setCentralWidget(panel)
mw.show()
app.aboutToQuit.connect(app.deleteLater)
app.aboutToQuit.connect(lambda: saveConfig(config, controller))
app.exec_()
|
[
"PyQt4.QtCore.QTimer",
"PyQt4.QtGui.QWidget",
"PyQt4.QtGui.QLabel",
"sys.getdefaultencoding",
"PyQt4.QtGui.QVBoxLayout",
"PyQt4.QtGui.QLineEdit",
"cv2.cvtColor",
"PyQt4.QtGui.QMainWindow",
"sys.setdefaultencoding",
"datetime.datetime.now",
"PIL.ImageGrab.grab",
"numpy.asarray",
"PyQt4.QtGui.QApplication",
"PyQt4.QtGui.QPushButton",
"ConfigParser.RawConfigParser",
"pyws.getid",
"winxpgui.GetWindowRect",
"os.makedirs",
"PyQt4.QtGui.QGridLayout",
"os.path.isdir",
"PyQt4.QtGui.QIntValidator",
"PyQt4.QtGui.QWidget.__init__"
] |
[((114, 145), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""cp932"""'], {}), "('cp932')\n", (136, 145), False, 'import sys\n'), ((153, 177), 'sys.getdefaultencoding', 'sys.getdefaultencoding', ([], {}), '()\n', (175, 177), False, 'import sys\n'), ((4044, 4107), 'ConfigParser.RawConfigParser', 'ConfigParser.RawConfigParser', (["{'interval': '1000', 'title': ''}"], {}), "({'interval': '1000', 'title': ''})\n", (4072, 4107), False, 'import ConfigParser\n'), ((4221, 4249), 'PyQt4.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (4239, 4249), False, 'from PyQt4 import QtGui, QtCore\n'), ((4261, 4276), 'PyQt4.QtGui.QWidget', 'QtGui.QWidget', ([], {}), '()\n', (4274, 4276), False, 'from PyQt4 import QtGui, QtCore\n'), ((4293, 4312), 'PyQt4.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', ([], {}), '()\n', (4310, 4312), False, 'from PyQt4 import QtGui, QtCore\n'), ((4837, 4856), 'PyQt4.QtGui.QMainWindow', 'QtGui.QMainWindow', ([], {}), '()\n', (4854, 4856), False, 'from PyQt4 import QtGui, QtCore\n'), ((474, 517), 'PyQt4.QtGui.QWidget.__init__', 'QtGui.QWidget.__init__', (['self'], {'parent': 'parent'}), '(self, parent=parent)\n', (496, 517), False, 'from PyQt4 import QtGui, QtCore\n'), ((607, 646), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Start"""'], {'parent': 'self'}), "('Start', parent=self)\n", (624, 646), False, 'from PyQt4 import QtGui, QtCore\n'), ((668, 706), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Stop"""'], {'parent': 'self'}), "('Stop', parent=self)\n", (685, 706), False, 'from PyQt4 import QtGui, QtCore\n'), ((728, 766), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Quit"""'], {'parent': 'self'}), "('Quit', parent=self)\n", (745, 766), False, 'from PyQt4 import QtGui, QtCore\n'), ((799, 842), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""Interval(msec)"""'], {'parent': 'self'}), "('Interval(msec)', parent=self)\n", (811, 842), False, 'from PyQt4 import QtGui, QtCore\n'), ((930, 958), 'PyQt4.QtGui.QLineEdit', 'QtGui.QLineEdit', ([], {'parent': 'self'}), '(parent=self)\n', (945, 958), False, 'from PyQt4 import QtGui, QtCore\n'), ((1118, 1159), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""Window Title"""'], {'parent': 'self'}), "('Window Title', parent=self)\n", (1130, 1159), False, 'from PyQt4 import QtGui, QtCore\n'), ((1253, 1281), 'PyQt4.QtGui.QLineEdit', 'QtGui.QLineEdit', ([], {'parent': 'self'}), '(parent=self)\n', (1268, 1281), False, 'from PyQt4 import QtGui, QtCore\n'), ((1377, 1396), 'PyQt4.QtGui.QGridLayout', 'QtGui.QGridLayout', ([], {}), '()\n', (1394, 1396), False, 'from PyQt4 import QtGui, QtCore\n'), ((2099, 2142), 'PyQt4.QtGui.QWidget.__init__', 'QtGui.QWidget.__init__', (['self'], {'parent': 'parent'}), '(self, parent=parent)\n', (2121, 2142), False, 'from PyQt4 import QtGui, QtCore\n'), ((2260, 2275), 'PyQt4.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (2273, 2275), False, 'from PyQt4 import QtGui, QtCore\n'), ((2355, 2376), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""Setup"""'], {}), "('Setup')\n", (2367, 2376), False, 'from PyQt4 import QtGui, QtCore\n'), ((2397, 2416), 'PyQt4.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', ([], {}), '()\n', (2414, 2416), False, 'from PyQt4 import QtGui, QtCore\n'), ((2856, 2876), 'PIL.ImageGrab.grab', 'ImageGrab.grab', (['rect'], {}), '(rect)\n', (2870, 2876), False, 'from PIL import ImageGrab\n'), ((2894, 2911), 'numpy.asarray', 'np.asarray', (['cpimg'], {}), '(cpimg)\n', (2904, 2911), True, 'import numpy as np\n'), ((2929, 2967), 'cv2.cvtColor', 'cv2.cvtColor', 
(['cpimg', 'cv2.COLOR_RGB2BGR'], {}), '(cpimg, cv2.COLOR_RGB2BGR)\n', (2941, 2967), False, 'import cv2\n'), ((995, 1016), 'PyQt4.QtGui.QIntValidator', 'QtGui.QIntValidator', ([], {}), '()\n', (1014, 1016), False, 'from PyQt4 import QtGui, QtCore\n'), ((2565, 2589), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (2578, 2589), False, 'import os\n'), ((2604, 2626), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (2615, 2626), False, 'import os\n'), ((2672, 2697), 'pyws.getid', 'm.getid', (['self.windowTitle'], {}), '(self.windowTitle)\n', (2679, 2697), True, 'import pyws as m\n'), ((2718, 2748), 'winxpgui.GetWindowRect', 'winxpgui.GetWindowRect', (['handle'], {}), '(handle)\n', (2740, 2748), False, 'import winxpgui\n'), ((3007, 3021), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3019, 3021), False, 'from datetime import datetime\n')]
|
import argparse
from td import OneStepTD
from off_pol_td import OffPolicyTD
from driving import DrivingEnv, TRAVEL_TIME
from sarsa import Sarsa
from windy_gridworld import WindyGridworld
import numpy as np
from randomwalk import RandomWalk, NotSoRandomWalk, LEFT, RIGHT
from cliff import TheCliff
import matplotlib.pyplot as plt
from qlearning import QLearning
from double_qlearning import DoubleQLearning
from expected_sarsa import ExpectedSarsa
from max_bias_mdp import MaxBiasMDP, S_A, LEFT
from double_expected_sarsa import DoubleExpectedSarsa
from car_rental_afterstate import CarRentalAfterstateEnv
from td_afterstate import TDAfterstate
from policy_iteration_afterstate import DynamicProgrammingAfterstate
import seaborn as sns
N_EP_EX_6_2 = 100
N_RUNS_EX_6_2 = 100
TRUE_VALUES_EX_6_2 = [1/6, 2/6, 3/6, 4/6, 5/6]
TD_STEPS_6_2 = [0.05, 0.1, 0.15]
MC_STEPS_6_2 = [0.01, 0.02, 0.03, 0.04]
TD_STEPS_6_4 = [0.025, 0.05, 0.1, 0.15, 0.2]
MC_STEPS_6_4 = [0.005, 0.01, 0.02, 0.03, 0.04, 0.05]
INIT_VAL_6_2 = 1/2
LEFT_GRAPH_STEP_SIZE = 0.1
DEFAULT_FONT = {'fontsize': 14}
SMALL_FONT = {'fontsize': 10}
UNDISCOUNTED = 1
BATCH_ALPHA = {'td': 0.002, 'mc': 0.001}
NOT_SO_RW_ALPHA = 0.001
EX_6_5_STEP_SIZE = 0.5
EX_6_5_EPS = 0.1
EX_6_5_XTICKS = [k * 1000 for k in range(9)]
EX_6_5_YTICKS = [0, 50, 100, 150, 170]
EX_6_9_XTICKS = [k * 2000 for k in range(20)]
EX_6_9_YTICKS = [0, 50, 100, 150, 170, 200, 500, 1000]
EX_6_10_N_SEEDS = 10
EX_6_6_N_EPS = 500
EX_6_6_YTICKS = [-100, -75, -50, -25]
EX_6_6_N_SEEDS = 10
EX_6_6_N_AVG = 50
FIG_6_3_N_INT_RUNS = 250
FIG_6_3_N_INT_EPS = 100
FIG_6_3_N_ASY_RUNS = 5
FIG_6_3_N_ASY_EPS = 1000
FIG_6_5_ALPHA = 0.1
FIG_6_5_N_RUNS = 100
FIG_6_5_N_EPS = 300
EX_6_13_N_EPS = 300
EX_6_13_N_RUNS = 10000
EX_6_14_SIZE = 4
EX_6_14_ALPHA = 0.01
EX_6_14_N_EPS = 1000
EX_6_14_GAMMA = 0.9
def print_driving_home(states, V_old, V_new, fig, fig_id, ax_title):
ax = fig.add_subplot(fig_id)
ax.set_title(ax_title)
def pred(V):
return [V[idx] + sum(TRAVEL_TIME[:idx]) for idx in range(len(V))]
ax.set_xticklabels(states, fontdict={'fontsize': 8})
ax.set_xlabel('Situation')
ax.set_ylabel('Predicted total travel time')
plt.plot(pred(V_old), color='#000000', label='actual outcome')
plt.plot(pred(V_new), color='blue', label='after update')
def fig_6_1():
fig = plt.figure()
fig.suptitle('Figure 6.1')
env = DrivingEnv()
pi = {(a, s): 1.0 for s in env.states for a in env.moves}
V_0 = [30, 35, 15, 10, 3, 0]
V_init = {s: V_0[idx] for (idx, s) in enumerate(env.states)}
# TD(0)
alg = OneStepTD(env, V_init=V_init, step_size=1, gamma=1)
alg.tabular_td_0(pi)
V_TD = alg.get_value_list()
print_driving_home(env.states, V_0, V_TD, fig, '121', 'one step TD')
# constant step size mc
alg.reset()
alg.constant_step_size_mc(pi)
V_MC = alg.get_value_list()
print_driving_home(env.states, V_0, V_MC, fig, '122', 'constant step size mc')
plt.legend()
plt.savefig('fig6.1.png')
plt.show()
def print_random_walk(ax, state_labels, td_vals):
ax.set_xticklabels(state_labels, fontdict=DEFAULT_FONT)
x_ticks = np.arange(len(state_labels))
ax.set_xticks(x_ticks)
ax.set_xlabel('state', fontdict=DEFAULT_FONT)
ax.set_ylabel('estimated value', fontdict=DEFAULT_FONT)
plt.plot(x_ticks, TRUE_VALUES_EX_6_2, label='true values')
for key,td_val in td_vals.items():
plt.plot(x_ticks, td_val[:-1], label=str(key) + ' episodes')
def init_random_walk(init_value, step_size=None):
env = RandomWalk()
pi = {(a, s): 1.0 for s in env.states for a in env.moves}
V_0 = [init_value for s in env.states[:-1]] + [0] # V = 0 for absorbing state
V_init = {s: V_0[idx] for (idx, s) in enumerate(env.states)}
alg = OneStepTD(env, V_init=V_init, step_size=step_size, gamma=UNDISCOUNTED)
return alg, pi
def left_graph(fig, fig_id, init_value):
alg, pi = init_random_walk(init_value, step_size=LEFT_GRAPH_STEP_SIZE)
tot_ep = 0
td_vals = {}
ax = fig.add_subplot('121')
for n_episodes in [0, 1, 10, 100]:
alg.tabular_td_0(pi, n_episodes - tot_ep)
td_vals[n_episodes] = alg.get_value_list()
print_random_walk(ax, ["A", "B", "C", "D", "E"], td_vals)
plt.legend()
def right_graph(fig, fig_id, init_value, td_step_sizes, mc_step_sizes, font=DEFAULT_FONT, remove_x_label=False, batch=False):
ax = fig.add_subplot(fig_id)
ax.set_title(f'V_init = {init_value}', fontdict=font)
alg, pi = init_random_walk(init_value)
runs_dict = {alpha: np.zeros(N_EP_EX_6_2) for alpha in td_step_sizes + mc_step_sizes}
td_0 = alg.tabular_td_0 if not batch else alg.td_0_batch
mc = alg.constant_step_size_mc if not batch else alg.constant_step_size_mc_batch
to_compare_list = [(td_step_sizes, td_0), (mc_step_sizes, mc)]
for (step_size_list, algorithm) in to_compare_list:
for step_size in step_size_list:
alg.step_size = step_size
print(f"running step size {step_size}")
for seed in range(N_RUNS_EX_6_2):
alg.reset()
alg.env.seed(seed)
err_l = []
for nb_ep in range(N_EP_EX_6_2):
algorithm(pi, 1)
v_arr = np.array(alg.get_value_list()[:-1])
err_l.append(np.linalg.norm(v_arr-TRUE_VALUES_EX_6_2))
runs_dict[step_size] += np.array(err_l)
for key in runs_dict.keys():
runs_dict[key] /= N_RUNS_EX_6_2
if not remove_x_label:
ax.set_xlabel('walks / episodes', fontdict=font)
ax.set_ylabel('empirical rms error averaged over states', fontdict=font)
for key,err_run in runs_dict.items():
(color, alg_name) = ('b','td') if key in td_step_sizes else ('r', 'mc')
linewidth = max(int(100 * key) / 10 if key in td_step_sizes else int(200 * key) / 10, 10 / (len(runs_dict) * 10))
linestyle = 'dashed' if key in [0.02, 0.03] else None
plt.plot(err_run, color=color, label=alg_name + ' (a=' + str(key) + ')', linewidth=linewidth, linestyle=linestyle)
plt.legend()
def example_6_2():
fig = plt.figure()
fig.suptitle('Example 6.2', fontdict=DEFAULT_FONT)
left_graph(fig, fig_id='121', init_value=INIT_VAL_6_2)
right_graph(fig, '122', INIT_VAL_6_2, TD_STEPS_6_2, MC_STEPS_6_2)
plt.savefig('example6.2.png')
plt.show()
def ex_6_4():
fig = plt.figure()
fig.suptitle('Exercise 6.4', fontdict=DEFAULT_FONT)
right_graph(fig, '111', INIT_VAL_6_2, TD_STEPS_6_4, MC_STEPS_6_4, SMALL_FONT)
plt.savefig('ex6.4.png')
plt.show()
def ex_6_5():
fig = plt.figure()
fig.suptitle('Exercise 6.5', fontdict=SMALL_FONT)
for (idx, init_val) in enumerate([0, 0.25, 0.75, 1]):
right_graph(fig, '22' + str(idx + 1), init_val, TD_STEPS_6_2, MC_STEPS_6_2, SMALL_FONT, idx < 2)
plt.savefig('ex6.5.png')
plt.show()
def fig_6_2():
fig = plt.figure()
fig.suptitle('Figure 6.2', fontdict=SMALL_FONT)
right_graph(fig, '111', INIT_VAL_6_2, [BATCH_ALPHA['td']], [BATCH_ALPHA['mc']], batch=True, font=SMALL_FONT)
plt.savefig('fig6.2.png')
plt.show()
def ex_6_7():
env = NotSoRandomWalk()
env.seed(0)
V_0 = [1/2 for s in env.states[:-1]] + [0]
V_init = {s: V_0[idx] for (idx, s) in enumerate(env.states)}
b = {(a, s): 1/2 for s in env.states for a in env.moves}
pi = {(a, s): float(a == RIGHT) for s in env.states for a in env.moves}
alg = OffPolicyTD(env, V_init, NOT_SO_RW_ALPHA, pi, b, UNDISCOUNTED)
alg.step_size = 0.01
alg.find_value_function(N_EP_EX_6_2 * 100)
print(alg.get_value_list())
def init_windy_gridworld_fig(title, xticks=None, yticks=None):
fig, ax = plt.subplots()
fig.suptitle(title)
ax.set_xlabel('Time steps')
ax.set_ylabel('Episodes')
if xticks is not None:
ax.set_xticks(xticks)
if yticks is not None:
ax.set_yticks(yticks)
return ax
def plot_sarsa(ax, n_ep, label=None, diags=False, stay=False, stoch=False, seed=0):
env = WindyGridworld(diags, stay, stoch)
alg = Sarsa(env, step_size=EX_6_5_STEP_SIZE, gamma=UNDISCOUNTED, eps=EX_6_5_EPS)
alg.seed(seed)
kwargs = {"label": label} if label else {}
plt.plot(alg.on_policy_td_control(n_ep), **kwargs)
def example_6_5():
ax = init_windy_gridworld_fig('Example 6.5', EX_6_5_XTICKS, EX_6_5_YTICKS)
plot_sarsa(ax, max(EX_6_5_YTICKS))
plt.savefig('example6.5.png')
plt.show()
def ex_6_9():
ax = init_windy_gridworld_fig('Exercise 6.9', EX_6_9_XTICKS, EX_6_9_YTICKS)
n_ep_urld, n_ep = EX_6_9_YTICKS[-2:]
plot_sarsa(ax, n_ep_urld, label='up right down left')
plot_sarsa(ax, n_ep, label='with diags', diags=True)
plot_sarsa(ax, n_ep, label='with diags and stay', diags=True, stay=True)
plt.legend()
plt.savefig('ex6.9.png')
plt.show()
def ex_6_10():
ax = init_windy_gridworld_fig(f'Exercise 6.10 ({EX_6_10_N_SEEDS} seeds)')
n_ep = max(EX_6_9_YTICKS)
for seed in range(EX_6_10_N_SEEDS):
plot_sarsa(ax, n_ep, diags=True, stay=True, stoch=True, seed=seed)
plt.savefig('ex6.10.png')
plt.show()
def smooth_rewards(arr, to_avg=5):
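  # Moving average with window `to_avg`: entry i is the mean of arr[i:i + to_avg].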
nb_rew = len(arr)
new_arr = np.zeros(nb_rew - to_avg + 1)
for i in range(nb_rew - to_avg + 1):
new_arr[i] = np.mean([arr[i + j] for j in range(to_avg)])
return new_arr
def example_6_6():
fig, ax = plt.subplots()
fig.suptitle(f'Example 6.6 (Averaged over {EX_6_6_N_SEEDS} seeds)')
ax.set_xlabel('Episodes')
ax.set_ylabel(f'(Average of last {EX_6_6_N_AVG}) sum of rewards during episodes')
ax.set_yticks(EX_6_6_YTICKS)
ax.set_ylim(bottom=min(EX_6_6_YTICKS))
n_ep = EX_6_6_N_EPS
env = TheCliff()
qlearning_alg = QLearning(env, step_size=EX_6_5_STEP_SIZE, gamma=UNDISCOUNTED, eps=EX_6_5_EPS)
sarsa_alg = Sarsa(env, step_size=EX_6_5_STEP_SIZE, gamma=UNDISCOUNTED, eps=EX_6_5_EPS)
qlearning_rew = np.zeros(n_ep)
sarsa_rew = np.zeros(n_ep)
for seed in range(EX_6_6_N_SEEDS):
print(f"seed={seed}")
qlearning_alg.seed(seed)
qlearning_rew += qlearning_alg.q_learning(n_ep)
sarsa_alg.seed(seed)
sarsa_rew += sarsa_alg.on_policy_td_control(n_ep, rews=True)
plt.plot(smooth_rewards(qlearning_rew / EX_6_6_N_SEEDS, EX_6_6_N_AVG), color='r', label='Q learning')
plt.plot(smooth_rewards(sarsa_rew / EX_6_6_N_SEEDS, EX_6_6_N_AVG), color='b', label='Sarsa')
plt.legend()
plt.savefig('example6.6.png')
plt.show()
def fig_6_3():
fig, ax = plt.subplots()
fig.suptitle(f'Figure 6.3')
step_sizes = np.linspace(0.1, 1, 19)
ax.set_xlabel(f'Step Sizes')
ax.set_xticks(step_sizes)
ax.set_yticks([0, -40, -80, -120])
ax.set_ylim(bottom=-160, top=0)
ax.set_ylabel('Sum of rewards per episodes')
env = TheCliff()
exp_sar_alg, sar_alg, qlear_alg = [name_alg(env, step_size=None, gamma=UNDISCOUNTED, eps=EX_6_5_EPS) for name_alg in [ExpectedSarsa, Sarsa, QLearning]]
exp_sar_opt, sar_opt, qlear_opt = exp_sar_alg.expected_sarsa, lambda n_ep: sar_alg.on_policy_td_control(n_ep, rews=True), qlear_alg.q_learning
for (alg, opt, alg_name, color, marker) in [(exp_sar_alg, exp_sar_opt, 'Expected Sarsa', 'r', 'x'), (sar_alg, sar_opt, 'Sarsa', 'b', 'v'), (qlear_alg, qlear_opt, 'Q-learning', 'k', 's')]:
print(f"\n\n\n@@@@@@@@ {alg_name} @@@@@@@@\n@@@@@@@@@@@@@@@@@@@@@@@@@\n\n\n")
for (n_ep, n_runs, run_type_name) in [(FIG_6_3_N_INT_EPS, FIG_6_3_N_INT_RUNS, 'Interim'), (FIG_6_3_N_ASY_EPS, FIG_6_3_N_ASY_RUNS, 'Asymptotic')]:
print(f"\n######## {run_type_name} ########\n")
rew_l = []
for step_size in step_sizes:
print(f"alpha={step_size}")
alg.step_size = step_size
rew_sum = 0
for seed in range(n_runs):
print(f"run #{seed}")
alg.seed(seed)
alg.reset()
rew_sum += np.mean(opt(n_ep))
rew_l.append(rew_sum / n_runs)
label = f"{alg_name} ({run_type_name})"
plt.plot(step_sizes, rew_l, label=label, color=color, marker=marker, linestyle='-' if run_type_name == 'Asymptotic' else '--')
plt.legend()
plt.savefig('fig6.3.png')
plt.show()
def plot_max_bias(title, filename, todo_list, n_runs, n_eps):
fig, ax = plt.subplots()
fig.suptitle(title)
ax.set_xlabel(f'Episodes')
xticks = [1, 100, 200, 300]
ax.set_xlim([min(xticks), max(xticks)])
ax.set_yticks([0, 5, 25, 50, 75, 100])
ax.set_ylim([0, 100])
ax.set_ylabel('% left actions from A')
for (alg, opt, color, label) in todo_list:
perc_left = np.zeros(n_eps)
for seed in range(n_runs):
print(seed)
alg.seed(seed)
alg.reset()
perc_left += opt(n_eps)
plt.plot(perc_left / n_runs, label=label, color=color)
plt.plot(np.zeros(n_eps) + 5, color='k', linestyle='--', label='optimal')
plt.legend()
plt.savefig(filename)
plt.show()
def fig_6_5():
env = MaxBiasMDP()
qlear_alg, dqlear_alg = [name_alg(env, step_size=FIG_6_5_ALPHA, gamma=UNDISCOUNTED, eps=EX_6_5_EPS) for name_alg in [QLearning, DoubleQLearning]]
qlear_opt = lambda n_ep: qlear_alg.q_learning_log_actions(n_ep, S_A, LEFT)
dqlear_opt = lambda n_ep: dqlear_alg.double_q_learning_log_actions(n_ep, S_A, LEFT)
todo = [(qlear_alg, qlear_opt, 'r', 'Q-learning'), (dqlear_alg, dqlear_opt, 'g', 'Double Q-learning')]
plot_max_bias('Figure 6.5', 'fig6.5.png', todo, FIG_6_5_N_RUNS, FIG_6_5_N_EPS)
def ex_6_13():
env = MaxBiasMDP()
esarsa_alg, desarsa_alg = [name_alg(env, step_size=FIG_6_5_ALPHA, gamma=UNDISCOUNTED, eps=EX_6_5_EPS) for name_alg in [ExpectedSarsa, DoubleExpectedSarsa]]
esarsa_opt = lambda n_ep: esarsa_alg.expected_sarsa_log_actions(n_ep, S_A, LEFT)
desarsa_opt = lambda n_ep: desarsa_alg.double_expected_sarsa_log_actions(n_ep, S_A, LEFT)
todo = [(desarsa_alg, desarsa_opt, 'g', 'Double Expected Sarsa'), (esarsa_alg, esarsa_opt, 'r', 'Expected Sarsa')]
plot_max_bias(f'Exercise 6.13 ({EX_6_13_N_RUNS} runs)', 'ex6.13.png', todo, EX_6_13_N_RUNS, EX_6_13_N_EPS)
def print_car_rental_value_function(size, V):
to_print = np.zeros((size, size))
idxs = list(range(size))
for x in idxs:
for y in idxs:
to_print[x][y] = V[(x, y)]
to_print_term = [[to_print[size - x - 1][y] for y in idxs] for x in idxs]
print(f"#####\n\nV mean = {np.mean(to_print_term)}\n\n######")
#fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
#plt.title('Exercise 6.14 (value function)')
#(X, Y), Z = np.meshgrid(idxs, idxs), np.array(to_print).T
#ax.set_xlabel('# of cars at second location', fontsize=10)
#ax.set_ylabel('# of cars at first location', fontsize=10)
#ax.set_xticks([idxs[0], idxs[-1]])
#ax.set_yticks([idxs[0], idxs[-1]])
#ax.set_zticks([np.min(Z), np.max(Z)])
#ax.plot_surface(X, Y, Z)
#plt.show()
return np.mean(to_print_term)
def print_policy_car_rental(size, pi):
fig, ax = plt.subplots()
X = Y = list(range(size))
Z = [[pi[(x, y)] for y in Y] for x in X]
transposed_Z = [[Z[size - x - 1][y] for y in Y] for x in X]
sns.heatmap(transposed_Z)
print(*transposed_Z, sep='\n')
pol_range = list(range(np.min(transposed_Z), np.max(transposed_Z) + 1))
#CS = ax.contour(X, Y, Z, colors='k', levels=pol_range)
#ax.clabel(CS, inline=1, fontsize=10)
ax.set_title('Exercise 6.14 (policy)')
#plt.show()
def ex_6_14(size=None, ep_per_eval=None, alpha=None, max_ep=None):
size = EX_6_14_SIZE if size is None else size
env = CarRentalAfterstateEnv(size - 1)
env.seed(0)
#pi = {(a, s): (a == 0) for s in env.states for a in env.moves_d[s]}
pi = {s: 0 for s in env.states}
step_size_l = [0.003, 0.004, 0.005]
log_V_mean = {step_size: [] for step_size in step_size_l}
for step_size in step_size_l:
tot_ep = 0
alg = TDAfterstate(env, None, step_size=step_size, gamma=EX_6_14_GAMMA, pi_init=pi)
stable = False
while len(log_V_mean[step_size]) < 10:
print(f"tot_ep = {tot_ep}")
V, pi, stable = alg.policy_iteration(ep_per_eval=ep_per_eval, batch=True, max_ep=max_ep)
tot_ep += ((ep_per_eval) * (ep_per_eval + 1)) // 2
mean = print_car_rental_value_function(size, V)
log_V_mean[step_size].append(mean)
plt.savefig(f'ex6.14_val_{str(ep_per_eval)}_{str(alpha)[2:]}_{str(tot_ep)}ep.png')
plt.close()
for step_size in step_size_l:
plt.plot(log_V_mean[step_size], label=f'alpha={step_size}')
plt.legend()
plt.savefig('learning_rates.png')
plt.show()
#print_policy_car_rental(size, pi)
#plt.savefig('ex6.14_pol.png')
PLOT_FUNCTION = {
'6.1': fig_6_1,
'example6.2': example_6_2,
'ex6.4': ex_6_4,
'ex6.5': ex_6_5,
'6.2': fig_6_2,
'ex6.7': ex_6_7,
'example6.5': example_6_5,
'ex6.9': ex_6_9,
'ex6.10': ex_6_10,
'example6.6': example_6_6,
'6.3': fig_6_3,
'6.5': fig_6_5,
'ex6.13': ex_6_13,
'ex6.14': ex_6_14,
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument('figure', type=str, default=None,
help='Figure to reproduce.',
choices=PLOT_FUNCTION.keys())
parser.add_argument('-s', '--size', type=int, default=None,
help='Size of the environment (size * size states).')
parser.add_argument('-e', '--ep', type=int, default=None)
parser.add_argument('-a', '--alpha', type=float, default=None)
parser.add_argument('-m', '--max_ep', type=int, default=None)
args = parser.parse_args()
if args.figure == 'ex6.14':
PLOT_FUNCTION[args.figure](args.size, args.ep, args.alpha, args.max_ep)
else:
PLOT_FUNCTION[args.figure]()
if __name__ == "__main__":
main()
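# Example invocations (the module filename below is an assumption; the positional
# argument must be one of the keys of PLOT_FUNCTION):
#   python chapter6.py example6.2
#   python chapter6.py ex6.14 --size 4 --ep 10 --alpha 0.004 --max_ep 100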
|
[
"seaborn.heatmap",
"argparse.ArgumentParser",
"td_afterstate.TDAfterstate",
"td.OneStepTD",
"off_pol_td.OffPolicyTD",
"max_bias_mdp.MaxBiasMDP",
"matplotlib.pyplot.figure",
"numpy.mean",
"car_rental_afterstate.CarRentalAfterstateEnv",
"numpy.linalg.norm",
"driving.DrivingEnv",
"matplotlib.pyplot.close",
"randomwalk.RandomWalk",
"numpy.max",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"numpy.min",
"windy_gridworld.WindyGridworld",
"randomwalk.NotSoRandomWalk",
"cliff.TheCliff",
"matplotlib.pyplot.plot",
"numpy.zeros",
"qlearning.QLearning",
"numpy.array",
"sarsa.Sarsa",
"matplotlib.pyplot.savefig"
] |
[((2297, 2309), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2307, 2309), True, 'import matplotlib.pyplot as plt\n'), ((2347, 2359), 'driving.DrivingEnv', 'DrivingEnv', ([], {}), '()\n', (2357, 2359), False, 'from driving import DrivingEnv, TRAVEL_TIME\n'), ((2535, 2586), 'td.OneStepTD', 'OneStepTD', (['env'], {'V_init': 'V_init', 'step_size': '(1)', 'gamma': '(1)'}), '(env, V_init=V_init, step_size=1, gamma=1)\n', (2544, 2586), False, 'from td import OneStepTD\n'), ((2899, 2911), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2909, 2911), True, 'import matplotlib.pyplot as plt\n'), ((2914, 2939), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fig6.1.png"""'], {}), "('fig6.1.png')\n", (2925, 2939), True, 'import matplotlib.pyplot as plt\n'), ((2942, 2952), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2950, 2952), True, 'import matplotlib.pyplot as plt\n'), ((3237, 3295), 'matplotlib.pyplot.plot', 'plt.plot', (['x_ticks', 'TRUE_VALUES_EX_6_2'], {'label': '"""true values"""'}), "(x_ticks, TRUE_VALUES_EX_6_2, label='true values')\n", (3245, 3295), True, 'import matplotlib.pyplot as plt\n'), ((3457, 3469), 'randomwalk.RandomWalk', 'RandomWalk', ([], {}), '()\n', (3467, 3469), False, 'from randomwalk import RandomWalk, NotSoRandomWalk, LEFT, RIGHT\n'), ((3683, 3753), 'td.OneStepTD', 'OneStepTD', (['env'], {'V_init': 'V_init', 'step_size': 'step_size', 'gamma': 'UNDISCOUNTED'}), '(env, V_init=V_init, step_size=step_size, gamma=UNDISCOUNTED)\n', (3692, 3753), False, 'from td import OneStepTD\n'), ((4136, 4148), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4146, 4148), True, 'import matplotlib.pyplot as plt\n'), ((5854, 5866), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5864, 5866), True, 'import matplotlib.pyplot as plt\n'), ((5895, 5907), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5905, 5907), True, 'import matplotlib.pyplot as plt\n'), ((6088, 6117), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""example6.2.png"""'], {}), "('example6.2.png')\n", (6099, 6117), True, 'import matplotlib.pyplot as plt\n'), ((6120, 6130), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6128, 6130), True, 'import matplotlib.pyplot as plt\n'), ((6154, 6166), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6164, 6166), True, 'import matplotlib.pyplot as plt\n'), ((6303, 6327), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ex6.4.png"""'], {}), "('ex6.4.png')\n", (6314, 6327), True, 'import matplotlib.pyplot as plt\n'), ((6330, 6340), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6338, 6340), True, 'import matplotlib.pyplot as plt\n'), ((6364, 6376), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6374, 6376), True, 'import matplotlib.pyplot as plt\n'), ((6588, 6612), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ex6.5.png"""'], {}), "('ex6.5.png')\n", (6599, 6612), True, 'import matplotlib.pyplot as plt\n'), ((6615, 6625), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6623, 6625), True, 'import matplotlib.pyplot as plt\n'), ((6650, 6662), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6660, 6662), True, 'import matplotlib.pyplot as plt\n'), ((6826, 6851), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fig6.2.png"""'], {}), "('fig6.2.png')\n", (6837, 6851), True, 'import matplotlib.pyplot as plt\n'), ((6854, 6864), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6862, 6864), True, 'import matplotlib.pyplot as plt\n'), ((6888, 6905), 
'randomwalk.NotSoRandomWalk', 'NotSoRandomWalk', ([], {}), '()\n', (6903, 6905), False, 'from randomwalk import RandomWalk, NotSoRandomWalk, LEFT, RIGHT\n'), ((7170, 7232), 'off_pol_td.OffPolicyTD', 'OffPolicyTD', (['env', 'V_init', 'NOT_SO_RW_ALPHA', 'pi', 'b', 'UNDISCOUNTED'], {}), '(env, V_init, NOT_SO_RW_ALPHA, pi, b, UNDISCOUNTED)\n', (7181, 7232), False, 'from off_pol_td import OffPolicyTD\n'), ((7407, 7421), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7419, 7421), True, 'import matplotlib.pyplot as plt\n'), ((7710, 7744), 'windy_gridworld.WindyGridworld', 'WindyGridworld', (['diags', 'stay', 'stoch'], {}), '(diags, stay, stoch)\n', (7724, 7744), False, 'from windy_gridworld import WindyGridworld\n'), ((7753, 7827), 'sarsa.Sarsa', 'Sarsa', (['env'], {'step_size': 'EX_6_5_STEP_SIZE', 'gamma': 'UNDISCOUNTED', 'eps': 'EX_6_5_EPS'}), '(env, step_size=EX_6_5_STEP_SIZE, gamma=UNDISCOUNTED, eps=EX_6_5_EPS)\n', (7758, 7827), False, 'from sarsa import Sarsa\n'), ((8080, 8109), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""example6.5.png"""'], {}), "('example6.5.png')\n", (8091, 8109), True, 'import matplotlib.pyplot as plt\n'), ((8112, 8122), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8120, 8122), True, 'import matplotlib.pyplot as plt\n'), ((8443, 8455), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8453, 8455), True, 'import matplotlib.pyplot as plt\n'), ((8458, 8482), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ex6.9.png"""'], {}), "('ex6.9.png')\n", (8469, 8482), True, 'import matplotlib.pyplot as plt\n'), ((8485, 8495), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8493, 8495), True, 'import matplotlib.pyplot as plt\n'), ((8727, 8752), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ex6.10.png"""'], {}), "('ex6.10.png')\n", (8738, 8752), True, 'import matplotlib.pyplot as plt\n'), ((8755, 8765), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8763, 8765), True, 'import matplotlib.pyplot as plt\n'), ((8834, 8863), 'numpy.zeros', 'np.zeros', (['(nb_rew - to_avg + 1)'], {}), '(nb_rew - to_avg + 1)\n', (8842, 8863), True, 'import numpy as np\n'), ((9015, 9029), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9027, 9029), True, 'import matplotlib.pyplot as plt\n'), ((9315, 9325), 'cliff.TheCliff', 'TheCliff', ([], {}), '()\n', (9323, 9325), False, 'from cliff import TheCliff\n'), ((9344, 9422), 'qlearning.QLearning', 'QLearning', (['env'], {'step_size': 'EX_6_5_STEP_SIZE', 'gamma': 'UNDISCOUNTED', 'eps': 'EX_6_5_EPS'}), '(env, step_size=EX_6_5_STEP_SIZE, gamma=UNDISCOUNTED, eps=EX_6_5_EPS)\n', (9353, 9422), False, 'from qlearning import QLearning\n'), ((9438, 9512), 'sarsa.Sarsa', 'Sarsa', (['env'], {'step_size': 'EX_6_5_STEP_SIZE', 'gamma': 'UNDISCOUNTED', 'eps': 'EX_6_5_EPS'}), '(env, step_size=EX_6_5_STEP_SIZE, gamma=UNDISCOUNTED, eps=EX_6_5_EPS)\n', (9443, 9512), False, 'from sarsa import Sarsa\n'), ((9532, 9546), 'numpy.zeros', 'np.zeros', (['n_ep'], {}), '(n_ep)\n', (9540, 9546), True, 'import numpy as np\n'), ((9561, 9575), 'numpy.zeros', 'np.zeros', (['n_ep'], {}), '(n_ep)\n', (9569, 9575), True, 'import numpy as np\n'), ((10011, 10023), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10021, 10023), True, 'import matplotlib.pyplot as plt\n'), ((10026, 10055), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""example6.6.png"""'], {}), "('example6.6.png')\n", (10037, 10055), True, 'import matplotlib.pyplot as plt\n'), ((10058, 10068), 'matplotlib.pyplot.show', 'plt.show', ([], 
{}), '()\n', (10066, 10068), True, 'import matplotlib.pyplot as plt\n'), ((10098, 10112), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10110, 10112), True, 'import matplotlib.pyplot as plt\n'), ((10159, 10182), 'numpy.linspace', 'np.linspace', (['(0.1)', '(1)', '(19)'], {}), '(0.1, 1, 19)\n', (10170, 10182), True, 'import numpy as np\n'), ((10368, 10378), 'cliff.TheCliff', 'TheCliff', ([], {}), '()\n', (10376, 10378), False, 'from cliff import TheCliff\n'), ((11672, 11684), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11682, 11684), True, 'import matplotlib.pyplot as plt\n'), ((11687, 11712), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fig6.3.png"""'], {}), "('fig6.3.png')\n", (11698, 11712), True, 'import matplotlib.pyplot as plt\n'), ((11715, 11725), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11723, 11725), True, 'import matplotlib.pyplot as plt\n'), ((11802, 11816), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11814, 11816), True, 'import matplotlib.pyplot as plt\n'), ((12379, 12391), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12389, 12391), True, 'import matplotlib.pyplot as plt\n'), ((12394, 12415), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {}), '(filename)\n', (12405, 12415), True, 'import matplotlib.pyplot as plt\n'), ((12418, 12428), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12426, 12428), True, 'import matplotlib.pyplot as plt\n'), ((12453, 12465), 'max_bias_mdp.MaxBiasMDP', 'MaxBiasMDP', ([], {}), '()\n', (12463, 12465), False, 'from max_bias_mdp import MaxBiasMDP, S_A, LEFT\n'), ((12987, 12999), 'max_bias_mdp.MaxBiasMDP', 'MaxBiasMDP', ([], {}), '()\n', (12997, 12999), False, 'from max_bias_mdp import MaxBiasMDP, S_A, LEFT\n'), ((13620, 13642), 'numpy.zeros', 'np.zeros', (['(size, size)'], {}), '((size, size))\n', (13628, 13642), True, 'import numpy as np\n'), ((14347, 14369), 'numpy.mean', 'np.mean', (['to_print_term'], {}), '(to_print_term)\n', (14354, 14369), True, 'import numpy as np\n'), ((14422, 14436), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (14434, 14436), True, 'import matplotlib.pyplot as plt\n'), ((14572, 14597), 'seaborn.heatmap', 'sns.heatmap', (['transposed_Z'], {}), '(transposed_Z)\n', (14583, 14597), True, 'import seaborn as sns\n'), ((14982, 15014), 'car_rental_afterstate.CarRentalAfterstateEnv', 'CarRentalAfterstateEnv', (['(size - 1)'], {}), '(size - 1)\n', (15004, 15014), False, 'from car_rental_afterstate import CarRentalAfterstateEnv\n'), ((15915, 15927), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (15925, 15927), True, 'import matplotlib.pyplot as plt\n'), ((15930, 15963), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""learning_rates.png"""'], {}), "('learning_rates.png')\n", (15941, 15963), True, 'import matplotlib.pyplot as plt\n'), ((15966, 15976), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15974, 15976), True, 'import matplotlib.pyplot as plt\n'), ((16391, 16416), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (16414, 16416), False, 'import argparse\n'), ((4427, 4448), 'numpy.zeros', 'np.zeros', (['N_EP_EX_6_2'], {}), '(N_EP_EX_6_2)\n', (4435, 4448), True, 'import numpy as np\n'), ((12108, 12123), 'numpy.zeros', 'np.zeros', (['n_eps'], {}), '(n_eps)\n', (12116, 12123), True, 'import numpy as np\n'), ((12246, 12300), 'matplotlib.pyplot.plot', 'plt.plot', (['(perc_left / n_runs)'], {'label': 'label', 'color': 'color'}), '(perc_left / n_runs, 
label=label, color=color)\n', (12254, 12300), True, 'import matplotlib.pyplot as plt\n'), ((15289, 15366), 'td_afterstate.TDAfterstate', 'TDAfterstate', (['env', 'None'], {'step_size': 'step_size', 'gamma': 'EX_6_14_GAMMA', 'pi_init': 'pi'}), '(env, None, step_size=step_size, gamma=EX_6_14_GAMMA, pi_init=pi)\n', (15301, 15366), False, 'from td_afterstate import TDAfterstate\n'), ((15853, 15912), 'matplotlib.pyplot.plot', 'plt.plot', (['log_V_mean[step_size]'], {'label': 'f"""alpha={step_size}"""'}), "(log_V_mean[step_size], label=f'alpha={step_size}')\n", (15861, 15912), True, 'import matplotlib.pyplot as plt\n'), ((11543, 11673), 'matplotlib.pyplot.plot', 'plt.plot', (['step_sizes', 'rew_l'], {'label': 'label', 'color': 'color', 'marker': 'marker', 'linestyle': "('-' if run_type_name == 'Asymptotic' else '--')"}), "(step_sizes, rew_l, label=label, color=color, marker=marker,\n linestyle='-' if run_type_name == 'Asymptotic' else '--')\n", (11551, 11673), True, 'import matplotlib.pyplot as plt\n'), ((12312, 12327), 'numpy.zeros', 'np.zeros', (['n_eps'], {}), '(n_eps)\n', (12320, 12327), True, 'import numpy as np\n'), ((14656, 14676), 'numpy.min', 'np.min', (['transposed_Z'], {}), '(transposed_Z)\n', (14662, 14676), True, 'import numpy as np\n'), ((15805, 15816), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15814, 15816), True, 'import matplotlib.pyplot as plt\n'), ((5196, 5211), 'numpy.array', 'np.array', (['err_l'], {}), '(err_l)\n', (5204, 5211), True, 'import numpy as np\n'), ((13844, 13866), 'numpy.mean', 'np.mean', (['to_print_term'], {}), '(to_print_term)\n', (13851, 13866), True, 'import numpy as np\n'), ((14678, 14698), 'numpy.max', 'np.max', (['transposed_Z'], {}), '(transposed_Z)\n', (14684, 14698), True, 'import numpy as np\n'), ((5122, 5164), 'numpy.linalg.norm', 'np.linalg.norm', (['(v_arr - TRUE_VALUES_EX_6_2)'], {}), '(v_arr - TRUE_VALUES_EX_6_2)\n', (5136, 5164), True, 'import numpy as np\n')]
|
'''Constructs a project-specific dictionary containing prior-model-related objects
To construct the dictionary, the code creates an instance of the PriorHandler
class. The methods of this class are then used to load the prior mean and the
covariance-related objects.
Inputs:
- hyperp: dictionary storing set hyperparameter values
- options: dictionary storing the set options
- filepaths: class instance storing the filepaths
    - load_*: flags that dictate whether to load the prior mean and the
              variants of the covariance (inverse, Cholesky factor and its inverse)
Author: <NAME>, Oden Institute, Austin, Texas 2020
'''
import numpy as np
import pandas as pd
from utils_data.prior_handler import PriorHandler
import pdb #Equivalent of keyboard in MATLAB, just add "pdb.set_trace()"
def construct_prior_dict(hyperp, options, filepaths,
load_mean = True,
load_covariance = True,
load_covariance_inverse = True,
load_covariance_cholesky = True,
load_covariance_cholesky_inverse = True):
prior_dict = {}
prior = PriorHandler(hyperp, options, filepaths,
options.parameter_dimensions)
#=== Prior Mean ===#
if load_mean == True:
prior_mean = prior.load_prior_mean()
prior_dict["prior_mean"] = np.expand_dims(prior_mean, 0)
#=== Prior Covariance ===#
if load_covariance == True:
prior_covariance = prior.load_prior_covariance()
prior_dict["prior_covariance"] = prior_covariance
#=== Prior Covariance Inverse ===#
if load_covariance_inverse == True:
prior_covariance_inverse = prior.load_prior_covariance_inverse()
prior_dict["prior_covariance_inverse"] = prior_covariance_inverse
#=== Prior Covariance Cholesky ===#
if load_covariance_cholesky == True:
prior_covariance_cholesky = prior.load_prior_covariance_cholesky()
prior_dict["prior_covariance_cholesky"] = prior_covariance_cholesky
#=== Prior Covariance Cholesky Inverse ===#
if load_covariance_cholesky_inverse == True:
prior_covariance_cholesky_inverse = prior.load_prior_covariance_cholesky_inverse()
prior_dict["prior_covariance_cholesky_inverse"] = prior_covariance_cholesky_inverse
return prior_dict
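# Minimal usage sketch (hypothetical call; assumes hyperp, options and filepaths
# were constructed elsewhere in the project):
#   prior_dict = construct_prior_dict(hyperp, options, filepaths,
#                                     load_covariance_cholesky_inverse=False)
#   prior_mean = prior_dict["prior_mean"]        # shape (1, parameter_dimensions)
#   prior_cov  = prior_dict["prior_covariance"]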
|
[
"utils_data.prior_handler.PriorHandler",
"numpy.expand_dims"
] |
[((1113, 1183), 'utils_data.prior_handler.PriorHandler', 'PriorHandler', (['hyperp', 'options', 'filepaths', 'options.parameter_dimensions'], {}), '(hyperp, options, filepaths, options.parameter_dimensions)\n', (1125, 1183), False, 'from utils_data.prior_handler import PriorHandler\n'), ((1341, 1370), 'numpy.expand_dims', 'np.expand_dims', (['prior_mean', '(0)'], {}), '(prior_mean, 0)\n', (1355, 1370), True, 'import numpy as np\n')]
|
import numpy as np
A=np.array([[1,-2j],[2j,5]])
print(A)
# A = L.dot(L^H), where A is a positive definite matrix and L is a lower triangular matrix.
L=np.linalg.cholesky(A)
print(L)
print(L.dot(L.T.conj()))
a=np.array([[4,12,-16],[12,37,-43],[-16,-43,98]])
L=np.linalg.cholesky(a)
print(L)
L_T=L.transpose()
print(L.dot(L_T))
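# Optional numerical check (added as an illustration, not part of the original
# snippet): verify the reconstruction instead of comparing printed matrices.
print(np.allclose(L.dot(L_T), a))  # expected: True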
|
[
"numpy.array",
"numpy.linalg.cholesky"
] |
[((22, 55), 'numpy.array', 'np.array', (['[[1, -2.0j], [2.0j, 5]]'], {}), '([[1, -2.0j], [2.0j, 5]])\n', (30, 55), True, 'import numpy as np\n'), ((145, 166), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['A'], {}), '(A)\n', (163, 166), True, 'import numpy as np\n'), ((204, 259), 'numpy.array', 'np.array', (['[[4, 12, -16], [12, 37, -43], [-16, -43, 98]]'], {}), '([[4, 12, -16], [12, 37, -43], [-16, -43, 98]])\n', (212, 259), True, 'import numpy as np\n'), ((255, 276), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['a'], {}), '(a)\n', (273, 276), True, 'import numpy as np\n')]
|
# =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2019 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
import pychrono.core as chrono
import pychrono.irrlicht as chronoirr
import matplotlib.pyplot as plt
import numpy as np
print ("Example: create a slider crank and plot results");
# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
#chrono.SetChronoDataPath('path/to/data')
# ---------------------------------------------------------------------
#
# Create the simulation sys and add items
#
sys = chrono.ChSystemNSC()
# Some data shared in the following
crank_center = chrono.ChVectorD(-1,0.5,0)
crank_rad = 0.4
crank_thick = 0.1
rod_length = 1.5
# Create four rigid bodies: the truss, the crank, the rod, the piston.
# Create the floor truss
mfloor = chrono.ChBodyEasyBox(3, 1, 3, 1000)
mfloor.SetPos(chrono.ChVectorD(0,-0.5,0))
mfloor.SetBodyFixed(True)
sys.Add(mfloor)
# Create the flywheel crank
mcrank = chrono.ChBodyEasyCylinder(crank_rad, crank_thick, 1000)
mcrank.SetPos(crank_center + chrono.ChVectorD(0, 0, -0.1))
# Since ChBodyEasyCylinder creates a vertical (y up) cylinder, here rotate it:
mcrank.SetRot(chrono.Q_ROTATE_Y_TO_Z)
sys.Add(mcrank)
# Create a stylized rod
mrod = chrono.ChBodyEasyBox(rod_length, 0.1, 0.1, 1000)
mrod.SetPos(crank_center + chrono.ChVectorD(crank_rad+rod_length/2 , 0, 0))
sys.Add(mrod)
# Create a stylized piston
mpiston = chrono.ChBodyEasyCylinder(0.2, 0.3, 1000)
mpiston.SetPos(crank_center + chrono.ChVectorD(crank_rad+rod_length, 0, 0))
mpiston.SetRot(chrono.Q_ROTATE_Y_TO_X)
sys.Add(mpiston)
# Now create constraints and motors between the bodies.
# Create crank-truss joint: a motor that spins the crank flywheel
my_motor = chrono.ChLinkMotorRotationSpeed()
my_motor.Initialize(mcrank, # the first connected body
mfloor, # the second connected body
chrono.ChFrameD(crank_center)) # where to create the motor in abs.space
my_angularspeed = chrono.ChFunction_Const(chrono.CH_C_PI) # ang.speed: 180°/s
my_motor.SetMotorFunction(my_angularspeed)
sys.Add(my_motor)
# Create crank-rod joint
mjointA = chrono.ChLinkLockRevolute()
mjointA.Initialize(mrod,
mcrank,
chrono.ChCoordsysD( crank_center + chrono.ChVectorD(crank_rad,0,0) ))
sys.Add(mjointA)
# Create rod-piston joint
mjointB = chrono.ChLinkLockRevolute()
mjointB.Initialize(mpiston,
mrod,
chrono.ChCoordsysD( crank_center + chrono.ChVectorD(crank_rad+rod_length,0,0) ))
sys.Add(mjointB)
# Create piston-truss joint
mjointC = chrono.ChLinkLockPrismatic()
mjointC.Initialize(mpiston,
mfloor,
chrono.ChCoordsysD(
crank_center + chrono.ChVectorD(crank_rad+rod_length,0,0),
chrono.Q_ROTATE_Z_TO_X)
)
sys.Add(mjointC)
# ---------------------------------------------------------------------
#
# Create an Irrlicht application to visualize the sys
#
vis = chronoirr.ChVisualSystemIrrlicht()
sys.SetVisualSystem(vis)
vis.SetWindowSize(1024,768)
vis.SetWindowTitle('Crank demo')
vis.Initialize()
vis.AddLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
vis.AddSkyBox()
vis.AddCamera(chrono.ChVectorD(1,1,3), chrono.ChVectorD(0,1,0))
vis.AddTypicalLights()
# ---------------------------------------------------------------------
#
# Run the simulation
#
# Initialize these lists to store values to plot.
array_time = []
array_angle = []
array_pos = []
array_speed = []
# Run the interactive simulation loop
while vis.Run():
# for plotting, append instantaneous values:
array_time.append(sys.GetChTime())
array_angle.append(my_motor.GetMotorRot())
array_pos.append(mpiston.GetPos().x)
array_speed.append(mpiston.GetPos_dt().x)
# here happens the visualization and step time integration
vis.BeginScene()
vis.DrawAll()
vis.EndScene()
sys.DoStepDynamics(5e-3)
    # stop the simulation after 20 seconds
if sys.GetChTime() > 20:
vis.GetDevice().closeDevice()
# Use matplotlib to make two plots when simulation ended:
fig, (ax1, ax2) = plt.subplots(2, sharex = True)
ax1.plot(array_angle, array_pos)
ax1.set(ylabel='position [m]')
ax1.grid()
ax2.plot(array_angle, array_speed, 'r--')
ax2.set(ylabel='speed [m/s]', xlabel='angle [rad]')
ax2.grid()
# trick to plot \pi on x axis of plots instead of 1 2 3 4 etc.
plt.xticks(np.linspace(0, 2*np.pi, 5),['0','$\pi/2$','$\pi$','$3\pi/2$','$2\pi$'])
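# Analytical cross-check (not part of the original demo): the ideal slider-crank
# kinematics give the piston position relative to the crank center as
#   x(theta) = r*cos(theta) + sqrt(L**2 - (r*sin(theta))**2)
# with r = crank_rad and L = rod_length, so the simulated curve in the first
# subplot should follow this closed-form relation.
plt.show()  # assumption: the figure is meant to be displayed after the simulation ends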
|
[
"pychrono.core.ChFrameD",
"pychrono.core.ChBodyEasyCylinder",
"pychrono.irrlicht.ChVisualSystemIrrlicht",
"pychrono.core.ChLinkMotorRotationSpeed",
"pychrono.core.ChSystemNSC",
"pychrono.core.ChVectorD",
"pychrono.core.ChFunction_Const",
"pychrono.core.ChLinkLockPrismatic",
"numpy.linspace",
"pychrono.core.ChBodyEasyBox",
"matplotlib.pyplot.subplots",
"pychrono.core.ChLinkLockRevolute",
"pychrono.core.GetChronoDataFile"
] |
[((1084, 1104), 'pychrono.core.ChSystemNSC', 'chrono.ChSystemNSC', ([], {}), '()\n', (1102, 1104), True, 'import pychrono.core as chrono\n'), ((1157, 1185), 'pychrono.core.ChVectorD', 'chrono.ChVectorD', (['(-1)', '(0.5)', '(0)'], {}), '(-1, 0.5, 0)\n', (1173, 1185), True, 'import pychrono.core as chrono\n'), ((1349, 1384), 'pychrono.core.ChBodyEasyBox', 'chrono.ChBodyEasyBox', (['(3)', '(1)', '(3)', '(1000)'], {}), '(3, 1, 3, 1000)\n', (1369, 1384), True, 'import pychrono.core as chrono\n'), ((1507, 1562), 'pychrono.core.ChBodyEasyCylinder', 'chrono.ChBodyEasyCylinder', (['crank_rad', 'crank_thick', '(1000)'], {}), '(crank_rad, crank_thick, 1000)\n', (1532, 1562), True, 'import pychrono.core as chrono\n'), ((1787, 1835), 'pychrono.core.ChBodyEasyBox', 'chrono.ChBodyEasyBox', (['rod_length', '(0.1)', '(0.1)', '(1000)'], {}), '(rod_length, 0.1, 0.1, 1000)\n', (1807, 1835), True, 'import pychrono.core as chrono\n'), ((1964, 2005), 'pychrono.core.ChBodyEasyCylinder', 'chrono.ChBodyEasyCylinder', (['(0.2)', '(0.3)', '(1000)'], {}), '(0.2, 0.3, 1000)\n', (1989, 2005), True, 'import pychrono.core as chrono\n'), ((2274, 2307), 'pychrono.core.ChLinkMotorRotationSpeed', 'chrono.ChLinkMotorRotationSpeed', ([], {}), '()\n', (2305, 2307), True, 'import pychrono.core as chrono\n'), ((2533, 2572), 'pychrono.core.ChFunction_Const', 'chrono.ChFunction_Const', (['chrono.CH_C_PI'], {}), '(chrono.CH_C_PI)\n', (2556, 2572), True, 'import pychrono.core as chrono\n'), ((2690, 2717), 'pychrono.core.ChLinkLockRevolute', 'chrono.ChLinkLockRevolute', ([], {}), '()\n', (2715, 2717), True, 'import pychrono.core as chrono\n'), ((2914, 2941), 'pychrono.core.ChLinkLockRevolute', 'chrono.ChLinkLockRevolute', ([], {}), '()\n', (2939, 2941), True, 'import pychrono.core as chrono\n'), ((3152, 3180), 'pychrono.core.ChLinkLockPrismatic', 'chrono.ChLinkLockPrismatic', ([], {}), '()\n', (3178, 3180), True, 'import pychrono.core as chrono\n'), ((3601, 3635), 'pychrono.irrlicht.ChVisualSystemIrrlicht', 'chronoirr.ChVisualSystemIrrlicht', ([], {}), '()\n', (3633, 3635), True, 'import pychrono.irrlicht as chronoirr\n'), ((4759, 4787), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'sharex': '(True)'}), '(2, sharex=True)\n', (4771, 4787), True, 'import matplotlib.pyplot as plt\n'), ((1399, 1427), 'pychrono.core.ChVectorD', 'chrono.ChVectorD', (['(0)', '(-0.5)', '(0)'], {}), '(0, -0.5, 0)\n', (1415, 1427), True, 'import pychrono.core as chrono\n'), ((2443, 2472), 'pychrono.core.ChFrameD', 'chrono.ChFrameD', (['crank_center'], {}), '(crank_center)\n', (2458, 2472), True, 'import pychrono.core as chrono\n'), ((3751, 3802), 'pychrono.core.GetChronoDataFile', 'chrono.GetChronoDataFile', (['"""logo_pychrono_alpha.png"""'], {}), "('logo_pychrono_alpha.png')\n", (3775, 3802), True, 'import pychrono.core as chrono\n'), ((3834, 3859), 'pychrono.core.ChVectorD', 'chrono.ChVectorD', (['(1)', '(1)', '(3)'], {}), '(1, 1, 3)\n', (3850, 3859), True, 'import pychrono.core as chrono\n'), ((3859, 3884), 'pychrono.core.ChVectorD', 'chrono.ChVectorD', (['(0)', '(1)', '(0)'], {}), '(0, 1, 0)\n', (3875, 3884), True, 'import pychrono.core as chrono\n'), ((5044, 5072), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(5)'], {}), '(0, 2 * np.pi, 5)\n', (5055, 5072), True, 'import numpy as np\n'), ((1592, 1620), 'pychrono.core.ChVectorD', 'chrono.ChVectorD', (['(0)', '(0)', '(-0.1)'], {}), '(0, 0, -0.1)\n', (1608, 1620), True, 'import pychrono.core as chrono\n'), ((1863, 1913), 'pychrono.core.ChVectorD', 'chrono.ChVectorD', (['(crank_rad + 
rod_length / 2)', '(0)', '(0)'], {}), '(crank_rad + rod_length / 2, 0, 0)\n', (1879, 1913), True, 'import pychrono.core as chrono\n'), ((2036, 2082), 'pychrono.core.ChVectorD', 'chrono.ChVectorD', (['(crank_rad + rod_length)', '(0)', '(0)'], {}), '(crank_rad + rod_length, 0, 0)\n', (2052, 2082), True, 'import pychrono.core as chrono\n'), ((2825, 2858), 'pychrono.core.ChVectorD', 'chrono.ChVectorD', (['crank_rad', '(0)', '(0)'], {}), '(crank_rad, 0, 0)\n', (2841, 2858), True, 'import pychrono.core as chrono\n'), ((3050, 3096), 'pychrono.core.ChVectorD', 'chrono.ChVectorD', (['(crank_rad + rod_length)', '(0)', '(0)'], {}), '(crank_rad + rod_length, 0, 0)\n', (3066, 3096), True, 'import pychrono.core as chrono\n'), ((3323, 3369), 'pychrono.core.ChVectorD', 'chrono.ChVectorD', (['(crank_rad + rod_length)', '(0)', '(0)'], {}), '(crank_rad + rod_length, 0, 0)\n', (3339, 3369), True, 'import pychrono.core as chrono\n')]
|
import dataclasses
import logging
from typing import ClassVar
import numpy as np
import torch
from .annrescaler import AnnRescaler
from .. import headmeta
from ..visualizer import Cif as CifVisualizer
from ..utils import create_sink, mask_valid_area
LOG = logging.getLogger(__name__)
@dataclasses.dataclass
class Cif:
meta: headmeta.Cif
rescaler: AnnRescaler = None
v_threshold: int = 0
bmin: float = 0.1 #: in pixels
visualizer: CifVisualizer = None
side_length: ClassVar[int] = 4
padding: ClassVar[int] = 10
def __call__(self, image, anns, meta):
return CifGenerator(self)(image, anns, meta)
class CifGenerator():
def __init__(self, config: Cif):
self.config = config
self.rescaler = config.rescaler or AnnRescaler(
config.meta.stride, config.meta.pose)
self.visualizer = config.visualizer or CifVisualizer(config.meta)
self.intensities = None
self.fields_reg = None
self.fields_bmin = None
self.fields_scale = None
self.fields_reg_l = None
self.sink = create_sink(config.side_length)
self.s_offset = (config.side_length - 1.0) / 2.0
def __call__(self, image, anns, meta):
width_height_original = image.shape[2:0:-1]
keypoint_sets = self.rescaler.keypoint_sets(anns)
bg_mask = self.rescaler.bg_mask(anns, width_height_original,
crowd_margin=(self.config.side_length - 1) / 2)
valid_area = self.rescaler.valid_area(meta)
LOG.debug('valid area: %s, pif side length = %d', valid_area, self.config.side_length)
n_fields = len(self.config.meta.keypoints)
self.init_fields(n_fields, bg_mask)
self.fill(keypoint_sets)
fields = self.fields(valid_area)
self.visualizer.processed_image(image)
self.visualizer.targets(fields, annotation_dicts=anns)
return fields
def init_fields(self, n_fields, bg_mask):
field_w = bg_mask.shape[1] + 2 * self.config.padding
field_h = bg_mask.shape[0] + 2 * self.config.padding
self.intensities = np.zeros((n_fields, field_h, field_w), dtype=np.float32)
self.fields_reg = np.full((n_fields, 2, field_h, field_w), np.nan, dtype=np.float32)
self.fields_bmin = np.full((n_fields, field_h, field_w), np.nan, dtype=np.float32)
self.fields_scale = np.full((n_fields, field_h, field_w), np.nan, dtype=np.float32)
self.fields_reg_l = np.full((n_fields, field_h, field_w), np.inf, dtype=np.float32)
# bg_mask
p = self.config.padding
self.fields_reg_l[:, p:-p, p:-p][:, bg_mask == 0] = 1.0
self.intensities[:, p:-p, p:-p][:, bg_mask == 0] = np.nan
def fill(self, keypoint_sets):
for keypoints in keypoint_sets:
self.fill_keypoints(keypoints)
def fill_keypoints(self, keypoints):
scale = self.rescaler.scale(keypoints)
for f, xyv in enumerate(keypoints):
if xyv[2] <= self.config.v_threshold:
continue
joint_scale = (
scale
if self.config.meta.sigmas is None
else scale * self.config.meta.sigmas[f]
)
self.fill_coordinate(f, xyv, joint_scale)
def fill_coordinate(self, f, xyv, scale):
        ij = np.round(xyv[:2] - self.s_offset).astype(int) + self.config.padding
minx, miny = int(ij[0]), int(ij[1])
maxx, maxy = minx + self.config.side_length, miny + self.config.side_length
if minx < 0 or maxx > self.intensities.shape[2] or \
miny < 0 or maxy > self.intensities.shape[1]:
return
offset = xyv[:2] - (ij + self.s_offset - self.config.padding)
offset = offset.reshape(2, 1, 1)
# mask
sink_reg = self.sink + offset
sink_l = np.linalg.norm(sink_reg, axis=0)
mask = sink_l < self.fields_reg_l[f, miny:maxy, minx:maxx]
mask_peak = np.logical_and(mask, sink_l < 0.7)
self.fields_reg_l[f, miny:maxy, minx:maxx][mask] = sink_l[mask]
# update intensity
self.intensities[f, miny:maxy, minx:maxx][mask] = 1.0
self.intensities[f, miny:maxy, minx:maxx][mask_peak] = 1.0
# update regression
patch = self.fields_reg[f, :, miny:maxy, minx:maxx]
patch[:, mask] = sink_reg[:, mask]
# update bmin
bmin = self.config.bmin / self.config.meta.stride
self.fields_bmin[f, miny:maxy, minx:maxx][mask] = bmin
# update scale
assert np.isnan(scale) or 0.0 < scale < 100.0
self.fields_scale[f, miny:maxy, minx:maxx][mask] = scale
def fields(self, valid_area):
p = self.config.padding
intensities = self.intensities[:, p:-p, p:-p]
fields_reg = self.fields_reg[:, :, p:-p, p:-p]
fields_bmin = self.fields_bmin[:, p:-p, p:-p]
fields_scale = self.fields_scale[:, p:-p, p:-p]
mask_valid_area(intensities, valid_area)
mask_valid_area(fields_reg[:, 0], valid_area, fill_value=np.nan)
mask_valid_area(fields_reg[:, 1], valid_area, fill_value=np.nan)
mask_valid_area(fields_bmin, valid_area, fill_value=np.nan)
mask_valid_area(fields_scale, valid_area, fill_value=np.nan)
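        # The stacked target below has, per keypoint: 1 confidence channel,
        # 2 regression channels, 1 bmin channel and 1 scale channel (5 channels
        # total), i.e. shape (n_fields, 5, H, W) after the padding has been cropped.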
return torch.from_numpy(np.concatenate([
np.expand_dims(intensities, 1),
fields_reg,
np.expand_dims(fields_bmin, 1),
np.expand_dims(fields_scale, 1),
], axis=1))
|
[
"numpy.full",
"numpy.logical_and",
"numpy.zeros",
"numpy.expand_dims",
"numpy.isnan",
"numpy.linalg.norm",
"numpy.round",
"logging.getLogger"
] |
[((259, 286), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (276, 286), False, 'import logging\n'), ((2145, 2201), 'numpy.zeros', 'np.zeros', (['(n_fields, field_h, field_w)'], {'dtype': 'np.float32'}), '((n_fields, field_h, field_w), dtype=np.float32)\n', (2153, 2201), True, 'import numpy as np\n'), ((2228, 2294), 'numpy.full', 'np.full', (['(n_fields, 2, field_h, field_w)', 'np.nan'], {'dtype': 'np.float32'}), '((n_fields, 2, field_h, field_w), np.nan, dtype=np.float32)\n', (2235, 2294), True, 'import numpy as np\n'), ((2322, 2385), 'numpy.full', 'np.full', (['(n_fields, field_h, field_w)', 'np.nan'], {'dtype': 'np.float32'}), '((n_fields, field_h, field_w), np.nan, dtype=np.float32)\n', (2329, 2385), True, 'import numpy as np\n'), ((2414, 2477), 'numpy.full', 'np.full', (['(n_fields, field_h, field_w)', 'np.nan'], {'dtype': 'np.float32'}), '((n_fields, field_h, field_w), np.nan, dtype=np.float32)\n', (2421, 2477), True, 'import numpy as np\n'), ((2506, 2569), 'numpy.full', 'np.full', (['(n_fields, field_h, field_w)', 'np.inf'], {'dtype': 'np.float32'}), '((n_fields, field_h, field_w), np.inf, dtype=np.float32)\n', (2513, 2569), True, 'import numpy as np\n'), ((3884, 3916), 'numpy.linalg.norm', 'np.linalg.norm', (['sink_reg'], {'axis': '(0)'}), '(sink_reg, axis=0)\n', (3898, 3916), True, 'import numpy as np\n'), ((4004, 4038), 'numpy.logical_and', 'np.logical_and', (['mask', '(sink_l < 0.7)'], {}), '(mask, sink_l < 0.7)\n', (4018, 4038), True, 'import numpy as np\n'), ((4583, 4598), 'numpy.isnan', 'np.isnan', (['scale'], {}), '(scale)\n', (4591, 4598), True, 'import numpy as np\n'), ((3365, 3398), 'numpy.round', 'np.round', (['(xyv[:2] - self.s_offset)'], {}), '(xyv[:2] - self.s_offset)\n', (3373, 3398), True, 'import numpy as np\n'), ((5368, 5398), 'numpy.expand_dims', 'np.expand_dims', (['intensities', '(1)'], {}), '(intensities, 1)\n', (5382, 5398), True, 'import numpy as np\n'), ((5436, 5466), 'numpy.expand_dims', 'np.expand_dims', (['fields_bmin', '(1)'], {}), '(fields_bmin, 1)\n', (5450, 5466), True, 'import numpy as np\n'), ((5480, 5511), 'numpy.expand_dims', 'np.expand_dims', (['fields_scale', '(1)'], {}), '(fields_scale, 1)\n', (5494, 5511), True, 'import numpy as np\n')]
|
import keras
import matplotlib.pyplot as plt
from keras.models import Sequential, load_model
from keras.layers.core import Dense, Dropout, Activation
import numpy as np
from skimage.transform import resize
def draw(image):
fig = plt.figure(figsize=(4, 4))
ax = fig.add_subplot(111)
ax.set_aspect('equal')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.imshow(image.reshape(28, 28) * 255)
cax = fig.add_axes([0.12, 0.1, 0.78, 0.8])
cax.get_xaxis().set_visible(False)
cax.get_yaxis().set_visible(False)
cax.set_frame_on(False)
plt.savefig("fig.png")
def create_model():
digitClassifier = Sequential()
digitClassifier.add(Dense(512, input_dim=28 * 28, activation='relu'))
digitClassifier.add(Dropout(0.2))
digitClassifier.add(Dense(512, activation='relu'))
digitClassifier.add(Dropout(0.2))
digitClassifier.add(Dense(10, activation='softmax'))
digitClassifier.compile(loss='categorical_crossentropy', metrics=[
'accuracy'], optimizer='adam')
digitClassifier.load_weights('digit_classifier_weights')
return digitClassifier
def classify(points, width, height):
    image = np.zeros((height, width))  # rows index y, columns index x
for point in points:
image[point[1]][point[0]] = 1
image = resize(image, (28, 28))
image = image.reshape(1, 28 * 28)
return digit_classifier.predict(image).reshape(10).tolist()
digit_classifier = create_model()
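# Example call (hypothetical stroke; points are (x, y) pixel coordinates on a
# width x height canvas):
#   probs = classify([(40, 55), (41, 56), (42, 57)], 200, 200)
#   # probs is a list of 10 class probabilities, one per digit 0-9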
|
[
"keras.layers.core.Dense",
"numpy.zeros",
"matplotlib.pyplot.figure",
"skimage.transform.resize",
"keras.layers.core.Dropout",
"keras.models.Sequential",
"matplotlib.pyplot.savefig"
] |
[((234, 260), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (244, 260), True, 'import matplotlib.pyplot as plt\n'), ((597, 619), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fig.png"""'], {}), "('fig.png')\n", (608, 619), True, 'import matplotlib.pyplot as plt\n'), ((663, 675), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (673, 675), False, 'from keras.models import Sequential, load_model\n'), ((1207, 1232), 'numpy.zeros', 'np.zeros', (['(width, height)'], {}), '((width, height))\n', (1215, 1232), True, 'import numpy as np\n'), ((1309, 1332), 'skimage.transform.resize', 'resize', (['image', '(28, 28)'], {}), '(image, (28, 28))\n', (1315, 1332), False, 'from skimage.transform import resize\n'), ((700, 748), 'keras.layers.core.Dense', 'Dense', (['(512)'], {'input_dim': '(28 * 28)', 'activation': '"""relu"""'}), "(512, input_dim=28 * 28, activation='relu')\n", (705, 748), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((774, 786), 'keras.layers.core.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (781, 786), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((812, 841), 'keras.layers.core.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (817, 841), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((867, 879), 'keras.layers.core.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (874, 879), False, 'from keras.layers.core import Dense, Dropout, Activation\n'), ((905, 936), 'keras.layers.core.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (910, 936), False, 'from keras.layers.core import Dense, Dropout, Activation\n')]
|
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.ticker import MaxNLocator
from typing import Union, List
import numpy as np
from ..storage import History
from .util import to_lists_or_default
def plot_epsilons(
histories: Union[List, History],
labels: Union[List, str] = None,
colors: List = None,
scale: str = None,
title: str = "Epsilon values",
size: tuple = None,
ax: mpl.axes.Axes = None):
"""
Plot epsilon trajectory.
Parameters
----------
histories: Union[List, History]
The histories to plot from. History ids must be set correctly.
    labels: Union[List, str], optional
Labels corresponding to the histories. If None are provided,
indices are used as labels.
colors: List, optional
Colors to use for the lines. If None, then the matplotlib
default values are used.
scale: str, optional (default='lin')
Scaling to apply to the y axis.
Must be one of 'lin', 'log', 'log10'.
title: str, optional (default = "Epsilon values")
Title for the plot.
size: tuple of float, optional
The size of the plot in inches.
ax: matplotlib.axes.Axes, optional
The axis object to use. A new one is created if None.
Returns
-------
ax: Axis of the generated plot.
"""
# preprocess input
histories, labels = to_lists_or_default(histories, labels)
if colors is None:
colors = [None for _ in range(len(histories))]
if scale is None:
scale = 'lin'
# create figure
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
# extract epsilons
eps = []
for history in histories:
# note: first entry is from calibration and thus translates to inf,
# thus must be discarded
eps.append(np.array(history.get_all_populations()['epsilon'][1:]))
# scale
eps = _apply_scale(eps, scale)
# plot
for ep, label, color in zip(eps, labels, colors):
ax.plot(ep, 'x-', label=label, color=color)
# format
ax.set_xlabel("Population index")
ax.set_ylabel(_get_ylabel(scale))
ax.legend()
ax.set_title(title)
# enforce integer ticks
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
# set size
if size is not None:
fig.set_size_inches(size)
fig.tight_layout()
return ax
def _apply_scale(eps, scale):
"""
Apply the `scale` transformation to `eps`.
"""
if scale == 'log':
eps = [np.log(ep) for ep in eps]
elif scale == 'log10':
eps = [np.log10(ep) for ep in eps]
elif scale != 'lin':
raise ValueError(f"Scale {scale} must be one of lin, log, log10.")
return eps
def _get_ylabel(scale):
"""
    Get the correct y-axis label.
"""
if scale == 'log':
ylabel = "Log(Epsilon)"
elif scale == 'log10':
ylabel = "Log10(Epsilon)"
else:
ylabel = "Epsilon"
return ylabel
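# Minimal usage sketch (hypothetical objects; `history` is assumed to be a History
# instance from this package's storage module):
#   ax = plot_epsilons(history, labels="run 1", scale="log10")
#   ax.get_figure().savefig("epsilons.png")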
|
[
"numpy.log10",
"matplotlib.ticker.MaxNLocator",
"matplotlib.pyplot.subplots",
"numpy.log"
] |
[((1646, 1660), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1658, 1660), True, 'import matplotlib.pyplot as plt\n'), ((2307, 2332), 'matplotlib.ticker.MaxNLocator', 'MaxNLocator', ([], {'integer': '(True)'}), '(integer=True)\n', (2318, 2332), False, 'from matplotlib.ticker import MaxNLocator\n'), ((2579, 2589), 'numpy.log', 'np.log', (['ep'], {}), '(ep)\n', (2585, 2589), True, 'import numpy as np\n'), ((2647, 2659), 'numpy.log10', 'np.log10', (['ep'], {}), '(ep)\n', (2655, 2659), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import socket
import numpy as np
import cv2
import os
import time
import struct
class Camera(object):
def __init__(self):
# Data options (change me)
self.im_height = 720 # 848x480, 1280x720
self.im_width = 1280
# self.resize_height = 720
# self.resize_width = 1280
self.tcp_host_ip = '127.0.0.1'
self.tcp_port = 50010
self.buffer_size = 10*4 + self.im_height*self.im_width*5 # in bytes
# Connect to server
self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.tcp_socket.connect((self.tcp_host_ip, self.tcp_port))
self.intrinsics = None
self.get_data()
def get_data(self):
# Ping the server with anything
self.tcp_socket.send(b'asdf')
        # Fetch TCP data:
        #     color camera intrinsics, 9 floats, number of bytes: 9 x 4
        #     depth camera intrinsics, 9 floats, number of bytes: 9 x 4
        #     depth-to-color extrinsics, 16 floats, number of bytes: 16 x 4
        #     depth scale for converting depth from uint16 to float, 1 float, number of bytes: 4
        #     timestamp, 1 long, number of bytes: 8
        #     depth image, self.im_width x self.im_height uint16, number of bytes: self.im_width x self.im_height x 2
        #     color image, self.im_width x self.im_height x 3 uint8, number of bytes: self.im_width x self.im_height x 3
data = b''
while len(data) < ((9*4 + 9*4 + 16*4 + 4 + 8)+(self.im_height*self.im_width*5)):
data += self.tcp_socket.recv(self.buffer_size)
# while len(data) < (10*4 + self.im_height*self.im_width*5):
# data += self.tcp_socket.recv(self.buffer_size)
# Reorganize TCP data into color and depth frame
self.color_intr = np.fromstring(data[0:(9*4)],np.float32).reshape(3,3)
self.depth_intr = np.fromstring(data[(9*4):(9*4+9*4)],np.float32).reshape(3,3)
self.depth2color_extr = np.fromstring(data[(9*4+9*4):(9*4+9*4+16*4)],np.float32).reshape(4,4)
depth_scale = np.fromstring(data[(9*4+9*4+16*4):(9*4+9*4+16*4+4)],np.float32)[0]
self.timestamp = np.fromstring(data[(9*4+9*4+16*4+4):(9*4+9*4+16*4+4+8)],np.long)[0]
depth_im = np.fromstring(data[(9*4+9*4+16*4+4+8):((9*4+9*4+16*4+4+8)+self.im_width*self.im_height*2)],np.uint16).reshape(self.im_height,self.im_width)
self.color_im = np.fromstring(data[((9*4+9*4+16*4+4+8)+self.im_width*self.im_height*2):],np.uint8).reshape(self.im_height,self.im_width,3)
        # TODO: Get the depth scale from the data and use that here
depth_im = depth_im.astype(float) * depth_scale # default: 0.001
# Set invalid depth pixels to zero
self.depth_im = depth_im
self.depth_im[np.isnan(self.depth_im)] = 0.0
self.depth_im[np.isinf(self.depth_im)] = 0.0
# self.color_im = cv2.resize(self.color_im, (self.resize_width, self.resize_height), interpolation=cv2.INTER_CUBIC)
# self.depth_im = cv2.resize(self.depth_im, (self.resize_width, self.resize_height), interpolation=cv2.INTER_NEAREST)
return self.color_im, self.depth_im
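# Example usage (assumes the matching streaming server from this project is already
# listening on 127.0.0.1:50010):
#   camera = Camera()
#   color_im, depth_im = camera.get_data()  # (720, 1280, 3) uint8 and (720, 1280) float arrays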
|
[
"socket.socket",
"numpy.isinf",
"numpy.fromstring",
"numpy.isnan"
] |
[((537, 586), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (550, 586), False, 'import socket\n'), ((1900, 1987), 'numpy.fromstring', 'np.fromstring', (['data[9 * 4 + 9 * 4 + 16 * 4:9 * 4 + 9 * 4 + 16 * 4 + 4]', 'np.float32'], {}), '(data[9 * 4 + 9 * 4 + 16 * 4:9 * 4 + 9 * 4 + 16 * 4 + 4], np.\n float32)\n', (1913, 1987), True, 'import numpy as np\n'), ((1992, 2084), 'numpy.fromstring', 'np.fromstring', (['data[9 * 4 + 9 * 4 + 16 * 4 + 4:9 * 4 + 9 * 4 + 16 * 4 + 4 + 8]', 'np.long'], {}), '(data[9 * 4 + 9 * 4 + 16 * 4 + 4:9 * 4 + 9 * 4 + 16 * 4 + 4 + \n 8], np.long)\n', (2005, 2084), True, 'import numpy as np\n'), ((2600, 2623), 'numpy.isnan', 'np.isnan', (['self.depth_im'], {}), '(self.depth_im)\n', (2608, 2623), True, 'import numpy as np\n'), ((2653, 2676), 'numpy.isinf', 'np.isinf', (['self.depth_im'], {}), '(self.depth_im)\n', (2661, 2676), True, 'import numpy as np\n'), ((1636, 1676), 'numpy.fromstring', 'np.fromstring', (['data[0:9 * 4]', 'np.float32'], {}), '(data[0:9 * 4], np.float32)\n', (1649, 1676), True, 'import numpy as np\n'), ((1715, 1767), 'numpy.fromstring', 'np.fromstring', (['data[9 * 4:9 * 4 + 9 * 4]', 'np.float32'], {}), '(data[9 * 4:9 * 4 + 9 * 4], np.float32)\n', (1728, 1767), True, 'import numpy as np\n'), ((1808, 1877), 'numpy.fromstring', 'np.fromstring', (['data[9 * 4 + 9 * 4:9 * 4 + 9 * 4 + 16 * 4]', 'np.float32'], {}), '(data[9 * 4 + 9 * 4:9 * 4 + 9 * 4 + 16 * 4], np.float32)\n', (1821, 1877), True, 'import numpy as np\n'), ((2079, 2214), 'numpy.fromstring', 'np.fromstring', (['data[9 * 4 + 9 * 4 + 16 * 4 + 4 + 8:9 * 4 + 9 * 4 + 16 * 4 + 4 + 8 + self.\n im_width * self.im_height * 2]', 'np.uint16'], {}), '(data[9 * 4 + 9 * 4 + 16 * 4 + 4 + 8:9 * 4 + 9 * 4 + 16 * 4 + \n 4 + 8 + self.im_width * self.im_height * 2], np.uint16)\n', (2092, 2214), True, 'import numpy as np\n'), ((2243, 2347), 'numpy.fromstring', 'np.fromstring', (['data[9 * 4 + 9 * 4 + 16 * 4 + 4 + 8 + self.im_width * self.im_height * 2:]', 'np.uint8'], {}), '(data[9 * 4 + 9 * 4 + 16 * 4 + 4 + 8 + self.im_width * self.\n im_height * 2:], np.uint8)\n', (2256, 2347), True, 'import numpy as np\n')]
|
"""
======================
DMP as Potential Field
======================
A Dynamical Movement Primitive defines a potential field that superimposes
several components: transformation system (goal-directed movement), forcing
term (learned shape), and coupling terms (e.g., obstacle avoidance).
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from movement_primitives.dmp import DMP, CouplingTermObstacleAvoidance2D
from movement_primitives.dmp_potential_field import plot_potential_field_2d
start_y = np.array([0, 0], dtype=float)
goal_y = np.array([1, 1], dtype=float)
obstacle = np.array([0.85, 0.5])
random_state = np.random.RandomState(1)
dmp = DMP(n_dims=2, n_weights_per_dim=10, dt=0.01, execution_time=1.0)
dmp.configure(start_y=start_y, goal_y=goal_y)
dmp_ft = DMP(n_dims=2, n_weights_per_dim=10, dt=0.01, execution_time=1.0)
dmp_ft.forcing_term.weights[:, :] = random_state.randn(
*dmp_ft.forcing_term.weights.shape) * 500.0
dmp_ft.configure(start_y=start_y, goal_y=goal_y)
dmp_ct = DMP(n_dims=2, n_weights_per_dim=10, dt=0.01, execution_time=1.0)
dmp_ct.forcing_term.weights[:, :] = dmp_ft.forcing_term.weights[:, :]
dmp_ct.configure(start_y=start_y, goal_y=goal_y)
coupling_term = CouplingTermObstacleAvoidance2D(obstacle)
n_rows, n_cols = 2, 4
n_subplots = n_rows * n_cols
x_range = -0.2, 1.2
y_range = -0.2, 1.2
position = np.copy(start_y)
velocity = np.zeros_like(start_y)
position_ft = np.copy(start_y)
velocity_ft = np.zeros_like(start_y)
position_ct = np.copy(start_y)
velocity_ct = np.zeros_like(start_y)
plt.figure(figsize=(12, 6))
positions = [position]
positions_ft = [position_ft]
positions_ct = [position_ct]
for i in range(n_subplots):
ax = plt.subplot(n_rows, n_cols, i + 1, aspect="equal")
ax.set_title(f"t = {dmp.t:.02f}", backgroundcolor="#ffffffff", y=0.05)
plot_potential_field_2d(
ax, dmp_ct, x_range=x_range, y_range=y_range, n_ticks=15,
obstacle=obstacle)
plt.plot(start_y[0], start_y[1], "o", color="b", markersize=10)
plt.plot(goal_y[0], goal_y[1], "o", color="g", markersize=10)
plt.plot(obstacle[0], obstacle[1], "o", color="y", markersize=10)
path = np.array(positions)
plt.plot(path[:, 0], path[:, 1], lw=5, color="g", label="Transformation System")
path_ft = np.array(positions_ft)
plt.plot(path_ft[:, 0], path_ft[:, 1], lw=5, color="r", label="+ Forcing Term")
path_ct = np.array(positions_ct)
plt.plot(path_ct[:, 0], path_ct[:, 1], lw=5, color="y", label="+ Obstacle Avoidance")
ax.set_xlim(x_range)
ax.set_ylim(y_range)
plt.setp(ax, xticks=(), yticks=())
if i == 0:
ax.legend(loc="upper left")
if i == n_subplots - 1:
break
while dmp.t <= dmp.execution_time * (1 + i) / (n_subplots - 1):
position, velocity = dmp.step(position, velocity)
positions.append(position)
position_ft, velocity_ft = dmp_ft.step(position_ft, velocity_ft)
positions_ft.append(position_ft)
position_ct, velocity_ct = dmp_ct.step(
position_ct, velocity_ct, coupling_term=coupling_term)
positions_ct.append(position_ct)
plt.subplots_adjust(
left=0.0, bottom=0.0, right=1.0, top=1.0, wspace=0.01, hspace=0.01)
plt.show()
|
[
"matplotlib.pyplot.subplot",
"numpy.zeros_like",
"matplotlib.pyplot.show",
"movement_primitives.dmp.CouplingTermObstacleAvoidance2D",
"numpy.copy",
"matplotlib.pyplot.plot",
"movement_primitives.dmp_potential_field.plot_potential_field_2d",
"matplotlib.pyplot.setp",
"numpy.random.RandomState",
"movement_primitives.dmp.DMP",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.subplots_adjust"
] |
[((527, 556), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'float'}), '([0, 0], dtype=float)\n', (535, 556), True, 'import numpy as np\n'), ((566, 595), 'numpy.array', 'np.array', (['[1, 1]'], {'dtype': 'float'}), '([1, 1], dtype=float)\n', (574, 595), True, 'import numpy as np\n'), ((607, 628), 'numpy.array', 'np.array', (['[0.85, 0.5]'], {}), '([0.85, 0.5])\n', (615, 628), True, 'import numpy as np\n'), ((644, 668), 'numpy.random.RandomState', 'np.random.RandomState', (['(1)'], {}), '(1)\n', (665, 668), True, 'import numpy as np\n'), ((676, 740), 'movement_primitives.dmp.DMP', 'DMP', ([], {'n_dims': '(2)', 'n_weights_per_dim': '(10)', 'dt': '(0.01)', 'execution_time': '(1.0)'}), '(n_dims=2, n_weights_per_dim=10, dt=0.01, execution_time=1.0)\n', (679, 740), False, 'from movement_primitives.dmp import DMP, CouplingTermObstacleAvoidance2D\n'), ((797, 861), 'movement_primitives.dmp.DMP', 'DMP', ([], {'n_dims': '(2)', 'n_weights_per_dim': '(10)', 'dt': '(0.01)', 'execution_time': '(1.0)'}), '(n_dims=2, n_weights_per_dim=10, dt=0.01, execution_time=1.0)\n', (800, 861), False, 'from movement_primitives.dmp import DMP, CouplingTermObstacleAvoidance2D\n'), ((1025, 1089), 'movement_primitives.dmp.DMP', 'DMP', ([], {'n_dims': '(2)', 'n_weights_per_dim': '(10)', 'dt': '(0.01)', 'execution_time': '(1.0)'}), '(n_dims=2, n_weights_per_dim=10, dt=0.01, execution_time=1.0)\n', (1028, 1089), False, 'from movement_primitives.dmp import DMP, CouplingTermObstacleAvoidance2D\n'), ((1225, 1266), 'movement_primitives.dmp.CouplingTermObstacleAvoidance2D', 'CouplingTermObstacleAvoidance2D', (['obstacle'], {}), '(obstacle)\n', (1256, 1266), False, 'from movement_primitives.dmp import DMP, CouplingTermObstacleAvoidance2D\n'), ((1371, 1387), 'numpy.copy', 'np.copy', (['start_y'], {}), '(start_y)\n', (1378, 1387), True, 'import numpy as np\n'), ((1399, 1421), 'numpy.zeros_like', 'np.zeros_like', (['start_y'], {}), '(start_y)\n', (1412, 1421), True, 'import numpy as np\n'), ((1437, 1453), 'numpy.copy', 'np.copy', (['start_y'], {}), '(start_y)\n', (1444, 1453), True, 'import numpy as np\n'), ((1468, 1490), 'numpy.zeros_like', 'np.zeros_like', (['start_y'], {}), '(start_y)\n', (1481, 1490), True, 'import numpy as np\n'), ((1506, 1522), 'numpy.copy', 'np.copy', (['start_y'], {}), '(start_y)\n', (1513, 1522), True, 'import numpy as np\n'), ((1537, 1559), 'numpy.zeros_like', 'np.zeros_like', (['start_y'], {}), '(start_y)\n', (1550, 1559), True, 'import numpy as np\n'), ((1561, 1588), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (1571, 1588), True, 'import matplotlib.pyplot as plt\n'), ((3141, 3232), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.0)', 'bottom': '(0.0)', 'right': '(1.0)', 'top': '(1.0)', 'wspace': '(0.01)', 'hspace': '(0.01)'}), '(left=0.0, bottom=0.0, right=1.0, top=1.0, wspace=0.01,\n hspace=0.01)\n', (3160, 3232), True, 'import matplotlib.pyplot as plt\n'), ((3234, 3244), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3242, 3244), True, 'import matplotlib.pyplot as plt\n'), ((1707, 1757), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_rows', 'n_cols', '(i + 1)'], {'aspect': '"""equal"""'}), "(n_rows, n_cols, i + 1, aspect='equal')\n", (1718, 1757), True, 'import matplotlib.pyplot as plt\n'), ((1838, 1942), 'movement_primitives.dmp_potential_field.plot_potential_field_2d', 'plot_potential_field_2d', (['ax', 'dmp_ct'], {'x_range': 'x_range', 'y_range': 'y_range', 'n_ticks': '(15)', 'obstacle': 'obstacle'}), 
'(ax, dmp_ct, x_range=x_range, y_range=y_range,\n n_ticks=15, obstacle=obstacle)\n', (1861, 1942), False, 'from movement_primitives.dmp_potential_field import plot_potential_field_2d\n'), ((1960, 2023), 'matplotlib.pyplot.plot', 'plt.plot', (['start_y[0]', 'start_y[1]', '"""o"""'], {'color': '"""b"""', 'markersize': '(10)'}), "(start_y[0], start_y[1], 'o', color='b', markersize=10)\n", (1968, 2023), True, 'import matplotlib.pyplot as plt\n'), ((2028, 2089), 'matplotlib.pyplot.plot', 'plt.plot', (['goal_y[0]', 'goal_y[1]', '"""o"""'], {'color': '"""g"""', 'markersize': '(10)'}), "(goal_y[0], goal_y[1], 'o', color='g', markersize=10)\n", (2036, 2089), True, 'import matplotlib.pyplot as plt\n'), ((2094, 2159), 'matplotlib.pyplot.plot', 'plt.plot', (['obstacle[0]', 'obstacle[1]', '"""o"""'], {'color': '"""y"""', 'markersize': '(10)'}), "(obstacle[0], obstacle[1], 'o', color='y', markersize=10)\n", (2102, 2159), True, 'import matplotlib.pyplot as plt\n'), ((2172, 2191), 'numpy.array', 'np.array', (['positions'], {}), '(positions)\n', (2180, 2191), True, 'import numpy as np\n'), ((2196, 2281), 'matplotlib.pyplot.plot', 'plt.plot', (['path[:, 0]', 'path[:, 1]'], {'lw': '(5)', 'color': '"""g"""', 'label': '"""Transformation System"""'}), "(path[:, 0], path[:, 1], lw=5, color='g', label='Transformation System'\n )\n", (2204, 2281), True, 'import matplotlib.pyplot as plt\n'), ((2291, 2313), 'numpy.array', 'np.array', (['positions_ft'], {}), '(positions_ft)\n', (2299, 2313), True, 'import numpy as np\n'), ((2318, 2397), 'matplotlib.pyplot.plot', 'plt.plot', (['path_ft[:, 0]', 'path_ft[:, 1]'], {'lw': '(5)', 'color': '"""r"""', 'label': '"""+ Forcing Term"""'}), "(path_ft[:, 0], path_ft[:, 1], lw=5, color='r', label='+ Forcing Term')\n", (2326, 2397), True, 'import matplotlib.pyplot as plt\n'), ((2412, 2434), 'numpy.array', 'np.array', (['positions_ct'], {}), '(positions_ct)\n', (2420, 2434), True, 'import numpy as np\n'), ((2439, 2529), 'matplotlib.pyplot.plot', 'plt.plot', (['path_ct[:, 0]', 'path_ct[:, 1]'], {'lw': '(5)', 'color': '"""y"""', 'label': '"""+ Obstacle Avoidance"""'}), "(path_ct[:, 0], path_ct[:, 1], lw=5, color='y', label=\n '+ Obstacle Avoidance')\n", (2447, 2529), True, 'import matplotlib.pyplot as plt\n'), ((2580, 2614), 'matplotlib.pyplot.setp', 'plt.setp', (['ax'], {'xticks': '()', 'yticks': '()'}), '(ax, xticks=(), yticks=())\n', (2588, 2614), True, 'import matplotlib.pyplot as plt\n')]
|
# Copyright (c) 2021, <NAME>.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import cudf
import cugraph
from cugraph.generators import rmat
def generate_edgelist(scale,
edgefactor,
seed=None,
unweighted=False,
):
"""
Returns a cudf DataFrame created using the R-MAT graph generator.
    The resulting graph is weighted with random values drawn from a uniform
    distribution on the interval [0, 1).
scale is used to determine the number of vertices to be generated (num_verts
= 2^scale), which is also used to determine the data type for the vertex ID
values in the DataFrame.
    edgefactor determines the number of edges (num_edges = num_verts*edgefactor)
seed, if specified, will be used as the seed to the RNG.
unweighted determines if the resulting edgelist will have randomly-generated
    weights ranging in value between [0, 1). If True, an edgelist with only 2
columns is returned.
"""
df = rmat(
scale,
(2**scale)*edgefactor,
0.1,
0.2,
0.3,
seed or 42,
clip_and_flip=False,
scramble_vertex_ids=True,
create_using=None, # return edgelist instead of Graph instance
mg=False
)
if not unweighted:
rng = np.random.default_rng(seed)
df["weight"] = rng.random(size=len(df))
return df
def read_csv(input_csv_file, scale):
"""
Returns a cudf DataFrame from reading input_csv_file.
All input CSV files should be weighted with random values of a uniform
distribution from the interval [0, 1) in order to best simulate the output
of a Graph500-compliant graph generator.
scale is used to determine the data type for the vertex ID values in the
    DataFrame (num_verts = 2^scale).
"""
vertex_t = "int32" if scale <= 32 else "int64"
dtypes = [vertex_t, vertex_t, "float32"]
    names = ["src", "dst", "weight"]
chunksize = cugraph.dask.get_chunksize(input_csv_file)
return cudf.read_csv(input_csv_file,
chunksize=chunksize,
delimiter=" ",
#names=names,
dtype=dtypes,
header=None,
)
################################################################################
# Benchmarked functions
#
# The "benchmark_name" attr is used by the benchmark infra for reporting and is
# set to assign more meaningful names to be displayed in reports.
def construct_graph(dataframe, symmetric=False):
"""
dataframe contains weighted and undirected edges with self loops. Multiple
edges will likely be present as well. The returned Graph object must be
symmetrized and have self loops removed.
"""
if symmetric:
G = cugraph.Graph()
else:
G = cugraph.DiGraph()
if len(dataframe.columns) > 2:
G.from_cudf_edgelist(
dataframe, source="src", destination="dst", edge_attr="weight")
#G.from_cudf_edgelist(
# dataframe, source="0", destination="1", edge_attr="2")
else:
G.from_cudf_edgelist(
dataframe, source="src", destination="dst")
#G.from_cudf_edgelist(
# dataframe, source="0", destination="1")
return G
construct_graph.benchmark_name = "from_cudf_edgelist"
def bfs(G, start):
return cugraph.bfs(G, start=start)
def sssp(G, start):
return cugraph.sssp(G, source=start)
def wcc(G):
return cugraph.weakly_connected_components(G)
def louvain(G):
return cugraph.louvain(G)
def pagerank(G):
return cugraph.pagerank(G)
def katz(G, alpha=None):
return cugraph.katz_centrality(G, alpha)
################################################################################
# Session-wide setup and teardown
def setup(*args, **kwargs):
return tuple()
def teardown(*args, **kwargs):
pass
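
# Hedged usage sketch (added for illustration; not part of the original benchmark
# module). Generates a small R-MAT edgelist, builds a symmetrized Graph and runs
# BFS from vertex 0. Assumes a GPU with cudf/cugraph installed and that the
# generated edgelist uses the "src"/"dst" column names expected by construct_graph.
if __name__ == "__main__":
    demo_df = generate_edgelist(scale=10, edgefactor=16, seed=42)
    demo_graph = construct_graph(demo_df, symmetric=True)
    print(bfs(demo_graph, start=0).head())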
|
[
"cugraph.louvain",
"cugraph.dask.get_chunksize",
"cugraph.Graph",
"cugraph.sssp",
"cugraph.pagerank",
"cugraph.weakly_connected_components",
"numpy.random.default_rng",
"cugraph.katz_centrality",
"cugraph.generators.rmat",
"cugraph.bfs",
"cugraph.DiGraph"
] |
[((1534, 1677), 'cugraph.generators.rmat', 'rmat', (['scale', '(2 ** scale * edgefactor)', '(0.1)', '(0.2)', '(0.3)', '(seed or 42)'], {'clip_and_flip': '(False)', 'scramble_vertex_ids': '(True)', 'create_using': 'None', 'mg': '(False)'}), '(scale, 2 ** scale * edgefactor, 0.1, 0.2, 0.3, seed or 42,\n clip_and_flip=False, scramble_vertex_ids=True, create_using=None, mg=False)\n', (1538, 1677), False, 'from cugraph.generators import rmat\n'), ((2535, 2577), 'cugraph.dask.get_chunksize', 'cugraph.dask.get_chunksize', (['input_csv_file'], {}), '(input_csv_file)\n', (2561, 2577), False, 'import cugraph\n'), ((3975, 4002), 'cugraph.bfs', 'cugraph.bfs', (['G'], {'start': 'start'}), '(G, start=start)\n', (3986, 4002), False, 'import cugraph\n'), ((4036, 4065), 'cugraph.sssp', 'cugraph.sssp', (['G'], {'source': 'start'}), '(G, source=start)\n', (4048, 4065), False, 'import cugraph\n'), ((4091, 4129), 'cugraph.weakly_connected_components', 'cugraph.weakly_connected_components', (['G'], {}), '(G)\n', (4126, 4129), False, 'import cugraph\n'), ((4159, 4177), 'cugraph.louvain', 'cugraph.louvain', (['G'], {}), '(G)\n', (4174, 4177), False, 'import cugraph\n'), ((4208, 4227), 'cugraph.pagerank', 'cugraph.pagerank', (['G'], {}), '(G)\n', (4224, 4227), False, 'import cugraph\n'), ((4266, 4299), 'cugraph.katz_centrality', 'cugraph.katz_centrality', (['G', 'alpha'], {}), '(G, alpha)\n', (4289, 4299), False, 'import cugraph\n'), ((1840, 1867), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (1861, 1867), True, 'import numpy as np\n'), ((3399, 3414), 'cugraph.Graph', 'cugraph.Graph', ([], {}), '()\n', (3412, 3414), False, 'import cugraph\n'), ((3437, 3454), 'cugraph.DiGraph', 'cugraph.DiGraph', ([], {}), '()\n', (3452, 3454), False, 'import cugraph\n')]
|
import os
import cv2
import argparse
import subprocess
import numpy as np
import time
import signal
import curses
def interrupted(signum, frame):
raise TimeoutError
signal.signal(signal.SIGALRM, interrupted)
#sense_usuage=500 # MB
#update_intervel=5#300 #300 # 5 min
def Arguments():
parser = argparse.ArgumentParser()
parser.add_argument("-C", "--sense_capability_usage", type=int, default=500, help="set a bounding of gpu active such as lower than 500MB is active")
parser.add_argument("-S", "--update_second", type=int, default=300, help="set update gpu status frequency (second)")
parser.add_argument("-F", "--server_file_path", type=str, default="config/observe_server_list.cfg", help="Read server list from file")
parser.add_argument("-DM", "--display_terminal", action="store_true")
args = parser.parse_args()
print(args)
return args
def main(args):
readtext = open_server_list(args.server_file_path)
    display_method = draw_gpu_info if not args.display_terminal else display_gpu_info
while True:
gpu_info_list = server_status_check(readtext, args.sense_capability_usage)
# gpu_info_list = [('hihi', []), ('haha', [])]
        if not display_method(gpu_info_list, args.update_second): break
def open_server_list(FilePath):
if not os.path.exists(FilePath):
raise IOError("{} path doesn't exists".format(FilePath))
with open(FilePath, 'r') as F:
readtext = F.readlines()
return readtext
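# Expected format of the server list file (inferred from the parsing below, added
# for clarity): one whitespace-separated line per server, lines starting with '#'
# are ignored:
#   <server_name> <username> <IP> <port> <password>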
def server_status_check(readtext, sense_capability_MB):
gpu_info_tuple_list = []
for _readtext in readtext:
text_blob = [text for text in _readtext.rstrip().split(" ") if text!=""]
if text_blob[0] == "#": continue
server_name, username, IP, port, passward = text_blob
command_blob = ['sshpass', "-p", passward, "ssh", "{}@{}".format(username, IP), "-p", port, 'nvidia-smi', '--query-gpu=timestamp,memory.used', '--format=csv']
# print(" ".join(command_blob))
ssh = subprocess.Popen(
command_blob, \
stdin=subprocess.PIPE,
stdout=subprocess.PIPE
)
back = ssh.stdout.readlines()
gpu_status = []
if len(back)>1:
back = back[1:]
count = 0
for gpu_info in back:
MB_size = int(str(gpu_info.rstrip()).split(",")[1].split(' ')[1])
if MB_size <= sense_capability_MB:
count += 1
gpu_status.append(True)
else:
gpu_status.append(False)
# print("{}, can used/Total gpu, {}/{}".format(server_name, count, len(back)))
else:
# print("{}'s gpu doesn't work".format(server_name))
pass
gpu_info_tuple_list.append((server_name, gpu_status))
return gpu_info_tuple_list
def draw_gpu_info(infos, update_second):
length = len(infos)
lp_w_loc = [10, 210, 260, 310, 360]
lp_h_loc = [50 + 50*i for i in range(length)]#[10, 60, 110, 160, 210]
drawer = np.zeros((50 + length*50, 500, 3), np.uint8)
for index_h, _info in enumerate(infos):
server_name, SV_status = _info
h_loc = lp_h_loc[index_h]
cv2.putText(drawer, server_name, (lp_w_loc[0], h_loc), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 3, cv2.LINE_AA)
for index, status in enumerate(SV_status):
loc = (lp_w_loc[index+1], h_loc)
color = (0,255,0) if status==True else (0,0,255)
cv2.circle(drawer, loc, 25, color, -1)
cv2.imshow("gpu_info", drawer)
#window_close = cv2.getWindowProperty('gpu_info', 0)
key = cv2.waitKey(update_second*1000)
if key ==ord('q') or cv2.getWindowProperty('gpu_info', cv2.WND_PROP_AUTOSIZE)<0:
#cv2.destroyAllWindows()
return False
#exit(0)
#cv2.destroyAllWindows()
print("updating")
return True
def display_gpu_info(infos, update_second):
date = time.strftime("%Y/%m/%d-%H:%M:%S")
message = []
message.append("Sample server status @ time : {}".format(date))
for index_h, _info in enumerate(infos):
server_name, SV_status = _info
_signal = ", ".join(["o" if _status else "x" for _status in SV_status])
message.append("Server: {:20s}, GPU status: {:20s}".format(server_name, _signal))
message.append("Close press by q, wait for {} seconds".format(update_second))
message = "\n".join(message)
key = input_char(message, update_second)
if key is None:
return True
return False if key == ord("q") else True
def input_char(message, timeout):
signal.alarm(timeout)
try:
win = curses.initscr()
win.addstr(0, 0, message)
while True:
ch = win.getch()
if ch in range(32, 127): break
time.sleep(0.05)
except:
ch = None
finally:
curses.endwin()
signal.alarm(0)
return ch
if __name__ == "__main__":
args = Arguments()
main(args)
|
[
"subprocess.Popen",
"cv2.circle",
"cv2.putText",
"argparse.ArgumentParser",
"cv2.waitKey",
"curses.initscr",
"numpy.zeros",
"time.strftime",
"os.path.exists",
"curses.endwin",
"time.sleep",
"signal.alarm",
"signal.signal",
"cv2.imshow",
"cv2.getWindowProperty"
] |
[((169, 211), 'signal.signal', 'signal.signal', (['signal.SIGALRM', 'interrupted'], {}), '(signal.SIGALRM, interrupted)\n', (182, 211), False, 'import signal\n'), ((303, 328), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (326, 328), False, 'import argparse\n'), ((3049, 3095), 'numpy.zeros', 'np.zeros', (['(50 + length * 50, 500, 3)', 'np.uint8'], {}), '((50 + length * 50, 500, 3), np.uint8)\n', (3057, 3095), True, 'import numpy as np\n'), ((3547, 3577), 'cv2.imshow', 'cv2.imshow', (['"""gpu_info"""', 'drawer'], {}), "('gpu_info', drawer)\n", (3557, 3577), False, 'import cv2\n'), ((3645, 3678), 'cv2.waitKey', 'cv2.waitKey', (['(update_second * 1000)'], {}), '(update_second * 1000)\n', (3656, 3678), False, 'import cv2\n'), ((3957, 3991), 'time.strftime', 'time.strftime', (['"""%Y/%m/%d-%H:%M:%S"""'], {}), "('%Y/%m/%d-%H:%M:%S')\n", (3970, 3991), False, 'import time\n'), ((4627, 4648), 'signal.alarm', 'signal.alarm', (['timeout'], {}), '(timeout)\n', (4639, 4648), False, 'import signal\n'), ((4915, 4930), 'signal.alarm', 'signal.alarm', (['(0)'], {}), '(0)\n', (4927, 4930), False, 'import signal\n'), ((1307, 1331), 'os.path.exists', 'os.path.exists', (['FilePath'], {}), '(FilePath)\n', (1321, 1331), False, 'import os\n'), ((2017, 2094), 'subprocess.Popen', 'subprocess.Popen', (['command_blob'], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE'}), '(command_blob, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n', (2033, 2094), False, 'import subprocess\n'), ((3219, 3338), 'cv2.putText', 'cv2.putText', (['drawer', 'server_name', '(lp_w_loc[0], h_loc)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 255, 255)', '(3)', 'cv2.LINE_AA'], {}), '(drawer, server_name, (lp_w_loc[0], h_loc), cv2.\n FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 3, cv2.LINE_AA)\n', (3230, 3338), False, 'import cv2\n'), ((4672, 4688), 'curses.initscr', 'curses.initscr', ([], {}), '()\n', (4686, 4688), False, 'import curses\n'), ((4895, 4910), 'curses.endwin', 'curses.endwin', ([], {}), '()\n', (4908, 4910), False, 'import curses\n'), ((3504, 3542), 'cv2.circle', 'cv2.circle', (['drawer', 'loc', '(25)', 'color', '(-1)'], {}), '(drawer, loc, 25, color, -1)\n', (3514, 3542), False, 'import cv2\n'), ((3702, 3758), 'cv2.getWindowProperty', 'cv2.getWindowProperty', (['"""gpu_info"""', 'cv2.WND_PROP_AUTOSIZE'], {}), "('gpu_info', cv2.WND_PROP_AUTOSIZE)\n", (3723, 3758), False, 'import cv2\n'), ((4827, 4843), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (4837, 4843), False, 'import time\n')]
|
""" Functions and Classes used to fit an estimate of an unabsorbed
continuum to a QSO spectrum.
"""
# p2.6+ compatibility
from __future__ import division, print_function, unicode_literals
try:
unicode
except NameError:
    unicode = basestring = str
    raw_input = input
import numpy as np
import matplotlib.pyplot as pl
import matplotlib.transforms as mtran
from .stats import Gaussian
from .utilities import between, stats, indexnear
from .convolve import convolve_psf
from .io import loadobj, saveobj
from .interp import AkimaSpline
from .sed import qso_template
import os
def spline_continuum(wa, fl, er, edges, minfrac=0.01, nsig=3.0,
resid_std=1.3, debug=False):
""" Fit a continuum to a chunk of a spectrum.
Very loosely based on the method in Aguirre et al. 2002.
Parameters
----------
wa : Wavelengths.
fl : Fluxes.
er : One sigma errors.
edges : Wavelengths giving the chunk edges.
minfrac = 0.01 : At least this fraction of pixels in a single chunk
contributes to the fit.
nsig = 3.0 : No. of sigma for rejection for clipping.
resid_std = 1.3 : Maximum residual st. dev. in a given chunk.
debug = False : If True, make helpful plots.
Returns
-------
Continuum array and spline points
"""
# Overview:
# (1) Calculate the median flux value for each wavelength chunk.
# (2) fit a 1st order spline (i.e. series of straight line
# segments) through the set of points given by the central
# wavelength for each chunk and the median flux value for each
# chunk.
# (3) Remove any flux values that fall more than nsig*er below
# the spline.
# Repeat 1-3 until the continuum converges on a solution (if it
# doesn't throw hands up in despair! Essential to choose a
# suitable first guess with small enough chunks).
if len(edges) < 2:
raise ValueError('must be at least two bin edges!')
wa,fl,er = (np.asarray(a, np.float64) for a in (wa,fl,er))
if debug:
ax = pl.gca()
ax.cla()
ax.plot(wa,fl)
ax.plot(wa,er)
ax.axhline(0, color='0.7')
good = ~np.isnan(fl) & ~np.isnan(er) & ~np.isinf(fl)
        ymax = 2*np.percentile(fl[good], 90)
ax.set_ylim(-0.1*ymax, ymax)
ax.set_xlim(min(edges), max(edges))
ax.set_autoscale_on(0)
pl.draw()
npts = len(wa)
mask = np.ones(npts, bool)
oldco = np.zeros(npts, float)
co = np.zeros(npts, float)
# find indices of chunk edges and central wavelengths of chunks
indices = wa.searchsorted(edges)
indices = [(i0,i1) for i0,i1 in zip(indices[:-1],indices[1:])]
if debug: print(' indices', indices)
wavc = [0.5*(w1 + w2) for w1,w2 in zip(edges[:-1],edges[1:])]
# information per chunks
npts = len(indices)
mfl = np.zeros(npts, float) # median fluxes at chunk centres
goodfit = np.zeros(npts, bool) # is fit acceptable?
res_std = np.zeros(npts, float) # residuals standard dev
res_med = np.zeros(npts, float) # residuals median
if debug:
print('chunk centres', wavc)
cont, = ax.plot(wa,co,'k')
midpoints, = ax.plot(wavc, mfl,'rx',mew=1.5,ms=8)
# loop that iterative fits continuum
while True:
for i,(j1,j2) in enumerate(indices):
if goodfit[i]: continue
# calculate median flux
#print(i,j1,j2)
w,f,e,m = (item[j1:j2] for item in (wa,fl,er,mask))
ercond = (e > 0) & (~np.isnan(f))
cond = m & ercond
chfl = f[cond]
chflgood = f[ercond]
if len(chflgood) == 0: continue
#print(len(chfl), len(chflgood))
if float(len(chfl)) / len(chflgood) < minfrac:
f_cutoff = np.percentile(chflgood, minfrac)
cond = ercond & (f >= f_cutoff)
if len(f[cond]) == 0: continue
mfl[i] = np.median(f[cond])
# calculate the spline. add extra points on either end to give
# a nice slope at the end points.
extwavc = ([wavc[0] - (wavc[1] - wavc[0])] + list(wavc) +
[wavc[-1] + (wavc[-1] - wavc[-2])])
extmfl = ([mfl[0] - (mfl[1] - mfl[0])] + list(mfl) +
[mfl[-1] + (mfl[-1] - mfl[-2])])
co = np.interp(wa, extwavc, extmfl)
if debug:
cont.set_ydata(co)
midpoints.set_xdata(wavc)
midpoints.set_ydata(mfl)
pl.draw()
# calculate residuals for each chunk
for i,(j1,j2) in enumerate(indices):
if goodfit[i]: continue
ercond = er[j1:j2] > 0
cond = ercond & mask[j1:j2]
chfl = fl[j1:j2][cond]
chflgood = fl[j1:j2][ercond]
if len(chflgood) == 0: continue
if float(len(chfl)) / len(chflgood) < minfrac:
f_cutoff = np.percentile(chflgood, minfrac)
cond = ercond & (fl[j1:j2] > f_cutoff)
#print(len(co), len(fl), i1, j1, j2)
residuals = (fl[j1:j2][cond] - co[j1:j2][cond]
) / er[j1:j2][cond]
res_std[i] = residuals.std()
if len(residuals) == 0:
continue
res_med[i] = np.median(residuals)
# If residuals have std < 1.0 and mean ~1.0, we might have
# a reasonable fit.
if res_std[i] <= resid_std:
goodfit[i] = True
if debug:
print('median and st. dev. of residuals by region - aiming for 0,1')
for i,(f0,f1) in enumerate(zip(res_med, res_std)):
                print('{0} {1:.2f} {2:.2f}'.format(i,f0,f1))
raw_input('Enter...')
# (3) Remove flux values that fall more than N*sigma below the
# spline fit.
cond = (co - fl) > nsig * er
if debug:
print(np.nanmax(np.abs(co - oldco)/co))
# Finish when the biggest change between the new and old
# medians is smaller than the number below.
if np.nanmax(np.abs(co - oldco)/co) < 4e-3:
break
oldco = co.copy()
mask[cond] = False
# finally fit a cubic spline through the median values to
# get a smooth continuum.
final = AkimaSpline(wavc, mfl)
return final(wa), list(zip(wavc,mfl))
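# Hedged usage sketch (illustrative only, not part of the original module): fit a
# continuum using chunk edges spaced roughly 50 wavelength units apart.
#
#     edges = np.arange(wa[0], wa[-1], 50.)
#     co, knots = spline_continuum(wa, fl, er, edges)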
def fitqsocont(wa, fl, er, redshift, oldco=None, knots=None,
nbin=1, divmult=1, forest_divmult=1, atmos=True, debug=False):
""" Find an estimate of a QSO continuum.
divmult=3 works well for R~40000, S/N~10, z=3 QSO spectrum.
nbin bins the data for plotting and continuum fitting (obsolete)
"""
# choose initial reference continuum points. Increase divmult for
# fewer initial continuum points (generally needed for poorer S/N
# spectra).
zp1 = 1 + redshift
#reflines = np.array([1025.72, 1215.6701, 1240.14, 1398.0,
# 1549.06, 1908, 2800 ])
# generate the edges of wavelength chunks to send to fitting routine
# these edges and divisions are generated by trial and error
# for S/N = 15ish and resolution = 2000ish
div = np.rec.fromrecords([(500. , 800. , 25),
(800. , 1190., 25),
(1190., 1213., 4),
(1213., 1230., 6),
(1230., 1263., 6),
(1263., 1290., 5),
(1290., 1340., 5),
(1340., 1370., 2),
(1370., 1410., 5),
(1410., 1515., 5),
(1515., 1600., 15),
(1600., 1800., 8),
(1800., 1900., 5),
(1900., 1940., 5),
(1940., 2240., 15),
(2240., 3000., 25),
(3000., 6000., 80),
(6000., 20000., 100),
], names=str('left,right,num'))
div.num[2:] = np.ceil(div.num[2:] * divmult)
div.num[:2] = np.ceil(div.num[:2] * forest_divmult)
div.left *= zp1
div.right *= zp1
if debug: print(div.tolist())
temp = [np.linspace(left, right, n+1)[:-1] for left,right,n in div]
edges = np.concatenate(temp)
if debug: stats(edges)
i0,i1,i2 = edges.searchsorted([wa[0], 1210*zp1, wa[-1]])
if debug: print(i0,i1,i2)
contpoints = []
if knots is not None:
contpoints.extend(knots)
else:
co,cp = spline_continuum(wa, fl, er, edges[i0:i2], debug=debug)
contpoints.extend(cp)
fig = pl.figure(figsize=(11, 7))
fig.subplots_adjust(left=0.05, right=0.95, bottom=0.1, top=0.95)
wrapper = InteractiveCoFit(wa, fl, er, contpoints, co=oldco, nbin=nbin,
redshift=redshift, fig=fig, atmos=atmos)
while True:
if wrapper.finished: break
pl.waitforbuttonpress()
return wrapper.continuum, wrapper.contpoints
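# Hedged usage sketch (illustrative only): interactive continuum fit for a z=2.5
# QSO spectrum; the continuum and spline knots are returned after pressing 'k'
# (keep) in the matplotlib window.
#
#     co, knots = fitqsocont(wa, fl, er, redshift=2.5, divmult=3)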
class InteractiveCoFit(object):
help_message = """
'a' : add a new continuum point
'd' : delete the nearest point
'b' : add a break in the continuum
'r' : remove a break in the continuum
's' : smooth the spectrum
'k' : keep continuum
'q' : quit without keeping continuum
"""
def __init__(self, wa, fl, er, contpoints, co=None,
nbin=8, redshift=None, atmos=None, fig=None):
""" Initialise figure, plots and variables.
Parameters
----------
wa : Wavelengths
fl : Fluxes
er : One sigma errors
nbin : int (8)
Number of pixels to bin arrays in wavelength. Default 8.
contpoints : list of x,y tuple pairs (None)
The points through which a cubic spline is passed,
defining the continuum.
redshift : float (None)
Redshift used to plot reference emission lines.
atmos : list of wavelength pairs (None)
Regions of atmospheric absorption to plot.
Notes
-----
Updates the following attributes:
self.spec : Dictionary of wa, fl, er.
self.contpoints : Points used to define the continuum.
self.nbin : The input nbin value.
self.markers : Dictionary of matplotlib plotting artists.
self.connections : Callback connections.
self.fig : The plotting figure instance.
"""
#setup
#print co
self.WMIN_LYA = 1040
self.WMAX_LYA = 1190
self.spec = dict(wa=wa, fl=fl, er=er, co=co)
self.nbin = nbin
self.breaks = [wa[0], wa[-1]] # wavelengths of breaks in the continuum
self.contpoints = list(contpoints)
if os.path.lexists('./_knots.sav'):
c = raw_input('temporary knots file exists, use these knots? (y) ')
if c.lower() != 'n':
self.contpoints = loadobj('./_knots.sav')
self.markers = dict()
self.art_fl = None
if fig is None:
self.fig = pl.figure()
else:
self.fig = fig
# disable any existing key press callbacks
cids = list(fig.canvas.callbacks.callbacks['key_press_event'])
for cid in cids:
fig.canvas.callbacks.disconnect(cid)
self.template = None
if redshift is not None:
self.template = qso_template(wa, redshift)
self.connections = []
self.continuum = None
self.finished = False
self.redshift = redshift
self.atmos = atmos
self.smoothby = None
self.plotinit()
self.update()
self.modifypoints()
pl.draw()
def plotinit(self):
""" Set up the figure and do initial plots.
Updates the following attributes:
self.markers
"""
wa,fl,er = [self.spec[k][0:-1:self.nbin] for k in 'wa fl er'.split()]
if self.spec['co'] is not None:
co = self.spec['co'][0:-1:self.nbin]
# axis for spectrum & continuum
a0 = self.fig.add_axes((0.05,0.1,0.9,0.6))
a0.set_autoscale_on(0)
# axis for residuals
a1 = self.fig.add_axes((0.05,0.75,0.9,0.2),sharex=a0)
a1.set_autoscale_on(0)
a1.axhline(0,color='k',alpha=0.7, zorder=99)
a1.axhline(1,color='k',alpha=0.7, zorder=99)
a1.axhline(-1,color='k',alpha=0.7, zorder=99)
a1.axhline(2,color='k',linestyle='dashed',zorder=99)
a1.axhline(-2,color='k',linestyle='dashed',zorder=99)
m0, = a1.plot([0],[0],'.r',marker='.', mec='none', lw=0, mew=0, ms=6, alpha=0.5)
a1.set_ylim(-4, 4)
a0.axhline(0, color='0.7')
if self.spec['co'] is not None:
a0.plot(wa,co, color='0.7', lw=1, ls='dashed')
self.art_fl, = a0.plot(wa, fl, 'b', lw=0.5, linestyle='steps-mid')
a0.plot(wa, er, lw=0.5, color='orange')
m1, = a0.plot([0], [0], 'r', alpha=0.7)
m2, = a0.plot([0], [0], 'o', mfc='None',mew=1, ms=8, mec='r', picker=5,
alpha=0.7)
a0.set_xlim(min(wa), max(wa))
good = (er > 0) & ~np.isnan(fl) & ~np.isinf(fl)
ymax = 2 * np.abs(np.percentile(fl[good], 95))
a0.set_ylim(-0.1*ymax, ymax)
a0.text(0.9,0.9, 'z=%.2f' % self.redshift, transform=a0.transAxes)
# for histogram
trans = mtran.blended_transform_factory(a1.transAxes, a1.transData)
hist, = a1.plot([], [], color='k', transform=trans)
x = np.linspace(-3,3)
a1.plot(Gaussian(x,0,1,0.05), x, color='k', transform=trans, lw=0.5)
if self.template is not None:
trans = mtran.blended_transform_factory(a0.transData, a0.transAxes)
a0.plot(self.spec['wa'], self.template/self.template.max(), '-c', lw=2,
alpha=0.5, transform=trans)
self.fig.canvas.draw()
self.markers.update(contpoints=m2, cont=m1, resid=m0, hist_left=hist)
def update(self):
""" Calculates the new continuum, residuals and updates the plots.
Updates the following attributes:
self.markers
self.continuum
"""
wa,fl,er = (self.spec[key] for key in 'wa fl er'.split())
co = np.empty(len(wa))
co.fill(np.nan)
for b0,b1 in zip(self.breaks[:-1], self.breaks[1:]):
cpts = [(x,y) for x,y in self.contpoints if b0 <= x <= b1]
if len(cpts) < 3:
continue
spline = AkimaSpline(*list(zip(*cpts)))
i,j = wa.searchsorted([b0,b1])
co[i:j] = spline(wa[i:j])
resid = (fl - co) / er
# histogram
bins = np.arange(0, 5+0.1, 0.2)
w0,w1 = self.fig.axes[1].get_xlim()
x,_ = np.histogram(resid[between(wa, w0, w1)],
bins=bins)
b = np.repeat(bins, 2)
X = np.concatenate([[0], np.repeat(x,2), [0]])
Xmax = X.max()
X = 0.05 * X / Xmax
self.markers['hist_left'].set_data(X, b)
self.markers['contpoints'].set_data(list(zip(*self.contpoints)))
nbin = self.nbin
self.markers['cont'].set_data(wa[::nbin], co[::nbin])
self.markers['resid'].set_data(wa[::nbin], resid[::nbin])
if self.smoothby is not None:
sfl = convolve_psf(fl, self.smoothby)
self.art_fl.set_data(wa, sfl)
else:
self.art_fl.set_data(wa, fl)
self.continuum = co
saveobj('_knots.sav', self.contpoints, overwrite=True)
self.fig.canvas.draw()
def on_keypress(self, event):
""" Interactive fiddling via the keyboard
Updates:
self.contpoints
"""
if event.key == 'q':
for item in self.connections:
self.fig.canvas.mpl_disconnect(item)
self.contpoints = None
self.continuum = None
self.finished = True
return
if event.key == 'k':
for item in self.connections:
self.fig.canvas.mpl_disconnect(item)
self.finished = True
return
if event.inaxes != self.fig.axes[0]: return
if event.key == 'a':
# add a point to contpoints
x,y = event.xdata,event.ydata
            if x not in list(zip(*self.contpoints))[0]:
self.contpoints.append((x,y))
self.update()
elif event.key == 'd':
# remove a point from contpoints
contx,conty = zip(*self.contpoints)
sep = np.hypot(event.xdata - contx, event.ydata - conty)
self.contpoints.remove(self.contpoints[sep.argmin()])
self.update()
elif event.key == 'm':
# Move a point
contx,conty = zip(*self.contpoints)
sep = np.hypot(event.xdata - contx, event.ydata - conty)
#import pdb
#pdb.set_trace()
self.contpoints[sep.argmin()] = (event.xdata,event.ydata)
self.update()
elif event.key == 'b':
# Add a break to the continuum.
self.breaks.append(event.xdata)
self.breaks.sort()
self.update()
elif event.key == 'r':
# remove a break
i = indexnear(self.breaks, event.xdata)
if i not in (0, len(self.breaks)-1):
self.breaks.remove(self.breaks[i])
self.update()
elif event.key == 'S':
# Save fit to a temporary file
print( 'fitcont: Writing output to temporary file tmp.sav')
saveobj('tmp.sav', (self.continuum, self.contpoints), overwrite=1)
elif event.key == 's':
c = raw_input('New FWHM in pixels of Gaussian to convolve with? '
'(blank for no smoothing) ')
if c == '':
# restore spectrum
self.smoothby = None
self.update()
else:
                try:
                    fwhm = float(c)
                except ValueError:
                    print('FWHM must be a floating point number >= 1')
                    return
if fwhm < 1:
self.smoothby = None
else:
self.smoothby = fwhm
self.update()
elif event.key == '?':
print(self.help_message)
def on_button_release(self, event):
self.update()
def modifypoints(self):
""" Add/remove continuum points."""
print(self.help_message)
id1 = self.fig.canvas.mpl_connect('key_press_event',self.on_keypress)
id2 = self.fig.canvas.mpl_connect('button_release_event',self.on_button_release)
self.connections.extend([id1, id2])
|
[
"numpy.abs",
"numpy.ones",
"numpy.isnan",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.gca",
"numpy.interp",
"os.path.lexists",
"matplotlib.pyplot.draw",
"matplotlib.transforms.blended_transform_factory",
"numpy.linspace",
"numpy.repeat",
"numpy.ceil",
"numpy.median",
"numpy.asarray",
"numpy.isinf",
"matplotlib.pyplot.waitforbuttonpress",
"numpy.percentile",
"numpy.hypot",
"numpy.concatenate",
"numpy.zeros"
] |
[((2483, 2502), 'numpy.ones', 'np.ones', (['npts', 'bool'], {}), '(npts, bool)\n', (2490, 2502), True, 'import numpy as np\n'), ((2515, 2536), 'numpy.zeros', 'np.zeros', (['npts', 'float'], {}), '(npts, float)\n', (2523, 2536), True, 'import numpy as np\n'), ((2546, 2567), 'numpy.zeros', 'np.zeros', (['npts', 'float'], {}), '(npts, float)\n', (2554, 2567), True, 'import numpy as np\n'), ((2913, 2934), 'numpy.zeros', 'np.zeros', (['npts', 'float'], {}), '(npts, float)\n', (2921, 2934), True, 'import numpy as np\n'), ((2986, 3006), 'numpy.zeros', 'np.zeros', (['npts', 'bool'], {}), '(npts, bool)\n', (2994, 3006), True, 'import numpy as np\n'), ((3043, 3064), 'numpy.zeros', 'np.zeros', (['npts', 'float'], {}), '(npts, float)\n', (3051, 3064), True, 'import numpy as np\n'), ((3104, 3125), 'numpy.zeros', 'np.zeros', (['npts', 'float'], {}), '(npts, float)\n', (3112, 3125), True, 'import numpy as np\n'), ((8231, 8261), 'numpy.ceil', 'np.ceil', (['(div.num[2:] * divmult)'], {}), '(div.num[2:] * divmult)\n', (8238, 8261), True, 'import numpy as np\n'), ((8280, 8317), 'numpy.ceil', 'np.ceil', (['(div.num[:2] * forest_divmult)'], {}), '(div.num[:2] * forest_divmult)\n', (8287, 8317), True, 'import numpy as np\n'), ((8481, 8501), 'numpy.concatenate', 'np.concatenate', (['temp'], {}), '(temp)\n', (8495, 8501), True, 'import numpy as np\n'), ((8823, 8849), 'matplotlib.pyplot.figure', 'pl.figure', ([], {'figsize': '(11, 7)'}), '(figsize=(11, 7))\n', (8832, 8849), True, 'import matplotlib.pyplot as pl\n'), ((2032, 2057), 'numpy.asarray', 'np.asarray', (['a', 'np.float64'], {}), '(a, np.float64)\n', (2042, 2057), True, 'import numpy as np\n'), ((2107, 2115), 'matplotlib.pyplot.gca', 'pl.gca', ([], {}), '()\n', (2113, 2115), True, 'import matplotlib.pyplot as pl\n'), ((2442, 2451), 'matplotlib.pyplot.draw', 'pl.draw', ([], {}), '()\n', (2449, 2451), True, 'import matplotlib.pyplot as pl\n'), ((4393, 4423), 'numpy.interp', 'np.interp', (['wa', 'extwavc', 'extmfl'], {}), '(wa, extwavc, extmfl)\n', (4402, 4423), True, 'import numpy as np\n'), ((9126, 9149), 'matplotlib.pyplot.waitforbuttonpress', 'pl.waitforbuttonpress', ([], {}), '()\n', (9147, 9149), True, 'import matplotlib.pyplot as pl\n'), ((10980, 11011), 'os.path.lexists', 'os.path.lexists', (['"""./_knots.sav"""'], {}), "('./_knots.sav')\n", (10995, 11011), False, 'import os\n'), ((11918, 11927), 'matplotlib.pyplot.draw', 'pl.draw', ([], {}), '()\n', (11925, 11927), True, 'import matplotlib.pyplot as pl\n'), ((13614, 13673), 'matplotlib.transforms.blended_transform_factory', 'mtran.blended_transform_factory', (['a1.transAxes', 'a1.transData'], {}), '(a1.transAxes, a1.transData)\n', (13645, 13673), True, 'import matplotlib.transforms as mtran\n'), ((13746, 13764), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)'], {}), '(-3, 3)\n', (13757, 13764), True, 'import numpy as np\n'), ((14936, 14962), 'numpy.arange', 'np.arange', (['(0)', '(5 + 0.1)', '(0.2)'], {}), '(0, 5 + 0.1, 0.2)\n', (14945, 14962), True, 'import numpy as np\n'), ((15110, 15128), 'numpy.repeat', 'np.repeat', (['bins', '(2)'], {}), '(bins, 2)\n', (15119, 15128), True, 'import numpy as np\n'), ((2292, 2320), 'numpy.percentile', 'np.percentile', (['fl[good]', '(0.9)'], {}), '(fl[good], 0.9)\n', (2305, 2320), True, 'import numpy as np\n'), ((4014, 4032), 'numpy.median', 'np.median', (['f[cond]'], {}), '(f[cond])\n', (4023, 4032), True, 'import numpy as np\n'), ((4560, 4569), 'matplotlib.pyplot.draw', 'pl.draw', ([], {}), '()\n', (4567, 4569), True, 'import matplotlib.pyplot as pl\n'), 
((5348, 5368), 'numpy.median', 'np.median', (['residuals'], {}), '(residuals)\n', (5357, 5368), True, 'import numpy as np\n'), ((8409, 8440), 'numpy.linspace', 'np.linspace', (['left', 'right', '(n + 1)'], {}), '(left, right, n + 1)\n', (8420, 8440), True, 'import numpy as np\n'), ((11289, 11300), 'matplotlib.pyplot.figure', 'pl.figure', ([], {}), '()\n', (11298, 11300), True, 'import matplotlib.pyplot as pl\n'), ((13900, 13959), 'matplotlib.transforms.blended_transform_factory', 'mtran.blended_transform_factory', (['a0.transData', 'a0.transAxes'], {}), '(a0.transData, a0.transAxes)\n', (13931, 13959), True, 'import matplotlib.transforms as mtran\n'), ((2262, 2274), 'numpy.isinf', 'np.isinf', (['fl'], {}), '(fl)\n', (2270, 2274), True, 'import numpy as np\n'), ((3868, 3900), 'numpy.percentile', 'np.percentile', (['chflgood', 'minfrac'], {}), '(chflgood, minfrac)\n', (3881, 3900), True, 'import numpy as np\n'), ((4980, 5012), 'numpy.percentile', 'np.percentile', (['chflgood', 'minfrac'], {}), '(chflgood, minfrac)\n', (4993, 5012), True, 'import numpy as np\n'), ((13393, 13405), 'numpy.isinf', 'np.isinf', (['fl'], {}), '(fl)\n', (13401, 13405), True, 'import numpy as np\n'), ((13432, 13459), 'numpy.percentile', 'np.percentile', (['fl[good]', '(95)'], {}), '(fl[good], 95)\n', (13445, 13459), True, 'import numpy as np\n'), ((15162, 15177), 'numpy.repeat', 'np.repeat', (['x', '(2)'], {}), '(x, 2)\n', (15171, 15177), True, 'import numpy as np\n'), ((16833, 16883), 'numpy.hypot', 'np.hypot', (['(event.xdata - contx)', '(event.ydata - conty)'], {}), '(event.xdata - contx, event.ydata - conty)\n', (16841, 16883), True, 'import numpy as np\n'), ((2230, 2242), 'numpy.isnan', 'np.isnan', (['fl'], {}), '(fl)\n', (2238, 2242), True, 'import numpy as np\n'), ((2246, 2258), 'numpy.isnan', 'np.isnan', (['er'], {}), '(er)\n', (2254, 2258), True, 'import numpy as np\n'), ((3590, 3601), 'numpy.isnan', 'np.isnan', (['f'], {}), '(f)\n', (3598, 3601), True, 'import numpy as np\n'), ((6144, 6162), 'numpy.abs', 'np.abs', (['(co - oldco)'], {}), '(co - oldco)\n', (6150, 6162), True, 'import numpy as np\n'), ((13377, 13389), 'numpy.isnan', 'np.isnan', (['fl'], {}), '(fl)\n', (13385, 13389), True, 'import numpy as np\n'), ((17100, 17150), 'numpy.hypot', 'np.hypot', (['(event.xdata - contx)', '(event.ydata - conty)'], {}), '(event.xdata - contx, event.ydata - conty)\n', (17108, 17150), True, 'import numpy as np\n'), ((5982, 6000), 'numpy.abs', 'np.abs', (['(co - oldco)'], {}), '(co - oldco)\n', (5988, 6000), True, 'import numpy as np\n')]
|
from pathlib import Path
import shutil
import pandas as pd
import torch
from torch.utils.data import Dataset
import pickle
import numpy as np
import torchvision.transforms.functional as F
from torchvision import transforms
import tarfile
import datetime
import pytz
from PIL import Image
from tqdm import tqdm
from sklearn.metrics import f1_score, accuracy_score, precision_recall_fscore_support
from sustainbench.common.utils import subsample_idxs
from sustainbench.common.metrics.all_metrics import Accuracy
from sustainbench.common.grouper import CombinatorialGrouper
from sustainbench.datasets.sustainbench_dataset import SustainBenchDataset
class CropSegmentationDataset(SustainBenchDataset):
"""
The Farmland Parcel Delineation dataset.
This is a processed version of the farmland dataset used in https://arxiv.org/abs/2004.05471.
Input (x, image):
224 x 224 x 3 RGB satellite image.
Label (y, image):
        if filled_mask == True, y shows the filled boundary (mask) of the farmland, 224 x 224 image
        if filled_mask == False, y shows the boundary of the farmland, 224 x 224 image
Metadata:
each image is annotated with a location coordinate, denoted as 'max_lat', 'max_lon', 'min_lat', 'min_lon'.
Original publication:
@inproceedings{aung2020farm,
title={Farm Parcel Delineation Using Spatio-temporal Convolutional Networks},
author={<NAME> <NAME> and <NAME> and <NAME> and <NAME>},
booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops},
pages={76--77},
year={2020}
}
"""
_dataset_name = 'crop_seg'
_versions_dict = {
'1.1': {
'download_url': 'https://worksheets.codalab.org/rest/bundles/0xaec91eb7c9d548ebb15e1b5e60f966ab/contents/blob/', # TODO, change url
'compressed_size': 53_893_324_800 # TODO: change compressed size
}
}
def __init__(self, version=None, root_dir='data', download=False, split_scheme='official', oracle_training_set=False, seed=111, filled_mask=False, use_ood_val=False):
self._version = version
self._data_dir = "/atlas/u/chenlin/dataset_benchmark_private/hanBurakData/sentinel_jul_sept_v2" # TODO: implementation only
#self._data_dir = self.initialize_data_dir(root_dir, download) # TODO: uncomment
self._split_dict = {'train': 0, 'val': 1, 'test': 2}
self._split_names = {'train': 'Train', 'val': 'Val', 'test': 'Test'}
self._split_scheme = split_scheme
self.oracle_training_set = oracle_training_set
self.root = Path(self._data_dir)
self.seed = int(seed)
self._original_resolution = (224, 224) #checked
self.metadata = pd.read_csv(self.root / 'clean_data.csv')
self.filled_mask = filled_mask
self._split_array = -1 * np.ones(len(self.metadata))
for split in self._split_dict.keys():
if split == 'test':
test_mask = np.asarray(self.metadata['split'] == 'test')
id = self.metadata['ids'][test_mask]
elif split == 'val':
val_mask = np.asarray(self.metadata['split'] == 'val')
id = self.metadata['ids'][val_mask]
else:
split_mask = np.asarray(self.metadata['split'] == split)
id = self.metadata['ids'][split_mask]
self._split_array[id] = self._split_dict[split]
self.full_idxs = self.metadata['indices']
if self.filled_mask:
self._y_array = np.asarray([self.root / 'masks_filled' / f'{y}.png' for y in self.full_idxs])
else:
self._y_array = np.asarray([self.root / 'masks' / f'{y}.png' for y in self.full_idxs])
self.metadata['y'] = self._y_array
self._y_size = 1
self._metadata_fields = ['y', 'max_lat', 'max_lon', 'min_lat', 'min_lon']
self._metadata_array = self.metadata[self._metadata_fields].to_numpy()
#torch.from_numpy(self.metadata[self._metadata_fields].to_numpy())
# self._eval_groupers = {
# 'max_lat': CombinatorialGrouper(dataset=self, groupby_fields=['max_lat']),
# 'max_lon': CombinatorialGrouper(dataset=self, groupby_fields=['max_lon']),
# 'min_lat': CombinatorialGrouper(dataset=self, groupby_fields=['min_lat']),
# 'min_lon': CombinatorialGrouper(dataset=self, groupby_fields=['min_lon']),
# }
super().__init__(root_dir, download, split_scheme)
def get_input(self, idx):
"""
Returns x for a given idx.
"""
idx = self.full_idxs[idx]
img = Image.open(self.root / 'imgs' / f'{idx}.jpeg').convert('RGB')
return img
def get_output_image(self, path):
"""
        Returns the output (label) image loaded from the given path.
"""
img = Image.open(path).convert('RGB')
return img
def crop_segmentation_metrics(self, y_true, y_pred, binarized=True):
y_true = y_true.flatten()
y_pred = y_pred.flatten()
assert (y_true.shape == y_pred.shape)
if not binarized:
y_pred[y_pred > 0.5] = 1
y_pred[y_pred != 1] = 0
y_true = y_true.astype(int)
y_pred = y_pred.astype(int)
f1 = f1_score(y_true, y_pred, average='binary', pos_label=1)
acc = accuracy_score(y_true, y_pred)
precision_recall = precision_recall_fscore_support(y_true, y_pred, average='binary', pos_label=1)
print('Dice/ F1 score:', f1)
print('Accuracy score:', acc)
print("Precision recall fscore", precision_recall)
return f1, acc, precision_recall
def eval(self, y_pred, y_true, metadata, binarized=False): # TODO
"""
Computes all evaluation metrics.
Args:
- y_pred (Tensor): Predictions from a model.
- y_true (Tensor): Ground-truth boundary images
- metadata (Tensor): Metadata
- binarized: Whether to use binarized prediction
Output:
- results (list): List of evaluation metrics
- results_str (str): String summarizing the evaluation metrics
"""
f1, acc, precision_recall = self.crop_segmentation_metrics(y_true, y_pred, binarized=binarized)
results = [f1, acc, precision_recall]
results_str = 'Dice/ F1 score: {}, Accuracy score: {}, Precision recall fscore: '.format(f1, acc, precision_recall)
return results, results_str
# metric = Accuracy(prediction_fn=prediction_fn)
# # Overall evaluation + evaluate by year
# all_results, all_results_str = self.standard_group_eval(
# metric,
# self._eval_groupers['year'],
# y_pred, y_true, metadata)
# # Evaluate by region and ignore the "Other" region
# region_grouper = self._eval_groupers['region']
# region_results = metric.compute_group_wise(
# y_pred,
# y_true,
# region_grouper.metadata_to_group(metadata),
# region_grouper.n_groups)
# all_results[f'{metric.name}_worst_year'] = all_results.pop(metric.worst_group_metric_field)
# region_metric_list = []
# for group_idx in range(region_grouper.n_groups):
# group_str = region_grouper.group_field_str(group_idx)
# group_metric = region_results[metric.group_metric_field(group_idx)]
# group_counts = region_results[metric.group_count_field(group_idx)]
# all_results[f'{metric.name}_{group_str}'] = group_metric
# all_results[f'count_{group_str}'] = group_counts
# if region_results[metric.group_count_field(group_idx)] == 0 or "Other" in group_str:
# continue
# all_results_str += (
# f' {region_grouper.group_str(group_idx)} '
# f"[n = {region_results[metric.group_count_field(group_idx)]:6.0f}]:\t"
# f"{metric.name} = {region_results[metric.group_metric_field(group_idx)]:5.3f}\n")
# region_metric_list.append(region_results[metric.group_metric_field(group_idx)])
# all_results[f'{metric.name}_worst_region'] = metric.worst(region_metric_list)
# all_results_str += f"Worst-group {metric.name}: {all_results[f'{metric.name}_worst_region']:.3f}\n"
#
# return all_results, all_results_str
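
# Hedged usage sketch (illustrative only; the root_dir and keyword values below are
# assumptions, see the class docstring):
#
#     dataset = CropSegmentationDataset(version='1.1', root_dir='data',
#                                       split_scheme='official', filled_mask=True)
#     img = dataset.get_input(0)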
|
[
"pandas.read_csv",
"sklearn.metrics.accuracy_score",
"numpy.asarray",
"PIL.Image.open",
"pathlib.Path",
"sklearn.metrics.f1_score",
"sklearn.metrics.precision_recall_fscore_support"
] |
[((2625, 2645), 'pathlib.Path', 'Path', (['self._data_dir'], {}), '(self._data_dir)\n', (2629, 2645), False, 'from pathlib import Path\n'), ((2765, 2806), 'pandas.read_csv', 'pd.read_csv', (["(self.root / 'clean_data.csv')"], {}), "(self.root / 'clean_data.csv')\n", (2776, 2806), True, 'import pandas as pd\n'), ((5295, 5350), 'sklearn.metrics.f1_score', 'f1_score', (['y_true', 'y_pred'], {'average': '"""binary"""', 'pos_label': '(1)'}), "(y_true, y_pred, average='binary', pos_label=1)\n", (5303, 5350), False, 'from sklearn.metrics import f1_score, accuracy_score, precision_recall_fscore_support\n'), ((5365, 5395), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (5379, 5395), False, 'from sklearn.metrics import f1_score, accuracy_score, precision_recall_fscore_support\n'), ((5423, 5501), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_true', 'y_pred'], {'average': '"""binary"""', 'pos_label': '(1)'}), "(y_true, y_pred, average='binary', pos_label=1)\n", (5454, 5501), False, 'from sklearn.metrics import f1_score, accuracy_score, precision_recall_fscore_support\n'), ((3581, 3660), 'numpy.asarray', 'np.asarray', (["[(self.root / 'masks_filled' / f'{y}.png') for y in self.full_idxs]"], {}), "([(self.root / 'masks_filled' / f'{y}.png') for y in self.full_idxs])\n", (3591, 3660), True, 'import numpy as np\n'), ((3701, 3773), 'numpy.asarray', 'np.asarray', (["[(self.root / 'masks' / f'{y}.png') for y in self.full_idxs]"], {}), "([(self.root / 'masks' / f'{y}.png') for y in self.full_idxs])\n", (3711, 3773), True, 'import numpy as np\n'), ((3014, 3058), 'numpy.asarray', 'np.asarray', (["(self.metadata['split'] == 'test')"], {}), "(self.metadata['split'] == 'test')\n", (3024, 3058), True, 'import numpy as np\n'), ((4679, 4725), 'PIL.Image.open', 'Image.open', (["(self.root / 'imgs' / f'{idx}.jpeg')"], {}), "(self.root / 'imgs' / f'{idx}.jpeg')\n", (4689, 4725), False, 'from PIL import Image\n'), ((4872, 4888), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (4882, 4888), False, 'from PIL import Image\n'), ((3172, 3215), 'numpy.asarray', 'np.asarray', (["(self.metadata['split'] == 'val')"], {}), "(self.metadata['split'] == 'val')\n", (3182, 3215), True, 'import numpy as np\n'), ((3315, 3358), 'numpy.asarray', 'np.asarray', (["(self.metadata['split'] == split)"], {}), "(self.metadata['split'] == split)\n", (3325, 3358), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 9 11:37:59 2021
@author: Prachi
"""
import numpy as np
import argparse
import sys
import os
import matplotlib.pyplot as plt
import pickle
from pdb import set_trace as bp
import subprocess
import scipy.io as sio
from scipy.sparse import coo_matrix
import pic_dihard_ami as mypic
sys.path.insert(0,os.getcwd())
sys.path.insert(0,os.getcwd()+'/../SelfSup_PLDA')
def setup():
"""Get cmds and setup directories."""
    cmdparser = argparse.ArgumentParser(description='Do speaker clustering based on '\
'my ahc',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
cmdparser.add_argument('--threshold', help='threshold for clustering',
type=float, default=None)
cmdparser.add_argument('--lamda', help='lamda for clustering',
type=float, default=0)
# cmdparser.add_argument('--custom-dist', help='e.g. euclidean, cosine', type=str, default=None)
cmdparser.add_argument('--reco2utt', help='spk2utt to create labels', default='../swbd_diar/exp/callhome1/spk2utt')
cmdparser.add_argument('--reco2num', help='reco2num_spk to get true speakers', default='None')
cmdparser.add_argument('--label-out', dest='out_file',
help='output file used for storing labels', default='../generated_rttm_new/rttm_callhome_my_clustering/cosine/labels')
# cmdparser.add_argument('--minMaxK', nargs=2, default=[2, 10])
cmdparser.add_argument('--score_file', help='file containing list of score matrices', type=str,default='../lists/callhome1/callhome1.list')
cmdparser.add_argument('--score_path', help='path of scores', type=str,default='../scores_cosine/callhome1_scores')
cmdparser.add_argument('--using_init', help='if initialisation is needed', type=int,default=0)
cmdparser.add_argument('--dataset', help='dataset name', type=str, default="callhome1")
cmdparser.add_argument('--k', type=float, default=30)
cmdparser.add_argument('--z', type=float, default=0.1)
cmdparser.add_argument('--clustering', type=str, default='PIC')
# cmdparser.add_argument('--out_path', help='path of output scores', type=str, default=None)
cmdparser.add_argument('--weight', help='weight for fusion',
type=float, default=1.0)
cmdargs = cmdparser.parse_args()
return cmdargs
def AHC(sim_mx, threshold=None,nspeaker=1):
""" Performs UPGMA variant (wikipedia.org/wiki/UPGMA) of Agglomerative
Hierarchical Clustering using the input pairwise similarity matrix.
Input:
sim_mx - NxN pairwise similarity matrix
threshold - threshold for stopping the clustering algorithm
(see function twoGMMcalib_lin for its estimation)
Output:
        cluster labels stored in an array of length N containing integers in
        the range from 0 to C-1, where C is the number of discovered clusters
"""
dist = -sim_mx
dist[np.diag_indices_from(dist)] = np.inf
clsts = [[i] for i in range(len(dist))]
clst_count = len(dist)
print('start speaker count: ',clst_count)
while True:
mi, mj = np.sort(np.unravel_index(dist.argmin(), dist.shape))
if threshold is None:
if clst_count==nspeaker:
print('nspeaker: ',clst_count)
break
else:
if dist[mi, mj] > -threshold:
break
dist[:, mi] = dist[mi,:] = (dist[mi,:]*len(clsts[mi])+dist[mj,:]*len(clsts[mj]))/(len(clsts[mi])+len(clsts[mj]))
dist[:, mj] = dist[mj,:] = np.inf
clsts[mi].extend(clsts[mj])
clsts[mj] = None
clst_count = clst_count - 1
labs= np.empty(len(dist), dtype=int)
for i, c in enumerate([e for e in clsts if e]):
labs[c] = i
return labs
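# Hedged usage sketch (illustrative only): cluster a pairwise similarity matrix,
# either stopping at a threshold or merging down to a known speaker count.
#
#     labels = AHC(sim_mx, threshold=0.5)
#     labels = AHC(sim_mx, threshold=None, nspeaker=2)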
def compute_affinity_matrix(X):
"""Compute the affinity matrix from data.
    Note that the affinity returned here is the raw cosine similarity, so its
    range is [-1,1] (the rescaling to [0,1] below is left commented out).
Args:
X: numpy array of shape (n_samples, n_features)
Returns:
affinity: numpy array of shape (n_samples, n_samples)
"""
# Normalize the data.
l2_norms = np.linalg.norm(X, axis=1)
X_normalized = X / l2_norms[:, None]
# Compute cosine similarities. Range is [-1,1].
cosine_similarities = np.matmul(X_normalized, np.transpose(X_normalized))
# Compute the affinity. Range is [0,1].
# Note that this step is not mentioned in the paper!
affinity = cosine_similarities
# affinity = (cosine_similarities + 1.0) / 2.0
return affinity
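# Illustrative check (hypothetical data): cosine affinity of length-normalised rows
# lies in [-1, 1] and the diagonal is exactly 1.
#
#   X = np.random.randn(5, 16)
#   aff = compute_affinity_matrix(X)   # shape (5, 5), aff[i, i] == 1.0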
def unique(arr, return_ind=False):
if return_ind:
k = 0
d = dict()
uniques = np.empty(arr.size, dtype=arr.dtype)
indexes = np.empty(arr.size, dtype='i')
for i, a in enumerate(arr):
if a in d:
indexes[i] = d[a]
else:
indexes[i] = k
uniques[k] = a
d[a] = k
k += 1
return uniques[:k], indexes
else:
_, idx = np.unique(arr, return_index=True)
return arr[np.sort(idx)]
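# Order-preserving behaviour of unique() (illustrative):
#   unique(np.array([3, 3, 1, 2, 1]))        -> array([3, 1, 2])
#   unique(np.array([3, 3, 1, 2, 1]), True)  -> (array([3, 1, 2]), array([0, 0, 1, 2, 1]))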
class clustering:
def __init__(self,n_clusters,clusterlen,labelfull,lamda=0.0,dist=None):
self.n_clusters = n_clusters
self.labelfull = labelfull.copy()
self.mergeind = []
self.eta = 0.1
self.kc = 2
self.max_10per_scores = 5
self.lamda = lamda
self.clusterlen = clusterlen.copy()
# self.clusterlen=[1]*len(labelfull)
self.dist = dist
self.minloss_current = 1000
def initialize_clusters(self,A):
sampleNum = len(A)
NNIndex = np.argsort(A)[:,::-1]
clusterLabels = np.ones((sampleNum, 1),dtype=int)*(-1)
counter = 0
for i in range(sampleNum):
idx = NNIndex[i,:2]
assignedCluster = clusterLabels[idx]
assignedCluster = np.unique(assignedCluster[assignedCluster >= 0])
if len(assignedCluster) == 0:
clusterLabels[idx] = counter
counter = counter + 1
elif len(assignedCluster) == 1:
clusterLabels[idx] = assignedCluster
else:
clusterLabels[idx] = assignedCluster[0];
for j in range(1,len(assignedCluster)):
clusterLabels[clusterLabels == assignedCluster[j]] = assignedCluster[0]
uniqueLabels = np.unique(clusterLabels)
clusterNumber = len(uniqueLabels)
self.labelfull = clusterLabels[:,0].astype(int)
initialClusters = []
output_new = A.copy()
clusterlist=[]
for i,lab in enumerate(uniqueLabels):
ind=np.where(clusterLabels==lab)[0]
cluster_count = len(ind)
initialClusters.append(cluster_count)
clusterlist.append(ind[0])
avg=np.sum(output_new[ind],axis=0)
output_new[ind[0]]=avg
output_new[:,ind[0]]=avg
# initialClusters{i} = find(clusterLabels(:) == uniqueLabels(i));
self.clusterlen = initialClusters
output_new = output_new[np.ix_(clusterlist,clusterlist)]
return self.labelfull,self.clusterlen,output_new
def compute_distance(self):
colvec = np.array(self.clusterlen).reshape(-1,1)
tmp_mat = np.dot(colvec,colvec.T)
return (1/tmp_mat)
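    # Worked example (illustrative): with self.clusterlen = [2, 3] the size products are
    # [[4, 6], [6, 9]], so compute_distance returns [[1/4, 1/6], [1/6, 1/9]], i.e. the
    # 1/(n_i * n_j) factors used to average the scores of merged clusters.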
def Ahc_full(self,A):
self.A = A.copy()
while 1:
B = self.A.copy()
tmp_mat=self.compute_distance()
self.A = self.A*tmp_mat # all elementwise operation
self.A = np.triu(self.A,k=1)
cur_samp = self.A.shape[0]
minA = np.min(self.A)
self.A[np.tril_indices(cur_samp)]=-abs(minA)*100
if cur_samp < 20:
min_len = min(20,int(0.1*len(self.labelfull)))
predicted_clusters =len(np.array(self.clusterlen)[np.array(self.clusterlen)>=min_len])
if cur_samp <=10:
print('predicted_clusters:',predicted_clusters)
if self.n_clusters != None:
if cur_samp == self.n_clusters:
return self.labelfull,self.clusterlen
if self.dist!=None:
if ((self.A<self.dist).all() or cur_samp==1):
return self.labelfull,self.clusterlen
else:
if (self.A<self.dist).all() or cur_samp==1:
if predicted_clusters >= cur_samp:
return self.labelfull,self.clusterlen
ind = np.where(self.A==np.amax(self.A))
minind = min(ind[0][0],ind[1][0])
maxind = max(ind[0][0],ind[1][0])
trackind = [list(np.where(self.labelfull==minind)[0])]
trackind.extend(np.where(self.labelfull==maxind)[0])
if minind == maxind:
print(minind,maxind)
self.clusterlen[minind] +=self.clusterlen[maxind]
self.clusterlen.pop(maxind)
self.labelfull[np.where(self.labelfull==maxind)[0]]=minind
unifull = list(np.unique(self.labelfull))
labelfullnew = np.zeros(self.labelfull.shape).astype(int)
for i in range(len(self.labelfull)):
labelfullnew[i]=unifull.index(self.labelfull[i])
self.labelfull = labelfullnew
self.mergeind.append(trackind)
newsamp = cur_samp -1
# recomputation
B[:,minind] =B[:,minind]+B[:,maxind]
B[minind] = B[:,minind]
B = np.delete(B,maxind,1)
B = np.delete(B,maxind,0)
B[np.diag_indices(newsamp)]=np.min(B)
B[np.diag_indices(newsamp)] = np.max(B,axis=1)
self.A = B.copy()
return self.labelfull,self.clusterlen
def get_params(self):
return self.labelfull, self.mergeind
def write_results_dict(results_dict, output_file,reco2utt):
"""Writes the results in label file"""
output_label = open(output_file,'w')
reco2utt = open(reco2utt,'r').readlines()
i=0
for meeting_name, hypothesis in results_dict.items():
reco = reco2utt[i].split()[0]
utts = reco2utt[i].rstrip().split()[1:]
if reco == meeting_name:
for j,utt in enumerate(utts):
if np.isscalar(hypothesis[j]):
towrite = utt +' '+str(hypothesis[j])+'\n'
else:
if hypothesis[j,1]==-1:
towrite = utt +'\t'+str(hypothesis[j,0])+'\n'
else:
towrite = utt +'\t'+str(hypothesis[j,0])+' '+str(hypothesis[j,1])+'\n'
output_label.writelines(towrite)
else:
print('reco mismatch!')
i=i+1
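# Expected I/O sketch (illustrative): every reco2utt line reads
#   "<recording> <utt_1> <utt_2> ...",
# and for each utterance one line "<utt> <cluster_id>" (or "<utt>\t<id1> <id2>" when a
# second overlapping speaker label is present) is written to the output label file.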
def PIC_clustering():
args = setup()
fold = args.score_path
file_list = open(args.score_file,'r').readlines()
out_file = args.out_file
reco2utt = args.reco2utt
reco2num = args.reco2num
threshold=args.threshold
dataset = args.dataset
weight = args.weight
neb = 2
beta1 = 0.95
per_k=args.k
k= int(args.k)
z=args.z
print(threshold)
if reco2num != 'None':
reco2num_spk = open(args.reco2num).readlines()
results_dict ={}
for i,fn in enumerate(file_list):
f=fn.rsplit()[0]
print("filename:",f)
out_affinity = None
# out_affinity = os.path.dirname(out_file)+'/pic_affinity_10/'+f+'.npy'
if "baseline" in fold :
b = np.load(fold+'/'+f+'.npy')
# b = b/np.max(abs(b))
# bp()
b = 1/(1+np.exp(-b))
# b = (b+1)/2
else:
if os.path.isfile(fold+'/'+f+'.pkl'):
deepahcmodel = pickle.load(open(fold+'/'+f+'.pkl','rb'))
b = deepahcmodel['output']
else:
b = np.load(fold+'/'+f+'.npy')
#b = b/np.max(abs(b))
b = 1/(1+np.exp(-b))
#fold_plda='/data1/prachis/Dihard_2020/SSC/plda_pca_baseline/dihard_dev_2020_track1_wide_scores/plda_scores'
# fold_plda='/data1/prachis/Dihard_2020/SSC/plda_pca_baseline/dihard_dev_2020_track1_fbank_jhu_wide_scores/plda_scores'
# fold_plda='/data1/prachis/Dihard_2020/SSC/plda_pca_baseline/dihard_dev_2020_track1_ftdnn_scores/plda_scores'
#b_plda = np.load(fold_plda+'/'+f+'.npy')
#b_plda = b_plda/np.max(abs(b_plda))
#b = weight * b + (1-weight)*b_plda
#b = 1/(1+np.exp(-b))
# nframe = b.shape[0]
# # # weighting for temporal weightage
N= b.shape[0]
toep = np.abs(np.arange(N).reshape(N,1)-np.arange(N).reshape(1,N))
toep[toep>neb] = neb
weighting = beta1**(toep)
b = weighting*b
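        # e.g. with neb=2 and beta1=0.95 the weights decay as 1.0, 0.95, 0.9025 and then
        # stay at 0.9025 for frames more than neb bins from the diagonal (illustrative).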
# F_ratio,_ = compute_f_ratio(f,dataset,b)
# print('F_ratio: {}'.format(F_ratio))
clusterlen = [1]*b.shape[0]
labels = np.arange(b.shape[0])
filelength = len(b)
# if filelength <= k:
# k= int(0.5*filelength)
# k = int(max(1,per_k*filelength))
k = min(k,filelength-1)
print('filelength:',len(b))
print('k:{}, z:{}'.format(k,z))
# bp()
if reco2num != 'None':
try:
n_clusters = int(reco2num_spk[i].split()[1])
except:
n_clusters = 2
n_clusters = min(n_clusters,len(clusterlen)) # whichever is minimum
if f!=reco2num_spk[i].split()[0]:
print('file mismatch',f,reco2num_spk[i].split()[0])
threshold = None
affinity = b.copy()
clus = mypic.PIC_dihard_threshold(n_clusters,clusterlen,labels,affinity,K=k,z=z)
if "baseline" in fold :
labelfull,clusterlen= clus.gacCluster_oracle_org()
else:
labelfull,clusterlen= clus.gacCluster_oracle()
n_clusters = len(clusterlen)
print("filename: {} n_clusters:{} clusterlen:{}".format(f,n_clusters,clusterlen))
else:
affinity = b.copy()
            n_clusters = 1 # at least 1 speaker
clus = mypic.PIC_dihard_threshold(n_clusters,clusterlen,labels,affinity,threshold,K=k,z=z,)
if "baseline" in fold:
labelfull,clusterlen= clus.gacCluster_org()
else:
labelfull,clusterlen= clus.gacCluster()
n_clusters = len(clusterlen)
print("filename: {} n_clusters:{} clusterlen:{}".format(f,n_clusters,clusterlen))
uni1,method1=unique(labelfull,True)
results_dict[f]=method1
write_results_dict(results_dict, out_file,reco2utt)
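# Input layout assumed by PIC_clustering (inferred from the code above):
#   --score_file : text file with one recording id per line
#   --score_path : directory holding <recording>.npy (or .pkl) pairwise score matrices
#   --reco2num   : optional Kaldi-style reco2num_spk file with the oracle speaker count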
def PIC_clustering_init():
args = setup()
fold = args.score_path
file_list = open(args.score_file,'r').readlines()
out_file = args.out_file
reco2utt = args.reco2utt
reco2num = args.reco2num
threshold=args.threshold
weight = args.weight
neb = 2
beta1 = 0.95
per_k=args.k
k= int(args.k)
z=args.z
print(threshold)
if reco2num != 'None':
reco2num_spk = open(args.reco2num).readlines()
results_dict ={}
for i,fn in enumerate(file_list):
f=fn.rsplit()[0]
print("filename:",f)
out_affinity = None
# out_affinity = os.path.dirname(out_file)+'/pic_affinity_10/'+f+'.npy'
if "baseline" in fold :
b = np.load(fold+'/'+f+'.npy')
# b = b/np.max(abs(b))
# b = 1/(1+np.exp(-b))
# b = (b+1)/2
else:
if os.path.isfile(fold+'/'+f+'.pkl'):
deepahcmodel = pickle.load(open(fold+'/'+f+'.pkl','rb'))
b = deepahcmodel['output']
else:
b = np.load(fold+'/'+f+'.npy')
fold_plda='/data1/prachis/Dihard_2020/SSC/plda_pca_baseline/dihard_dev_2020_track1_wide_scores/plda_scores'
b_plda = np.load(fold_plda+'/'+f+'.npy')
b_plda = b_plda/np.max(abs(b_plda))
b_plda = 1/(1+np.exp(-b_plda))
b = b/np.max(abs(b))
b = 1/(1+np.exp(-b))
# nframe = b.shape[0]
# # # weighting for temporal weightage
# N= b.shape[0]
# toep = np.abs(np.arange(N).reshape(N,1)-np.arange(N).reshape(1,N))
# toep[toep>neb] = neb
# weighting = beta1**(toep)
# b = weighting*b
clusterlen = [1]*b.shape[0]
labels = np.arange(b.shape[0])
filelength = len(b)
# if filelength <= k:
# k= int(0.5*filelength)
# k = int(max(1,per_k*filelength))
k = min(k,filelength-1)
print('filelength:',len(b))
print('k:{}, z:{}'.format(k,z))
# bp()
if reco2num != 'None':
try:
n_clusters = int(reco2num_spk[i].split()[1])
except:
n_clusters = 2
n_clusters = min(n_clusters,len(clusterlen)) # whichever is minimum
if f!=reco2num_spk[i].split()[0]:
print('file mismatch',f,reco2num_spk[i].split()[0])
threshold = None
affinity = b.copy()
clus_org = mypic.PIC_dihard_threshold(n_clusters,clusterlen,labels,b_plda,K=k,z=z)
labelfull,clusterlen= clus_org.gacCluster_oracle_org(init=1)
clus = mypic.PIC_dihard_threshold(n_clusters,clusterlen,labelfull,affinity,K=k,z=z)
if "baseline" in fold or filelength <200:
labelfull,clusterlen= clus.gacCluster_oracle_org()
else:
labelfull,clusterlen= clus.gacCluster_oracle()
n_clusters = len(clusterlen)
print("filename: {} n_clusters:{} clusterlen:{}".format(f,n_clusters,clusterlen))
else:
affinity = b.copy()
            n_clusters = 1 # at least 1 speaker
clus = mypic.PIC_dihard_threshold(n_clusters,clusterlen,labels,affinity,K=k,z=z)
if "baseline" in fold:
labelfull,clusterlen= clus.gacCluster_org()
else:
labelfull,clusterlen= clus.gacCluster()
n_clusters = len(clusterlen)
print("filename: {} n_clusters:{} clusterlen:{}".format(f,n_clusters,clusterlen))
uni1,method1=unique(labelfull,True)
results_dict[f]=method1
write_results_dict(results_dict, out_file,reco2utt)
def AHC_clustering():
args = setup()
fold = args.score_path
file_list = np.genfromtxt(args.score_file,dtype=str)
out_file = args.out_file
reco2utt = args.reco2utt
reco2num = args.reco2num
threshold=args.threshold
dataset = fold.split('/')[-3]
print(threshold)
if reco2num != 'None':
reco2num_spk = open(args.reco2num).readlines()
results_dict ={}
for i,f in enumerate(file_list):
print(f)
if "baseline" in fold:
b = np.load(fold+'/'+f+'.npy')
b = b/np.max(abs(b))
# b = 1/(1+np.exp(-b))
else:
if os.path.isfile(fold+'/'+f+'.pkl'):
deepahcmodel = pickle.load(open(fold+'/'+f+'.pkl','rb'))
b = deepahcmodel['output']
else:
b = np.load(fold+'/'+f+'.npy')
N = b.shape[0]
if reco2num != 'None':
n_clusters = int(reco2num_spk[i].split()[1])
n_clusters = min(n_clusters,N) # whichever is minimum
if f!=reco2num_spk[i].split()[0]:
print('file mismatch',f,reco2num_spk[i].split()[0])
threshold = None
else:
n_clusters = None
labelfull = AHC(b, threshold=threshold,nspeaker=n_clusters)
###################################################################
n_clusters = len(np.unique(labelfull))
print("filename: {} n_clusters:{}".format(f,n_clusters))
uni1,method1=unique(labelfull,True)
results_dict[f]=method1
write_results_dict(results_dict, out_file,reco2utt)
if __name__ == "__main__":
args = setup()
if args.clustering == "PIC":
print('In PIC !')
PIC_clustering()
else:
AHC_clustering()
|
[
"numpy.load",
"numpy.diag_indices_from",
"argparse.ArgumentParser",
"numpy.sum",
"numpy.triu",
"numpy.empty",
"numpy.ones",
"numpy.argsort",
"os.path.isfile",
"numpy.linalg.norm",
"numpy.arange",
"numpy.exp",
"numpy.unique",
"numpy.transpose",
"numpy.genfromtxt",
"pic_dihard_ami.PIC_dihard_threshold",
"numpy.max",
"numpy.tril_indices",
"numpy.diag_indices",
"numpy.sort",
"numpy.min",
"numpy.dot",
"numpy.delete",
"os.getcwd",
"numpy.isscalar",
"numpy.ix_",
"numpy.zeros",
"numpy.amax",
"numpy.where",
"numpy.array"
] |
[((366, 377), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (375, 377), False, 'import os\n'), ((508, 643), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Do speaker clsutering based onmy ahc"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Do speaker clsutering based onmy ahc',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (531, 643), False, 'import argparse\n'), ((4355, 4380), 'numpy.linalg.norm', 'np.linalg.norm', (['X'], {'axis': '(1)'}), '(X, axis=1)\n', (4369, 4380), True, 'import numpy as np\n'), ((19328, 19369), 'numpy.genfromtxt', 'np.genfromtxt', (['args.score_file'], {'dtype': 'str'}), '(args.score_file, dtype=str)\n', (19341, 19369), True, 'import numpy as np\n'), ((398, 409), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (407, 409), False, 'import os\n'), ((3123, 3149), 'numpy.diag_indices_from', 'np.diag_indices_from', (['dist'], {}), '(dist)\n', (3143, 3149), True, 'import numpy as np\n'), ((4539, 4565), 'numpy.transpose', 'np.transpose', (['X_normalized'], {}), '(X_normalized)\n', (4551, 4565), True, 'import numpy as np\n'), ((4915, 4950), 'numpy.empty', 'np.empty', (['arr.size'], {'dtype': 'arr.dtype'}), '(arr.size, dtype=arr.dtype)\n', (4923, 4950), True, 'import numpy as np\n'), ((4970, 4999), 'numpy.empty', 'np.empty', (['arr.size'], {'dtype': '"""i"""'}), "(arr.size, dtype='i')\n", (4978, 4999), True, 'import numpy as np\n'), ((5295, 5328), 'numpy.unique', 'np.unique', (['arr'], {'return_index': '(True)'}), '(arr, return_index=True)\n', (5304, 5328), True, 'import numpy as np\n'), ((6740, 6764), 'numpy.unique', 'np.unique', (['clusterLabels'], {}), '(clusterLabels)\n', (6749, 6764), True, 'import numpy as np\n'), ((7667, 7691), 'numpy.dot', 'np.dot', (['colvec', 'colvec.T'], {}), '(colvec, colvec.T)\n', (7673, 7691), True, 'import numpy as np\n'), ((13513, 13534), 'numpy.arange', 'np.arange', (['b.shape[0]'], {}), '(b.shape[0])\n', (13522, 13534), True, 'import numpy as np\n'), ((17208, 17229), 'numpy.arange', 'np.arange', (['b.shape[0]'], {}), '(b.shape[0])\n', (17217, 17229), True, 'import numpy as np\n'), ((5349, 5361), 'numpy.sort', 'np.sort', (['idx'], {}), '(idx)\n', (5356, 5361), True, 'import numpy as np\n'), ((5930, 5943), 'numpy.argsort', 'np.argsort', (['A'], {}), '(A)\n', (5940, 5943), True, 'import numpy as np\n'), ((5977, 6011), 'numpy.ones', 'np.ones', (['(sampleNum, 1)'], {'dtype': 'int'}), '((sampleNum, 1), dtype=int)\n', (5984, 6011), True, 'import numpy as np\n'), ((6187, 6235), 'numpy.unique', 'np.unique', (['assignedCluster[assignedCluster >= 0]'], {}), '(assignedCluster[assignedCluster >= 0])\n', (6196, 6235), True, 'import numpy as np\n'), ((7202, 7233), 'numpy.sum', 'np.sum', (['output_new[ind]'], {'axis': '(0)'}), '(output_new[ind], axis=0)\n', (7208, 7233), True, 'import numpy as np\n'), ((7462, 7494), 'numpy.ix_', 'np.ix_', (['clusterlist', 'clusterlist'], {}), '(clusterlist, clusterlist)\n', (7468, 7494), True, 'import numpy as np\n'), ((7964, 7984), 'numpy.triu', 'np.triu', (['self.A'], {'k': '(1)'}), '(self.A, k=1)\n', (7971, 7984), True, 'import numpy as np\n'), ((8044, 8058), 'numpy.min', 'np.min', (['self.A'], {}), '(self.A)\n', (8050, 8058), True, 'import numpy as np\n'), ((9973, 9996), 'numpy.delete', 'np.delete', (['B', 'maxind', '(1)'], {}), '(B, maxind, 1)\n', (9982, 9996), True, 'import numpy as np\n'), ((10012, 10035), 'numpy.delete', 'np.delete', (['B', 'maxind', '(0)'], {}), '(B, maxind, 0)\n', (10021, 10035), True, 'import numpy as np\n'), ((10075, 
10084), 'numpy.min', 'np.min', (['B'], {}), '(B)\n', (10081, 10084), True, 'import numpy as np\n'), ((10128, 10145), 'numpy.max', 'np.max', (['B'], {'axis': '(1)'}), '(B, axis=1)\n', (10134, 10145), True, 'import numpy as np\n'), ((12056, 12088), 'numpy.load', 'np.load', (["(fold + '/' + f + '.npy')"], {}), "(fold + '/' + f + '.npy')\n", (12063, 12088), True, 'import numpy as np\n'), ((12233, 12272), 'os.path.isfile', 'os.path.isfile', (["(fold + '/' + f + '.pkl')"], {}), "(fold + '/' + f + '.pkl')\n", (12247, 12272), False, 'import os\n'), ((14279, 14357), 'pic_dihard_ami.PIC_dihard_threshold', 'mypic.PIC_dihard_threshold', (['n_clusters', 'clusterlen', 'labels', 'affinity'], {'K': 'k', 'z': 'z'}), '(n_clusters, clusterlen, labels, affinity, K=k, z=z)\n', (14305, 14357), True, 'import pic_dihard_ami as mypic\n'), ((14825, 14918), 'pic_dihard_ami.PIC_dihard_threshold', 'mypic.PIC_dihard_threshold', (['n_clusters', 'clusterlen', 'labels', 'affinity', 'threshold'], {'K': 'k', 'z': 'z'}), '(n_clusters, clusterlen, labels, affinity,\n threshold, K=k, z=z)\n', (14851, 14918), True, 'import pic_dihard_ami as mypic\n'), ((16116, 16148), 'numpy.load', 'np.load', (["(fold + '/' + f + '.npy')"], {}), "(fold + '/' + f + '.npy')\n", (16123, 16148), True, 'import numpy as np\n'), ((16273, 16312), 'os.path.isfile', 'os.path.isfile', (["(fold + '/' + f + '.pkl')"], {}), "(fold + '/' + f + '.pkl')\n", (16287, 16312), False, 'import os\n'), ((16636, 16673), 'numpy.load', 'np.load', (["(fold_plda + '/' + f + '.npy')"], {}), "(fold_plda + '/' + f + '.npy')\n", (16643, 16673), True, 'import numpy as np\n'), ((17978, 18054), 'pic_dihard_ami.PIC_dihard_threshold', 'mypic.PIC_dihard_threshold', (['n_clusters', 'clusterlen', 'labels', 'b_plda'], {'K': 'k', 'z': 'z'}), '(n_clusters, clusterlen, labels, b_plda, K=k, z=z)\n', (18004, 18054), True, 'import pic_dihard_ami as mypic\n'), ((18148, 18233), 'pic_dihard_ami.PIC_dihard_threshold', 'mypic.PIC_dihard_threshold', (['n_clusters', 'clusterlen', 'labelfull', 'affinity'], {'K': 'k', 'z': 'z'}), '(n_clusters, clusterlen, labelfull, affinity, K=k,\n z=z)\n', (18174, 18233), True, 'import pic_dihard_ami as mypic\n'), ((18715, 18793), 'pic_dihard_ami.PIC_dihard_threshold', 'mypic.PIC_dihard_threshold', (['n_clusters', 'clusterlen', 'labels', 'affinity'], {'K': 'k', 'z': 'z'}), '(n_clusters, clusterlen, labels, affinity, K=k, z=z)\n', (18741, 18793), True, 'import pic_dihard_ami as mypic\n'), ((19771, 19803), 'numpy.load', 'np.load', (["(fold + '/' + f + '.npy')"], {}), "(fold + '/' + f + '.npy')\n", (19778, 19803), True, 'import numpy as np\n'), ((19899, 19938), 'os.path.isfile', 'os.path.isfile', (["(fold + '/' + f + '.pkl')"], {}), "(fold + '/' + f + '.pkl')\n", (19913, 19938), False, 'import os\n'), ((20690, 20710), 'numpy.unique', 'np.unique', (['labelfull'], {}), '(labelfull)\n', (20699, 20710), True, 'import numpy as np\n'), ((7024, 7054), 'numpy.where', 'np.where', (['(clusterLabels == lab)'], {}), '(clusterLabels == lab)\n', (7032, 7054), True, 'import numpy as np\n'), ((7608, 7633), 'numpy.array', 'np.array', (['self.clusterlen'], {}), '(self.clusterlen)\n', (7616, 7633), True, 'import numpy as np\n'), ((8079, 8104), 'numpy.tril_indices', 'np.tril_indices', (['cur_samp'], {}), '(cur_samp)\n', (8094, 8104), True, 'import numpy as np\n'), ((9504, 9529), 'numpy.unique', 'np.unique', (['self.labelfull'], {}), '(self.labelfull)\n', (9513, 9529), True, 'import numpy as np\n'), ((10049, 10073), 'numpy.diag_indices', 'np.diag_indices', (['newsamp'], {}), 
'(newsamp)\n', (10064, 10073), True, 'import numpy as np\n'), ((10100, 10124), 'numpy.diag_indices', 'np.diag_indices', (['newsamp'], {}), '(newsamp)\n', (10115, 10124), True, 'import numpy as np\n'), ((10768, 10794), 'numpy.isscalar', 'np.isscalar', (['hypothesis[j]'], {}), '(hypothesis[j])\n', (10779, 10794), True, 'import numpy as np\n'), ((12426, 12458), 'numpy.load', 'np.load', (["(fold + '/' + f + '.npy')"], {}), "(fold + '/' + f + '.npy')\n", (12433, 12458), True, 'import numpy as np\n'), ((16466, 16498), 'numpy.load', 'np.load', (["(fold + '/' + f + '.npy')"], {}), "(fold + '/' + f + '.npy')\n", (16473, 16498), True, 'import numpy as np\n'), ((20092, 20124), 'numpy.load', 'np.load', (["(fold + '/' + f + '.npy')"], {}), "(fold + '/' + f + '.npy')\n", (20099, 20124), True, 'import numpy as np\n'), ((8983, 8998), 'numpy.amax', 'np.amax', (['self.A'], {}), '(self.A)\n', (8990, 8998), True, 'import numpy as np\n'), ((9191, 9225), 'numpy.where', 'np.where', (['(self.labelfull == maxind)'], {}), '(self.labelfull == maxind)\n', (9199, 9225), True, 'import numpy as np\n'), ((9432, 9466), 'numpy.where', 'np.where', (['(self.labelfull == maxind)'], {}), '(self.labelfull == maxind)\n', (9440, 9466), True, 'import numpy as np\n'), ((9559, 9589), 'numpy.zeros', 'np.zeros', (['self.labelfull.shape'], {}), '(self.labelfull.shape)\n', (9567, 9589), True, 'import numpy as np\n'), ((12163, 12173), 'numpy.exp', 'np.exp', (['(-b)'], {}), '(-b)\n', (12169, 12173), True, 'import numpy as np\n'), ((12510, 12520), 'numpy.exp', 'np.exp', (['(-b)'], {}), '(-b)\n', (12516, 12520), True, 'import numpy as np\n'), ((16744, 16759), 'numpy.exp', 'np.exp', (['(-b_plda)'], {}), '(-b_plda)\n', (16750, 16759), True, 'import numpy as np\n'), ((16819, 16829), 'numpy.exp', 'np.exp', (['(-b)'], {}), '(-b)\n', (16825, 16829), True, 'import numpy as np\n'), ((8258, 8283), 'numpy.array', 'np.array', (['self.clusterlen'], {}), '(self.clusterlen)\n', (8266, 8283), True, 'import numpy as np\n'), ((9124, 9158), 'numpy.where', 'np.where', (['(self.labelfull == minind)'], {}), '(self.labelfull == minind)\n', (9132, 9158), True, 'import numpy as np\n'), ((13201, 13213), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (13210, 13213), True, 'import numpy as np\n'), ((13227, 13239), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (13236, 13239), True, 'import numpy as np\n'), ((8284, 8309), 'numpy.array', 'np.array', (['self.clusterlen'], {}), '(self.clusterlen)\n', (8292, 8309), True, 'import numpy as np\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import json
import math
import os
import numpy as np
from PIL import Image
from PIL import ImageFile
import tensorflow.compat.v2 as tf
from absl import logging
from absl import app
from absl import flags
import cv2
flags.DEFINE_string('acivity', 'swing', 'The class of test images.')
flags.DEFINE_string('input_dir', '/media/felicia/Data/mlb-youtube/%s_videos/rm_noise/videos', 'Path to videos.')
flags.DEFINE_string('name', 'image_bbgame_swing', 'Name of the dataset being created. This will '
                    'be used as a prefix.')
flags.DEFINE_string('file_pattern', '*.mp4', 'Pattern used to search for files '
                    'in the given directory.')
flags.DEFINE_string('label_file', None, 'Provide a corresponding labels file '
                    'that stores per-frame or per-sequence labels. This info '
                    'will get stored.')
flags.DEFINE_string('output_dir', '/media/felicia/Data/object_detection/data/%s/', 'Output directory where '
                    'tfrecords will be stored.')
flags.DEFINE_integer('files_per_shard', 50, 'Number of videos to store in a '
                     'shard.')
flags.DEFINE_boolean('rotate', False, 'Rotate videos by 90 degrees before '
                     'creating tfrecords')
flags.DEFINE_boolean('resize', True, 'Resize videos to a given size.')
flags.DEFINE_integer('width', 1280, 'Width of frames in the TFRecord.')
flags.DEFINE_integer('height', 720, 'Height of frames in the TFRecord.')
flags.DEFINE_list(
'frame_labels', '', 'Comma separated list of descriptions '
'for labels given on a per frame basis. For example: '
'winding_up,early_cocking,acclerating,follow_through')
flags.DEFINE_integer('action_label',0 , 'Action label of all videos.') # swing:0, ball:1, strike:2, foul:3, hit:4
flags.DEFINE_integer('expected_segments', -1, 'Expected number of segments.')
flags.DEFINE_integer('fps', 10, 'Frames per second of video. If 0, fps will be '
'read from metadata of video.') # Original:
FLAGS = flags.FLAGS
gfile = tf.io.gfile
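# Example invocation (hypothetical script name; flag spellings exactly as defined above,
# including 'acivity'):
#   python videos_to_numpy.py --acivity=swing --fps=10 --files_per_shard=50 --resize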
def video_to_frames(video_filename, rotate, fps=0, resize=False,
width=224, height=224):
"""Returns all frames from a video.
Args:
video_filename: string, filename of video.
rotate: Boolean: if True, rotates video by 90 degrees.
fps: Integer, frames per second of video. If 0, it will be inferred from
metadata of video.
resize: Boolean, if True resizes images to given size.
width: Integer, Width of image.
height: Integer, Height of image.
Raises:
ValueError: if fps is greater than the rate of video.
"""
logging.info('Loading %s', video_filename)
cap = cv2.VideoCapture(video_filename)
if fps == 0:
fps = cap.get(cv2.CAP_PROP_FPS)
keep_frequency = 1
else:
if fps > cap.get(cv2.CAP_PROP_FPS):
raise ValueError('Cannot sample at a frequency higher than FPS of video')
keep_frequency = int(float(cap.get(cv2.CAP_PROP_FPS)) / fps)
frames = []
timestamps = []
counter = 0
if cap.isOpened():
while True:
success, frame_bgr = cap.read()
if not success:
break
if counter % keep_frequency == 0:
# Convert BGR to RGB
frame_rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
if resize:
frame_rgb = cv2.resize(frame_rgb, (width, height))
if rotate:
frame_rgb = cv2.transpose(frame_rgb)
frame_rgb = cv2.flip(frame_rgb, 1)
frames.append(frame_rgb)
timestamps.append(cap.get(cv2.CAP_PROP_POS_MSEC) / 1000.0)
counter += 1
return frames, timestamps, fps
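# Minimal usage sketch (hypothetical path): sample a clip at 10 fps, resized to 1280x720.
#
#   frames, stamps, fps = video_to_frames('/tmp/clip.mp4', rotate=False, fps=10,
#                                         resize=True, width=1280, height=720)
#   # frames: list of HxWx3 uint8 RGB arrays; stamps: seconds from the start of the clip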
def create_numpy(name, output_dir, input_dir, label_file, input_pattern,
files_per_shard, action_label, frame_labels,
expected_segments, orig_fps, rotate, resize, width,
height):
"""Create Numpy file from videos in a given path.
Args:
name: string, name of the dataset being created.
output_dir: string, path to output directory.
input_dir: string, path to input videos directory.
label_file: None or string, JSON file that contains annotations.
input_pattern: string, regex pattern to look up videos in directory.
files_per_shard: int, number of files to keep in each shard.
action_label: int, Label of actions in video.
frame_labels: list, list of string describing each class. Class label is
the index in list.
expected_segments: int, expected number of segments.
orig_fps: int, frame rate at which tfrecord will be created.
rotate: boolean, if True rotate videos by 90 degrees.
resize: boolean, if True resize to given height and width.
width: int, Width of frames.
height: int, Height of frames.
Raises:
ValueError: If invalid args are passed.
"""
labels={
'swing':0,'ball':1
}
ACTIVITY=FLAGS.acivity
LABEL=labels[ACTIVITY]
input_dir=input_dir%ACTIVITY
output_path=output_dir%ACTIVITY
if not gfile.exists(output_path):
logging.info('Creating output directory: %s', output_path)
gfile.makedirs(output_path)
if not isinstance(input_pattern, list):
file_pattern = os.path.join(input_dir, input_pattern)
filenames = [os.path.basename(x) for x in gfile.glob(file_pattern)]
else:
filenames = []
for file_pattern in input_pattern:
file_pattern = os.path.join(input_dir, file_pattern)
filenames += [os.path.basename(x) for x in gfile.glob(file_pattern)]
num_shards = int(math.ceil(len(filenames)/files_per_shard))
len_num_shards = len(str(num_shards))
shard_id = 0
image_minibatch=list()
step_minibatch=list()
label_minibatch=list()
video_minibatch=list()
print('-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+')
print('shard_id',shard_id)
for i, filename in enumerate(filenames):
frames, video_timestamps, _ = video_to_frames(
os.path.join(input_dir, filename),
rotate,
orig_fps,
resize=resize,
width=width,
height=height)
vid_name=os.path.splitext(filename)[0]
vid_name=str.encode(vid_name)
image_minibatch.append(frames)
# duration=video_timestamps[1]
steps=np.array([x for x in range(len(video_timestamps))])
# print(i,filename,steps,video_timestamps)
step_minibatch.append(steps)
labels=[LABEL]*len(steps)
label_minibatch.append(labels)
vids=[vid_name]*len(steps)
video_minibatch+=vids
if (i + 1) % files_per_shard == 0 or i == len(filenames) - 1:
# if shard_id==2:
output_filename = os.path.join(
output_path,
'%s-%s-of-%s.npy' % (name,
str(shard_id).zfill(len_num_shards),
str(num_shards).zfill(len_num_shards)))
image_minibatch=np.concatenate(image_minibatch,axis=0)
step_minibatch=np.concatenate(step_minibatch,axis=0)
label_minibatch=np.concatenate(label_minibatch,axis=0)
numpy_dict={
'images':image_minibatch, # np.array: B*H*W*3
'activity':label_minibatch, # np.array: B*1
'steps':step_minibatch, # np.array:B*1
'videos':video_minibatch,# list
}
with open(output_filename,'wb') as file:
np.save(file,numpy_dict)
shard_id += 1
image_minibatch=list()
step_minibatch=list()
label_minibatch=list()
video_minibatch=list()
# print('-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+')
# print('shard_id',shard_id)
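# Each shard is a pickled dict written with np.save, so reading it back needs
# allow_pickle (illustrative, hypothetical shard name):
#   d = np.load('image_bbgame_swing-0-of-5.npy', allow_pickle=True).item()
#   d['images'].shape   # (total_frames, height, width, 3)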
def main(_):
create_numpy(FLAGS.name, FLAGS.output_dir, FLAGS.input_dir,
FLAGS.label_file, FLAGS.file_pattern, FLAGS.files_per_shard,
FLAGS.action_label, FLAGS.frame_labels,
FLAGS.expected_segments, FLAGS.fps, FLAGS.rotate,
FLAGS.resize, FLAGS.width, FLAGS.height)
if __name__ == '__main__':
app.run(main)
|
[
"cv2.resize",
"numpy.save",
"os.path.basename",
"cv2.cvtColor",
"cv2.transpose",
"absl.flags.DEFINE_string",
"absl.logging.info",
"cv2.VideoCapture",
"absl.app.run",
"absl.flags.DEFINE_integer",
"absl.flags.DEFINE_boolean",
"os.path.splitext",
"cv2.flip",
"absl.flags.DEFINE_list",
"os.path.join",
"numpy.concatenate"
] |
[((343, 411), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""acivity"""', '"""swing"""', '"""The class of test images."""'], {}), "('acivity', 'swing', 'The class of test images.')\n", (362, 411), False, 'from absl import flags\n'), ((412, 532), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""input_dir"""', '"""/media/felicia/Data/mlb-youtube/%s_videos/rm_noise/videos"""', '"""Path to videos."""'], {}), "('input_dir',\n '/media/felicia/Data/mlb-youtube/%s_videos/rm_noise/videos',\n 'Path to videos.')\n", (431, 532), False, 'from absl import flags\n'), ((525, 646), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""name"""', '"""image_bbgame_swing"""', '"""Name of the dataset being created. This willbe used as a prefix."""'], {}), "('name', 'image_bbgame_swing',\n 'Name of the dataset being created. This willbe used as a prefix.')\n", (544, 646), False, 'from absl import flags\n'), ((666, 772), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""file_pattern"""', '"""*.mp4"""', '"""Pattern used to searh for filesin the given directory."""'], {}), "('file_pattern', '*.mp4',\n 'Pattern used to searh for filesin the given directory.')\n", (685, 772), False, 'from absl import flags\n'), ((792, 950), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""label_file"""', 'None', '"""Provide a corresponding labels filethat stores per-frame or per-sequence labels. This infowill get stored."""'], {}), "('label_file', None,\n 'Provide a corresponding labels filethat stores per-frame or per-sequence labels. This infowill get stored.'\n )\n", (811, 950), False, 'from absl import flags\n'), ((988, 1129), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""output_dir"""', '"""/media/felicia/Data/object_detection/data/%s/"""', '"""Output directory wheretfrecords will be stored."""'], {}), "('output_dir',\n '/media/felicia/Data/object_detection/data/%s/',\n 'Output directory wheretfrecords will be stored.')\n", (1007, 1129), False, 'from absl import flags\n'), ((1145, 1232), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""files_per_shard"""', '(50)', '"""Number of videos to store in ashard."""'], {}), "('files_per_shard', 50,\n 'Number of videos to store in ashard.')\n", (1165, 1232), False, 'from absl import flags\n'), ((1253, 1350), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""rotate"""', '(False)', '"""Rotate videos by 90 degrees beforecreating tfrecords"""'], {}), "('rotate', False,\n 'Rotate videos by 90 degrees beforecreating tfrecords')\n", (1273, 1350), False, 'from absl import flags\n'), ((1371, 1441), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""resize"""', '(True)', '"""Resize videos to a given size."""'], {}), "('resize', True, 'Resize videos to a given size.')\n", (1391, 1441), False, 'from absl import flags\n'), ((1442, 1513), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""width"""', '(1280)', '"""Width of frames in the TFRecord."""'], {}), "('width', 1280, 'Width of frames in the TFRecord.')\n", (1462, 1513), False, 'from absl import flags\n'), ((1514, 1586), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""height"""', '(720)', '"""Height of frames in the TFRecord."""'], {}), "('height', 720, 'Height of frames in the TFRecord.')\n", (1534, 1586), False, 'from absl import flags\n'), ((1587, 1777), 'absl.flags.DEFINE_list', 'flags.DEFINE_list', (['"""frame_labels"""', '""""""', '"""Comma separated list of descriptions for labels given on a per frame basis. 
For example: winding_up,early_cocking,acclerating,follow_through"""'], {}), "('frame_labels', '',\n 'Comma separated list of descriptions for labels given on a per frame basis. For example: winding_up,early_cocking,acclerating,follow_through'\n )\n", (1604, 1777), False, 'from absl import flags\n'), ((1788, 1858), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""action_label"""', '(0)', '"""Action label of all videos."""'], {}), "('action_label', 0, 'Action label of all videos.')\n", (1808, 1858), False, 'from absl import flags\n'), ((1902, 1979), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""expected_segments"""', '(-1)', '"""Expected number of segments."""'], {}), "('expected_segments', -1, 'Expected number of segments.')\n", (1922, 1979), False, 'from absl import flags\n'), ((1980, 2098), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""fps"""', '(10)', '"""Frames per second of video. If 0, fps will be read from metadata of video."""'], {}), "('fps', 10,\n 'Frames per second of video. If 0, fps will be read from metadata of video.'\n )\n", (2000, 2098), False, 'from absl import flags\n'), ((2743, 2785), 'absl.logging.info', 'logging.info', (['"""Loading %s"""', 'video_filename'], {}), "('Loading %s', video_filename)\n", (2755, 2785), False, 'from absl import logging\n'), ((2794, 2826), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_filename'], {}), '(video_filename)\n', (2810, 2826), False, 'import cv2\n'), ((8030, 8043), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (8037, 8043), False, 'from absl import app\n'), ((5117, 5175), 'absl.logging.info', 'logging.info', (['"""Creating output directory: %s"""', 'output_path'], {}), "('Creating output directory: %s', output_path)\n", (5129, 5175), False, 'from absl import logging\n'), ((5270, 5308), 'os.path.join', 'os.path.join', (['input_dir', 'input_pattern'], {}), '(input_dir, input_pattern)\n', (5282, 5308), False, 'import os\n'), ((5326, 5345), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (5342, 5345), False, 'import os\n'), ((5468, 5505), 'os.path.join', 'os.path.join', (['input_dir', 'file_pattern'], {}), '(input_dir, file_pattern)\n', (5480, 5505), False, 'import os\n'), ((6017, 6050), 'os.path.join', 'os.path.join', (['input_dir', 'filename'], {}), '(input_dir, filename)\n', (6029, 6050), False, 'import os\n'), ((6171, 6197), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (6187, 6197), False, 'import os\n'), ((6934, 6973), 'numpy.concatenate', 'np.concatenate', (['image_minibatch'], {'axis': '(0)'}), '(image_minibatch, axis=0)\n', (6948, 6973), True, 'import numpy as np\n'), ((6994, 7032), 'numpy.concatenate', 'np.concatenate', (['step_minibatch'], {'axis': '(0)'}), '(step_minibatch, axis=0)\n', (7008, 7032), True, 'import numpy as np\n'), ((7054, 7093), 'numpy.concatenate', 'np.concatenate', (['label_minibatch'], {'axis': '(0)'}), '(label_minibatch, axis=0)\n', (7068, 7093), True, 'import numpy as np\n'), ((3342, 3384), 'cv2.cvtColor', 'cv2.cvtColor', (['frame_bgr', 'cv2.COLOR_BGR2RGB'], {}), '(frame_bgr, cv2.COLOR_BGR2RGB)\n', (3354, 3384), False, 'import cv2\n'), ((5526, 5545), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (5542, 5545), False, 'import os\n'), ((7380, 7405), 'numpy.save', 'np.save', (['file', 'numpy_dict'], {}), '(file, numpy_dict)\n', (7387, 7405), True, 'import numpy as np\n'), ((3426, 3464), 'cv2.resize', 'cv2.resize', (['frame_rgb', '(width, height)'], {}), '(frame_rgb, (width, height))\n', (3436, 3464), 
False, 'import cv2\n'), ((3506, 3530), 'cv2.transpose', 'cv2.transpose', (['frame_rgb'], {}), '(frame_rgb)\n', (3519, 3530), False, 'import cv2\n'), ((3553, 3575), 'cv2.flip', 'cv2.flip', (['frame_rgb', '(1)'], {}), '(frame_rgb, 1)\n', (3561, 3575), False, 'import cv2\n')]
|
# we need to cross validate all the methods
import sys
# sys.path.append(r'C:\Leenoy\Postdoc 1st year\IBL\Code_camp_September_2019\data_code_camp\dim_red_WG\umap-master')
sys.path.append('/Users/dep/Workspaces/Rodent/IBL/ibllib')
sys.path.append('/Users/dep/Workspaces/Rodent/IBL/code_camp/ibl-dimensionality_reduction/')
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
import numpy.ma as ma
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import StandardScaler
from sklearn.manifold import Isomap, MDS, TSNE, LocallyLinearEmbedding
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
# import umap
from sklearn.datasets import fetch_openml
import matplotlib.pyplot as plt
import seaborn as sns
import alf.io
from brainbox.processing import bincount2D
import ibllib.plots as iblplt
import dim_reduce
# define the path to the sessions we downloaded
# main_path = Path(r'C:\Leenoy\Postdoc 1st year\IBL\Code_camp_September_2019\data_code_camp')
main_path = Path('/Users/dep/Workspaces/Rodent/IBL/code_camp/data/')
SES = {
    'A': main_path.joinpath(Path('ZM_1735/2019-08-01/001')), # RSC --> CA1 --> midbrain, good behavior, bad recording
'B': main_path.joinpath(Path('ibl_witten_04_002/2019-08-04/002')), # visual cortex, good behavior, noisy recording
'C': main_path.joinpath(Path('ZM_1736/2019-08-09/004')), # left probe, bad behavior, good recording
    'D': main_path.joinpath(Path('ibl_witten_04_001/2018-08-11/001')), # motor cortex, bad behavior, good recording
    'E': main_path.joinpath(Path('KS005/2019-08-29/001')), # activity in red nucleus, bad recording (serious lick artifacts and some units saturated)
# 'F': main_path.joinpath(Path('KS005/2019-08-30/001')), # too large, didnt download for now
}
# select a session from the bunch
sid = 'A'
ses_path = Path(SES[sid])
# read in the alf objects
alf_path = ses_path / 'alf'
spikes = alf.io.load_object(alf_path, 'spikes') #can be addressed as spikes['time'] or spikes.time
clusters = alf.io.load_object(alf_path, 'clusters')
channels = alf.io.load_object(alf_path, 'channels')
trials = alf.io.load_object(alf_path, '_ibl_trials')
wheel = alf.io.load_object(alf_path, '_ibl_wheel')
T_BIN = 0.1
# compute raster map as a function of cluster number
# R, times, clusters = bincount2D(spikes['times']/30000, spikes['clusters'], T_BIN)
## plot raster map
#plt.imshow(R, aspect='auto', cmap='binary', vmax=T_BIN / 0.001 / 4,
# extent=np.r_[times[[0, -1]], clusters[[0, -1]]], origin='lower')
## plot trial start and reward time
#reward = trials['feedback_times'][trials['feedbackType'] == 1]
#iblplt.vertical_lines(trials['intervals'][:, 0], ymin=0, ymax=clusters[-1],
# color='k', linewidth=0.5, label='trial starts')
#iblplt.vertical_lines(reward, ymin=0, ymax=clusters[-1], color='m', linewidth=0.5,
# label='valve openings')
#plt.xlabel('Time (s)')
#plt.ylabel('Cluster #')
#plt.legend()
### Playing with Isomap, PCA, TSNE, UMAP
plt.ion()
T_BIN = 0.01
def Isomap_colored(binned_data, trial_range, n_comps, n_neigh, behav_var):
# trial_range is an array of two numbers defining the range of concatenated trials
# Find first and last bin index for given trial
a = list(binned_data['trial_number'])
#first = a.index(trial_number)
#last = len(a) - 1 - a[::-1].index(trial_number)
first = a.index(trial_range[0])
last = len(a) - 1 - a[::-1].index(trial_range[1])
# load neural data and reduce dimensions
X = binned_data['summed_spike_amps'][:, first:last].T
Y = Isomap(n_components=n_comps, n_neighbors=n_neigh).fit_transform(X)
# color it with some other experimental parameter
x, y, z = np.split(Y, 3, axis=1)
fig = plt.figure()
ax = Axes3D(fig)
p = ax.scatter(x, y, z, s=40, alpha=0.25, c=abs(
binned_data[behav_var][first:last]), cmap='bwr')
fig.colorbar(p)
#plt.title("Isomap Alex's visual cortex --> hippocampus recording vs wheel speed")
#plt.title("Isomap Alex's motor cortex --> thalamus recording vs %s" %behav_var)
plt.title("Isomap Guido's RSC --> CA1 --> midbrain recording vs %s" %behav_var, fontsize=40)
# plt.scatter(Y.T[0,:],Y.T[1,:],s=1,alpha=0.9,c=D_trial['wheel_velocity'][trial])
plt.show()
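# Note (this applies to the sister *_colored helpers below as well): the 3-D scatter
# assumes n_comps == 3, since np.split(Y, 3, axis=1) needs the embedding width to be a
# multiple of 3 and each of x, y, z to be a single column.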
def PCA_colored(binned_data, trial_range, n_comps, behav_var):
# trial_range is an array of two numbers defining the range of concatenated trials
# Find first and last bin index for given trial
a = list(binned_data['trial_number'])
#first = a.index(trial_number)
#last = len(a) - 1 - a[::-1].index(trial_number)
first = a.index(trial_range[0])
last = len(a) - 1 - a[::-1].index(trial_range[1])
# load neural data and reduce dimensions
X = binned_data['summed_spike_amps'][:, first:last].T
Y = PCA(n_components=n_comps, svd_solver='full').fit_transform(X)
# color it with some other experimental parameter
x, y, z = np.split(Y, 3, axis=1)
fig = plt.figure()
ax = Axes3D(fig)
p = ax.scatter(x, y, z, s=40, alpha=0.25, c=abs(
binned_data[behav_var][first:last]), cmap='ocean')
fig.colorbar(p)
#plt.title("PCA Alex's visual cortex --> hippocampus recording vs wheel speed")
#plt.title("PCA Alex's motor cortex --> thalamus recording vs %s" %behav_var)
plt.title("PCA Guido's RSC --> CA1 --> midbrain recording vs %s" %behav_var, fontsize=40)
# plt.scatter(Y.T[0,:],Y.T[1,:],s=1,alpha=0.9,c=D_trial['wheel_velocity'][trial])
plt.show()
def TSNE_colored(binned_data, trial_range, n_comps, perp, behav_var):
# trial_range is an array of two numbers defining the range of concatenated trials
# Find first and last bin index for given trial
a = list(binned_data['trial_number'])
#first = a.index(trial_number)
#last = len(a) - 1 - a[::-1].index(trial_number)
first = a.index(trial_range[0])
last = len(a) - 1 - a[::-1].index(trial_range[1])
# load neural data and reduce dimensions
X = binned_data['summed_spike_amps'][:, first:last].T
Y = TSNE(n_components=n_comps, perplexity=perp).fit_transform(X)
# color it with some other experimental parameter
x, y, z = np.split(Y, 3, axis=1)
fig = plt.figure()
ax = Axes3D(fig)
p = ax.scatter(x, y, z, s=40, alpha=0.25, c=abs(
binned_data[behav_var][first:last]), cmap='jet')
fig.colorbar(p)
#plt.title("TSNE Alex's visual cortex --> hippocampus recording vs wheel speed")
#plt.title("TSNE Alex's motor cortex --> thalamus recording vs %s" %behav_var)
plt.title("TSNE Guido's RSC --> CA1 --> midbrain recording vs %s" %behav_var, fontsize=40)
# plt.scatter(Y.T[0,:],Y.T[1,:],s=1,alpha=0.9,c=D_trial['wheel_velocity'][trial])
plt.show()
def FA_colored(binned_data, trial_range, n_comps, behav_var):
# trial_range is an array of two numbers defining the range of concatenated trials
# Find first and last bin index for given trial
a = list(binned_data['trial_number'])
#first = a.index(trial_number)
#last = len(a) - 1 - a[::-1].index(trial_number)
first = a.index(trial_range[0])
last = len(a) - 1 - a[::-1].index(trial_range[1])
# load neural data and reduce dimensions
X = binned_data['summed_spike_amps'][:, first:last].T
Y = FactorAnalysis(n_components=n_comps).fit_transform(X)
# color it with some other experimental parameter
x, y, z = np.split(Y, 3, axis=1)
fig = plt.figure()
ax = Axes3D(fig)
p = ax.scatter(x, y, z, s=40, alpha=0.25, c=abs(
binned_data[behav_var][first:last]), cmap='hsv')
fig.colorbar(p)
#plt.title("FA Alex's visual cortex --> hippocampus recording vs wheel speed")
#plt.title("FA Alex's motor cortex --> thalamus recording vs %s" %behav_var)
plt.title("Factor Analysis Guido's RSC --> CA1 --> midbrain recording vs %s" %behav_var, fontsize=40)
# plt.scatter(Y.T[0,:],Y.T[1,:],s=1,alpha=0.9,c=D_trial['wheel_velocity'][trial])
plt.show()
def LLE_colored(binned_data, trial_range, n_comps, n_neigh, behav_var):
# trial_range is an array of two numbers defining the range of concatenated trials
# Find first and last bin index for given trial
a = list(binned_data['trial_number'])
#first = a.index(trial_number)
#last = len(a) - 1 - a[::-1].index(trial_number)
first = a.index(trial_range[0])
last = len(a) - 1 - a[::-1].index(trial_range[1])
# load neural data and reduce dimensions
X = binned_data['summed_spike_amps'][:, first:last].T
Y = LocallyLinearEmbedding(n_components=n_comps, n_neighbors=n_neigh).fit_transform(X)
# color it with some other experimental parameter
x, y, z = np.split(Y, 3, axis=1)
fig = plt.figure()
ax = Axes3D(fig)
p = ax.scatter(x, y, z, s=40, alpha=0.25, c=abs(
binned_data[behav_var][first:last]), cmap='jet')
fig.colorbar(p)
#plt.title("LLE Alex's visual cortex --> hippocampus recording vs wheel speed")
#plt.title("LLE Alex's motor cortex --> thalamus recording vs %s" %behav_var)
plt.title("LLE Guido's RSC --> CA1 --> midbrain recording vs %s" %behav_var, fontsize=40)
# plt.scatter(Y.T[0,:],Y.T[1,:],s=1,alpha=0.9,c=D_trial['wheel_velocity'][trial])
plt.show()
def MDS_colored(binned_data, trial_range, n_comps, behav_var):
# trial_range is an array of two numbers defining the range of concatenated trials
# Find first and last bin index for given trial
a = list(binned_data['trial_number'])
#first = a.index(trial_number)
#last = len(a) - 1 - a[::-1].index(trial_number)
first = a.index(trial_range[0])
last = len(a) - 1 - a[::-1].index(trial_range[1])
# load neural data and reduce dimensions
X = binned_data['summed_spike_amps'][:, first:last].T
Y = MDS(n_components=n_comps).fit_transform(X)
# color it with some other experimental parameter
x, y, z = np.split(Y, 3, axis=1)
fig = plt.figure()
ax = Axes3D(fig)
p = ax.scatter(x, y, z, s=40, alpha=0.25, c=abs(
binned_data[behav_var][first:last]), cmap='jet')
fig.colorbar(p)
#plt.title("MDS Alex's visual cortex --> hippocampus recording vs wheel speed")
#plt.title("MDS Alex's motor cortex --> thalamus recording vs %s" %behav_var)
plt.title("MultiDimensional Scaling Guido's RSC --> CA1 --> midbrain recording vs %s" %behav_var, fontsize=40)
# plt.scatter(Y.T[0,:],Y.T[1,:],s=1,alpha=0.9,c=D_trial['wheel_velocity'][trial])
plt.show()
def UMAP_colored(binned_data, trial_range, n_comps, n_neigh, min_distance, behav_var):
# trial_range is an array of two numbers defining the range of concatenated trials
# Find first and last bin index for given trial
a = list(binned_data['trial_number'])
#first = a.index(trial_number)
#last = len(a) - 1 - a[::-1].index(trial_number)
first = a.index(trial_range[0])
last = len(a) - 1 - a[::-1].index(trial_range[1])
# load neural data and reduce dimensions
X = binned_data['summed_spike_amps'][:, first:last].T
Y = umap.UMAP(n_components=n_comps, n_neighbors=n_neigh, min_dist=min_distance).fit_transform(X)
# color it with some other experimental parameter
x, y, z = np.split(Y, 3, axis=1)
fig = plt.figure()
ax = Axes3D(fig)
p = ax.scatter(x, y, z, s=40, alpha=0.25, c=abs(
binned_data[behav_var][first:last]), cmap='ocean')
fig.colorbar(p)
#plt.title("UMAP Alex's visual cortex --> hippocampus recording vs wheel speed")
#plt.title("UMAP Alex's motor cortex --> thalamus recording vs %s" %behav_var)
plt.title("UMAP Guido's RSC --> CA1 --> midbrain recording vs %s" %behav_var, fontsize=40)
# plt.scatter(Y.T[0,:],Y.T[1,:],s=1,alpha=0.9,c=D_trial['wheel_velocity'][trial])
plt.show()
def LDA_colored(binned_data, n_comps, bounds):
# obs_limit=1000 #else it's too slow
low, high = bounds
X = binned_data['summed_spike_amps'][:, low:high].T
    # LDA is supervised, so fit_transform needs discrete class labels; the trial
    # choice is used here as the target (an assumption; any discrete behavioural
    # label would do). Note that sklearn caps n_components at n_classes - 1, so the
    # 3-D split below only works when the labels span at least four classes.
    y_labels = binned_data['choice'][low:high]
    Y = LinearDiscriminantAnalysis(n_components=n_comps).fit_transform(X, y_labels)
    x, y, z = np.split(Y, 3, axis=1)
fig = plt.figure()
ax = Axes3D(fig)
p = ax.scatter(x, y, z, s=20, alpha=0.25, c=abs(binned_data['wheel_velocity'][low:high]), cmap='ocean')
fig.colorbar(p)
    #plt.title("LDA Alex's visual cortex --> hippocampus recording vs wheel speed")
plt.title("LDA Alex's motor cortex --> thalamus recording vs wheel speed")
# plt.scatter(Y.T[0,:],Y.T[1,:],s=1,alpha=0.9,c=D_trial['wheel_velocity'][trial])
plt.show()
# lets run and plot
binned_data = dim_reduce.bin_types(spikes, trials, wheel, 0.2)
# binned_data.keys()
# behav_var takes the following options: 'choice'/ 'reward'/ 'wheel_velocity'/ 'wheel_position'
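# Keys the helpers above assume binned_data to expose (inferred from their usage):
#   'trial_number', 'summed_spike_amps', 'choice', 'reward',
#   'wheel_velocity', 'wheel_position'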
Isomap_colored(binned_data, [40, 90], 3, 5, 'choice') # second variable is range of trials, then number of components, then number of neighbors, last is behavioral variable
PCA_colored(binned_data, [40, 90], 3, 'wheel_velocity') # this is a PPCA implementation of PCA
TSNE_colored(binned_data, [40, 90], 3, 30.0, 'reward') # takes a long time # last variable here is perplexity. One should always run TSNE multiple times with perplexity ranging between 5 and 50 in order to decide on the best one
FA_colored(binned_data, [40, 90], 3, 'wheel_position')
LLE_colored(binned_data, [40, 90], 3, 30, 'wheel_velocity')
MDS_colored(binned_data, [40, 90], 3, 'choice')
#UMAP_colored(binned_data, [40, 90], 3, 30.0, 0.3, 'choice') # variable one before last is number of neighbors, last is minimum distance
#LDA_colored(binned_data, 3, (1,1000))
|
[
"sys.path.append",
"dim_reduce.bin_types",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"mpl_toolkits.mplot3d.Axes3D",
"sklearn.manifold.TSNE",
"numpy.split",
"sklearn.manifold.LocallyLinearEmbedding",
"matplotlib.pyplot.ion",
"pathlib.Path",
"matplotlib.pyplot.figure",
"sklearn.manifold.Isomap",
"sklearn.decomposition.PCA",
"sklearn.decomposition.FactorAnalysis",
"sklearn.discriminant_analysis.LinearDiscriminantAnalysis",
"sklearn.manifold.MDS"
] |
[((176, 234), 'sys.path.append', 'sys.path.append', (['"""/Users/dep/Workspaces/Rodent/IBL/ibllib"""'], {}), "('/Users/dep/Workspaces/Rodent/IBL/ibllib')\n", (191, 234), False, 'import sys\n'), ((236, 332), 'sys.path.append', 'sys.path.append', (['"""/Users/dep/Workspaces/Rodent/IBL/code_camp/ibl-dimensionality_reduction/"""'], {}), "(\n '/Users/dep/Workspaces/Rodent/IBL/code_camp/ibl-dimensionality_reduction/')\n", (251, 332), False, 'import sys\n'), ((1115, 1171), 'pathlib.Path', 'Path', (['"""/Users/dep/Workspaces/Rodent/IBL/code_camp/data/"""'], {}), "('/Users/dep/Workspaces/Rodent/IBL/code_camp/data/')\n", (1119, 1171), False, 'from pathlib import Path\n'), ((1959, 1973), 'pathlib.Path', 'Path', (['SES[sid]'], {}), '(SES[sid])\n', (1963, 1973), False, 'from pathlib import Path\n'), ((3170, 3179), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (3177, 3179), True, 'import matplotlib.pyplot as plt\n'), ((12878, 12926), 'dim_reduce.bin_types', 'dim_reduce.bin_types', (['spikes', 'trials', 'wheel', '(0.2)'], {}), '(spikes, trials, wheel, 0.2)\n', (12898, 12926), False, 'import dim_reduce\n'), ((3899, 3921), 'numpy.split', 'np.split', (['Y', '(3)'], {'axis': '(1)'}), '(Y, 3, axis=1)\n', (3907, 3921), True, 'import numpy as np\n'), ((3933, 3945), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3943, 3945), True, 'import matplotlib.pyplot as plt\n'), ((3956, 3967), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (3962, 3967), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((4280, 4377), 'matplotlib.pyplot.title', 'plt.title', (['("Isomap Guido\'s RSC --> CA1 --> midbrain recording vs %s" % behav_var)'], {'fontsize': '(40)'}), '("Isomap Guido\'s RSC --> CA1 --> midbrain recording vs %s" %\n behav_var, fontsize=40)\n', (4289, 4377), True, 'import matplotlib.pyplot as plt\n'), ((4465, 4475), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4473, 4475), True, 'import matplotlib.pyplot as plt\n'), ((5162, 5184), 'numpy.split', 'np.split', (['Y', '(3)'], {'axis': '(1)'}), '(Y, 3, axis=1)\n', (5170, 5184), True, 'import numpy as np\n'), ((5196, 5208), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5206, 5208), True, 'import matplotlib.pyplot as plt\n'), ((5219, 5230), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (5225, 5230), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((5539, 5633), 'matplotlib.pyplot.title', 'plt.title', (['("PCA Guido\'s RSC --> CA1 --> midbrain recording vs %s" % behav_var)'], {'fontsize': '(40)'}), '("PCA Guido\'s RSC --> CA1 --> midbrain recording vs %s" %\n behav_var, fontsize=40)\n', (5548, 5633), True, 'import matplotlib.pyplot as plt\n'), ((5721, 5731), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5729, 5731), True, 'import matplotlib.pyplot as plt\n'), ((6426, 6448), 'numpy.split', 'np.split', (['Y', '(3)'], {'axis': '(1)'}), '(Y, 3, axis=1)\n', (6434, 6448), True, 'import numpy as np\n'), ((6460, 6472), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6470, 6472), True, 'import matplotlib.pyplot as plt\n'), ((6483, 6494), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (6489, 6494), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((6803, 6898), 'matplotlib.pyplot.title', 'plt.title', (['("TSNE Guido\'s RSC --> CA1 --> midbrain recording vs %s" % behav_var)'], {'fontsize': '(40)'}), '("TSNE Guido\'s RSC --> CA1 --> midbrain recording vs %s" %\n behav_var, fontsize=40)\n', (6812, 6898), True, 'import matplotlib.pyplot as plt\n'), 
((6986, 6996), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6994, 6996), True, 'import matplotlib.pyplot as plt\n'), ((7676, 7698), 'numpy.split', 'np.split', (['Y', '(3)'], {'axis': '(1)'}), '(Y, 3, axis=1)\n', (7684, 7698), True, 'import numpy as np\n'), ((7710, 7722), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7720, 7722), True, 'import matplotlib.pyplot as plt\n'), ((7733, 7744), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (7739, 7744), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((8049, 8160), 'matplotlib.pyplot.title', 'plt.title', (['("Factor Analysis Guido\'s RSC --> CA1 --> midbrain recording vs %s" % behav_var\n )'], {'fontsize': '(40)'}), '(\n "Factor Analysis Guido\'s RSC --> CA1 --> midbrain recording vs %s" %\n behav_var, fontsize=40)\n', (8058, 8160), True, 'import matplotlib.pyplot as plt\n'), ((8243, 8253), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8251, 8253), True, 'import matplotlib.pyplot as plt\n'), ((8972, 8994), 'numpy.split', 'np.split', (['Y', '(3)'], {'axis': '(1)'}), '(Y, 3, axis=1)\n', (8980, 8994), True, 'import numpy as np\n'), ((9006, 9018), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9016, 9018), True, 'import matplotlib.pyplot as plt\n'), ((9029, 9040), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (9035, 9040), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((9347, 9441), 'matplotlib.pyplot.title', 'plt.title', (['("LLE Guido\'s RSC --> CA1 --> midbrain recording vs %s" % behav_var)'], {'fontsize': '(40)'}), '("LLE Guido\'s RSC --> CA1 --> midbrain recording vs %s" %\n behav_var, fontsize=40)\n', (9356, 9441), True, 'import matplotlib.pyplot as plt\n'), ((9529, 9539), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9537, 9539), True, 'import matplotlib.pyplot as plt\n'), ((10207, 10229), 'numpy.split', 'np.split', (['Y', '(3)'], {'axis': '(1)'}), '(Y, 3, axis=1)\n', (10215, 10229), True, 'import numpy as np\n'), ((10241, 10253), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10251, 10253), True, 'import matplotlib.pyplot as plt\n'), ((10264, 10275), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (10270, 10275), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((10582, 10703), 'matplotlib.pyplot.title', 'plt.title', (['("MultiDimensional Scaling Guido\'s RSC --> CA1 --> midbrain recording vs %s" %\n behav_var)'], {'fontsize': '(40)'}), '(\n "MultiDimensional Scaling Guido\'s RSC --> CA1 --> midbrain recording vs %s"\n % behav_var, fontsize=40)\n', (10591, 10703), True, 'import matplotlib.pyplot as plt\n'), ((10785, 10795), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10793, 10795), True, 'import matplotlib.pyplot as plt\n'), ((11541, 11563), 'numpy.split', 'np.split', (['Y', '(3)'], {'axis': '(1)'}), '(Y, 3, axis=1)\n', (11549, 11563), True, 'import numpy as np\n'), ((11575, 11587), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11585, 11587), True, 'import matplotlib.pyplot as plt\n'), ((11598, 11609), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (11604, 11609), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((11920, 12015), 'matplotlib.pyplot.title', 'plt.title', (['("UMAP Guido\'s RSC --> CA1 --> midbrain recording vs %s" % behav_var)'], {'fontsize': '(40)'}), '("UMAP Guido\'s RSC --> CA1 --> midbrain recording vs %s" %\n behav_var, fontsize=40)\n', (11929, 12015), True, 'import matplotlib.pyplot as plt\n'), ((12103, 12113), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12111, 12113), True, 'import matplotlib.pyplot as plt\n'), ((12371, 12393), 'numpy.split', 'np.split', (['Y', '(3)'], {'axis': '(1)'}), '(Y, 3, axis=1)\n', (12379, 12393), True, 'import numpy as np\n'), ((12404, 12416), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12414, 12416), True, 'import matplotlib.pyplot as plt\n'), ((12427, 12438), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (12433, 12438), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((12657, 12731), 'matplotlib.pyplot.title', 'plt.title', (['"""LDA Alex\'s motor cortex --> thalamus recording vs wheel speed"""'], {}), '("LDA Alex\'s motor cortex --> thalamus recording vs wheel speed")\n', (12666, 12731), True, 'import matplotlib.pyplot as plt\n'), ((12824, 12834), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12832, 12834), True, 'import matplotlib.pyplot as plt\n'), ((1210, 1240), 'pathlib.Path', 'Path', (['"""ZM_1735/2019-08-01/001"""'], {}), "('ZM_1735/2019-08-01/001')\n", (1214, 1240), False, 'from pathlib import Path\n'), ((1329, 1369), 'pathlib.Path', 'Path', (['"""ibl_witten_04_002/2019-08-04/002"""'], {}), "('ibl_witten_04_002/2019-08-04/002')\n", (1333, 1369), False, 'from pathlib import Path\n'), ((1449, 1479), 'pathlib.Path', 'Path', (['"""ZM_1736/2019-08-09/004"""'], {}), "('ZM_1736/2019-08-09/004')\n", (1453, 1479), False, 'from pathlib import Path\n'), ((1555, 1595), 'pathlib.Path', 'Path', (['"""ibl_witten_04_001/2018-08-11/001"""'], {}), "('ibl_witten_04_001/2018-08-11/001')\n", (1559, 1595), False, 'from pathlib import Path\n'), ((1672, 1700), 'pathlib.Path', 'Path', (['"""KS005/2019-08-29/001"""'], {}), "('KS005/2019-08-29/001')\n", (1676, 1700), False, 'from pathlib import Path\n'), ((3760, 3809), 'sklearn.manifold.Isomap', 'Isomap', ([], {'n_components': 'n_comps', 'n_neighbors': 'n_neigh'}), '(n_components=n_comps, n_neighbors=n_neigh)\n', (3766, 3809), False, 'from sklearn.manifold import Isomap, MDS, TSNE, LocallyLinearEmbedding\n'), ((5030, 5074), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_comps', 'svd_solver': '"""full"""'}), "(n_components=n_comps, svd_solver='full')\n", (5033, 5074), False, 'from sklearn.decomposition import PCA, FactorAnalysis\n'), ((6295, 6338), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': 'n_comps', 'perplexity': 'perp'}), '(n_components=n_comps, perplexity=perp)\n', (6299, 6338), False, 'from sklearn.manifold import Isomap, MDS, TSNE, LocallyLinearEmbedding\n'), ((7552, 7588), 'sklearn.decomposition.FactorAnalysis', 'FactorAnalysis', ([], {'n_components': 'n_comps'}), '(n_components=n_comps)\n', (7566, 7588), False, 'from sklearn.decomposition import PCA, FactorAnalysis\n'), ((8819, 8884), 'sklearn.manifold.LocallyLinearEmbedding', 'LocallyLinearEmbedding', ([], {'n_components': 'n_comps', 'n_neighbors': 'n_neigh'}), '(n_components=n_comps, n_neighbors=n_neigh)\n', (8841, 8884), False, 'from sklearn.manifold import Isomap, MDS, TSNE, LocallyLinearEmbedding\n'), ((10094, 10119), 'sklearn.manifold.MDS', 'MDS', ([], {'n_components': 'n_comps'}), '(n_components=n_comps)\n', (10097, 10119), False, 'from sklearn.manifold import Isomap, MDS, TSNE, LocallyLinearEmbedding\n'), ((12296, 12338), 'sklearn.discriminant_analysis.LinearDiscriminantAnalysis', 'LinearDiscriminantAnalysis', (['binned_data', '(3)'], {}), '(binned_data, 3)\n', (12322, 12338), False, 'from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n')]
|
import numpy as np
from sklearn import model_selection
from sklearn import datasets
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import seaborn as sns
class Friedman1Test:
"""This class encapsulates the Friedman1 regression test for feature selection
"""
VALIDATION_SIZE = 0.20
NOISE = 1.0
def __init__(self, numFeatures, numSamples, randomSeed):
"""
:param numFeatures: total number of features to be used (at least 5)
:param numSamples: number of samples in dataset
:param randomSeed: random seed value used for reproducible results
"""
self.numFeatures = numFeatures
self.numSamples = numSamples
self.randomSeed = randomSeed
# generate test data:
self.X, self.y = datasets.make_friedman1(n_samples=self.numSamples, n_features=self.numFeatures,
noise=self.NOISE, random_state=self.randomSeed)
# divide the data to a training set and a validation set:
self.X_train, self.X_validation, self.y_train, self.y_validation = \
model_selection.train_test_split(self.X, self.y, test_size=self.VALIDATION_SIZE, random_state=self.randomSeed)
self.regressor = GradientBoostingRegressor(random_state=self.randomSeed)
def __len__(self):
"""
:return: the total number of features
"""
return self.numFeatures
def getMSE(self, zeroOneList):
"""
returns the mean squared error of the regressor, calculated for the validation set, after training
using the features selected by the zeroOneList
        :param zeroOneList: a list of binary values corresponding to the features in the dataset. A value of '1'
represents selecting the corresponding feature, while a value of '0' means that the feature is dropped.
:return: the mean squared error of the regressor when using the features selected by the zeroOneList
"""
# drop the columns of the training and validation sets that correspond to the
# unselected features:
zeroIndices = [i for i, n in enumerate(zeroOneList) if n == 0]
currentX_train = np.delete(self.X_train, zeroIndices, 1)
currentX_validation = np.delete(self.X_validation, zeroIndices, 1)
        # train the regression model using the training set:
self.regressor.fit(currentX_train, self.y_train)
# calculate the regressor's output for the validation set:
prediction = self.regressor.predict(currentX_validation)
        # return the mean squared error of prediction vs actual data:
return mean_squared_error(self.y_validation, prediction)
# testing the class:
def main():
# create a test instance:
test = Friedman1Test(numFeatures=15, numSamples=60, randomSeed=42)
scores = []
# calculate MSE for 'n' first features:
for n in range(1, len(test) + 1):
nFirstFeatures = [1] * n + [0] * (len(test) - n)
score = test.getMSE(nFirstFeatures)
print("%d first features: score = %f" % (n, score))
scores.append(score)
# plot graph:
sns.set_style("whitegrid")
plt.plot([i + 1 for i in range(len(test))], scores, color='red')
plt.xticks(np.arange(1, len(test) + 1, 1.0))
plt.xlabel('n First Features')
plt.ylabel('MSE')
plt.title('MSE over Features Selected')
plt.show()
if __name__ == "__main__":
main()
|
[
"matplotlib.pyplot.title",
"seaborn.set_style",
"matplotlib.pyplot.show",
"sklearn.datasets.make_friedman1",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.GradientBoostingRegressor",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.delete",
"sklearn.metrics.mean_squared_error"
] |
[((3234, 3260), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (3247, 3260), True, 'import seaborn as sns\n'), ((3383, 3413), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""n First Features"""'], {}), "('n First Features')\n", (3393, 3413), True, 'import matplotlib.pyplot as plt\n'), ((3418, 3435), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""MSE"""'], {}), "('MSE')\n", (3428, 3435), True, 'import matplotlib.pyplot as plt\n'), ((3440, 3479), 'matplotlib.pyplot.title', 'plt.title', (['"""MSE over Features Selected"""'], {}), "('MSE over Features Selected')\n", (3449, 3479), True, 'import matplotlib.pyplot as plt\n'), ((3484, 3494), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3492, 3494), True, 'import matplotlib.pyplot as plt\n'), ((863, 995), 'sklearn.datasets.make_friedman1', 'datasets.make_friedman1', ([], {'n_samples': 'self.numSamples', 'n_features': 'self.numFeatures', 'noise': 'self.NOISE', 'random_state': 'self.randomSeed'}), '(n_samples=self.numSamples, n_features=self.\n numFeatures, noise=self.NOISE, random_state=self.randomSeed)\n', (886, 995), False, 'from sklearn import datasets\n'), ((1196, 1311), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['self.X', 'self.y'], {'test_size': 'self.VALIDATION_SIZE', 'random_state': 'self.randomSeed'}), '(self.X, self.y, test_size=self.\n VALIDATION_SIZE, random_state=self.randomSeed)\n', (1228, 1311), False, 'from sklearn import model_selection\n'), ((1333, 1388), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {'random_state': 'self.randomSeed'}), '(random_state=self.randomSeed)\n', (1358, 1388), False, 'from sklearn.ensemble import GradientBoostingRegressor\n'), ((2283, 2322), 'numpy.delete', 'np.delete', (['self.X_train', 'zeroIndices', '(1)'], {}), '(self.X_train, zeroIndices, 1)\n', (2292, 2322), True, 'import numpy as np\n'), ((2353, 2397), 'numpy.delete', 'np.delete', (['self.X_validation', 'zeroIndices', '(1)'], {}), '(self.X_validation, zeroIndices, 1)\n', (2362, 2397), True, 'import numpy as np\n'), ((2736, 2785), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['self.y_validation', 'prediction'], {}), '(self.y_validation, prediction)\n', (2754, 2785), False, 'from sklearn.metrics import mean_squared_error\n')]
|
import numpy as np
def TAPOHardLaw(r, tau0, q, b, H):
sigma = tau0 + q * (1 - np.exp(-b * r)) + H * r
return sigma
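# Note (editor's comment): the expression above resembles a Voce-type saturation
# hardening law with an additional linear term,
#   sigma(r) = tau0 + q * (1 - exp(-b * r)) + H * r,
# where tau0 would be the initial yield stress, q and b shape the saturating part and
# H is a linear hardening modulus. The averages below look like fits from three tests;
# this interpretation of the symbols is an assumption, not stated in the original file.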
r = np.arange(0, 0.2, 0.01)
tau0 = np.average([22.3, 24.88, 23.92])
q = np.average([7.76, 6.19, 6.23])
b = np.average([40.59, 49.48, 47.46])
H = np.average([8.11, 6.82, 13.58])
stress = TAPOHardLaw(r, tau0, q, b, H)
import pandas as pd
df = pd.DataFrame(np.vstack([r, stress]), ['r', 'sigma'])
df.to_csv('hard.csv')
|
[
"numpy.average",
"numpy.arange",
"numpy.exp",
"numpy.vstack"
] |
[((130, 153), 'numpy.arange', 'np.arange', (['(0)', '(0.2)', '(0.01)'], {}), '(0, 0.2, 0.01)\n', (139, 153), True, 'import numpy as np\n'), ((161, 193), 'numpy.average', 'np.average', (['[22.3, 24.88, 23.92]'], {}), '([22.3, 24.88, 23.92])\n', (171, 193), True, 'import numpy as np\n'), ((198, 228), 'numpy.average', 'np.average', (['[7.76, 6.19, 6.23]'], {}), '([7.76, 6.19, 6.23])\n', (208, 228), True, 'import numpy as np\n'), ((233, 266), 'numpy.average', 'np.average', (['[40.59, 49.48, 47.46]'], {}), '([40.59, 49.48, 47.46])\n', (243, 266), True, 'import numpy as np\n'), ((271, 302), 'numpy.average', 'np.average', (['[8.11, 6.82, 13.58]'], {}), '([8.11, 6.82, 13.58])\n', (281, 302), True, 'import numpy as np\n'), ((382, 404), 'numpy.vstack', 'np.vstack', (['[r, stress]'], {}), '([r, stress])\n', (391, 404), True, 'import numpy as np\n'), ((83, 97), 'numpy.exp', 'np.exp', (['(-b * r)'], {}), '(-b * r)\n', (89, 97), True, 'import numpy as np\n')]
|
import os
import numpy as np
from ReconstructOrder.utils import mManagerReader
from ReconstructOrder.utils import imBitConvert
if __name__ == '__main__':
RawDataPath = '/flexo/ComputationalMicroscopy/Projects/brainarchitecture'
ProcessedPath = RawDataPath
ImgDir = '2019_01_04_david_594CTIP2_647SATB2_20X'
SmDir = 'SMS_2019_0104_1257_2_SMS_2019_0104_1257_2'
input_chan = ['Orientation']
output_chan = ['Orientation_x', 'Orientation_y']
img_sm_path = os.path.join(RawDataPath, ImgDir, SmDir) # Sample image folder path, of form 'SM_yyyy_mmdd_hhmm_X'
OutputPath = img_sm_path
img_io = mManagerReader(img_sm_path, OutputPath, input_chan=input_chan, output_chan=output_chan)
for t_idx in range(img_io.n_time):
img_io.t_idx = t_idx
for pos_idx in range(img_io.n_pos): # nXY
img_io.pos_idx = pos_idx
for z_idx in range(img_io.n_z):
print('Processing position %03d, time %03d z %03d ...' % (pos_idx, t_idx, z_idx))
img_io.z_idx = z_idx
img_io.chan_idx = 0
                    azimuth_degree = img_io.read_img()
                    # the stored orientation appears to be in hundredths of a degree
                    # (18000 corresponds to 180 deg), so this converts it to radians
                    azimuth = azimuth_degree/18000*np.pi
azimuth_imgs = [np.cos(2 * azimuth), np.sin(2 * azimuth)]
                    azimuth_imgs = [imBitConvert((img + 1) * 1000, bit=16) for img in azimuth_imgs]  # scale to [0, 2000]
for chan_idx, image in enumerate(azimuth_imgs):
img_io.chan_idx = chan_idx
img_io.write_img(image)
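# Note (editor's sketch): the orientation angle theta is pi-periodic, which is why it is
# written out as the pair (cos(2*theta), sin(2*theta)) rather than as the raw angle.
# Assuming the scaling applied above is undone on read, a hypothetical decode step could
# look like (names are illustrative only):
#   x = img_x / 1000.0 - 1.0
#   y = img_y / 1000.0 - 1.0
#   theta = 0.5 * np.arctan2(y, x)  # radians, wrapped to (-pi/2, pi/2]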
|
[
"ReconstructOrder.utils.mManagerReader",
"numpy.sin",
"ReconstructOrder.utils.imBitConvert",
"numpy.cos",
"os.path.join"
] |
[((479, 519), 'os.path.join', 'os.path.join', (['RawDataPath', 'ImgDir', 'SmDir'], {}), '(RawDataPath, ImgDir, SmDir)\n', (491, 519), False, 'import os\n'), ((621, 713), 'ReconstructOrder.utils.mManagerReader', 'mManagerReader', (['img_sm_path', 'OutputPath'], {'input_chan': 'input_chan', 'output_chan': 'output_chan'}), '(img_sm_path, OutputPath, input_chan=input_chan, output_chan=\n output_chan)\n', (635, 713), False, 'from ReconstructOrder.utils import mManagerReader\n'), ((1217, 1236), 'numpy.cos', 'np.cos', (['(2 * azimuth)'], {}), '(2 * azimuth)\n', (1223, 1236), True, 'import numpy as np\n'), ((1238, 1257), 'numpy.sin', 'np.sin', (['(2 * azimuth)'], {}), '(2 * azimuth)\n', (1244, 1257), True, 'import numpy as np\n'), ((1291, 1329), 'ReconstructOrder.utils.imBitConvert', 'imBitConvert', (['((img + 1) * 1000)'], {'bit': '(16)'}), '((img + 1) * 1000, bit=16)\n', (1303, 1329), False, 'from ReconstructOrder.utils import imBitConvert\n')]
|
#! /usr/bin/python2
# -*- coding: utf-8 -*-
# Using a cellular automaton (CA) for the construction of galaxies
import numpy as np
import scipy.stats.distributions as dis
from mayavi import mlab
from itertools import repeat, izip, ifilter
class Star(object):
"Clase para definir una Estrella"""
def __init__(self, r, angle, state=0):
"""Constructor de estrellas """
self.r = r
self.angle = angle
self.state = state
class Galaxy(object):
"""Clase galaxia"""
def __init__(self, radio, neighborhood):
"""Initialize the galaxy with some radius
and the chosen neighborhood """
self.distributionFuncStars = dis.rv_discrete(name='custm',
values=[(0, 1),
(0.84, 0.16)])
self.distributionGas = dis.rv_discrete(name='custm_gas',
values=[(0, 1), (0.3, 0.7)])
stars = []
stars.append(Star(0, neighborhood[0],
self.distributionFuncStars.rvs()))
try:
            for r in np.arange(1, radio + 1, 0.5):  # to regulate the cell size
map(lambda a: stars.append(Star(a[1], a[0] / float(a[1]),
self.distributionFuncStars.rvs())), izip(neighborhood,
repeat(r, len(neighborhood))))
self.stars = stars
        except Exception as e:
print(e)
def birthFunction(self, star):
"""Initialize one star"""
star[0].state = self.distributionFuncStars.rvs()
def growthFunction(self, star):
"""Increases growth of each star"""
if star.state > 8:
star.state = 0
star.state += 1
try:
# set angular velocity
star.angle = star.angle + 1 / float(star.r)
except ZeroDivisionError:
            # just to save the day :)
star.angle = star.angle + 1
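    # Note (editor's comment): advancing each star by delta_angle = 1/r per step gives a
    # roughly constant tangential speed at every radius (a flat rotation curve). In
    # stochastic self-propagating star formation models of this kind, this differential
    # rotation is what shears freshly activated cells into spiral-arm-like patterns;
    # that interpretation is an assumption, not stated in the original file.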
def scanning(self):
"""See state of stars and change formations"""
for s in self.stars:
localNeighborhood = ifilter(lambda star: star[1] == star[0].r + 1,
izip(self.stars,
repeat(s.r, len(self.stars))))
map(self.birthFunction, localNeighborhood)
        # here we age the galaxy
map(self.growthFunction, self.stars)
def plottingStars(self, colors):
"""For plotting the stars"""
x = []
y = []
activeStars = filter(lambda s: s.state != 0, self.stars)
map(lambda star: x.append(float(star.r * np.cos(star.angle))),
activeStars)
map(lambda star: y.append(float(star.r * np.sin(star.angle))),
activeStars)
return mlab.points3d(x, y, x, resolution=20, scale_factor=.5,
scale_mode='none')
def countOfActiveStars(self):
"""Show counter of stars"""
activeStars = ifilter(lambda s: s.state != 0, self.stars)
noActiveStars = ifilter(lambda s: s.state == 0, self.stars)
return len(list(activeStars)), len(list(noActiveStars))
def plottingInterstellarGas(self):
"""Insert randomness in the production of the galaxy """
x = []
y = []
activeGas = filter(lambda s: self.distributionGas.rvs() == 1,
self.stars)
map(lambda star: x.append(float(star.r * np.cos(star.angle))),
activeGas)
map(lambda star: y.append(float(star.r * np.sin(star.angle))),
activeGas)
return mlab.points3d(x, y, x, color=(0.8, 0.5, 0), resolution=20,
scale_factor=.5, scale_mode='none')
if __name__ == "__main__":
neighborhood = [0, 60, 120, 180, 240, 330]
colors = {1: (1, 1, 0.9), 2: (1, 1, 0.6), 3: (1, 1, 0.4), 4: (1, 1, 0.2),
5: (1, 1, 0), 6: (1, 0.8, 0), 7: (1, 0.5, 0), 8: (1, 0.2, 0),
9: (1, 0, 0)}
myGalaxy = Galaxy(50, neighborhood)
print("al comienzo tenemos {}".format(myGalaxy.countOfActiveStars()))
for t in range(40):
myGalaxy.scanning()
points = myGalaxy.plottingStars(colors)
otherpoints = myGalaxy.plottingInterstellarGas()
print("al final tenemos {}".format(myGalaxy.countOfActiveStars()))
mlab.pipeline.volume(mlab.pipeline.gaussian_splatter(points))
mlab.pipeline.volume(mlab.pipeline.gaussian_splatter(otherpoints))
mlab.view(49, 31.5, 52.8, (4.2, 37.3, 20.6))
mlab.show()
|
[
"itertools.ifilter",
"mayavi.mlab.show",
"mayavi.mlab.points3d",
"scipy.stats.distributions.rv_discrete",
"numpy.sin",
"numpy.arange",
"numpy.cos",
"mayavi.mlab.view",
"mayavi.mlab.pipeline.gaussian_splatter"
] |
[((4493, 4537), 'mayavi.mlab.view', 'mlab.view', (['(49)', '(31.5)', '(52.8)', '(4.2, 37.3, 20.6)'], {}), '(49, 31.5, 52.8, (4.2, 37.3, 20.6))\n', (4502, 4537), False, 'from mayavi import mlab\n'), ((4542, 4553), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (4551, 4553), False, 'from mayavi import mlab\n'), ((659, 719), 'scipy.stats.distributions.rv_discrete', 'dis.rv_discrete', ([], {'name': '"""custm"""', 'values': '[(0, 1), (0.84, 0.16)]'}), "(name='custm', values=[(0, 1), (0.84, 0.16)])\n", (674, 719), True, 'import scipy.stats.distributions as dis\n'), ((865, 927), 'scipy.stats.distributions.rv_discrete', 'dis.rv_discrete', ([], {'name': '"""custm_gas"""', 'values': '[(0, 1), (0.3, 0.7)]'}), "(name='custm_gas', values=[(0, 1), (0.3, 0.7)])\n", (880, 927), True, 'import scipy.stats.distributions as dis\n'), ((2811, 2885), 'mayavi.mlab.points3d', 'mlab.points3d', (['x', 'y', 'x'], {'resolution': '(20)', 'scale_factor': '(0.5)', 'scale_mode': '"""none"""'}), "(x, y, x, resolution=20, scale_factor=0.5, scale_mode='none')\n", (2824, 2885), False, 'from mayavi import mlab\n'), ((3007, 3050), 'itertools.ifilter', 'ifilter', (['(lambda s: s.state != 0)', 'self.stars'], {}), '(lambda s: s.state != 0, self.stars)\n', (3014, 3050), False, 'from itertools import repeat, izip, ifilter\n'), ((3075, 3118), 'itertools.ifilter', 'ifilter', (['(lambda s: s.state == 0)', 'self.stars'], {}), '(lambda s: s.state == 0, self.stars)\n', (3082, 3118), False, 'from itertools import repeat, izip, ifilter\n'), ((3634, 3733), 'mayavi.mlab.points3d', 'mlab.points3d', (['x', 'y', 'x'], {'color': '(0.8, 0.5, 0)', 'resolution': '(20)', 'scale_factor': '(0.5)', 'scale_mode': '"""none"""'}), "(x, y, x, color=(0.8, 0.5, 0), resolution=20, scale_factor=0.5,\n scale_mode='none')\n", (3647, 3733), False, 'from mayavi import mlab\n'), ((4377, 4416), 'mayavi.mlab.pipeline.gaussian_splatter', 'mlab.pipeline.gaussian_splatter', (['points'], {}), '(points)\n', (4408, 4416), False, 'from mayavi import mlab\n'), ((4443, 4487), 'mayavi.mlab.pipeline.gaussian_splatter', 'mlab.pipeline.gaussian_splatter', (['otherpoints'], {}), '(otherpoints)\n', (4474, 4487), False, 'from mayavi import mlab\n'), ((1136, 1164), 'numpy.arange', 'np.arange', (['(1)', '(radio + 1)', '(0.5)'], {}), '(1, radio + 1, 0.5)\n', (1145, 1164), True, 'import numpy as np\n'), ((2652, 2670), 'numpy.cos', 'np.cos', (['star.angle'], {}), '(star.angle)\n', (2658, 2670), True, 'import numpy as np\n'), ((2748, 2766), 'numpy.sin', 'np.sin', (['star.angle'], {}), '(star.angle)\n', (2754, 2766), True, 'import numpy as np\n'), ((3479, 3497), 'numpy.cos', 'np.cos', (['star.angle'], {}), '(star.angle)\n', (3485, 3497), True, 'import numpy as np\n'), ((3573, 3591), 'numpy.sin', 'np.sin', (['star.angle'], {}), '(star.angle)\n', (3579, 3591), True, 'import numpy as np\n')]
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from typing import Union, Callable, Dict, TypeVar, Optional, List
import itertools
import tensorflow as tf
import numpy as np
TF_EXECUTABLE_ND_ARRAY = TypeVar('TF_EXECUTABLE_ND_ARRAY', tf.Tensor,
np.ndarray)
def interpolate(x: tf.Tensor, y: tf.Tensor, beta: Union[tf.Tensor,
float]) -> tf.Tensor:
beta = tf.convert_to_tensor(beta)
with tf.compat.v1.name_scope('Interpolation', values=[x, y, beta]):
return x + (y - x) * beta
def regular_grid_sample(feat: TF_EXECUTABLE_ND_ARRAY,
pts: TF_EXECUTABLE_ND_ARRAY,
name: str = 'regular_grid_sample'):
"""
    Regular grid sampling method: receives the N-D indices of points and returns the corresponding features at
    these points.
Args:
feat: a `float32` Tensor with shape [1, ..., N, C], the sampled feature map
pts: a `int32` Tensor with shape [M, N], the indices map to sample on `feat`
name: operation name
Returns:
a `float32` Tensor with shape [M, C]
"""
feat = tf.convert_to_tensor(feat)
pts = tf.convert_to_tensor(pts)
batch_dims = len(feat.shape) - pts.shape.as_list()[-1] - 1
with tf.name_scope(name=name):
return tf.gather_nd(feat, pts, batch_dims=batch_dims)
def floor_and_ceiling(x):
floor = tf.floor(x)
ceiling = floor + 1
return floor, ceiling
def expand_stack_indices(indices):
expand_list = list([[]])
for idx in indices:
tmp_list = list()
tmp_floor, tmp_ceil = floor_and_ceiling(idx)
for e_l in expand_list:
tmp_list.append(e_l + [tmp_floor])
tmp_list.append(e_l + [tmp_ceil])
expand_list = list(tmp_list)
expand_list = [tf.stack(e, axis=-1) for e in expand_list]
return expand_list
def linear_nd_interpolate(x: TF_EXECUTABLE_ND_ARRAY,
s_pts: TF_EXECUTABLE_ND_ARRAY,
s_pts_scale: int = 1,
s_func: Optional[Callable[..., tf.Tensor]] = None,
s_func_kargs: Optional[Dict] = None,
name: str = 'linear_nd_interpolate'):
"""
    This is an N-D linear interpolation function that supports regular grids (with the default `s_func`) and other
    irregular N-D structures (with a customized `s_func`).
Args:
x: a `float32` Tensor with an arbitrary shape (should be parsed by `s_func`) but given channels [..., C]
        s_pts: a `float32` Tensor with shape [M, N + 1], where M is the total number of points and the N + 1 columns
            hold the batch id and the N-D position, respectively.
s_pts_scale: scale of s_pts
s_func: a function receives a feature map `x` and a set of points location as inputs, returning the exactly
features in these points
s_func_kargs: a dict to save the extra parameters of `s_func`
name: operation name
Returns:
a `float32` Tensor with shape [M, C]
"""
if s_func is None:
s_func = regular_grid_sample
if s_func_kargs is None:
s_func_kargs = dict()
x = tf.convert_to_tensor(x)
s_pts = tf.convert_to_tensor(s_pts)
with tf.name_scope(name):
indices = tf.unstack(s_pts, axis=-1)
indices[1:] = [pts * s_pts_scale for pts in indices[1:]]
expanded_indices = expand_stack_indices(indices[1:])
center = tf.stack(indices[1:], axis=-1)
def _acquire_feat(sample_):
feat_ = s_func(x, tf.cast(tf.concat([indices[0][..., tf.newaxis], sample_], axis=-1), tf.int32),
**s_func_kargs)
return feat_
def _acquire_weight(sample_):
offset_ = center - sample_
weight_ = tf.abs(tf.reduce_prod(offset_, axis=-1, keepdims=True))
return weight_
intrp_shape = tf.concat([tf.shape(s_pts)[:-1], [tf.shape(x)[-1]]],
axis=0)
intrp_value = tf.zeros(intrp_shape, dtype=x.dtype)
for s_f, s_w in zip(expanded_indices, expanded_indices[::-1]):
intrp_value += _acquire_feat(s_f) * _acquire_weight(s_w)
return intrp_value
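# Example (editor's sketch, hypothetical values): bilinear sampling on a 2-D feature map.
# For `x` of shape [B, H, W, C] we have N = 2, so every row of `s_pts` is
# (batch_id, row, col) in fractional pixel coordinates:
#   feat = tf.random.uniform([1, 8, 8, 4])
#   pts = tf.constant([[0., 2.5, 3.5], [0., 0.25, 6.75]])  # two query points in image 0
#   vals = linear_nd_interpolate(feat, pts)                  # -> shape [2, 4]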
def batch_interpolate(x: tf.Tensor,
s_pts: tf.Tensor,
pts_value_range: List,
pts_offset: float,
interpolate_method: str,
name: str = None):
"""
    batch interpolation function (linear or nearest)
Args:
x: a `float32` Tensor with an arbitrary shape (should be parsed by `s_func`) but given channels [N, ..., C]
s_pts: a `float32` Tensor with shape [N, ..., D], where D = x.shape.ndims - 2
pts_value_range: points position range
pts_offset: points position offset
interpolate_method: interpolate method, 'linear' or 'nearest'
name: operation name
Returns:
a `float32` Tensor with shape [..., C]
"""
with tf.compat.v1.name_scope(name, default_name='BatchInterpolation', values=[x, s_pts]):
x_shape = x.shape.as_list()
clipped_pts = tf.clip_by_value(s_pts, pts_value_range[0], pts_value_range[1])
batch_range = tf.reshape(tf.range(x_shape[0], dtype=tf.int32), [x_shape[0], *[1]*(len(x_shape) - 1)])
batch_indices = tf.tile(batch_range, [1, *s_pts.shape.as_list()[1:-1], 1])
if interpolate_method == 'linear':
batch_pts = tf.concat((tf.cast(batch_indices, tf.float32), clipped_pts + pts_offset), axis=-1)
interpolated_label = linear_nd_interpolate(x, tf.reshape(batch_pts, [-1, len(x_shape)-1]))
interpolated = tf.reshape(interpolated_label, [*s_pts.shape.as_list()[:-1], x_shape[-1]])
elif interpolate_method == 'nearest':
rounded_pts = tf.cast(tf.round(clipped_pts + pts_offset), tf.int32)
batch_pts = tf.concat((batch_indices, rounded_pts), axis=-1)
interpolated = tf.gather_nd(x, batch_pts)
else:
raise NotImplementedError
return interpolated
class TestInterpolation(tf.test.TestCase):
def __init__(self, method_name='TestInterpolation'):
super().__init__(methodName=method_name)
@staticmethod
def gen_test_pairs(test_shape, channel_num=4, batch_size=6, offset=0.5):
test_size = np.asarray(test_shape)
feat_map = np.random.rand(
*[batch_size, *test_size.tolist(), channel_num]).astype(np.float32)
interp_nd = [
np.arange(0, t_s - 1).astype(np.float32) + offset
for t_s in test_size
]
interp = np.meshgrid(
*[np.arange(batch_size).astype(np.float32), *interp_nd],
indexing='ij')
interp = np.stack(interp, axis=-1)
interp_shape = interp.shape
interp = np.reshape(interp, [-1, len(test_shape) + 1])
return feat_map, interp, interp_shape
@staticmethod
def mean_nd_tensor(arr, axis):
sls = [slice(0, -1), slice(1, None)]
sls_list = [i for i in itertools.product(*([sls] * len(axis)))]
new_arr_shape = np.array(arr.shape)
new_arr_shape[1:-1] = new_arr_shape[1:-1] - 1
new_arr = np.zeros(new_arr_shape)
for s_l in sls_list:
assert len(s_l) == 2 or len(s_l) == 3
x, y = s_l[:2]
new_arr += arr[:, x, y, ...] if len(s_l) == 2 else arr[:, x, y,
s_l[2], ...]
return new_arr / len(sls_list)
def testLinearInterpolate(self):
offset = 0.25
feat_map, interp, interp_shape = self.gen_test_pairs([8],
offset=offset)
interp_native = tf.reshape(linear_nd_interpolate(feat_map, interp), [*interp_shape[:-1], -1])
interp_ref = np.array(feat_map[:, :-1, ...]) * (1 - offset)
interp_ref += feat_map[:, 1:, ...] * offset
self.assertAllClose(interp_ref, interp_native.numpy())
def testBilinearInterpolate(self):
feat_map, interp, interp_shape = self.gen_test_pairs([8, 8])
interp_native = tf.reshape(linear_nd_interpolate(feat_map, interp), [*interp_shape[:-1], -1])
interp_ref = self.mean_nd_tensor(feat_map, [1, 2])
self.assertAllClose(interp_ref, interp_native.numpy())
def testTrilinearInterpolate(self):
feat_map, interp, interp_shape = self.gen_test_pairs([8, 8, 8])
interp_native = tf.reshape(linear_nd_interpolate(feat_map, interp), [*interp_shape[:-1], -1])
interp_ref = self.mean_nd_tensor(feat_map, [1, 2, 3])
self.assertAllClose(interp_ref, interp_native.numpy())
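# Note (editor's comment): the assertions above call `.numpy()` on tensors and therefore
# rely on eager execution (the TF 2.x default). One way to run the cases, not part of the
# original file, would be:
#   if __name__ == '__main__':
#       tf.test.main()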
|
[
"tensorflow.clip_by_value",
"tensorflow.gather_nd",
"tensorflow.floor",
"tensorflow.reduce_prod",
"numpy.arange",
"tensorflow.compat.v1.name_scope",
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.cast",
"typing.TypeVar",
"tensorflow.name_scope",
"numpy.stack",
"tensorflow.range",
"numpy.asarray",
"tensorflow.round",
"tensorflow.convert_to_tensor",
"numpy.zeros",
"tensorflow.shape",
"tensorflow.zeros",
"numpy.array",
"tensorflow.unstack"
] |
[((226, 282), 'typing.TypeVar', 'TypeVar', (['"""TF_EXECUTABLE_ND_ARRAY"""', 'tf.Tensor', 'np.ndarray'], {}), "('TF_EXECUTABLE_ND_ARRAY', tf.Tensor, np.ndarray)\n", (233, 282), False, 'from typing import Union, Callable, Dict, TypeVar, Optional, List\n'), ((474, 500), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['beta'], {}), '(beta)\n', (494, 500), True, 'import tensorflow as tf\n'), ((1213, 1239), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['feat'], {}), '(feat)\n', (1233, 1239), True, 'import tensorflow as tf\n'), ((1250, 1275), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['pts'], {}), '(pts)\n', (1270, 1275), True, 'import tensorflow as tf\n'), ((1476, 1487), 'tensorflow.floor', 'tf.floor', (['x'], {}), '(x)\n', (1484, 1487), True, 'import tensorflow as tf\n'), ((3255, 3278), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x'], {}), '(x)\n', (3275, 3278), True, 'import tensorflow as tf\n'), ((3291, 3318), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['s_pts'], {}), '(s_pts)\n', (3311, 3318), True, 'import tensorflow as tf\n'), ((510, 571), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['"""Interpolation"""'], {'values': '[x, y, beta]'}), "('Interpolation', values=[x, y, beta])\n", (533, 571), True, 'import tensorflow as tf\n'), ((1348, 1372), 'tensorflow.name_scope', 'tf.name_scope', ([], {'name': 'name'}), '(name=name)\n', (1361, 1372), True, 'import tensorflow as tf\n'), ((1389, 1435), 'tensorflow.gather_nd', 'tf.gather_nd', (['feat', 'pts'], {'batch_dims': 'batch_dims'}), '(feat, pts, batch_dims=batch_dims)\n', (1401, 1435), True, 'import tensorflow as tf\n'), ((1888, 1908), 'tensorflow.stack', 'tf.stack', (['e'], {'axis': '(-1)'}), '(e, axis=-1)\n', (1896, 1908), True, 'import tensorflow as tf\n'), ((3328, 3347), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (3341, 3347), True, 'import tensorflow as tf\n'), ((3367, 3393), 'tensorflow.unstack', 'tf.unstack', (['s_pts'], {'axis': '(-1)'}), '(s_pts, axis=-1)\n', (3377, 3393), True, 'import tensorflow as tf\n'), ((3537, 3567), 'tensorflow.stack', 'tf.stack', (['indices[1:]'], {'axis': '(-1)'}), '(indices[1:], axis=-1)\n', (3545, 3567), True, 'import tensorflow as tf\n'), ((4103, 4139), 'tensorflow.zeros', 'tf.zeros', (['intrp_shape'], {'dtype': 'x.dtype'}), '(intrp_shape, dtype=x.dtype)\n', (4111, 4139), True, 'import tensorflow as tf\n'), ((5088, 5175), 'tensorflow.compat.v1.name_scope', 'tf.compat.v1.name_scope', (['name'], {'default_name': '"""BatchInterpolation"""', 'values': '[x, s_pts]'}), "(name, default_name='BatchInterpolation', values=[x,\n s_pts])\n", (5111, 5175), True, 'import tensorflow as tf\n'), ((5231, 5294), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['s_pts', 'pts_value_range[0]', 'pts_value_range[1]'], {}), '(s_pts, pts_value_range[0], pts_value_range[1])\n', (5247, 5294), True, 'import tensorflow as tf\n'), ((6443, 6465), 'numpy.asarray', 'np.asarray', (['test_shape'], {}), '(test_shape)\n', (6453, 6465), True, 'import numpy as np\n'), ((6851, 6876), 'numpy.stack', 'np.stack', (['interp'], {'axis': '(-1)'}), '(interp, axis=-1)\n', (6859, 6876), True, 'import numpy as np\n'), ((7217, 7236), 'numpy.array', 'np.array', (['arr.shape'], {}), '(arr.shape)\n', (7225, 7236), True, 'import numpy as np\n'), ((7309, 7332), 'numpy.zeros', 'np.zeros', (['new_arr_shape'], {}), '(new_arr_shape)\n', (7317, 7332), True, 'import numpy as np\n'), ((5328, 5364), 'tensorflow.range', 'tf.range', (['x_shape[0]'], {'dtype': 
'tf.int32'}), '(x_shape[0], dtype=tf.int32)\n', (5336, 5364), True, 'import tensorflow as tf\n'), ((7959, 7990), 'numpy.array', 'np.array', (['feat_map[:, :-1, ...]'], {}), '(feat_map[:, :-1, ...])\n', (7967, 7990), True, 'import numpy as np\n'), ((3889, 3936), 'tensorflow.reduce_prod', 'tf.reduce_prod', (['offset_'], {'axis': '(-1)', 'keepdims': '(True)'}), '(offset_, axis=-1, keepdims=True)\n', (3903, 3936), True, 'import tensorflow as tf\n'), ((5993, 6041), 'tensorflow.concat', 'tf.concat', (['(batch_indices, rounded_pts)'], {'axis': '(-1)'}), '((batch_indices, rounded_pts), axis=-1)\n', (6002, 6041), True, 'import tensorflow as tf\n'), ((6069, 6095), 'tensorflow.gather_nd', 'tf.gather_nd', (['x', 'batch_pts'], {}), '(x, batch_pts)\n', (6081, 6095), True, 'import tensorflow as tf\n'), ((3643, 3701), 'tensorflow.concat', 'tf.concat', (['[indices[0][..., tf.newaxis], sample_]'], {'axis': '(-1)'}), '([indices[0][..., tf.newaxis], sample_], axis=-1)\n', (3652, 3701), True, 'import tensorflow as tf\n'), ((3999, 4014), 'tensorflow.shape', 'tf.shape', (['s_pts'], {}), '(s_pts)\n', (4007, 4014), True, 'import tensorflow as tf\n'), ((5566, 5600), 'tensorflow.cast', 'tf.cast', (['batch_indices', 'tf.float32'], {}), '(batch_indices, tf.float32)\n', (5573, 5600), True, 'import tensorflow as tf\n'), ((5923, 5957), 'tensorflow.round', 'tf.round', (['(clipped_pts + pts_offset)'], {}), '(clipped_pts + pts_offset)\n', (5931, 5957), True, 'import tensorflow as tf\n'), ((4022, 4033), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (4030, 4033), True, 'import tensorflow as tf\n'), ((6615, 6636), 'numpy.arange', 'np.arange', (['(0)', '(t_s - 1)'], {}), '(0, t_s - 1)\n', (6624, 6636), True, 'import numpy as np\n'), ((6752, 6773), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (6761, 6773), True, 'import numpy as np\n')]
|
import json
from argparse import ArgumentParser
import tensorflow as tf
import cv2
import sys
import numpy as np
from PIL import Image
def cut_faces(image, faces_coord):
faces = []
for (x, y, w, h) in faces_coord:
faces.append(image[y: y + h, x: x + w])
return faces
def resize(images, size=(224, 224)):
images_norm = []
for image in images:
if image.shape < size:
image_norm = cv2.resize(image, size,
interpolation=cv2.INTER_AREA)
else:
image_norm = cv2.resize(image, size,
interpolation=cv2.INTER_CUBIC)
images_norm.append(image_norm)
return images_norm
def normalize_faces(image, faces_coord):
faces = cut_faces(image, faces_coord)
faces = resize(faces)
return faces
def webcam_detection(model_path: str):
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(
physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
cap = cv2.VideoCapture(0)
# web cam params
font = cv2.FONT_HERSHEY_SIMPLEX
bottom_left_corner_Of_text = (10, 30)
font_scale = 1
font_color = (255, 255, 255)
line_type = 2
names = ['Neutral', 'Happiness', 'Sadness', 'Surprise', 'Fear',
'Disgust', 'Anger', 'Contempt']
names = sorted(names)
model = tf.keras.models.load_model(model_path)
# launch web cam
video_capture = cv2.VideoCapture(0)
classifier = cv2.CascadeClassifier(
'./haarcascade_frontalface_default.xml')
exit = False
while not exit:
_, frame = video_capture.read()
faces = classifier.detectMultiScale(
frame,
scaleFactor=1.1,
minNeighbors=4,
minSize=(100, 100),
flags=cv2.CASCADE_SCALE_IMAGE)
predicted_name = 'unknown'
        if len(faces) == 0:  # detectMultiScale returns an empty tuple when no face is found
pass
else:
for (x, y, w, h) in faces:
x -= 30
y -= 30
w += 60
h += 60
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
faces_only = normalize_faces(frame, faces)
for face in faces_only:
image = Image.fromarray(face, 'RGB')
image_array = np.array(image, dtype=np.float32)
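                # scale pixel values from [0, 255] to [-1, 1]; presumably the range the
                # loaded model was trained with (an assumption, not stated in this file)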
image_array /= 127.5
image_array -= 1.
image_array = np.expand_dims(image_array, axis=0)
prediction = model(image_array)
#img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
#x = cv2.resize(img, (224, 224))
#x = np.array(x, dtype=np.float32)
#x /= 127.5
#x -= 1.
#x = np.expand_dims(x, axis=0)
#prediction = model(x)
for i, item in enumerate(prediction[0]):
print(f'{names[i]}: {float(item)}')
predicted_name = names[np.argmax(prediction)]
# add text on the image
cv2.putText(frame, predicted_name,
bottom_left_corner_Of_text,
font,
font_scale,
font_color,
line_type)
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
# to snap a picture
# out = cv2.imwrite('capture.jpg', frame)
# break
exit = True
    # release both capture handles (`cap` is opened above but never read from)
    video_capture.release()
    cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("-m", "--model",
help="select your model")
args = parser.parse_args()
model_configuration_name = args.model
webcam_detection(model_configuration_name)
|
[
"tensorflow.keras.models.load_model",
"argparse.ArgumentParser",
"cv2.putText",
"numpy.argmax",
"cv2.waitKey",
"cv2.imshow",
"tensorflow.config.experimental.set_memory_growth",
"numpy.expand_dims",
"cv2.VideoCapture",
"cv2.rectangle",
"numpy.array",
"cv2.CascadeClassifier",
"PIL.Image.fromarray",
"cv2.destroyAllWindows",
"tensorflow.config.experimental.list_physical_devices",
"cv2.resize"
] |
[((907, 958), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (951, 958), True, 'import tensorflow as tf\n'), ((1063, 1130), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_devices[0]', '(True)'], {}), '(physical_devices[0], True)\n', (1103, 1130), True, 'import tensorflow as tf\n'), ((1141, 1160), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1157, 1160), False, 'import cv2\n'), ((1484, 1522), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['model_path'], {}), '(model_path)\n', (1510, 1522), True, 'import tensorflow as tf\n'), ((1565, 1584), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1581, 1584), False, 'import cv2\n'), ((1603, 1665), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""./haarcascade_frontalface_default.xml"""'], {}), "('./haarcascade_frontalface_default.xml')\n", (1624, 1665), False, 'import cv2\n'), ((3538, 3561), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3559, 3561), False, 'import cv2\n'), ((3604, 3620), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (3618, 3620), False, 'from argparse import ArgumentParser\n'), ((3099, 3206), 'cv2.putText', 'cv2.putText', (['frame', 'predicted_name', 'bottom_left_corner_Of_text', 'font', 'font_scale', 'font_color', 'line_type'], {}), '(frame, predicted_name, bottom_left_corner_Of_text, font,\n font_scale, font_color, line_type)\n', (3110, 3206), False, 'import cv2\n'), ((3312, 3338), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (3322, 3338), False, 'import cv2\n'), ((431, 484), 'cv2.resize', 'cv2.resize', (['image', 'size'], {'interpolation': 'cv2.INTER_AREA'}), '(image, size, interpolation=cv2.INTER_AREA)\n', (441, 484), False, 'import cv2\n'), ((560, 614), 'cv2.resize', 'cv2.resize', (['image', 'size'], {'interpolation': 'cv2.INTER_CUBIC'}), '(image, size, interpolation=cv2.INTER_CUBIC)\n', (570, 614), False, 'import cv2\n'), ((2192, 2254), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(0, 255, 255)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)\n', (2205, 2254), False, 'import cv2\n'), ((2370, 2398), 'PIL.Image.fromarray', 'Image.fromarray', (['face', '"""RGB"""'], {}), "(face, 'RGB')\n", (2385, 2398), False, 'from PIL import Image\n'), ((2429, 2462), 'numpy.array', 'np.array', (['image'], {'dtype': 'np.float32'}), '(image, dtype=np.float32)\n', (2437, 2462), True, 'import numpy as np\n'), ((2564, 2599), 'numpy.expand_dims', 'np.expand_dims', (['image_array'], {'axis': '(0)'}), '(image_array, axis=0)\n', (2578, 2599), True, 'import numpy as np\n'), ((3035, 3056), 'numpy.argmax', 'np.argmax', (['prediction'], {}), '(prediction)\n', (3044, 3056), True, 'import numpy as np\n'), ((3350, 3364), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3361, 3364), False, 'import cv2\n')]
|
# Copyright (c) 2019, Vienna University of Technology (TU Wien), Department of
# Geodesy and Geoinformation (GEO).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the FreeBSD Project.
"""
Main code for creating data cubes.
"""
# general packages
import copy
import os
import re
import netCDF4
import pandas as pd
import numpy as np
import xarray as xr
from collections import OrderedDict
# geo packages
from osgeo import osr
from osgeo import ogr
import shapely.wkt
from shapely.geometry import Polygon
from geopandas import GeoSeries
from geopandas import GeoDataFrame
from geopandas.base import is_geometry_type
import pytileproj.geometry as geometry
from veranda.io.geotiff import GeoTiffFile
from veranda.io.netcdf import NcFile
from veranda.io.timestack import GeoTiffRasterTimeStack
from veranda.io.timestack import NcRasterTimeStack
from geospade.tools import rasterise_polygon
from geospade.raster import RasterGeometry
from geospade.raster import SpatialRef
from geospade.transform import xy2ij
from geospade.transform import ij2xy
# load yeoda's utils module
from yeoda.utils import get_file_type
from yeoda.utils import any_geom2ogr_geom
from yeoda.utils import to_list
from yeoda.utils import swap_axis
from yeoda.utils import get_polygon_envelope
# load classes from yeoda's error module
from yeoda.errors import IOClassNotFound
from yeoda.errors import DataTypeUnknown
from yeoda.errors import TileNotAvailable
from yeoda.errors import FileTypeUnknown
from yeoda.errors import DimensionUnkown
from yeoda.errors import LoadingDataError
from yeoda.errors import SpatialInconsistencyError
def _check_inventory(f):
"""
Decorator for `EODataCube` functions to check if the inventory exists.
Parameters
----------
f : function
'EODataCube' function that has a keyword argument `inplace`
Returns
-------
function
Wrapper around `f`.
"""
def f_wrapper(self, *args, **kwargs):
inplace = kwargs.get('inplace')
if self.inventory is not None:
return f(self, *args, **kwargs)
else:
if inplace:
return None
else:
return self
return f_wrapper
def _set_status(status):
"""
Decorator for `EODataCube` functions to set internal flag defining if a process changes the data cube structure or
not.
Parameters
----------
status: str
Flag defining the status of the data cube. It can be:
- "changed": a filtering process was executed, therefore the structure of the data cube has changed.
- "stable": the structure of the data cube is remains the same.
Returns
-------
function
Wrapper around `f`.
"""
def decorator(f):
def f_wrapper(self, *args, **kwargs):
ret_val = f(self, *args, **kwargs)
inplace = kwargs.get('inplace', None)
if inplace:
self.status = status
return ret_val
return f_wrapper
return decorator
class EODataCube:
"""
A file(name) based data cube for preferably gridded and well-structured EO data.
"""
def __init__(self, filepaths=None, grid=None, filename_class=None, dimensions=None, inventory=None,
io_map=None, sdim_name="tile", tdim_name="time"):
"""
Constructor of the `EODataCube` class.
Parameters
----------
filepaths : list of str, optional
List of file paths.
grid : pytileproj.base.TiledProjection, optional
Tiled projection/grid object/class (e.g. `Equi7Grid`, `LatLonGrid`).
filename_class : geopathfinder.file_naming.SmartFilename, optional
`SmartFilename` class to handle the interpretation of file names.
dimensions : list of str, optional
List of filename parts to use as dimensions. The strings have to match with the keys of the `SmartFilename`
fields definition.
inventory : GeoDataFrame, optional
Contains information about the dimensions (columns) and each filepath (rows).
If `grid` is not specified, a `Shapely` geometry object is added to the `GeoDataFrame`.
io_map : dictionary, optional
Map that represents the relation of an EO file type (e.g. GeoTIFF) with an appropriate reader
(e.g. `GeoTiffFile` from veranda).
sdim_name : str, optional
Name of the spatial dimension (default is 'tile'). If no grid is given, then the spatial dimension name
will be 'geometry'.
tdim_name : str, optional
Name of the temporal dimension (default is 'time').
"""
# initialise simple class variables
self._ds = None # data set pointer
self.status = None
self.tdim_name = tdim_name
self._filename_class = filename_class
# initialise IO classes responsible for reading and writing
if io_map is not None:
self.io_map = io_map
else:
self.io_map = {'GeoTIFF': GeoTiffFile, 'NetCDF': NcFile}
# create inventory from found filepaths
self.inventory = None
if inventory is not None:
self.inventory = inventory
else:
self.__inventory_from_filepaths(filepaths, dimensions=dimensions)
self.grid = None
if grid:
self.grid = grid
self.sdim_name = sdim_name
elif (self.inventory is not None) and ('geometry' not in self.inventory.keys()):
geometries = [self.__raster_geom_from_file(filepath).boundary_shapely
for filepath in self.filepaths]
self.sdim_name = sdim_name if sdim_name in self.dimensions else "geometry"
self.add_dimension('geometry', geometries, inplace=True)
else:
self.sdim_name = sdim_name
@property
def filepaths(self):
"""
list : List of file paths.
"""
if self.inventory is not None:
return list(self.inventory['filepath'])
else:
return None
@property
def dimensions(self):
"""
list : List of inventory keys/dimensions of the data cube.
"""
if self.inventory is not None:
dimensions = list(self.inventory.keys())
if 'filepath' in dimensions:
dimensions.remove('filepath')
return dimensions
else:
return None
@property
def boundary(self):
"""
ogr.geometry : OGR polygon representing the boundary/envelope of the data cube or `None` if no files are
contained in the data cube.
"""
self.__check_spatial_consistency()
boundary = None
if self.filepaths is not None:
filepath = self.filepaths[0]
raster_geom = self.__raster_geom_from_file(filepath)
boundary = raster_geom.boundary_ogr
boundary = swap_axis(boundary) # ensure lon-lat order
return boundary
@property
def raster_geometry(self):
"""
geospade.raster.RasterGeometry :
Raster geometry representing the geometric properties of the given file. Note
that the raster geometry is extracted from the first file, so be sure that the
datacube only holds files from the same tile of the grid.
"""
self.__check_spatial_consistency()
raster_geom = None
if not self.inventory.empty:
raster_geom = self.__raster_geom_from_file(self.filepaths[0])
return raster_geom
@property
def coordinate_boundary(self):
"""
ogr.geometry : OGR polygon representing the coordinate boundary of the data cube or `None` if no files are
contained in the data cube.
"""
self.__check_spatial_consistency()
coord_boundary = None
if self.filepaths is not None:
filepath = self.filepaths[0]
raster_geom = self.__raster_geom_from_file(filepath)
coord_boundary = ogr.CreateGeometryFromWkt(Polygon(raster_geom.coord_corners).wkt)
coord_boundary.AssignSpatialReference(raster_geom.sref.osr_sref)
coord_boundary = swap_axis(coord_boundary) # ensure lon-lat order
return coord_boundary
@classmethod
def from_inventory(cls, inventory, grid=None, **kwargs):
"""
Creates an `EODataCube` instance from a given inventory.
Parameters
----------
inventory : GeoDataFrame
Contains information about the dimensions (columns) and each filepath (rows).
grid : pytileproj.base.TiledProjection, optional
Tiled projection/grid object/class (e.g. `Equi7Grid`, `LatLonGrid`).
Returns
-------
EODataCube
Data cube consisting of data stored in `inventory`.
"""
return cls(inventory=inventory, grid=grid, **kwargs)
@_check_inventory
def rename_dimensions(self, dimensions_map, inplace=False):
"""
Renames the dimensions of the data cube.
Parameters
----------
dimensions_map : dict
A dictionary representing the relation between old and new dimension names. The keys are the old dimension
names, the values the new dimension names (e.g., {'time_begin': 'time'}).
inplace : boolean, optional
If true, the current class instance will be altered.
If false, a new class instance will be returned (default value is False).
Returns
-------
EODataCube
`EODataCube` object with renamed dimensions/columns of the inventory.
"""
# reset spatial and and temporal dimension name
for old_dimension in list(dimensions_map.keys()):
if self.sdim_name == old_dimension:
self.sdim_name = dimensions_map[old_dimension]
if self.tdim_name == old_dimension:
self.tdim_name = dimensions_map[old_dimension]
inventory = copy.deepcopy(self.inventory)
inventory = inventory.rename(columns=dimensions_map)
return self._assign_inventory(inventory, inplace=inplace)
@_check_inventory
def add_dimension(self, name, values, inplace=False):
"""
Adds a new dimension to the data cube.
Parameters
----------
name : str
Name of the new dimension
values : list
Values of the new dimension (e.g., cloud cover, quality flag, ...).
They have to have the same length as all the rows in the inventory.
inplace : boolean, optional
If true, the current class instance will be altered.
If false, a new class instance will be returned (default value is False).
Returns
-------
EODataCube
`EODataCube` object with an additional dimension in the inventory.
"""
if is_geometry_type(values):
ds = GeoSeries(values, index=self.inventory.index)
else:
ds = pd.Series(values, index=self.inventory.index)
inventory = self.inventory.assign(**{name: ds})
return self._assign_inventory(inventory, inplace=inplace)
@_set_status('changed')
@_check_inventory
def filter_files_with_pattern(self, pattern, full_path=False, inplace=False):
"""
Filters all filepaths according to the given pattern.
Parameters
----------
pattern : str
A regular expression (e.g., ".*S1A.*GRD.*").
full_path : boolean, optional
Uses the full file paths for filtering if it is set to True.
Otherwise the filename is used (default value is False).
inplace : boolean, optional
If true, the current class instance will be altered.
If false, a new class instance will be returned (default value is False).
Returns
-------
EODataCube
`EODataCube` object with a filtered inventory according to the given pattern.
"""
pattern = re.compile(pattern)
if not full_path:
file_filter = lambda x: re.search(pattern, os.path.basename(x)) is not None
else:
file_filter = lambda x: re.search(pattern, x) is not None
idx_filter = [file_filter(filepath) for filepath in self.filepaths]
inventory = self.inventory[idx_filter]
return self._assign_inventory(inventory, inplace=inplace)
@_set_status('changed')
@_check_inventory
def filter_by_metadata(self, metadata, inplace=False):
"""
Filters all file paths according to the given metadata.
Parameters
----------
metadata : dict
Key value relationships being expected to be in the metadata.
inplace : boolean, optional
If true, the current class instance will be altered.
If false, a new class instance will be returned (default value is False).
Returns
-------
EODataCube
`EODataCube` object with a filtered inventory according to the given metadata.
"""
bool_filter = []
for filepath in self.filepaths:
io_class = self.__io_class(get_file_type(filepath))
io_instance = io_class(filepath, mode='r')
select = False
if io_instance.src:
ds_metadata = io_instance.metadata
select = True
for key, value in metadata.items():
if key not in ds_metadata.keys():
select = False
else:
if ds_metadata[key] != value:
select = False
# close data set
io_instance.close()
bool_filter.append(select)
inventory = self.inventory[bool_filter]
return self._assign_inventory(inventory, inplace=inplace)
@_set_status('changed')
@_check_inventory
def sort_by_dimension(self, name, ascending=True, inplace=False):
"""
Sorts the data cube/inventory according to the given dimension.
Parameters
----------
name : str
Name of the dimension.
ascending : bool, optional
If true, sorts in ascending order, otherwise in descending order.
inplace : boolean, optional
If true, the current class instance will be altered.
If false, a new class instance will be returned (default value is False).
Returns
-------
EODataCube
Sorted EODataCube object.
"""
inventory = copy.deepcopy(self.inventory)
inventory_sorted = inventory.sort_values(by=name, ascending=ascending)
        return self._assign_inventory(inventory=inventory_sorted, inplace=inplace)
@_set_status('changed')
def filter_by_dimension(self, values, expressions=None, name="time", inplace=False):
"""
Filters the data cube according to the given extents and returns a (new) data cube.
Parameters
----------
values : list, tuple, list of tuples or list of lists
Values of interest to filter, e.g., timestamps: ('2019-01-01', '2019-02-01'), polarisations: ('VV')
expressions : list, tuple, list of tuples or list of lists, optional
Mathematical expressions to filter the data accordingly. If none are given, the exact values from 'values'
are taken, otherwise the expressions are applied for each value and linked with an AND (e.g., ('>=', '<=')).
They have to have the same length as 'values'. The following comparison operators are allowed:
- '==': equal to
- '>=': larger than or equal to
- '<=': smaller than or equal to
- '>': larger than
- '<': smaller than
name : str, optional
Name of the dimension.
inplace : boolean, optional
If true, the current class instance will be altered.
If false, a new class instance will be returned (default value is False).
Returns
-------
EODataCube
Filtered EODataCube object.
"""
return self.__filter_by_dimension(values, expressions=expressions, name=name, inplace=inplace, split=False)
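    # Example (editor's sketch, hypothetical values): keep only files whose timestamp
    # falls in January 2019, assuming the temporal dimension holds datetime-like values.
    #
    #   dc_jan = dc.filter_by_dimension([("2019-01-01", "2019-02-01")],
    #                                   expressions=[(">=", "<")], name=dc.tdim_name)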
@_set_status('changed')
def filter_spatially_by_tilename(self, tilenames, inplace=False, use_grid=True):
"""
Spatially filters the data cube by tile names.
Parameters
----------
tilenames : list of str
Tile names corresponding to a grid and/or the inventory.
inplace : boolean, optional
If true, the current class instance will be altered.
If false, a new class instance will be returned (default value is False).
use_grid : bool
If true, the given tile names are compared with the file names defined by the grid.
If false, pre-defined grid information is ignored.
Returns
-------
EODataCube
`EODataCube` object with a filtered inventory according to the given tile names.
"""
tilenames = to_list(tilenames)
if use_grid:
if self.grid is not None:
available_tilenames = self.grid.tilesys.list_tiles_covering_land()
for tilename in tilenames:
if tilename not in available_tilenames:
raise TileNotAvailable(tilename)
return self.filter_by_dimension(tilenames, name=self.sdim_name, inplace=inplace)
else:
print('No grid is provided to extract tile information.')
return self
else:
return self.filter_by_dimension(tilenames, name=self.sdim_name, inplace=inplace)
@_set_status('changed')
@_check_inventory
def filter_spatially_by_geom(self, geom, sref=None, inplace=False):
"""
Spatially filters the data cube by a bounding box or a geometry.
Parameters
----------
geom : OGR Geometry or Shapely Geometry or list or tuple, optional
A geometry defining the region of interest. If it is of type list/tuple representing the extent
(i.e. [x_min, y_min, x_max, y_max]), `sref` has to be given to transform the extent into a
georeferenced polygon.
sref : osr.SpatialReference, optional
Spatial reference of the given region of interest `geom`.
inplace : boolean, optional
If true, the current class instance will be altered.
If false, a new class instance will be returned (default value is False).
Returns
-------
EODataCube
`EODataCube` object with a filtered inventory according to the given region of interest `geom`.
"""
geom_roi = any_geom2ogr_geom(geom, osr_sref=sref)
if self.grid:
# pytileproj expects latlon polygon
sref_lonlat = osr.SpatialReference()
sref_lonlat.ImportFromEPSG(4326)
geom_roi.TransformTo(sref_lonlat)
geom_roi = swap_axis(geom_roi)
ftilenames = self.grid.search_tiles_over_geometry(geom_roi)
tilenames = [ftilename.split('_')[1] for ftilename in ftilenames]
return self.filter_spatially_by_tilename(tilenames, inplace=inplace, use_grid=False)
elif 'geometry' in self.inventory.keys():
# get spatial reference of data
geom_roi = self.align_geom(geom_roi)
geom_roi = shapely.wkt.loads(geom_roi.ExportToWkt())
inventory = self.inventory[self.inventory.intersects(geom_roi)]
return self._assign_inventory(inventory, inplace=inplace)
else:
return self
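    # Example (editor's sketch, hypothetical values): spatially filter with a lon/lat
    # bounding box; `sref` is required because the extent is given as plain coordinates.
    #
    #   sref = osr.SpatialReference()
    #   sref.ImportFromEPSG(4326)
    #   dc_roi = dc.filter_spatially_by_geom([14.0, 46.0, 17.0, 49.0], sref=sref)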
def split_by_dimension(self, values, expressions=None, name="time"):
"""
Splits the data cube according to the given extents and returns a list of data cubes.
Parameters
----------
values : list, tuple, list of tuples or list of lists
Values of interest to filter, e.g., timestamps: ('2019-01-01', '2019-02-01'), polarisations: ('VV')
expressions : list, tuple, list of tuples or list of lists, optional
Mathematical expressions to filter the data accordingly. If none are given, the exact values from 'values'
are taken, otherwise the expressions are applied for each value and linked with an AND (e.g., ('>=', '<=')).
They have to have the same length as 'values'. The following comparison operators are allowed:
- '==': equal to
- '>=': larger than or equal to
- '<=': smaller than or equal to
- '>': larger than
- '<': smaller than
name : str, optional
Name of the dimension.
Returns
-------
List of EODataCube objects.
"""
return self.__filter_by_dimension(values, expressions=expressions, name=name, split=True)
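    # Hedged example, added for illustration: splitting a cube into two time windows.
    # The timestamps and the variable name `dc` are assumptions, not taken from this file.
    #
    #     from datetime import datetime
    #     windows = [(datetime(2019, 1, 1), datetime(2019, 6, 30)),
    #                (datetime(2019, 7, 1), datetime(2019, 12, 31))]
    #     expressions = [(">=", "<="), (">=", "<=")]
    #     dc_h1, dc_h2 = dc.split_by_dimension(windows, expressions, name="time")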
def split_monthly(self, months=None):
"""
Separates the data cube into months.
Parameters
----------
        months : int or list of int, optional
            List of integers specifying the months to select/split, i.e. each value has to be between 1 and 12.
Returns
-------
List of monthly `EODataCube` objects.
"""
sort = False
yearly_eodcs = self.split_yearly()
monthly_eodcs = []
for yearly_eodc in yearly_eodcs:
if months is not None:
yearly_months = to_list(months)
# initialise empty dict keeping track of the months
timestamps_months = {}
for month in yearly_months:
timestamps_months[month] = []
for timestamp in yearly_eodc.inventory[self.tdim_name]:
if timestamp.month in yearly_months:
timestamps_months[timestamp.month].append(timestamp)
else:
sort = True
timestamps_months = {}
for timestamp in yearly_eodc.inventory[self.tdim_name]:
if timestamp.month not in timestamps_months.keys():
timestamps_months[timestamp.month] = []
timestamps_months[timestamp.month].append(timestamp)
yearly_months = timestamps_months.keys()
if sort:
yearly_months = sorted(yearly_months) # sort in ascending order
values = []
expressions = [(">=", "<=")] * len(yearly_months)
for month in yearly_months:
min_timestamp = min(timestamps_months[month])
max_timestamp = max(timestamps_months[month])
values.append((min_timestamp, max_timestamp))
monthly_eodcs.extend(self.split_by_dimension(values, expressions, name=self.tdim_name))
return monthly_eodcs
def split_yearly(self, years=None):
"""
Separates the data cube into years.
Parameters
----------
        years : int or list of int, optional
            List of integers specifying the years to select/split.
Returns
-------
List of yearly EODataCube objects.
"""
sort = False
if years:
years = to_list(years)
# initialise empty dict keeping track of the years
timestamps_years = {}
for year in years:
timestamps_years[year] = []
for timestamp in self.inventory[self.tdim_name]:
if timestamp.year in years:
timestamps_years[timestamp.year].append(timestamp)
else:
sort = True
timestamps_years = {}
for timestamp in self.inventory[self.tdim_name]:
if timestamp.year not in timestamps_years.keys():
timestamps_years[timestamp.year] = []
timestamps_years[timestamp.year].append(timestamp)
years = timestamps_years.keys()
if sort:
years = sorted(years) # sort in ascending order
values = []
expressions = [(">=", "<=")]*len(years)
for year in years:
min_timestamp = min(timestamps_years[year])
max_timestamp = max(timestamps_years[year])
values.append((min_timestamp, max_timestamp))
return self.split_by_dimension(values, expressions, name=self.tdim_name)
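    # Worked illustration (assumption-based, not from the original file): for an
    # inventory covering 2018-2020, `split_yearly()` returns three cubes, one per year,
    # while `split_yearly(years=[2019])` returns a single cube restricted to 2019.
    # `split_monthly(months=[6, 7, 8])` analogously returns one cube per summer month
    # and per year present in the inventory.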
@_set_status('stable')
def load_by_geom(self, geom, sref=None, band=1, apply_mask=True, dtype="xarray", origin='ul',
decode_kwargs=None):
"""
Loads data according to a given geometry.
Parameters
----------
geom : OGR Geometry or Shapely Geometry or list or tuple
A geometry defining the region of interest. If it is of type list/tuple representing the extent
(i.e. [x_min, y_min, x_max, y_max]), `sref` has to be given to transform the extent into a
georeferenced polygon.
sref : osr.SpatialReference, optional
Spatial reference of the given region of interest `geom`.
band : int or str, optional
Band number or name (default is 1).
apply_mask : bool, optional
            If true, a numpy mask array masking out (=1) all pixels outside `geom` and keeping (=0) all pixels
            inside it will be created (default is True).
dtype : str
Data type of the returned array-like structure (default is 'xarray'). It can be:
- 'xarray': loads data as an xarray.DataSet
- 'numpy': loads data as a numpy.ndarray
- 'dataframe': loads data as a pandas.DataFrame
origin: str, optional
Defines the world system origin of the pixel. It can be:
- upper left ("ul", default)
- upper right ("ur")
- lower right ("lr")
- lower left ("ll")
- center ("c")
decode_kwargs: dict, optional
Keyword arguments for the decoder.
Returns
-------
numpy.array or xarray.DataSet or pd.DataFrame
Data as an array-like object.
"""
if self.inventory is None or self.inventory.empty: # no data given
return None
decode_kwargs = {} if decode_kwargs is None else decode_kwargs
if self.grid:
if self.sdim_name not in self.dimensions:
raise DimensionUnkown(self.sdim_name)
this_sref = self.grid.core.projection.osr_spref
tilenames = list(self.inventory[self.sdim_name])
if len(list(set(tilenames))) > 1:
raise Exception('Data can be loaded only from one tile. Please filter the data cube before.')
tilename = tilenames[0]
this_gt = self.grid.tilesys.create_tile(name=tilename).geotransform()
else:
this_sref, this_gt = self.__get_georef()
if sref is not None:
geom_roi = any_geom2ogr_geom(geom, osr_sref=sref)
else:
geom_roi = any_geom2ogr_geom(geom, osr_sref=this_sref)
roi_sref = geom_roi.GetSpatialReference()
if not this_sref.IsSame(roi_sref):
geom_roi = geometry.transform_geometry(geom_roi, this_sref)
geom_roi = swap_axis(geom_roi)
# clip region of interest to tile boundary
geom_roi = geom_roi.Intersection(self.coordinate_boundary)
if geom_roi.IsEmpty():
raise Exception('The given geometry does not intersect with the tile boundaries.')
geom_roi.AssignSpatialReference(this_sref)
# remove third dimension from geometry
geom_roi.FlattenTo2D()
# retrieve extent of polygon with respect to the pixel sampling of the grid
x_pixel_size = abs(this_gt[1])
y_pixel_size = abs(this_gt[5])
extent = get_polygon_envelope(shapely.wkt.loads(geom_roi.ExportToWkt()),
x_pixel_size,
y_pixel_size)
inv_traffo_fun = lambda i, j: ij2xy(i, j, this_gt, origin=origin)
min_col, min_row = [int(coord) for coord in xy2ij(extent[0], extent[3], this_gt)]
max_col, max_row = [int(coord) for coord in xy2ij(extent[2], extent[1], this_gt)]
max_col, max_row = max_col + 1, max_row + 1 # plus one to still include the maximum indexes
if apply_mask:
# pixel size extraction assumes non-rotated data
data_mask = np.invert(rasterise_polygon(shapely.wkt.loads(geom_roi.ExportToWkt()),
x_pixel_size,
y_pixel_size).astype(bool))
file_type = get_file_type(self.filepaths[0])
xs = None
ys = None
if file_type == "GeoTIFF":
if self._ds is None and self.status != "stable":
file_ts = {'filenames': list(self.filepaths)}
self._ds = GeoTiffRasterTimeStack(file_ts=file_ts, file_band=band)
col_size = max_col - min_col
row_size = max_row - min_row
data = self._ds.read_ts(min_col, min_row, col_size=col_size, row_size=row_size)
if data is None:
raise LoadingDataError()
data = self.decode(data, **decode_kwargs)
if len(data.shape) == 2: # ensure that the data is always forwarded as a 3D array
data = data[None, :, :]
if apply_mask:
data = np.ma.array(data, mask=np.stack([data_mask]*data.shape[0], axis=0))
cols_traffo = np.concatenate(([min_col] * row_size, np.arange(min_col, max_col))).astype(float)
rows_traffo = np.concatenate((np.arange(min_row, max_row), [min_row] * col_size)).astype(float)
x_traffo, y_traffo = inv_traffo_fun(cols_traffo, rows_traffo)
xs = x_traffo[row_size:]
ys = y_traffo[:row_size]
data = self.__convert_dtype(data, dtype=dtype, xs=xs, ys=ys, band=band)
elif file_type == "NetCDF":
if self._ds is None and self.status != "stable":
file_ts = pd.DataFrame({'filenames': list(self.filepaths)})
self._ds = NcRasterTimeStack(file_ts=file_ts, stack_size='single', auto_decode=False)
time_units = self._ds.time_units
data_ar = self._ds.read()[str(band)][:, min_row:max_row, min_col:max_col]
if data_ar is None:
raise LoadingDataError()
data_ar.data = self.decode(data_ar.data, **decode_kwargs)
if apply_mask:
data_ar.data = np.ma.array(data_ar.data, mask=np.stack([data_mask] * data_ar.data.shape[0], axis=0))
data = data_ar.to_dataset()
data = self.__convert_dtype(data, dtype=dtype, xs=xs, ys=ys, band=band, time_units=time_units)
else:
raise FileTypeUnknown(file_type)
return data
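    # Hedged usage sketch: loading a region of interest as an xarray.Dataset. The
    # bounding box, the EPSG code and the variable name `dc` are illustrative assumptions.
    #
    #     from osgeo import osr
    #     sref = osr.SpatialReference()
    #     sref.ImportFromEPSG(4326)
    #     bbox = [15.0, 47.0, 16.0, 48.0]  # [x_min, y_min, x_max, y_max] in lon/lat
    #     ds = dc.load_by_geom(bbox, sref=sref, band=1, dtype="xarray")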
@_set_status('stable')
def load_by_pixels(self, rows, cols, row_size=1, col_size=1, band=1, dtype="xarray", origin="ul",
decode_kwargs=None):
"""
Loads data according to given pixel numbers, i.e. the row and column numbers and optionally a certain
pixel window (`row_size` and `col_size`).
Parameters
----------
rows : list of int or int
Row numbers.
cols : list of int or int
Column numbers.
row_size : int, optional
Number of rows to read (counts from input argument `rows`, default is 1).
col_size : int, optional
Number of columns to read (counts from input argument `cols`, default is 1).
band : int or str, optional
Band number or name (default is 1).
dtype : str
Data type of the returned array-like structure (default is 'xarray'). It can be:
- 'xarray': loads data as an xarray.DataSet
- 'numpy': loads data as a numpy.ndarray
- 'dataframe': loads data as a pandas.DataFrame
origin: str, optional
Defines the world system origin of the pixel. It can be:
- upper left ("ul", default)
- upper right ("ur")
- lower right ("lr")
- lower left ("ll")
- center ("c")
decode_kwargs: dict, optional
Keyword arguments for the decoder.
Returns
-------
list of numpy.ndarray or list of xarray.DataSets or pandas.DataFrame
Data as an array-like object.
"""
if self.inventory is None or self.inventory.empty: # no data given
return None
decode_kwargs = {} if decode_kwargs is None else decode_kwargs
rows = to_list(rows)
cols = to_list(cols)
if self.grid:
if self.sdim_name not in self.dimensions:
raise DimensionUnkown(self.sdim_name)
tilenames = list(self.inventory[self.sdim_name])
if len(list(set(tilenames))) > 1:
raise Exception('Data can be loaded only from one tile. Please filter the data cube before.')
tilename = tilenames[0]
this_gt = self.grid.tilesys.create_tile(name=tilename).geotransform()
else:
_, this_gt = self.__get_georef()
inv_traffo_fun = lambda i, j: ij2xy(i, j, this_gt, origin=origin)
file_type = get_file_type(self.filepaths[0])
time_units = "days since 1900-01-01 00:00:00"
n = len(rows)
data = []
xs = []
ys = []
for i in range(n):
row = rows[i]
col = cols[i]
if file_type == "GeoTIFF":
if self._ds is None and self.status != "stable":
file_ts = {'filenames': list(self.filepaths)}
self._ds = GeoTiffRasterTimeStack(file_ts=file_ts, file_band=band)
data_i = self._ds.read_ts(col, row, col_size=col_size, row_size=row_size)
if data_i is None:
raise LoadingDataError()
data_i = self.decode(data_i, **decode_kwargs)
if len(data_i.shape) == 2: # ensure that the data is always forwarded as a 3D array
data_i = data_i[None, :, :]
data.append(data_i)
if row_size != 1 and col_size != 1:
max_col = col + col_size
max_row = row + row_size
cols_traffo = np.concatenate(([col] * row_size, np.arange(col, max_col))).astype(float)
rows_traffo = np.concatenate((np.arange(row, max_row), [row] * col_size)).astype(float)
x_traffo, y_traffo = inv_traffo_fun(cols_traffo, rows_traffo)
xs_i = x_traffo[row_size:].tolist()
ys_i = y_traffo[:row_size].tolist()
else:
xs_i, ys_i = inv_traffo_fun(col, row)
xs.append(xs_i)
ys.append(ys_i)
elif file_type == "NetCDF":
if self._ds is None and self.status != "stable":
file_ts = pd.DataFrame({'filenames': list(self.filepaths)})
self._ds = NcRasterTimeStack(file_ts=file_ts, stack_size='single', auto_decode=False)
time_units = self._ds.time_units
if row_size != 1 and col_size != 1:
data_ar = self._ds.read()[str(band)][:, row:(row + row_size), col:(col + col_size)]
else:
data_ar = self._ds.read()[str(band)][:, row:(row + 1), col:(col + 1)] # +1 to keep the dimension
if data_ar is None:
raise LoadingDataError()
data_ar.data = self.decode(data_ar.data, **decode_kwargs)
data.append(data_ar.to_dataset())
else:
raise FileTypeUnknown(file_type)
return self.__convert_dtype(data, dtype, xs=xs, ys=ys, band=band, time_units=time_units)
@_set_status('stable')
def load_by_coords(self, xs, ys, sref=None, band=1, dtype="xarray", origin="ul", decode_kwargs=None):
"""
Loads data as a 1-D array according to a given coordinate.
Parameters
----------
xs : list of floats or float
World system coordinates in X direction.
ys : list of floats or float
World system coordinates in Y direction.
sref : osr.SpatialReference, optional
Spatial reference referring to the world system coordinates `x` and `y`.
band : int or str, optional
Band number or name (default is 1).
dtype : str
Data type of the returned array-like structure (default is 'xarray'). It can be:
- 'xarray': loads data as an xarray.DataSet
- 'numpy': loads data as a numpy.ndarray
- 'dataframe': loads data as a pandas.DataFrame
origin: str, optional
Defines the world system origin of the pixel. It can be:
- upper left ("ul", default)
- upper right ("ur")
- lower right ("lr")
- lower left ("ll")
- center ("c")
decode_kwargs: dict, optional
Keyword arguments for the decoder.
Returns
-------
list of numpy.ndarray or list of xarray.DataSets or pandas.DataFrame
Data as an array-like object.
"""
if self.inventory is None or self.inventory.empty: # no data given
return None
decode_kwargs = {} if decode_kwargs is None else decode_kwargs
xs = to_list(xs)
ys = to_list(ys)
if self.grid is not None:
if self.sdim_name not in self.dimensions:
raise DimensionUnkown(self.sdim_name)
this_sref = self.grid.core.projection.osr_spref
tilenames = list(self.inventory[self.sdim_name])
if len(list(set(tilenames))) > 1:
raise Exception('Data can be loaded only from one tile. Please filter the data cube before.')
tilename = tilenames[0]
this_gt = self.grid.tilesys.create_tile(name=tilename).geotransform()
else:
this_sref, this_gt = self.__get_georef()
time_units = "days since 1900-01-01 00:00:00"
n = len(xs)
data = []
for i in range(n):
x = xs[i]
y = ys[i]
if sref is not None:
x, y = geometry.uv2xy(x, y, sref, this_sref)
col, row = [int(coord) for coord in xy2ij(x, y, this_gt)]
# check if coordinate is within datacube
raster_geom = self.__raster_geom_from_file(self.filepaths[0])
if (col < 0) or (row < 0) or (col >= raster_geom.n_cols) or (row >= raster_geom.n_rows):
raise Exception('The given coordinate does not intersect with the tile boundaries.')
# replace old coordinates with transformed coordinates related to the users definition
x_t, y_t = ij2xy(col, row, this_gt, origin=origin)
xs[i] = x_t
ys[i] = y_t
file_type = get_file_type(self.filepaths[0])
if file_type == "GeoTIFF":
if self._ds is None and self.status != "stable":
file_ts = {'filenames': self.filepaths}
self._ds = GeoTiffRasterTimeStack(file_ts=file_ts, file_band=band)
data_i = self._ds.read_ts(col, row)
if data_i is None:
raise LoadingDataError()
data_i = self.decode(data_i, **decode_kwargs)
if len(data_i.shape) == 2: # ensure that the data is always forwarded as a 3D array
data_i = data_i[None, :, :]
elif file_type == "NetCDF":
if self._ds is None and self.status != "stable":
file_ts = pd.DataFrame({'filenames': list(self.filepaths)})
self._ds = NcRasterTimeStack(file_ts=file_ts, stack_size='single', auto_decode=False)
time_units = self._ds.time_units
data_ar = self._ds.read()[str(band)][:, row:(row + 1), col:(col + 1)] # +1 to keep the dimension
if data_ar is None:
raise LoadingDataError()
data_ar.data = self.decode(data_ar.data, **decode_kwargs)
data_i = data_ar.to_dataset()
else:
raise FileTypeUnknown(file_type)
data.append(data_i)
return self.__convert_dtype(data, dtype, xs=xs, ys=ys, band=band, time_units=time_units)
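    # Hedged usage sketch: reading time series at two coordinates as a pandas DataFrame.
    # The coordinate values (given in the grid's projected system) are illustrative assumptions.
    #
    #     df = dc.load_by_coords([4800000.5, 4810000.5], [1200000.5, 1210000.5],
    #                            band=1, dtype="dataframe")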
def encode(self, data, **kwargs):
"""
Encodes an array.
Parameters
----------
data : numpy, dask or xarray array
Data array.
**kwargs
Keyword arguments for encoding function.
Returns
-------
numpy, dask or xarray array
Encoded data.
"""
return data
def decode(self, data, **kwargs):
"""
Decodes an encoded array to retrieve the values in native units.
Parameters
----------
data : numpy, dask or xarray array
Encoded array.
**kwargs
Keyword arguments for decoding function.
Returns
-------
numpy, dask or xarray array
Decoded data (original/native values).
"""
return data
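    # Sketch of the intended extension point (an assumption about typical use, not part
    # of the original file): subclasses can override encode/decode to map between raw
    # integer encodings and physical values, e.g.
    #
    #     class Sig0DataCube(EODataCube):
    #         def decode(self, data, **kwargs):
    #             data = data.astype(float)
    #             data[data == -9999] = np.nan  # assumed no-data value
    #             return data / 100.            # assumed scale factor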
@_set_status('changed')
@_check_inventory
def intersect(self, dc_other, on_dimension=None, inplace=False):
"""
Intersects this data cube with another data cube. This is equal to an SQL INNER JOIN operation.
In other words:
- all uncommon columns and rows (if `on_dimension` is given) are removed
- duplicates are removed
Parameters
----------
dc_other : EODataCube
Data cube to intersect with.
on_dimension : str, optional
Dimension name to intersect on, meaning that only equal entries along this dimension will be retained.
inplace : boolean, optional
If true, the current class instance will be altered.
If false, a new class instance will be returned (default is False).
Returns
-------
EODataCube
Intersected data cubes.
"""
dc_intersected = intersect_datacubes([self, dc_other], on_dimension=on_dimension)
return self._assign_inventory(dc_intersected.inventory, inplace=inplace)
@_set_status('changed')
@_check_inventory
def unite(self, dc_other, inplace=False):
"""
Unites this data cube with respect to another data cube. This is equal to an SQL UNION operation.
In other words:
- all columns are put into one DataFrame
- duplicates are removed
- gaps are filled with NaN
Parameters
----------
dc_other : EODataCube
Data cube to unite with.
inplace : boolean, optional
If true, the current class instance will be altered.
If false, a new class instance will be returned (default is False).
Returns
-------
EODataCube
United data cubes.
"""
dc_united = unite_datacubes([self, dc_other])
return self._assign_inventory(dc_united.inventory, inplace=inplace)
@_set_status('changed')
def align_dimension(self, dc_other, name, inplace=False):
"""
Aligns this data cube with another data cube along the specified dimension `name`.
Parameters
----------
dc_other : EODataCube
Data cube to align with.
name : str
Name of the dimension, which is used for aligning/filtering the values for all data cubes.
inplace : boolean, optional
If true, the current class instance will be altered.
If false, a new class instance will be returned (default value is False).
Returns
-------
EODataCube
Data cube with common values along the given dimension with respect to another data cube.
"""
this_dim_values = list(self.inventory[name])
uni_values = list(set(this_dim_values))
other_dim_values = dc_other.inventory[name]
idxs = np.zeros(len(other_dim_values)) - 1 # set -1 as no data value
for i in range(len(uni_values)):
val_idxs = np.where(uni_values[i] == other_dim_values)
idxs[val_idxs] = this_dim_values.index(uni_values[i]) # get index of value in this data cube
idxs = idxs[idxs != -1]
if len(idxs) > 0:
inventory = self.inventory.iloc[idxs].reset_index(drop=True)
return self._assign_inventory(inventory, inplace=inplace)
else:
print('No common dimension values found. Original data cube is returned.')
return self
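    # Hedged example: keeping only the timestamps two cubes have in common; `dc_vv` and
    # `dc_vh` are hypothetical data cubes sharing the "time" dimension.
    #
    #     dc_vv_aligned = dc_vv.align_dimension(dc_vh, name="time")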
def clone(self):
"""
Clones, i.e. deep-copies a data cube.
Returns
-------
EODataCube
Cloned/copied data cube.
"""
return copy.deepcopy(self)
def align_geom(self, geom, sref=None):
"""
Transforms a geometry into the (geo-)spatial representation of the data cube.
Parameters
----------
geom : OGR Geometry or Shapely Geometry or list or tuple, optional
A geometry defining the region of interest. If it is of type list/tuple representing the extent
(i.e. [x_min, y_min, x_max, y_max]), `sref` has to be given to transform the extent into a
georeferenced polygon.
sref : osr.SpatialReference, optional
Spatial reference of the given region of interest `geom`.
Returns
-------
ogr.Geometry
Geometry with the (geo-)spatial reference of the data cube.
"""
geom = any_geom2ogr_geom(geom, osr_sref=sref)
this_sref, _ = self.__get_georef()
geom = geometry.transform_geometry(geom, this_sref)
geom = swap_axis(geom)
return geom
def close(self):
""" Closes data set pointer. """
self._ds.close()
def __convert_dtype(self, data, dtype, xs=None, ys=None, band=1, time_units='days since 1900-01-01 00:00:00'):
"""
Converts `data` into an array-like object defined by `dtype`. It supports NumPy arrays, Xarray arrays and
Pandas data frames.
Parameters
----------
data : list of numpy.ndarray or list of xarray.DataSets or numpy.ndarray or xarray.DataArray
dtype : str
Data type of the returned array-like structure (default is 'xarray'). It can be:
- 'xarray': loads data as an xarray.DataSet
- 'numpy': loads data as a numpy.ndarray
- 'dataframe': loads data as a pandas.DataFrame
xs : list, optional
List of world system coordinates in X direction.
ys : list, optional
List of world system coordinates in Y direction.
band : int or str, optional
Band number or name (default is 1).
time_units : str, optional
Time units definition for NetCDF4's `num2date` function.
Defaults to 'days since 1900-01-01 00:00:00'.
Returns
-------
list of numpy.ndarray or list of xarray.DataSets or pandas.DataFrame or numpy.ndarray or xarray.DataSet
Data as an array-like object.
"""
if dtype == "xarray":
timestamps = self[self.tdim_name]
if isinstance(data, list) and isinstance(data[0], np.ndarray):
ds = []
for i, entry in enumerate(data):
x = xs[i]
y = ys[i]
x = to_list(x)
y = to_list(y)
xr_ar = xr.DataArray(entry, coords={self.tdim_name: timestamps, 'y': y, 'x': x},
dims=[self.tdim_name, 'y', 'x'])
ds.append(xr.Dataset(data_vars={str(band): xr_ar}))
converted_data = xr.merge(ds)
elif isinstance(data, list) and isinstance(data[0], xr.Dataset):
converted_data = xr.merge(data)
converted_data.attrs = data[0].attrs
if converted_data['time'].dtype == 'float':
conv_timestamps = netCDF4.num2date(converted_data['time'], time_units)
converted_data = converted_data.assign_coords({'time': conv_timestamps})
elif isinstance(data, np.ndarray):
xr_ar = xr.DataArray(data, coords={self.tdim_name: timestamps, 'y': ys, 'x': xs},
dims=[self.tdim_name, 'y', 'x'])
converted_data = xr.Dataset(data_vars={str(band): xr_ar})
elif isinstance(data, xr.Dataset):
converted_data = data
if converted_data['time'].dtype == 'float':
conv_timestamps = netCDF4.num2date(converted_data['time'], time_units)
converted_data = converted_data.assign_coords({'time': conv_timestamps})
else:
raise DataTypeUnknown(type(data), dtype)
elif dtype == "numpy":
if isinstance(data, list) and isinstance(data[0], np.ndarray):
if len(data) == 1:
converted_data = data[0]
else:
converted_data = data
elif isinstance(data, list) and isinstance(data[0], xr.Dataset):
converted_data = [np.array(entry[str(band)].data) for entry in data]
if len(converted_data) == 1:
converted_data = converted_data[0]
elif isinstance(data, xr.Dataset):
converted_data = np.array(data[str(band)].data)
elif isinstance(data, np.ndarray):
converted_data = data
else:
raise DataTypeUnknown(type(data), dtype)
elif dtype == "dataframe":
dimensions = ["time", "y", "x"]
xr_ds = self.__convert_dtype(data, 'xarray', xs=xs, ys=ys, band=band)
converted_data = xr_ds.to_dataframe().reset_index().sort_values(dimensions).set_index(dimensions)
else:
raise DataTypeUnknown(type(data), dtype)
return converted_data
def __get_georef(self):
"""
        Retrieves the georeference, consisting of the spatial reference and the geotransformation parameters,
        from the first file in the data cube.
Returns
-------
this_sref : osr.SpatialReference()
Spatial reference of data cube based on the first file.
this_gt : tuple
Geotransformation parameters based on the first file.
"""
if self.filepaths is not None:
filepath = self.filepaths[0]
io_class = self.__io_class(get_file_type(filepath))
io_instance = io_class(filepath, mode='r')
this_sref = osr.SpatialReference()
this_sref.ImportFromWkt(io_instance.spatialref)
this_gt = io_instance.geotransform
# close data set
io_instance.close()
return this_sref, tuple(this_gt)
else:
return None, None
def __io_class(self, file_type):
"""
Looks up appropriate file handler/IO class for a given file type.
Parameters
----------
file_type : str
File type, e.g. "GeoTIFF".
Returns
-------
object
File handler to read and write EO data, e.g. "GeoTiffFile".
Raises
------
IOClassNotFound
File handler for a given file type was not found.
"""
if file_type not in self.io_map.keys():
raise IOClassNotFound(self.io_map, file_type)
else:
return self.io_map[file_type]
def __raster_geom_from_file(self, filepath):
"""
Retrieves a raster geometry from an EO file.
Parameters
----------
filepath : str
Filepath or filename of a geospatial file (e.g. NetCDF or GeoTIFF).
Returns
-------
geospade.raster.RasterGeometry
Raster geometry representing the geometric properties of the given file.
"""
file_type = get_file_type(filepath)
io_class = self.__io_class(file_type)
io_instance = io_class(filepath, mode='r')
sref = SpatialRef(io_instance.spatialref)
raster_geom = RasterGeometry(io_instance.shape[0], io_instance.shape[1], sref, io_instance.geotransform)
# close data set
io_instance.close()
return raster_geom
def __check_spatial_consistency(self):
"""
Checks if there are multiple tiles/file extents present in the data cube.
If so, a `SpatialInconsistencyError` is raised.
"""
if self.sdim_name in self.dimensions:
geoms = self[self.sdim_name]
try: # try apply unique function to DataSeries
uni_vals = geoms.unique()
except:
# the type seems not to be hashable, it could contain shapely geometries.
# try to convert them to strings and then apply a unique function
uni_vals = geoms.apply(lambda x: x.wkt).unique()
if len(uni_vals) > 1:
raise SpatialInconsistencyError()
def __inventory_from_filepaths(self, filepaths, dimensions=None):
"""
Creates GeoDataFrame (`inventory`) based on all filepaths.
        Each filepath/filename is translated to a SmartFilename object using the data cube's filename class
        (`self._filename_class`).
Parameters
----------
filepaths : list of str
List of file paths.
dimensions : list of str, optional
List of filename parts to use as dimensions. The strings have to match with the keys of the `SmartFilename`
fields definition.
"""
inventory = OrderedDict()
inventory['filepath'] = []
# fill inventory
for filepath in filepaths:
n = len(inventory['filepath'])
local_inventory = OrderedDict()
local_inventory['filepath'] = [filepath]
# get information from filename
smart_filename = None
try:
smart_filename = self._filename_class.from_filename(os.path.basename(filepath), convert=True)
except:
pass
if smart_filename:
if dimensions is not None:
for dimension in dimensions:
try:
local_inventory[dimension] = [smart_filename[dimension]]
except:
pass
else:
for key, value in smart_filename.fields.items():
local_inventory[key] = [value]
extended_entries = list(local_inventory.values())
extended_entries = list(map(list, zip(*extended_entries)))
# add local inventory keys to global inventory if they are not available globally
for key in local_inventory.keys():
if key not in inventory.keys():
if n == 0: # first time
inventory[key] = []
else: # fill dimension with None
inventory[key] = [None] * n
for entry in extended_entries:
for i, key in enumerate(inventory.keys()):
inventory[key].append(entry[i])
self.inventory = GeoDataFrame(inventory)
@_check_inventory
def __filter_by_dimension(self, values, expressions=None, name="time", split=False, inplace=False):
"""
Filters the data cube according to the given extent and returns a new data cube.
Parameters
----------
values : list, tuple, list of tuples or list of lists
Values of interest to filter, e.g., timestamps: ('2019-01-01', '2019-02-01'), polarisations: ('VV')
expressions : list, tuple, list of tuples or list of lists, optional
Mathematical expressions to filter the data accordingly. If none are given, the exact values from 'values'
are taken, otherwise the expressions are applied for each value and linked with an AND (e.g., ('>=', '<=')).
They have to have the same length as 'values'. The following comparison operators are allowed:
- '==': equal to
- '>=': larger than or equal to
- '<=': smaller than or equal to
- '>': larger than
- '<': smaller than
name : str, optional
Name of the dimension.
split : boolean, optional
If true, a list of data cubes will be returned according to the length of the input data
            (i.e. `values` and `expressions`) (default value is False).
inplace : boolean, optional
If true, the current class instance will be altered.
If false, a new class instance will be returned (default value is False).
Returns
-------
EODataCube or list of EODataCubes
If `split` is true and multiple filters are specified, a list of EODataCube objects will be returned.
If not, the inventory of the data cube is filtered.
"""
values = to_list(values)
n_filters = len(values)
if expressions is None: # equal operator is the default comparison operator
expressions = ["=="] * n_filters
else:
expressions = to_list(expressions)
inventory = copy.deepcopy(self.inventory)
filtered_inventories = []
for i in range(n_filters):
value = to_list(values[i])
expression = to_list(expressions[i])
if (len(value) == 2) and (len(expression) == 2):
filter_cmd = "inventory[(inventory[name] {} value[0]) & " \
"(inventory[name] {} value[1])]".format(expression[0], expression[1])
elif (len(value) == 1) and (len(expression) == 1):
filter_cmd = "inventory[inventory[name] {} value[0]]".format(expression[0])
else:
raise Exception('Length of value (={}) and length of expression (={}) does not match or is larger than 2.'.format(len(value), len(expression)))
filtered_inventories.append(eval(filter_cmd))
if split:
eodcs = [self._assign_inventory(filtered_inventory, inplace=False)
for filtered_inventory in filtered_inventories]
return eodcs
else:
filtered_inventory = pd.concat(filtered_inventories, ignore_index=True)
return self._assign_inventory(filtered_inventory, inplace=inplace)
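    # Worked instance of the command string assembled above (for illustration): with
    # value = (t_min, t_max) and expression = (">=", "<="), the evaluated filter is
    #
    #     inventory[(inventory[name] >= value[0]) & (inventory[name] <= value[1])]
    #
    # i.e. a plain pandas boolean-mask selection on the inventory column `name`.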
def _assign_inventory(self, inventory, inplace=True):
"""
        Helper method to either create a new data cube or overwrite the current one with the given inventory.
Parameters
----------
inventory : GeoDataFrame
Data cube inventory.
inplace : boolean, optional
If true, the current class instance will be altered.
If false, a new class instance will be returned (default value is False).
Returns
-------
EODataCube
"""
if self.sdim_name not in list(inventory.keys()):
sdim_name = None
else:
sdim_name = self.sdim_name
if self.tdim_name not in list(inventory.keys()):
tdim_name = None
else:
tdim_name = self.tdim_name
if inplace:
self.inventory = inventory
self.sdim_name = sdim_name
self.tdim_name = tdim_name
return self
else:
return self.from_inventory(inventory=inventory, grid=self.grid,
dimensions=self.dimensions, filename_class=self._filename_class,
io_map=self.io_map, tdim_name=tdim_name, sdim_name=sdim_name)
def __deepcopy__(self, memodict={}):
"""
Deepcopy method of the EODataCube class.
Parameters
----------
memodict : dict, optional
Returns
-------
EODataCube
Deepcopy of a data cube.
"""
filepaths = copy.deepcopy(self.filepaths)
grid = self.grid
dimensions = copy.deepcopy(self.dimensions)
inventory = copy.deepcopy(self.inventory)
return EODataCube(filepaths=filepaths, grid=grid, dimensions=dimensions, inventory=inventory,
sdim_name=self.sdim_name, tdim_name=self.tdim_name)
def __getitem__(self, dimension_name):
"""
Returns a column of the internal inventory according to the given column name/item.
Parameters
----------
dimension_name : str
Column/Dimension name of the data cube inventory.
Returns
-------
pandas.DataSeries
Column of the internal inventory.
"""
if self.inventory is not None and dimension_name in self.inventory.columns:
return self.inventory[dimension_name]
else:
raise DimensionUnkown(dimension_name)
def __len__(self):
"""
Returns number of inventory/data entries.
Returns
-------
int
Number of inventory/data entries.
"""
return len(self.inventory)
def __repr__(self):
"""
Defines the string representation of the class.
Returns
-------
str
String representation of a data cube, i.e. its inventory.
"""
return str(self.inventory)
def unite_datacubes(dcs):
"""
Unites data cubes into one data cube. This is equal to an SQL UNION operation.
In other words:
- all columns are put into one DataFrame
- duplicates are removed
- gaps are filled with NaN
Parameters
----------
dcs : list of EODataCube objects
List of data cubes, which should be united.
Returns
-------
EODataCube
Data cube containing all information of the given data cubes.
"""
inventories = [dc.inventory for dc in dcs]
# this is a SQL alike UNION operation
united_inventory = pd.concat(inventories, ignore_index=True, sort=False).drop_duplicates().reset_index(drop=True)
sdim_name = dcs[0].sdim_name
tdim_name = dcs[0].tdim_name
if sdim_name not in list(united_inventory.keys()):
sdim_name = None
if tdim_name not in list(united_inventory.keys()):
tdim_name = None
dc_merged = EODataCube.from_inventory(united_inventory, grid=dcs[0].grid, sdim_name=sdim_name, tdim_name=tdim_name)
return dc_merged
def intersect_datacubes(dcs, on_dimension=None):
"""
Intersects data cubes. This is equal to an SQL INNER JOIN operation.
In other words:
- all uncommon columns and rows (if `on_dimension` is given) are removed
- duplicates are removed
Parameters
----------
dcs : list of EODataCube objects
List of data cubes, which should be intersected.
Returns
-------
EODataCube
Data cube containing all common information of the given data cubes.
"""
inventories = [dc.inventory for dc in dcs]
intersected_inventory = pd.concat(inventories, ignore_index=True, join='inner')
if on_dimension is not None:
all_vals = []
for inventory in inventories:
all_vals.append(list(inventory[on_dimension]))
common_vals = list(set.intersection(*map(set, all_vals)))
intersected_inventory = intersected_inventory[intersected_inventory[on_dimension].isin(common_vals)]
intersected_inventory = intersected_inventory.drop_duplicates().reset_index(drop=True)
sdim_name = dcs[0].sdim_name
tdim_name = dcs[0].tdim_name
if sdim_name not in list(intersected_inventory.keys()):
sdim_name = None
if tdim_name not in list(intersected_inventory.keys()):
tdim_name = None
dc_merged = EODataCube.from_inventory(intersected_inventory, grid=dcs[0].grid,
sdim_name=sdim_name, tdim_name=tdim_name)
return dc_merged
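# Hedged usage sketch for the two module-level merge helpers; `dc_a` and `dc_b` are
# hypothetical data cubes defined on the same grid.
#
#     dc_all = unite_datacubes([dc_a, dc_b])                              # SQL UNION-like
#     dc_common = intersect_datacubes([dc_a, dc_b], on_dimension="time")  # SQL INNER JOIN-like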
|
[
"yeoda.errors.TileNotAvailable",
"geospade.raster.SpatialRef",
"veranda.io.timestack.GeoTiffRasterTimeStack",
"numpy.arange",
"yeoda.errors.DimensionUnkown",
"yeoda.errors.LoadingDataError",
"shapely.geometry.Polygon",
"yeoda.errors.IOClassNotFound",
"yeoda.errors.FileTypeUnknown",
"veranda.io.timestack.NcRasterTimeStack",
"xarray.merge",
"yeoda.errors.SpatialInconsistencyError",
"geopandas.GeoDataFrame",
"yeoda.utils.get_file_type",
"pytileproj.geometry.transform_geometry",
"pandas.concat",
"re.search",
"numpy.stack",
"copy.deepcopy",
"yeoda.utils.to_list",
"yeoda.utils.swap_axis",
"os.path.basename",
"yeoda.utils.any_geom2ogr_geom",
"pandas.Series",
"pytileproj.geometry.uv2xy",
"re.compile",
"geopandas.GeoSeries",
"geospade.raster.RasterGeometry",
"geopandas.base.is_geometry_type",
"geospade.transform.xy2ij",
"numpy.where",
"xarray.DataArray",
"collections.OrderedDict",
"osgeo.osr.SpatialReference",
"geospade.transform.ij2xy",
"netCDF4.num2date"
] |
[((65923, 65978), 'pandas.concat', 'pd.concat', (['inventories'], {'ignore_index': '(True)', 'join': '"""inner"""'}), "(inventories, ignore_index=True, join='inner')\n", (65932, 65978), True, 'import pandas as pd\n'), ((11604, 11633), 'copy.deepcopy', 'copy.deepcopy', (['self.inventory'], {}), '(self.inventory)\n', (11617, 11633), False, 'import copy\n'), ((12521, 12545), 'geopandas.base.is_geometry_type', 'is_geometry_type', (['values'], {}), '(values)\n', (12537, 12545), False, 'from geopandas.base import is_geometry_type\n'), ((13674, 13693), 're.compile', 're.compile', (['pattern'], {}), '(pattern)\n', (13684, 13693), False, 'import re\n'), ((16276, 16305), 'copy.deepcopy', 'copy.deepcopy', (['self.inventory'], {}), '(self.inventory)\n', (16289, 16305), False, 'import copy\n'), ((18835, 18853), 'yeoda.utils.to_list', 'to_list', (['tilenames'], {}), '(tilenames)\n', (18842, 18853), False, 'from yeoda.utils import to_list\n'), ((20544, 20582), 'yeoda.utils.any_geom2ogr_geom', 'any_geom2ogr_geom', (['geom'], {'osr_sref': 'sref'}), '(geom, osr_sref=sref)\n', (20561, 20582), False, 'from yeoda.utils import any_geom2ogr_geom\n'), ((30708, 30740), 'yeoda.utils.get_file_type', 'get_file_type', (['self.filepaths[0]'], {}), '(self.filepaths[0])\n', (30721, 30740), False, 'from yeoda.utils import get_file_type\n'), ((34787, 34800), 'yeoda.utils.to_list', 'to_list', (['rows'], {}), '(rows)\n', (34794, 34800), False, 'from yeoda.utils import to_list\n'), ((34816, 34829), 'yeoda.utils.to_list', 'to_list', (['cols'], {}), '(cols)\n', (34823, 34829), False, 'from yeoda.utils import to_list\n'), ((35450, 35482), 'yeoda.utils.get_file_type', 'get_file_type', (['self.filepaths[0]'], {}), '(self.filepaths[0])\n', (35463, 35482), False, 'from yeoda.utils import get_file_type\n'), ((39730, 39741), 'yeoda.utils.to_list', 'to_list', (['xs'], {}), '(xs)\n', (39737, 39741), False, 'from yeoda.utils import to_list\n'), ((39755, 39766), 'yeoda.utils.to_list', 'to_list', (['ys'], {}), '(ys)\n', (39762, 39766), False, 'from yeoda.utils import to_list\n'), ((47309, 47328), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (47322, 47328), False, 'import copy\n'), ((48101, 48139), 'yeoda.utils.any_geom2ogr_geom', 'any_geom2ogr_geom', (['geom'], {'osr_sref': 'sref'}), '(geom, osr_sref=sref)\n', (48118, 48139), False, 'from yeoda.utils import any_geom2ogr_geom\n'), ((48198, 48242), 'pytileproj.geometry.transform_geometry', 'geometry.transform_geometry', (['geom', 'this_sref'], {}), '(geom, this_sref)\n', (48225, 48242), True, 'import pytileproj.geometry as geometry\n'), ((48258, 48273), 'yeoda.utils.swap_axis', 'swap_axis', (['geom'], {}), '(geom)\n', (48267, 48273), False, 'from yeoda.utils import swap_axis\n'), ((54649, 54672), 'yeoda.utils.get_file_type', 'get_file_type', (['filepath'], {}), '(filepath)\n', (54662, 54672), False, 'from yeoda.utils import get_file_type\n'), ((54785, 54819), 'geospade.raster.SpatialRef', 'SpatialRef', (['io_instance.spatialref'], {}), '(io_instance.spatialref)\n', (54795, 54819), False, 'from geospade.raster import SpatialRef\n'), ((54842, 54936), 'geospade.raster.RasterGeometry', 'RasterGeometry', (['io_instance.shape[0]', 'io_instance.shape[1]', 'sref', 'io_instance.geotransform'], {}), '(io_instance.shape[0], io_instance.shape[1], sref,\n io_instance.geotransform)\n', (54856, 54936), False, 'from geospade.raster import RasterGeometry\n'), ((56367, 56380), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (56378, 56380), False, 'from collections import 
OrderedDict\n'), ((58013, 58036), 'geopandas.GeoDataFrame', 'GeoDataFrame', (['inventory'], {}), '(inventory)\n', (58025, 58036), False, 'from geopandas import GeoDataFrame\n'), ((59825, 59840), 'yeoda.utils.to_list', 'to_list', (['values'], {}), '(values)\n', (59832, 59840), False, 'from yeoda.utils import to_list\n'), ((60085, 60114), 'copy.deepcopy', 'copy.deepcopy', (['self.inventory'], {}), '(self.inventory)\n', (60098, 60114), False, 'import copy\n'), ((62845, 62874), 'copy.deepcopy', 'copy.deepcopy', (['self.filepaths'], {}), '(self.filepaths)\n', (62858, 62874), False, 'import copy\n'), ((62921, 62951), 'copy.deepcopy', 'copy.deepcopy', (['self.dimensions'], {}), '(self.dimensions)\n', (62934, 62951), False, 'import copy\n'), ((62972, 63001), 'copy.deepcopy', 'copy.deepcopy', (['self.inventory'], {}), '(self.inventory)\n', (62985, 63001), False, 'import copy\n'), ((8476, 8495), 'yeoda.utils.swap_axis', 'swap_axis', (['boundary'], {}), '(boundary)\n', (8485, 8495), False, 'from yeoda.utils import swap_axis\n'), ((9763, 9788), 'yeoda.utils.swap_axis', 'swap_axis', (['coord_boundary'], {}), '(coord_boundary)\n', (9772, 9788), False, 'from yeoda.utils import swap_axis\n'), ((12564, 12609), 'geopandas.GeoSeries', 'GeoSeries', (['values'], {'index': 'self.inventory.index'}), '(values, index=self.inventory.index)\n', (12573, 12609), False, 'from geopandas import GeoSeries\n'), ((12641, 12686), 'pandas.Series', 'pd.Series', (['values'], {'index': 'self.inventory.index'}), '(values, index=self.inventory.index)\n', (12650, 12686), True, 'import pandas as pd\n'), ((20680, 20702), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (20700, 20702), False, 'from osgeo import osr\n'), ((20817, 20836), 'yeoda.utils.swap_axis', 'swap_axis', (['geom_roi'], {}), '(geom_roi)\n', (20826, 20836), False, 'from yeoda.utils import swap_axis\n'), ((25209, 25223), 'yeoda.utils.to_list', 'to_list', (['years'], {}), '(years)\n', (25216, 25223), False, 'from yeoda.utils import to_list\n'), ((28955, 28993), 'yeoda.utils.any_geom2ogr_geom', 'any_geom2ogr_geom', (['geom'], {'osr_sref': 'sref'}), '(geom, osr_sref=sref)\n', (28972, 28993), False, 'from yeoda.utils import any_geom2ogr_geom\n'), ((29031, 29074), 'yeoda.utils.any_geom2ogr_geom', 'any_geom2ogr_geom', (['geom'], {'osr_sref': 'this_sref'}), '(geom, osr_sref=this_sref)\n', (29048, 29074), False, 'from yeoda.utils import any_geom2ogr_geom\n'), ((29192, 29240), 'pytileproj.geometry.transform_geometry', 'geometry.transform_geometry', (['geom_roi', 'this_sref'], {}), '(geom_roi, this_sref)\n', (29219, 29240), True, 'import pytileproj.geometry as geometry\n'), ((29264, 29283), 'yeoda.utils.swap_axis', 'swap_axis', (['geom_roi'], {}), '(geom_roi)\n', (29273, 29283), False, 'from yeoda.utils import swap_axis\n'), ((30045, 30080), 'geospade.transform.ij2xy', 'ij2xy', (['i', 'j', 'this_gt'], {'origin': 'origin'}), '(i, j, this_gt, origin=origin)\n', (30050, 30080), False, 'from geospade.transform import ij2xy\n'), ((35394, 35429), 'geospade.transform.ij2xy', 'ij2xy', (['i', 'j', 'this_gt'], {'origin': 'origin'}), '(i, j, this_gt, origin=origin)\n', (35399, 35429), False, 'from geospade.transform import ij2xy\n'), ((41153, 41192), 'geospade.transform.ij2xy', 'ij2xy', (['col', 'row', 'this_gt'], {'origin': 'origin'}), '(col, row, this_gt, origin=origin)\n', (41158, 41192), False, 'from geospade.transform import ij2xy\n'), ((41266, 41298), 'yeoda.utils.get_file_type', 'get_file_type', (['self.filepaths[0]'], {}), '(self.filepaths[0])\n', (41279, 
41298), False, 'from yeoda.utils import get_file_type\n'), ((46635, 46678), 'numpy.where', 'np.where', (['(uni_values[i] == other_dim_values)'], {}), '(uni_values[i] == other_dim_values)\n', (46643, 46678), True, 'import numpy as np\n'), ((53283, 53305), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (53303, 53305), False, 'from osgeo import osr\n'), ((54105, 54144), 'yeoda.errors.IOClassNotFound', 'IOClassNotFound', (['self.io_map', 'file_type'], {}), '(self.io_map, file_type)\n', (54120, 54144), False, 'from yeoda.errors import IOClassNotFound\n'), ((56550, 56563), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (56561, 56563), False, 'from collections import OrderedDict\n'), ((60043, 60063), 'yeoda.utils.to_list', 'to_list', (['expressions'], {}), '(expressions)\n', (60050, 60063), False, 'from yeoda.utils import to_list\n'), ((60204, 60222), 'yeoda.utils.to_list', 'to_list', (['values[i]'], {}), '(values[i])\n', (60211, 60222), False, 'from yeoda.utils import to_list\n'), ((60248, 60271), 'yeoda.utils.to_list', 'to_list', (['expressions[i]'], {}), '(expressions[i])\n', (60255, 60271), False, 'from yeoda.utils import to_list\n'), ((61140, 61190), 'pandas.concat', 'pd.concat', (['filtered_inventories'], {'ignore_index': '(True)'}), '(filtered_inventories, ignore_index=True)\n', (61149, 61190), True, 'import pandas as pd\n'), ((63745, 63776), 'yeoda.errors.DimensionUnkown', 'DimensionUnkown', (['dimension_name'], {}), '(dimension_name)\n', (63760, 63776), False, 'from yeoda.errors import DimensionUnkown\n'), ((14851, 14874), 'yeoda.utils.get_file_type', 'get_file_type', (['filepath'], {}), '(filepath)\n', (14864, 14874), False, 'from yeoda.utils import get_file_type\n'), ((23366, 23381), 'yeoda.utils.to_list', 'to_list', (['months'], {}), '(months)\n', (23373, 23381), False, 'from yeoda.utils import to_list\n'), ((28408, 28439), 'yeoda.errors.DimensionUnkown', 'DimensionUnkown', (['self.sdim_name'], {}), '(self.sdim_name)\n', (28423, 28439), False, 'from yeoda.errors import DimensionUnkown\n'), ((30133, 30169), 'geospade.transform.xy2ij', 'xy2ij', (['extent[0]', 'extent[3]', 'this_gt'], {}), '(extent[0], extent[3], this_gt)\n', (30138, 30169), False, 'from geospade.transform import xy2ij\n'), ((30223, 30259), 'geospade.transform.xy2ij', 'xy2ij', (['extent[2]', 'extent[1]', 'this_gt'], {}), '(extent[2], extent[1], this_gt)\n', (30228, 30259), False, 'from geospade.transform import xy2ij\n'), ((30962, 31017), 'veranda.io.timestack.GeoTiffRasterTimeStack', 'GeoTiffRasterTimeStack', ([], {'file_ts': 'file_ts', 'file_band': 'band'}), '(file_ts=file_ts, file_band=band)\n', (30984, 31017), False, 'from veranda.io.timestack import GeoTiffRasterTimeStack\n'), ((31243, 31261), 'yeoda.errors.LoadingDataError', 'LoadingDataError', ([], {}), '()\n', (31259, 31261), False, 'from yeoda.errors import LoadingDataError\n'), ((32891, 32917), 'yeoda.errors.FileTypeUnknown', 'FileTypeUnknown', (['file_type'], {}), '(file_type)\n', (32906, 32917), False, 'from yeoda.errors import FileTypeUnknown\n'), ((34929, 34960), 'yeoda.errors.DimensionUnkown', 'DimensionUnkown', (['self.sdim_name'], {}), '(self.sdim_name)\n', (34944, 34960), False, 'from yeoda.errors import DimensionUnkown\n'), ((39878, 39909), 'yeoda.errors.DimensionUnkown', 'DimensionUnkown', (['self.sdim_name'], {}), '(self.sdim_name)\n', (39893, 39909), False, 'from yeoda.errors import DimensionUnkown\n'), ((40593, 40630), 'pytileproj.geometry.uv2xy', 'geometry.uv2xy', (['x', 'y', 'sref', 'this_sref'], {}), '(x, y, 
sref, this_sref)\n', (40607, 40630), True, 'import pytileproj.geometry as geometry\n'), ((50346, 50358), 'xarray.merge', 'xr.merge', (['ds'], {}), '(ds)\n', (50354, 50358), True, 'import xarray as xr\n'), ((53179, 53202), 'yeoda.utils.get_file_type', 'get_file_type', (['filepath'], {}), '(filepath)\n', (53192, 53202), False, 'from yeoda.utils import get_file_type\n'), ((55724, 55751), 'yeoda.errors.SpatialInconsistencyError', 'SpatialInconsistencyError', ([], {}), '()\n', (55749, 55751), False, 'from yeoda.errors import SpatialInconsistencyError\n'), ((9617, 9651), 'shapely.geometry.Polygon', 'Polygon', (['raster_geom.coord_corners'], {}), '(raster_geom.coord_corners)\n', (9624, 9651), False, 'from shapely.geometry import Polygon\n'), ((13858, 13879), 're.search', 're.search', (['pattern', 'x'], {}), '(pattern, x)\n', (13867, 13879), False, 'import re\n'), ((32218, 32292), 'veranda.io.timestack.NcRasterTimeStack', 'NcRasterTimeStack', ([], {'file_ts': 'file_ts', 'stack_size': '"""single"""', 'auto_decode': '(False)'}), "(file_ts=file_ts, stack_size='single', auto_decode=False)\n", (32235, 32292), False, 'from veranda.io.timestack import NcRasterTimeStack\n'), ((32478, 32496), 'yeoda.errors.LoadingDataError', 'LoadingDataError', ([], {}), '()\n', (32494, 32496), False, 'from yeoda.errors import LoadingDataError\n'), ((35891, 35946), 'veranda.io.timestack.GeoTiffRasterTimeStack', 'GeoTiffRasterTimeStack', ([], {'file_ts': 'file_ts', 'file_band': 'band'}), '(file_ts=file_ts, file_band=band)\n', (35913, 35946), False, 'from veranda.io.timestack import GeoTiffRasterTimeStack\n'), ((36099, 36117), 'yeoda.errors.LoadingDataError', 'LoadingDataError', ([], {}), '()\n', (36115, 36117), False, 'from yeoda.errors import LoadingDataError\n'), ((37943, 37969), 'yeoda.errors.FileTypeUnknown', 'FileTypeUnknown', (['file_type'], {}), '(file_type)\n', (37958, 37969), False, 'from yeoda.errors import FileTypeUnknown\n'), ((40679, 40699), 'geospade.transform.xy2ij', 'xy2ij', (['x', 'y', 'this_gt'], {}), '(x, y, this_gt)\n', (40684, 40699), False, 'from geospade.transform import xy2ij\n'), ((41494, 41549), 'veranda.io.timestack.GeoTiffRasterTimeStack', 'GeoTiffRasterTimeStack', ([], {'file_ts': 'file_ts', 'file_band': 'band'}), '(file_ts=file_ts, file_band=band)\n', (41516, 41549), False, 'from veranda.io.timestack import GeoTiffRasterTimeStack\n'), ((41663, 41681), 'yeoda.errors.LoadingDataError', 'LoadingDataError', ([], {}), '()\n', (41679, 41681), False, 'from yeoda.errors import LoadingDataError\n'), ((42590, 42616), 'yeoda.errors.FileTypeUnknown', 'FileTypeUnknown', (['file_type'], {}), '(file_type)\n', (42605, 42616), False, 'from yeoda.errors import FileTypeUnknown\n'), ((50020, 50030), 'yeoda.utils.to_list', 'to_list', (['x'], {}), '(x)\n', (50027, 50030), False, 'from yeoda.utils import to_list\n'), ((50055, 50065), 'yeoda.utils.to_list', 'to_list', (['y'], {}), '(y)\n', (50062, 50065), False, 'from yeoda.utils import to_list\n'), ((50094, 50203), 'xarray.DataArray', 'xr.DataArray', (['entry'], {'coords': "{self.tdim_name: timestamps, 'y': y, 'x': x}", 'dims': "[self.tdim_name, 'y', 'x']"}), "(entry, coords={self.tdim_name: timestamps, 'y': y, 'x': x},\n dims=[self.tdim_name, 'y', 'x'])\n", (50106, 50203), True, 'import xarray as xr\n'), ((50469, 50483), 'xarray.merge', 'xr.merge', (['data'], {}), '(data)\n', (50477, 50483), True, 'import xarray as xr\n'), ((56781, 56807), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (56797, 56807), False, 'import os\n'), ((64865, 
64918), 'pandas.concat', 'pd.concat', (['inventories'], {'ignore_index': '(True)', 'sort': '(False)'}), '(inventories, ignore_index=True, sort=False)\n', (64874, 64918), True, 'import pandas as pd\n'), ((13775, 13794), 'os.path.basename', 'os.path.basename', (['x'], {}), '(x)\n', (13791, 13794), False, 'import os\n'), ((19130, 19156), 'yeoda.errors.TileNotAvailable', 'TileNotAvailable', (['tilename'], {}), '(tilename)\n', (19146, 19156), False, 'from yeoda.errors import TileNotAvailable\n'), ((31524, 31569), 'numpy.stack', 'np.stack', (['([data_mask] * data.shape[0])'], {'axis': '(0)'}), '([data_mask] * data.shape[0], axis=0)\n', (31532, 31569), True, 'import numpy as np\n'), ((37277, 37351), 'veranda.io.timestack.NcRasterTimeStack', 'NcRasterTimeStack', ([], {'file_ts': 'file_ts', 'stack_size': '"""single"""', 'auto_decode': '(False)'}), "(file_ts=file_ts, stack_size='single', auto_decode=False)\n", (37294, 37351), False, 'from veranda.io.timestack import NcRasterTimeStack\n'), ((37760, 37778), 'yeoda.errors.LoadingDataError', 'LoadingDataError', ([], {}), '()\n', (37776, 37778), False, 'from yeoda.errors import LoadingDataError\n'), ((42110, 42184), 'veranda.io.timestack.NcRasterTimeStack', 'NcRasterTimeStack', ([], {'file_ts': 'file_ts', 'stack_size': '"""single"""', 'auto_decode': '(False)'}), "(file_ts=file_ts, stack_size='single', auto_decode=False)\n", (42127, 42184), False, 'from veranda.io.timestack import NcRasterTimeStack\n'), ((42410, 42428), 'yeoda.errors.LoadingDataError', 'LoadingDataError', ([], {}), '()\n', (42426, 42428), False, 'from yeoda.errors import LoadingDataError\n'), ((50635, 50687), 'netCDF4.num2date', 'netCDF4.num2date', (["converted_data['time']", 'time_units'], {}), "(converted_data['time'], time_units)\n", (50651, 50687), False, 'import netCDF4\n'), ((50852, 50962), 'xarray.DataArray', 'xr.DataArray', (['data'], {'coords': "{self.tdim_name: timestamps, 'y': ys, 'x': xs}", 'dims': "[self.tdim_name, 'y', 'x']"}), "(data, coords={self.tdim_name: timestamps, 'y': ys, 'x': xs},\n dims=[self.tdim_name, 'y', 'x'])\n", (50864, 50962), True, 'import xarray as xr\n'), ((31634, 31661), 'numpy.arange', 'np.arange', (['min_col', 'max_col'], {}), '(min_col, max_col)\n', (31643, 31661), True, 'import numpy as np\n'), ((31720, 31747), 'numpy.arange', 'np.arange', (['min_row', 'max_row'], {}), '(min_row, max_row)\n', (31729, 31747), True, 'import numpy as np\n'), ((32657, 32710), 'numpy.stack', 'np.stack', (['([data_mask] * data_ar.data.shape[0])'], {'axis': '(0)'}), '([data_mask] * data_ar.data.shape[0], axis=0)\n', (32665, 32710), True, 'import numpy as np\n'), ((51253, 51305), 'netCDF4.num2date', 'netCDF4.num2date', (["converted_data['time']", 'time_units'], {}), "(converted_data['time'], time_units)\n", (51269, 51305), False, 'import netCDF4\n'), ((36575, 36598), 'numpy.arange', 'np.arange', (['col', 'max_col'], {}), '(col, max_col)\n', (36584, 36598), True, 'import numpy as np\n'), ((36665, 36688), 'numpy.arange', 'np.arange', (['row', 'max_row'], {}), '(row, max_row)\n', (36674, 36688), True, 'import numpy as np\n')]
|
import numpy as np
import pyximport
pyximport.install(setup_args={"include_dirs": np.get_include()})
from cam_viewer.data_structures.state_machine import ThreadedSocketedStateMachine, JMsg
import cam_viewer.data_util.cy_scatter as ct
import pyqtgraph as pg
import time
from PyQt5.QtCore import QObject, pyqtSignal
class DataDaemon(ThreadedSocketedStateMachine):
buffer_limit = 10
tx = pyqtSignal(np.ndarray, np.ndarray)
slider_tx = pyqtSignal()
def __init__(self):
super().__init__()
self.roi = False
# region Color map stuff
self.cmap_low = 0
self.cmap_mid = 1.5
self.cmap_up = 3
# self.curve_a = 0
# self.curve_b = 0
# self.curve_c = 0
self.center_x = 0
self.center_y = 0
self.center_z = 0
self.cmap_up_to_date = True
# endregion
# region roi stuff
self.roi_coords = None
# endregion
self.scale = 1.0
self.states['frame'] = self.process
self.states['up'] = self.set_up
self.states['mid'] = self.set_mid
self.states['low'] = self.set_low
self.states['scale'] = self.set_scale
self.states['enable_roi'] = self.set_roi_coords
self.states['disable_roi'] = self.unset_roi_coords
def __recieve_msg(self, msg: JMsg):
self.state_queue.append(msg)
def initial_state(self):
# self.find_curve()
pass
def set_low(self):
self.cmap_low = self.data.data
self.cmap_up_to_date = True
self.slider_tx.emit()
# self.find_curve()
def set_mid(self):
self.cmap_mid = self.data.data
self.cmap_up_to_date = True
self.slider_tx.emit()
# self.find_curve()
def set_up(self):
self.cmap_up = self.data.data
self.cmap_up_to_date = True
self.slider_tx.emit()
# self.find_curve()
def set_scale(self):
self.scale = self.data.data
self.cmap_up_to_date = True
self.slider_tx.emit()
def set_roi_coords(self):
self.roi_coords = self.data.data
def unset_roi_coords(self):
self.roi_coords = None
# def find_curve(self):
# self.curve_a, self.curve_b, self.curve_c = ct.find_formula(self.cmap_low, self.cmap_mid, self.cmap_up)
def process(self):
if not self.roi:
# start = time.time()
# bits = ct.split_data(self.data.data, self.data.data.shape[0], self.data.data.shape[1], 4)
# points, counts = ct.convert_to_points(bits, self.data.data.shape[0], self.data.data.shape[1], 1)
# result = ct.concatenate_bits(points, counts)
if self.roi_coords is None:
# print(self.data.data.shape)
result = ct.scatter_data(self.data.data)
else:
# print(self.data.data.shape)
result = ct.apply_roi(self.data.data, self.roi_coords)
colors = ct.create_color_data(result, self.cmap_low, self.cmap_mid, self.cmap_up)
# print("Frame took {} seconds to process".format(round(time.time() - start, 3)))
self.tx.emit(result, colors)
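# Hedged wiring sketch, added for illustration (not part of the original module): a GUI
# consumer would typically connect the Qt signals declared above; the slot names are
# assumptions, and the exact way frames reach the daemon depends on
# ThreadedSocketedStateMachine.
#
#     daemon = DataDaemon()
#     daemon.tx.connect(on_points_ready)         # slot receiving (points, colors) arrays
#     daemon.slider_tx.connect(on_cmap_changed)  # slot refreshing the color-map sliders
#     # frames and slider updates are then delivered as JMsg('frame', ...), JMsg('up', ...) etc.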
|
[
"PyQt5.QtCore.pyqtSignal",
"cam_viewer.data_util.cy_scatter.apply_roi",
"numpy.get_include",
"cam_viewer.data_util.cy_scatter.create_color_data",
"cam_viewer.data_util.cy_scatter.scatter_data"
] |
[((398, 432), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', (['np.ndarray', 'np.ndarray'], {}), '(np.ndarray, np.ndarray)\n', (408, 432), False, 'from PyQt5.QtCore import QObject, pyqtSignal\n'), ((450, 462), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ([], {}), '()\n', (460, 462), False, 'from PyQt5.QtCore import QObject, pyqtSignal\n'), ((82, 98), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (96, 98), True, 'import numpy as np\n'), ((2990, 3062), 'cam_viewer.data_util.cy_scatter.create_color_data', 'ct.create_color_data', (['result', 'self.cmap_low', 'self.cmap_mid', 'self.cmap_up'], {}), '(result, self.cmap_low, self.cmap_mid, self.cmap_up)\n', (3010, 3062), True, 'import cam_viewer.data_util.cy_scatter as ct\n'), ((2801, 2832), 'cam_viewer.data_util.cy_scatter.scatter_data', 'ct.scatter_data', (['self.data.data'], {}), '(self.data.data)\n', (2816, 2832), True, 'import cam_viewer.data_util.cy_scatter as ct\n'), ((2922, 2967), 'cam_viewer.data_util.cy_scatter.apply_roi', 'ct.apply_roi', (['self.data.data', 'self.roi_coords'], {}), '(self.data.data, self.roi_coords)\n', (2934, 2967), True, 'import cam_viewer.data_util.cy_scatter as ct\n')]
|
# -*- coding: utf-8 -*-
"""
Create a moving background video
======================
Description.
"""
# import standard libraries
import os
# import third-party libraries
import numpy as np
from colour import write_image, read_image
from multiprocessing import Pool, cpu_count
# import my libraries
# information
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2019 - <NAME>'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = '<NAME>'
__email__ = 'tor<EMAIL>.11 at-sign gmail.com'
__all__ = []
def thread_wrapper_cut_and_save(kwargs):
cut_and_save(**kwargs)
def cut_and_save(
frame_idx, base_img, st_pos_h_list, st_pos_v_list,
width, height, fps=60, sec=3, h_px=5, v_px=3,
bg_file="./bg_image/bg_img_16x9.png"):
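    # Crop a (height x width) window out of the tiled background at this frame's
    # offsets and write it to a per-frame 16-bit PNG.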
base = os.path.basename(os.path.splitext(bg_file)[0])
out_name = f"./bg_image_seq/{base}_{fps}fps_{sec}s_{frame_idx:04d}.png"
    st_pos_h = st_pos_h_list[frame_idx]
    ed_pos_h = st_pos_h + width
    st_pos_v = st_pos_v_list[frame_idx]
    ed_pos_v = st_pos_v + height
    out_img = base_img[st_pos_v:ed_pos_v, st_pos_h:ed_pos_h]
print(out_name)
write_image(out_img, out_name, bit_depth='uint16')
def moving_background(
fps=60, sec=7, h_px=6, v_px=3, bg_file="./bg_image/bg_img_16x9.png"):
frame = fps * sec
    # Build a large image to crop from (the background tiled 2x2 so the window can slide past the original edges)
img = read_image(bg_file)
width, height = (img.shape[1], img.shape[0])
img_temp = np.hstack([img, img])
img_4x = np.vstack([img_temp, img_temp])
    # Pre-compute the crop origin for every frame
idx = np.arange(frame)
st_pos_h_list = (idx * h_px) % width
st_pos_v_list = (idx * v_px) % height
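    # The crop origin advances by (h_px, v_px) pixels each frame and wraps via the
    # modulo, so the window scrolls endlessly over the tiled background.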
args = []
for frame_idx in idx:
kwargs = dict(
frame_idx=frame_idx, base_img=img_4x,
st_pos_h_list=st_pos_h_list, st_pos_v_list=st_pos_v_list,
width=width, height=height,
fps=fps, sec=sec, h_px=h_px, v_px=v_px, bg_file=bg_file)
args.append(kwargs)
# cut_and_save(**kwargs)
with Pool(cpu_count()) as pool:
pool.map(thread_wrapper_cut_and_save, args)
def main_func():
moving_background(
fps=60, sec=7, h_px=6, v_px=3, bg_file="./bg_image/bg_img_16x9.png")
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
main_func()
|
[
"os.path.abspath",
"colour.write_image",
"numpy.hstack",
"numpy.arange",
"os.path.splitext",
"colour.read_image",
"numpy.vstack",
"multiprocessing.cpu_count"
] |
[((1147, 1197), 'colour.write_image', 'write_image', (['out_img', 'out_name'], {'bit_depth': '"""uint16"""'}), "(out_img, out_name, bit_depth='uint16')\n", (1158, 1197), False, 'from colour import write_image, read_image\n'), ((1355, 1374), 'colour.read_image', 'read_image', (['bg_file'], {}), '(bg_file)\n', (1365, 1374), False, 'from colour import write_image, read_image\n'), ((1439, 1460), 'numpy.hstack', 'np.hstack', (['[img, img]'], {}), '([img, img])\n', (1448, 1460), True, 'import numpy as np\n'), ((1474, 1505), 'numpy.vstack', 'np.vstack', (['[img_temp, img_temp]'], {}), '([img_temp, img_temp])\n', (1483, 1505), True, 'import numpy as np\n'), ((1537, 1553), 'numpy.arange', 'np.arange', (['frame'], {}), '(frame)\n', (1546, 1553), True, 'import numpy as np\n'), ((808, 833), 'os.path.splitext', 'os.path.splitext', (['bg_file'], {}), '(bg_file)\n', (824, 833), False, 'import os\n'), ((2007, 2018), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (2016, 2018), False, 'from multiprocessing import Pool, cpu_count\n'), ((2258, 2283), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2273, 2283), False, 'import os\n')]
|
#! /Library/Frameworks/Python.framework/Versions/3.7/bin/python3
# ============================================== #
# =========== C: Coarse Grain Fitter =========== #
# ============================================== #
# Written by <NAME>
# August 2019
#
# ================ Requirements ================ #
from matplotlib import pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from scipy.interpolate import UnivariateSpline
import os
# import math
import BINAnalysis as boandi
from util import func_to_xy
# ================= Input File ================= #
value_file = 'outputs/measurement_data/EB2_EB3.dat'
# ============= Specifications ============== #
view_range = 0     # negative integer for a specific number of standard deviations; 0 for full view; positive integer for a specific value
# step = 0.01 # step needs manual adjustment (See README section C2. for help)
# ============= Fitting Functions ============== #
def f(x, k, x0, c):
return(k * (x - x0)**2 + c)
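# Quadratic model retained only for the commented-out curve_fit path in spline_fit below.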
# ================= Execution ================== #
def spline_fit(values, mes_type, step, export=False, plot=True, view_range=0):
# file_name = os.path.splitext(os.path.basename(value_file))[0]
# with open(value_file, 'r') as file:
# dataset = file.read()
# dataset_list = dataset.split('\n')
# mes_type = dataset_list.pop(0)[-1]
# values = [float(value) for value in dataset_list]
# === Calculating bin information === #
if view_range <= -1:
view_range = abs(view_range) * np.std(values)
elif view_range == 0:
view_range = len(values)
# initiates histogram object
histogram = boandi.Histogram(mes_type, values, step)
# for value in values:
# histogram.add_instance(value) # places each value into its appropriate bin
histogram.clear_empty_bins()
x = histogram.get_floors()
y = histogram.get_boltzes()
# === Fitting curve === #
spl = UnivariateSpline(x, y)
xs = np.linspace(min(x), max(x), 1000)
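    # xs is a dense grid over the measurement range, used below to draw the fitted spline.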
# k, x0, c = curve_fit(f, x, y, maxfev=1000000, p0=[10, 5, 5])[0]
if plot:
lo_cut = histogram.get_biggest(1)[0].floor - (view_range / 2)
up_cut = histogram.get_biggest(1)[0].floor + (view_range / 2)
x = [bin.floor for bin in histogram if lo_cut <= bin.floor <= up_cut]
y = [bin.boltz() for bin in histogram if lo_cut <= bin.floor <= up_cut]
# === Plotting points === #
x2 = [bin.floor for bin in histogram.get_biggest(1)]
y2 = [bin.boltz() for bin in histogram.get_biggest(1)]
plt.scatter(x2, y2, s=0.5, c='#eb5600') # plots biggest bin in red
plt.scatter(x, y, s=0.5) # plots points within view_range of biggest bin
plt.plot(xs, spl(xs), 'g', lw=3)
# === Adusting display settings === #
fmt = '{} {}\nBin: {:.2f} - {:.2f}\n{:.3f}(x-{:.3f})+{:.3f}'
# plt.suptitle(fmt.format(histogram.mes_type, histogram.name,
# min_bin, max_bin, k, x0, c), fontname='Courier New')
plt.ylabel('Boltzmann Inversion')
        plt.xlabel(histogram.mes_type.name.title() + ' Measurement')
plt.savefig(f'outputs/fit/hist_{histogram.name}.png')
plt.show()
# === Histogram Data Writing === #
# edge_flor = edges[biggest_bin_ix]
# edge_ceil = edges[biggest_bin_ix+1]
# percent_within = 100*(max(hist)/sum(hist))
# hist_output.write('{:9} {:23} {:22.16f} {:22.16f} {:.1f}%\n'.format(mes_type,histogram.name,edge_flor,edge_ceil,percent_within))
#
# # === Angle Data Writing === #
# cd('angles')
# angle_output = open('{}.dat'.format(histogram.name),'w')
#
# for bin in bins.values():
# for angle in sorted(bin.contents,key=float):
# output = '{} {}\n'.format(angle.val,angle.prob)
# angle_output.write(output)
print('Outputs Written!\nTask Complete!')
return spl
|
[
"matplotlib.pyplot.show",
"numpy.std",
"matplotlib.pyplot.scatter",
"scipy.interpolate.UnivariateSpline",
"BINAnalysis.Histogram",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.savefig"
] |
[((1665, 1705), 'BINAnalysis.Histogram', 'boandi.Histogram', (['mes_type', 'values', 'step'], {}), '(mes_type, values, step)\n', (1681, 1705), True, 'import BINAnalysis as boandi\n'), ((1959, 1981), 'scipy.interpolate.UnivariateSpline', 'UnivariateSpline', (['x', 'y'], {}), '(x, y)\n', (1975, 1981), False, 'from scipy.interpolate import UnivariateSpline\n'), ((2576, 2615), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x2', 'y2'], {'s': '(0.5)', 'c': '"""#eb5600"""'}), "(x2, y2, s=0.5, c='#eb5600')\n", (2587, 2615), True, 'from matplotlib import pyplot as plt\n'), ((2652, 2676), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'s': '(0.5)'}), '(x, y, s=0.5)\n', (2663, 2676), True, 'from matplotlib import pyplot as plt\n'), ((3049, 3082), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Boltzmann Inversion"""'], {}), "('Boltzmann Inversion')\n", (3059, 3082), True, 'from matplotlib import pyplot as plt\n'), ((3159, 3212), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""outputs/fit/hist_{histogram.name}.png"""'], {}), "(f'outputs/fit/hist_{histogram.name}.png')\n", (3170, 3212), True, 'from matplotlib import pyplot as plt\n'), ((3221, 3231), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3229, 3231), True, 'from matplotlib import pyplot as plt\n'), ((1540, 1554), 'numpy.std', 'np.std', (['values'], {}), '(values)\n', (1546, 1554), True, 'import numpy as np\n')]
|
import numpy as np
import random as random
from random import sample
import copy as cp
from HandEvaluator import HandEvaluator
from keras.models import Sequential
from keras.layers import LSTM, Dense, Merge
from keras.optimizers import Adam
from keras.models import load_model
# Some useful methods.
def is_discard_round(possible_actions):
splits = possible_actions[-1].split(":")
return splits[0] == "DISCARD"
def can_check(possible_actions):
for i in range(len(possible_actions)):
if possible_actions[i] == "CHECK":
return True
return False
def can_call(possible_actions):
for i in range(len(possible_actions)):
if possible_actions[i] == "CALL":
return True
return False
def can_fold(possible_actions):
for i in range(len(possible_actions)):
if possible_actions[i] == "FOLD":
return True
return False
def can_put_money_in(possible_actions):
for i in range(len(possible_actions)):
splits = possible_actions[i].split(":")
if splits[0] == "BET" or splits[0] == "RAISE":
return True, splits[0], int(splits[1]), int(splits[2])
return False, "", 0, 0
class AdaptiveBrain:
def __init__(self, restore_from=[]):
self.hand_evaluator = HandEvaluator()
self.Q_gamma = 0.9
self.epsilon = 0.995
self.new_state = []
if restore_from:
print("Restoring from: " + restore_from)
self.Q = load_model(restore_from)
else:
self.Q = self.initialize_network()
def initialize_network(self):
TIMESTEPS = None
TEMPORAL_DATADIM = 5
STATIC_DATADIM = 1
# Learns over the temporal feature matrix
temporal_model = Sequential()
temporal_model.add(LSTM(12, return_sequences=True,
input_shape=(TIMESTEPS, TEMPORAL_DATADIM))) # returns a sequence of vectors of dimension 12
temporal_model.add(LSTM(12, return_sequences=True)) # returns a sequence of vectors of dimension 12
temporal_model.add(LSTM(12)) # return a single vector of dimension 12
# Learns over static features.
static_model = Sequential()
static_model.add(Dense(5, input_dim=STATIC_DATADIM, activation="relu"))
combined_model = Sequential()
combined_model.add(Merge([temporal_model, static_model], mode='concat'))
combined_model.add(Dense(10, activation='relu'))
combined_model.add(Dense(3, activation='relu'))
combined_model.add(Dense(1, activation='linear'))
combined_model.compile(loss='mse', optimizer='adam')
return combined_model
def enumerate_next_action_vectors(self, bot):
BET_INCREMENT = 5
STACKSIZE = 200.0
possible_actions = bot.possible_actions
last_in_pot_hero = bot.temporal_feature_matrix[0,-1];
last_in_pot_villain = bot.temporal_feature_matrix[1,-1];
street = bot.get_street()
if is_discard_round(possible_actions):
# Showdown probabilities of discarding.
win_pct_discard_none = self.hand_evaluator.evaluate_showdown_probabilities(
[bot.hand['hole1'], bot.hand['hole2']], bot.hand['board'], 100)
win_pct_discard_1 = self.hand_evaluator.evaluate_showdown_probabilities(
[bot.hand['hole2']], bot.hand['board'], 100)
win_pct_discard_2 = self.hand_evaluator.evaluate_showdown_probabilities(
[bot.hand['hole1']], bot.hand['board'], 100)
a_none = np.asarray([last_in_pot_hero, last_in_pot_villain, street, 0, 0]).reshape((1,-1))
a_discard = np.asarray([last_in_pot_hero, last_in_pot_villain, street, 1, 0]).reshape((1,-1))
input_discard_none = [a_none, np.asarray([[win_pct_discard_none]])]
input_discard_1 = [a_discard, np.asarray([[win_pct_discard_1]])]
input_discard_2 = [a_discard, np.asarray([[win_pct_discard_2]])]
inputs = [input_discard_none, input_discard_1, input_discard_2]
action_strs = ['CHECK', 'DISCARD:' + bot.hand['hole1'], 'DISCARD:' + bot.hand['hole2']]
else:
# Available actions should be one of the following:
# BET:minBet:maxBet *
# CALL*
# CHECK *
# FOLD *
# RAISE:minRaise:maxRaise *
showdown_prob = self.hand_evaluator.evaluate_showdown_probabilities(
[bot.hand['hole1'], bot.hand['hole2']], bot.hand['board'], 100)
inputs = []
action_strs = []
# Folding
#if can_fold(possible_actions):
# inputs.append([np.asarray([-1, -1, -1, -1, -1]).reshape((1,-1)),
# np.asarray([[showdown_prob]]) ])
# action_strs.append("FOLD")
# Checking
inputs.append([np.asarray([last_in_pot_hero, last_in_pot_villain, street, 0, 0]).reshape((1,-1)),
np.asarray([[showdown_prob]]) ])
if can_check(possible_actions):
action_strs.append("CHECK")
else:
action_strs.append("FOLD")
# Calling
if can_call(possible_actions):
call_amt = bot.temporal_feature_matrix[1,-1]
inputs.append([np.asarray([call_amt, last_in_pot_villain, street, 0, 0]).reshape((1,-1)),
np.asarray([[showdown_prob]]) ])
action_strs.append("CALL")
# Betting or raising.
cpmi, action_type, min_bet, max_bet = can_put_money_in(possible_actions)
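            # Enumerate candidate bet/raise sizes from min_bet to max_bet in BET_INCREMENT
            # steps (always including max_bet itself); each is scaled by STACKSIZE and added
            # to max_of_prev_street to form the hypothetical hero-in-pot feature.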
if cpmi:
max_of_prev_street = bot.get_max_of_prev_street(0)
for i in range(min_bet, max_bet+1, BET_INCREMENT):
inputs.append([np.asarray([float(i)/STACKSIZE + max_of_prev_street, last_in_pot_villain, street, 0, 0]).reshape((1,-1)),
np.asarray([[showdown_prob]]) ])
action_strs.append(action_type + ":" + str(i))
if i != max_bet: # tack on the max_bet
i = max_bet
inputs.append([np.asarray([float(i)/STACKSIZE + max_of_prev_street, last_in_pot_villain, street, 0, 0]).reshape((1,-1)),
np.asarray([[showdown_prob]]) ])
action_strs.append(action_type + ":" + str(i))
return inputs, action_strs
def evaluate_Q_function(self, S, a):
# S = state
# a = action(s)
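        # Each candidate action is a [temporal_row, static_row] pair: stack its temporal
        # row onto the state history so the LSTM branch sees the hypothetical next
        # timestep, and batch all candidates so a single predict() scores every action.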
Q_in_temporal = np.vstack((S, a[0][0])).reshape((1,-1,S.shape[1]))
Q_in_static = a[0][1]
for i in range(1,len(a)):
q_i = np.vstack((S, a[i][0])).reshape((1,-1,S.shape[1]))
Q_in_temporal = np.vstack( (Q_in_temporal, q_i) )
Q_in_static = np.vstack( (Q_in_static, a[i][1]))
possible_states = [Q_in_temporal, Q_in_static]
return self.Q.predict(possible_states), possible_states
def update_Q_function(self, bot):
# self.new_state is the move we made in the last decision point.
if len(self.new_state) > 0: # only update if tfm is initialized.
print("YO!! Updating Q-function")
actions, action_strs = self.enumerate_next_action_vectors(bot)
Qvals, possible_states = self.evaluate_Q_function(bot.temporal_feature_matrix.T, actions)
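            # Q-learning target: the realised winnings at a terminal state, otherwise
            # Q_gamma times the best predicted value over the currently available actions.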
reward = bot.hand['winnings']
if reward != 0: # hand is over -> terminal state.
target = reward
else: # we're still playing the hand
#newQ = self.Q.predict(self.new_state, batch_size=1)
maxQ = np.max(Qvals)
target = self.Q_gamma*maxQ # reward is zero, so our target is simply our expected future reward.
print(target)
self.Q.fit(self.new_state, np.asarray(target).reshape((1,1)), batch_size=1, nb_epoch=5, verbose=0)
def learn_from_last_action(self, bot):
self.update_Q_function(bot)
def get_reward_for_folding(self, bot):
        return -bot.temporal_feature_matrix[0,-1]*200.0
def get_epsilon_value(self):
self.epsilon = 0.995*self.epsilon
return self.epsilon
def make_decision(self, bot):
print("Making Q decision")
actions, action_strs = self.enumerate_next_action_vectors(bot)
# Evaluates Q-function over all non-folding actions.
Qvals, possible_states = self.evaluate_Q_function(bot.temporal_feature_matrix.T, actions)
if random.random() > self.get_epsilon_value():
best_idx = np.argmax(Qvals)
else:
print("woowowow taking random action!!")
best_idx = random.randint(0,Qvals.shape[0]-1)
best_temporal_in = possible_states[0][best_idx]
best_static_in = possible_states[1][best_idx]
self.new_state = [best_temporal_in.reshape((1,best_temporal_in.shape[0],best_temporal_in.shape[1])),
best_static_in.reshape((1,1))]
return action_strs[best_idx]
class RationalBrain:
def __init__(self):
self.hand_evaluator = HandEvaluator()
def make_decision(self, bot):
# bot is a poker player bot.
# by passing bot, this function has access to the internals of bot
# which includes various features.
possible_actions = bot.possible_actions
if is_discard_round(possible_actions):
print("This is a discard round")
# Just take a look at naive showdown probabilities and make the discard decision based on that.
win_pct_discard_none = self.hand_evaluator.evaluate_showdown_probabilities(
[bot.hand['hole1'], bot.hand['hole2']], bot.hand['board'], 100)
win_pct_discard_1 = self.hand_evaluator.evaluate_showdown_probabilities(
[bot.hand['hole2']], bot.hand['board'], 100)
win_pct_discard_2 = self.hand_evaluator.evaluate_showdown_probabilities(
[bot.hand['hole1']], bot.hand['board'], 100)
prob_vec = np.asarray([win_pct_discard_none, win_pct_discard_1, win_pct_discard_2])
print(prob_vec)
action_idx = np.argmax(prob_vec)
if action_idx == 0:
action_str = "CHECK"
elif action_idx == 1:
action_str = "DISCARD:" + bot.hand['hole1']
elif action_idx == 2:
action_str = "DISCARD:" + bot.hand['hole2']
print("ACTION_TAKEN -> " + action_str)
return action_str
else: # This is a quantitative bet round, where we have to decide how much money to put on the table (if any <=> fold)
print("This is a Q-bet round")
showdown_prob = self.hand_evaluator.evaluate_showdown_probabilities(
[bot.hand['hole1'], bot.hand['hole2']], bot.hand['board'], 100)
print("showdown prob:" + str(showdown_prob))
if showdown_prob > 0.8:
stake_is_worth = random.randint(170, 200)
elif showdown_prob > 0.6 and showdown_prob < 0.8:
stake_is_worth = random.randint(100, 169)
elif showdown_prob > 0.5 and showdown_prob < 0.6:
stake_is_worth = random.randint(50,100)
elif showdown_prob > 0.3 and showdown_prob < 0.5:
stake_is_worth = random.randint(10,50)
else:
stake_is_worth = 0
# Available actions should be one of the following:
# BET:minBet:maxBet
# CALL
# CHECK
# FOLD
# RAISE:minRaise:maxRaise
current_stake = np.sum(bot.temporal_feature_matrix[0])*200
villain_hero_differential = (bot.hand['pot_size'][-1] - current_stake)
stake_difference = stake_is_worth - villain_hero_differential
print("Current stake: " + str(current_stake))
print("Stake worth: " + str(stake_is_worth))
print("Stake diff: " + str(stake_difference))
if stake_difference < 0:
action_str = "CHECK" if can_check(bot.possible_actions) else "FOLD"
else:
if stake_difference < 30:
action_str = "CHECK" if can_check(bot.possible_actions) else "CALL"
else:
                    can_bet, action_type, min_bet, max_bet = can_put_money_in(bot.possible_actions)
if action_type == "RAISE":
bet_val = max(min(stake_difference + current_stake, max_bet), min_bet)
else:
bet_val = max(min(stake_difference, max_bet), min_bet)
action_str = action_type + ":" + str(bet_val)
print("ACTION_TAKEN -> " + action_str)
return action_str
|
[
"keras.models.load_model",
"keras.layers.Merge",
"numpy.sum",
"random.randint",
"numpy.argmax",
"keras.layers.LSTM",
"numpy.asarray",
"random.random",
"numpy.max",
"keras.layers.Dense",
"HandEvaluator.HandEvaluator",
"keras.models.Sequential",
"numpy.vstack"
] |
[((1284, 1299), 'HandEvaluator.HandEvaluator', 'HandEvaluator', ([], {}), '()\n', (1297, 1299), False, 'from HandEvaluator import HandEvaluator\n'), ((1795, 1807), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1805, 1807), False, 'from keras.models import Sequential\n'), ((2234, 2246), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2244, 2246), False, 'from keras.models import Sequential\n'), ((2361, 2373), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2371, 2373), False, 'from keras.models import Sequential\n'), ((9613, 9628), 'HandEvaluator.HandEvaluator', 'HandEvaluator', ([], {}), '()\n', (9626, 9628), False, 'from HandEvaluator import HandEvaluator\n'), ((1492, 1516), 'keras.models.load_model', 'load_model', (['restore_from'], {}), '(restore_from)\n', (1502, 1516), False, 'from keras.models import load_model\n'), ((1835, 1909), 'keras.layers.LSTM', 'LSTM', (['(12)'], {'return_sequences': '(True)', 'input_shape': '(TIMESTEPS, TEMPORAL_DATADIM)'}), '(12, return_sequences=True, input_shape=(TIMESTEPS, TEMPORAL_DATADIM))\n', (1839, 1909), False, 'from keras.layers import LSTM, Dense, Merge\n'), ((2010, 2041), 'keras.layers.LSTM', 'LSTM', (['(12)'], {'return_sequences': '(True)'}), '(12, return_sequences=True)\n', (2014, 2041), False, 'from keras.layers import LSTM, Dense, Merge\n'), ((2119, 2127), 'keras.layers.LSTM', 'LSTM', (['(12)'], {}), '(12)\n', (2123, 2127), False, 'from keras.layers import LSTM, Dense, Merge\n'), ((2272, 2325), 'keras.layers.Dense', 'Dense', (['(5)'], {'input_dim': 'STATIC_DATADIM', 'activation': '"""relu"""'}), "(5, input_dim=STATIC_DATADIM, activation='relu')\n", (2277, 2325), False, 'from keras.layers import LSTM, Dense, Merge\n'), ((2401, 2453), 'keras.layers.Merge', 'Merge', (['[temporal_model, static_model]'], {'mode': '"""concat"""'}), "([temporal_model, static_model], mode='concat')\n", (2406, 2453), False, 'from keras.layers import LSTM, Dense, Merge\n'), ((2482, 2510), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""relu"""'}), "(10, activation='relu')\n", (2487, 2510), False, 'from keras.layers import LSTM, Dense, Merge\n'), ((2539, 2566), 'keras.layers.Dense', 'Dense', (['(3)'], {'activation': '"""relu"""'}), "(3, activation='relu')\n", (2544, 2566), False, 'from keras.layers import LSTM, Dense, Merge\n'), ((2595, 2624), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (2600, 2624), False, 'from keras.layers import LSTM, Dense, Merge\n'), ((7056, 7087), 'numpy.vstack', 'np.vstack', (['(Q_in_temporal, q_i)'], {}), '((Q_in_temporal, q_i))\n', (7065, 7087), True, 'import numpy as np\n'), ((7116, 7149), 'numpy.vstack', 'np.vstack', (['(Q_in_static, a[i][1])'], {}), '((Q_in_static, a[i][1]))\n', (7125, 7149), True, 'import numpy as np\n'), ((8932, 8947), 'random.random', 'random.random', ([], {}), '()\n', (8945, 8947), True, 'import random as random\n'), ((8999, 9015), 'numpy.argmax', 'np.argmax', (['Qvals'], {}), '(Qvals)\n', (9008, 9015), True, 'import numpy as np\n'), ((9107, 9144), 'random.randint', 'random.randint', (['(0)', '(Qvals.shape[0] - 1)'], {}), '(0, Qvals.shape[0] - 1)\n', (9121, 9144), True, 'import random as random\n'), ((10580, 10652), 'numpy.asarray', 'np.asarray', (['[win_pct_discard_none, win_pct_discard_1, win_pct_discard_2]'], {}), '([win_pct_discard_none, win_pct_discard_1, win_pct_discard_2])\n', (10590, 10652), True, 'import numpy as np\n'), ((10706, 10725), 'numpy.argmax', 'np.argmax', (['prob_vec'], {}), '(prob_vec)\n', 
(10715, 10725), True, 'import numpy as np\n'), ((3911, 3947), 'numpy.asarray', 'np.asarray', (['[[win_pct_discard_none]]'], {}), '([[win_pct_discard_none]])\n', (3921, 3947), True, 'import numpy as np\n'), ((3991, 4024), 'numpy.asarray', 'np.asarray', (['[[win_pct_discard_1]]'], {}), '([[win_pct_discard_1]])\n', (4001, 4024), True, 'import numpy as np\n'), ((4068, 4101), 'numpy.asarray', 'np.asarray', (['[[win_pct_discard_2]]'], {}), '([[win_pct_discard_2]])\n', (4078, 4101), True, 'import numpy as np\n'), ((6835, 6858), 'numpy.vstack', 'np.vstack', (['(S, a[0][0])'], {}), '((S, a[0][0]))\n', (6844, 6858), True, 'import numpy as np\n'), ((7987, 8000), 'numpy.max', 'np.max', (['Qvals'], {}), '(Qvals)\n', (7993, 8000), True, 'import numpy as np\n'), ((11577, 11601), 'random.randint', 'random.randint', (['(170)', '(200)'], {}), '(170, 200)\n', (11591, 11601), True, 'import random as random\n'), ((12247, 12285), 'numpy.sum', 'np.sum', (['bot.temporal_feature_matrix[0]'], {}), '(bot.temporal_feature_matrix[0])\n', (12253, 12285), True, 'import numpy as np\n'), ((3668, 3733), 'numpy.asarray', 'np.asarray', (['[last_in_pot_hero, last_in_pot_villain, street, 0, 0]'], {}), '([last_in_pot_hero, last_in_pot_villain, street, 0, 0])\n', (3678, 3733), True, 'import numpy as np\n'), ((3774, 3839), 'numpy.asarray', 'np.asarray', (['[last_in_pot_hero, last_in_pot_villain, street, 1, 0]'], {}), '([last_in_pot_hero, last_in_pot_villain, street, 1, 0])\n', (3784, 3839), True, 'import numpy as np\n'), ((5184, 5213), 'numpy.asarray', 'np.asarray', (['[[showdown_prob]]'], {}), '([[showdown_prob]])\n', (5194, 5213), True, 'import numpy as np\n'), ((6977, 7000), 'numpy.vstack', 'np.vstack', (['(S, a[i][0])'], {}), '((S, a[i][0]))\n', (6986, 7000), True, 'import numpy as np\n'), ((11697, 11721), 'random.randint', 'random.randint', (['(100)', '(169)'], {}), '(100, 169)\n', (11711, 11721), True, 'import random as random\n'), ((5648, 5677), 'numpy.asarray', 'np.asarray', (['[[showdown_prob]]'], {}), '([[showdown_prob]])\n', (5658, 5677), True, 'import numpy as np\n'), ((8192, 8210), 'numpy.asarray', 'np.asarray', (['target'], {}), '(target)\n', (8202, 8210), True, 'import numpy as np\n'), ((11817, 11840), 'random.randint', 'random.randint', (['(50)', '(100)'], {}), '(50, 100)\n', (11831, 11840), True, 'import random as random\n'), ((5069, 5134), 'numpy.asarray', 'np.asarray', (['[last_in_pot_hero, last_in_pot_villain, street, 0, 0]'], {}), '([last_in_pot_hero, last_in_pot_villain, street, 0, 0])\n', (5079, 5134), True, 'import numpy as np\n'), ((6206, 6235), 'numpy.asarray', 'np.asarray', (['[[showdown_prob]]'], {}), '([[showdown_prob]])\n', (6216, 6235), True, 'import numpy as np\n'), ((6571, 6600), 'numpy.asarray', 'np.asarray', (['[[showdown_prob]]'], {}), '([[showdown_prob]])\n', (6581, 6600), True, 'import numpy as np\n'), ((11935, 11957), 'random.randint', 'random.randint', (['(10)', '(50)'], {}), '(10, 50)\n', (11949, 11957), True, 'import random as random\n'), ((5541, 5598), 'numpy.asarray', 'np.asarray', (['[call_amt, last_in_pot_villain, street, 0, 0]'], {}), '([call_amt, last_in_pot_villain, street, 0, 0])\n', (5551, 5598), True, 'import numpy as np\n')]
|
import os
import numpy as np
import h5py
from .utils import timestamp2array, timestamp2vec_origin, transtr, transtrlong, transtr24
def external_taxibj(datapath, fourty_eight, previous_meteorol):
def f(tsx, tsy, ext_time):
exd = ExtDat(datapath)
tsx = np.asarray([exd.get_bjextarray(N, ext_time, fourty_eight=fourty_eight) for N in tsx]) # N * len_seq
tsy = exd.get_bjextarray(tsy, ext_time, fourty_eight=fourty_eight, previous_meteorol=previous_meteorol) # N
print('there are totally', exd.tot_holiday, 'holidays in constructed data')
return tsx, tsy
return f
def external_bikenyc():
def f(tsx, tsy, ext_time):
timestampfunc = timestamp2array if ext_time else timestamp2vec_origin
tsx = np.asarray([timestampfunc(N) for N in tsx])
tsy = timestampfunc(tsy)
return tsx, tsy
return f
def external_taxinyc(datapath, fourty_eight, previous_meteorol):
def f(tsx, tsy, ext_time):
exd = ExtDat(datapath, dataset='TaxiNYC')
tsx = np.asarray([exd.get_bjextarray(N, ext_time, fourty_eight=fourty_eight) for N in tsx]) # N * len_seq
tsy = exd.get_bjextarray(tsy, ext_time, fourty_eight=fourty_eight, previous_meteorol=previous_meteorol) # N
print('there are totally', exd.tot_holiday, 'holidays in constructed data')
return tsx, tsy
return f
class ExtDat:
def __init__(self, datapath, dataset='TaxiBJ'):
self.tot_holiday = 0
self.holidayfname = os.path.join(datapath, dataset, 'Holiday.txt')
f = open(self.holidayfname, 'r')
holidays = f.readlines()
self.holidays = set([h.strip() for h in holidays])
        '''
        timeslots: the predicted timeslots
        In the real world we don't have meteorology data for the predicted timeslot; instead we use
        the meteorology at the previous timeslot, i.e., slot = predicted_slot - timeslot (you could
        use predicted meteorology data as well)
        '''
fname = os.path.join(datapath, dataset, 'Meteorology.h5')
f = h5py.File(fname, 'r')
timeslots = f['date'].value
wind_speed = f['WindSpeed'].value
weather = f['Weather'].value
temperature = f['Temperature'].value
f.close()
self.M = dict() # map timeslot to index
for i, slot in enumerate(timeslots):
self.M[slot.decode()] = i
ws = [] # WindSpeed
wr = [] # Weather
te = [] # Temperature
for slot in timeslots:
cur_id = self.M[slot.decode()]
ws.append(wind_speed[cur_id])
wr.append(weather[cur_id])
te.append(temperature[cur_id])
ws = np.asarray(ws)
wr = np.asarray(wr)
te = np.asarray(te)
# 0-1 scale
ws = 1. * (ws - ws.min()) / (ws.max() - ws.min())
te = 1. * (te - te.min()) / (te.max() - te.min())
print("meteor shape: ", ws.shape, wr.shape, te.shape)
# concatenate all these attributes
self.meteor_data = np.hstack([wr, ws[:, None], te[:, None]])
def get_bjextarray(self, timestamp_list, ext_time, fourty_eight=False, previous_meteorol=False):
vecs_timestamp = timestamp2array(timestamp_list, fourty_eight) if ext_time else timestamp2vec_origin(timestamp_list)
bits_holiday = self.get_holidayarray(timestamp_list)
vecs_meteorol = self.get_meteorolarray(timestamp_list, previous_meteorol, fourty_eight)
return np.hstack([vecs_timestamp, bits_holiday, vecs_meteorol])
def get_holidayarray(self, timeslots):
h = [0 for _ in range(len(timeslots))]
for i, slot in enumerate(timeslots):
transformat = transtr(slot)
if transformat in self.holidays:
h[i] = 1
self.tot_holiday += 1
return np.vstack(h)
def get_meteorolarray(self, timestamp_list, previous_meteorol, fourty_eight):
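        # Look up the meteorology row for each timeslot; with previous_meteorol the lookup
        # steps back one slot (30 min when fourty_eight, otherwise 60 min) so only
        # already-observed weather is used for the predicted slot.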
if fourty_eight:
return np.array(
[
self.meteor_data[
self.M[transtrlong(ts-np.timedelta64(30, 'm') if previous_meteorol else ts)]
] for ts in timestamp_list
]
)
else:
return np.array(
[
self.meteor_data[
self.M[transtr24(ts-np.timedelta64(60, 'm') if previous_meteorol else ts)]
] for ts in timestamp_list
]
)
|
[
"h5py.File",
"numpy.asarray",
"numpy.hstack",
"numpy.timedelta64",
"os.path.join",
"numpy.vstack"
] |
[((1507, 1553), 'os.path.join', 'os.path.join', (['datapath', 'dataset', '"""Holiday.txt"""'], {}), "(datapath, dataset, 'Holiday.txt')\n", (1519, 1553), False, 'import os\n'), ((1997, 2046), 'os.path.join', 'os.path.join', (['datapath', 'dataset', '"""Meteorology.h5"""'], {}), "(datapath, dataset, 'Meteorology.h5')\n", (2009, 2046), False, 'import os\n'), ((2059, 2080), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (2068, 2080), False, 'import h5py\n'), ((2691, 2705), 'numpy.asarray', 'np.asarray', (['ws'], {}), '(ws)\n', (2701, 2705), True, 'import numpy as np\n'), ((2719, 2733), 'numpy.asarray', 'np.asarray', (['wr'], {}), '(wr)\n', (2729, 2733), True, 'import numpy as np\n'), ((2747, 2761), 'numpy.asarray', 'np.asarray', (['te'], {}), '(te)\n', (2757, 2761), True, 'import numpy as np\n'), ((3032, 3073), 'numpy.hstack', 'np.hstack', (['[wr, ws[:, None], te[:, None]]'], {}), '([wr, ws[:, None], te[:, None]])\n', (3041, 3073), True, 'import numpy as np\n'), ((3474, 3530), 'numpy.hstack', 'np.hstack', (['[vecs_timestamp, bits_holiday, vecs_meteorol]'], {}), '([vecs_timestamp, bits_holiday, vecs_meteorol])\n', (3483, 3530), True, 'import numpy as np\n'), ((3830, 3842), 'numpy.vstack', 'np.vstack', (['h'], {}), '(h)\n', (3839, 3842), True, 'import numpy as np\n'), ((4082, 4105), 'numpy.timedelta64', 'np.timedelta64', (['(30)', '"""m"""'], {}), "(30, 'm')\n", (4096, 4105), True, 'import numpy as np\n'), ((4359, 4382), 'numpy.timedelta64', 'np.timedelta64', (['(60)', '"""m"""'], {}), "(60, 'm')\n", (4373, 4382), True, 'import numpy as np\n')]
|
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import treecorr
import os
import coord
from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog
def test_log_binning():
import math
# Test some basic properties of the base class
def check_arrays(nnn):
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
np.testing.assert_almost_equal(nnn.ubin_size * nnn.nubins, nnn.max_u-nnn.min_u)
np.testing.assert_almost_equal(nnn.vbin_size * nnn.nvbins, nnn.max_v-nnn.min_v)
#print('logr = ',nnn.logr1d)
np.testing.assert_equal(nnn.logr1d.shape, (nnn.nbins,) )
np.testing.assert_almost_equal(nnn.logr1d[0], math.log(nnn.min_sep) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr1d[-1], math.log(nnn.max_sep) - 0.5*nnn.bin_size)
np.testing.assert_equal(nnn.logr.shape, (nnn.nbins, nnn.nubins, 2*nnn.nvbins) )
np.testing.assert_almost_equal(nnn.logr[:,0,0], nnn.logr1d)
np.testing.assert_almost_equal(nnn.logr[:,-1,-1], nnn.logr1d)
assert len(nnn.logr) == nnn.nbins
#print('u = ',nnn.u1d)
np.testing.assert_equal(nnn.u1d.shape, (nnn.nubins,) )
np.testing.assert_almost_equal(nnn.u1d[0], nnn.min_u + 0.5*nnn.ubin_size)
np.testing.assert_almost_equal(nnn.u1d[-1], nnn.max_u - 0.5*nnn.ubin_size)
np.testing.assert_equal(nnn.u.shape, (nnn.nbins, nnn.nubins, 2*nnn.nvbins) )
np.testing.assert_almost_equal(nnn.u[0,:,0], nnn.u1d)
np.testing.assert_almost_equal(nnn.u[-1,:,-1], nnn.u1d)
#print('v = ',nnn.v1d)
np.testing.assert_equal(nnn.v1d.shape, (2*nnn.nvbins,) )
np.testing.assert_almost_equal(nnn.v1d[0], -nnn.max_v + 0.5*nnn.vbin_size)
np.testing.assert_almost_equal(nnn.v1d[-1], nnn.max_v - 0.5*nnn.vbin_size)
np.testing.assert_almost_equal(nnn.v1d[nnn.nvbins], nnn.min_v + 0.5*nnn.vbin_size)
np.testing.assert_almost_equal(nnn.v1d[nnn.nvbins-1], -nnn.min_v - 0.5*nnn.vbin_size)
np.testing.assert_equal(nnn.v.shape, (nnn.nbins, nnn.nubins, 2*nnn.nvbins) )
np.testing.assert_almost_equal(nnn.v[0,0,:], nnn.v1d)
np.testing.assert_almost_equal(nnn.v[-1,-1,:], nnn.v1d)
def check_defaultuv(nnn):
assert nnn.min_u == 0.
assert nnn.max_u == 1.
assert nnn.nubins == np.ceil(1./nnn.ubin_size)
assert nnn.min_v == 0.
assert nnn.max_v == 1.
assert nnn.nvbins == np.ceil(1./nnn.vbin_size)
# Check the different ways to set up the binning:
# Omit bin_size
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, bin_type='LogRUV')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, max, n for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20,
min_u=0.2, max_u=0.9, nubins=12,
min_v=0., max_v=0.2, nvbins=2)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
assert nnn.min_u == 0.2
assert nnn.max_u == 0.9
assert nnn.nubins == 12
assert nnn.min_v == 0.
assert nnn.max_v == 0.2
assert nnn.nvbins == 2
check_arrays(nnn)
# Omit min_sep
nnn = treecorr.NNNCorrelation(max_sep=20, nbins=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify max, n, bs for u,v too.
nnn = treecorr.NNNCorrelation(max_sep=20, nbins=20, bin_size=0.1,
max_u=0.9, nubins=3, ubin_size=0.05,
max_v=0.4, nvbins=4, vbin_size=0.05)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.max_sep == 20.
assert nnn.nbins == 20
assert np.isclose(nnn.ubin_size, 0.05)
assert np.isclose(nnn.min_u, 0.75)
assert nnn.max_u == 0.9
assert nnn.nubins == 3
assert np.isclose(nnn.vbin_size, 0.05)
assert np.isclose(nnn.min_v, 0.2)
assert nnn.max_v == 0.4
assert nnn.nvbins == 4
check_arrays(nnn)
# Omit max_sep
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size == 0.1
assert nnn.min_sep == 5.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, n, bs for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=20, bin_size=0.1,
min_u=0.7, nubins=4, ubin_size=0.05,
min_v=0.2, nvbins=4, vbin_size=0.05)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.bin_size == 0.1
assert nnn.nbins == 20
assert nnn.min_u == 0.7
assert np.isclose(nnn.ubin_size, 0.05)
assert nnn.nubins == 4
assert nnn.min_v == 0.2
assert nnn.max_v == 0.4
assert np.isclose(nnn.vbin_size, 0.05)
assert nnn.nvbins == 4
check_arrays(nnn)
# Omit nbins
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size <= 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
check_defaultuv(nnn)
check_arrays(nnn)
# Specify min, max, bs for u,v too.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.2, max_u=0.9, ubin_size=0.03,
min_v=0.1, max_v=0.3, vbin_size=0.07)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.bin_size <= 0.1
assert nnn.min_u == 0.2
assert nnn.max_u == 0.9
assert nnn.nubins == 24
assert np.isclose(nnn.ubin_size, 0.7/24)
assert nnn.min_v == 0.1
assert nnn.max_v == 0.3
assert nnn.nvbins == 3
assert np.isclose(nnn.vbin_size, 0.2/3)
check_arrays(nnn)
# If only one of min/max v are set, respect that
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.2, ubin_size=0.03,
min_v=0.2, vbin_size=0.07)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_u == 0.2
assert nnn.max_u == 1.
assert nnn.nubins == 27
assert np.isclose(nnn.ubin_size, 0.8/27)
assert nnn.min_v == 0.2
assert nnn.max_v == 1.
assert nnn.nvbins == 12
assert np.isclose(nnn.vbin_size, 0.8/12)
check_arrays(nnn)
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
max_u=0.2, ubin_size=0.03,
max_v=0.2, vbin_size=0.07)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.min_u == 0.
assert nnn.max_u == 0.2
assert nnn.nubins == 7
assert np.isclose(nnn.ubin_size, 0.2/7)
assert nnn.min_v == 0.
assert nnn.max_v == 0.2
assert nnn.nvbins == 3
assert np.isclose(nnn.vbin_size, 0.2/3)
check_arrays(nnn)
# If only vbin_size is set for v, automatically figure out others.
# (And if necessary adjust the bin_size down a bit.)
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
ubin_size=0.3, vbin_size=0.3)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size <= 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.min_u == 0.
assert nnn.max_u == 1.
assert nnn.nubins == 4
assert np.isclose(nnn.ubin_size, 0.25)
assert nnn.min_v == 0.
assert nnn.max_v == 1.
assert nnn.nvbins == 4
assert np.isclose(nnn.vbin_size, 0.25)
check_arrays(nnn)
# If only nvbins is set for v, automatically figure out others.
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
nubins=5, nvbins=5)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size <= 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.min_u == 0.
assert nnn.max_u == 1.
assert nnn.nubins == 5
assert np.isclose(nnn.ubin_size,0.2)
assert nnn.min_v == 0.
assert nnn.max_v == 1.
assert nnn.nvbins == 5
assert np.isclose(nnn.vbin_size,0.2)
check_arrays(nnn)
# If both nvbins and vbin_size are set, set min/max automatically
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, bin_size=0.1,
ubin_size=0.1, nubins=5,
vbin_size=0.1, nvbins=5)
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
assert nnn.bin_size <= 0.1
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.ubin_size == 0.1
assert nnn.nubins == 5
assert nnn.max_u == 1.
assert np.isclose(nnn.min_u,0.5)
assert nnn.vbin_size == 0.1
assert nnn.nvbins == 5
assert nnn.min_v == 0.
assert np.isclose(nnn.max_v,0.5)
check_arrays(nnn)
assert_raises(TypeError, treecorr.NNNCorrelation)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5)
assert_raises(TypeError, treecorr.NNNCorrelation, max_sep=20)
assert_raises(TypeError, treecorr.NNNCorrelation, bin_size=0.1)
assert_raises(TypeError, treecorr.NNNCorrelation, nbins=20)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, bin_size=0.1)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, nbins=20)
assert_raises(TypeError, treecorr.NNNCorrelation, max_sep=20, bin_size=0.1)
assert_raises(TypeError, treecorr.NNNCorrelation, max_sep=20, nbins=20)
assert_raises(TypeError, treecorr.NNNCorrelation, bin_size=0.1, nbins=20)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1, nbins=20)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, bin_size=0.1)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
bin_type='Log')
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
bin_type='Linear')
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
bin_type='TwoD')
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
bin_type='Invalid')
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.3, max_u = 0.9, ubin_size=0.1, nubins=6)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.9, max_u = 0.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_u=-0.1, max_u = 0.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_u=0.1, max_u = 1.3)
assert_raises(TypeError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_v=0.1, max_v = 0.9, vbin_size=0.1, nvbins=9)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_v=0.9, max_v = 0.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_v=-0.1, max_v = 0.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=5, max_sep=20, bin_size=0.1,
min_v=0.1, max_v = 1.3)
assert_raises(ValueError, treecorr.NNNCorrelation, min_sep=20, max_sep=5, nbins=20,
split_method='invalid')
# Check the use of sep_units
# radians
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='radians')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5.)
np.testing.assert_almost_equal(nnn._max_sep, 20.)
assert nnn.min_sep == 5.
assert nnn.max_sep == 20.
assert nnn.nbins == 20
check_defaultuv(nnn)
check_arrays(nnn)
# arcsec
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='arcsec')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180/3600)
np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180/3600)
assert nnn.nbins == 20
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
# Note that logr is in the separation units, not radians.
np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# arcmin
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='arcmin')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180/60)
np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180/60)
assert nnn.nbins == 20
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# degrees
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='degrees')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/180)
np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/180)
assert nnn.nbins == 20
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# hours
nnn = treecorr.NNNCorrelation(min_sep=5, max_sep=20, nbins=20, sep_units='hours')
#print(nnn.min_sep,nnn.max_sep,nnn.bin_size,nnn.nbins)
#print(nnn.min_u,nnn.max_u,nnn.ubin_size,nnn.nubins)
#print(nnn.min_v,nnn.max_v,nnn.vbin_size,nnn.nvbins)
np.testing.assert_almost_equal(nnn.min_sep, 5.)
np.testing.assert_almost_equal(nnn.max_sep, 20.)
np.testing.assert_almost_equal(nnn._min_sep, 5. * math.pi/12)
np.testing.assert_almost_equal(nnn._max_sep, 20. * math.pi/12)
assert nnn.nbins == 20
np.testing.assert_almost_equal(nnn.bin_size * nnn.nbins, math.log(nnn.max_sep/nnn.min_sep))
np.testing.assert_almost_equal(nnn.logr[0], math.log(5) + 0.5*nnn.bin_size)
np.testing.assert_almost_equal(nnn.logr[-1], math.log(20) - 0.5*nnn.bin_size)
assert len(nnn.logr) == nnn.nbins
check_defaultuv(nnn)
# Check bin_slop
# Start with default behavior
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.1)
np.testing.assert_almost_equal(nnn.bu, 0.03)
np.testing.assert_almost_equal(nnn.bv, 0.07)
# Explicitly set bin_slop=1.0 does the same thing.
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=1.0,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.1)
np.testing.assert_almost_equal(nnn.bu, 0.03)
np.testing.assert_almost_equal(nnn.bv, 0.07)
# Use a smaller bin_slop
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=0.2,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 0.2
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.02)
np.testing.assert_almost_equal(nnn.bu, 0.006)
np.testing.assert_almost_equal(nnn.bv, 0.014)
# Use bin_slop == 0
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=0.0,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 0.0
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.0)
np.testing.assert_almost_equal(nnn.bu, 0.0)
np.testing.assert_almost_equal(nnn.bv, 0.0)
# Bigger bin_slop
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.1, bin_slop=2.0,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07, verbose=0)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 2.0
assert nnn.bin_size == 0.1
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.2)
np.testing.assert_almost_equal(nnn.bu, 0.06)
np.testing.assert_almost_equal(nnn.bv, 0.14)
# With bin_size > 0.1, explicit bin_slop=1.0 is accepted.
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.4, bin_slop=1.0,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07, verbose=0)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_slop == 1.0
assert nnn.bin_size == 0.4
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.4)
np.testing.assert_almost_equal(nnn.bu, 0.03)
np.testing.assert_almost_equal(nnn.bv, 0.07)
# But implicit bin_slop is reduced so that b = 0.1
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.4,
min_u=0., max_u=0.9, ubin_size=0.03,
min_v=0., max_v=0.21, vbin_size=0.07)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_size == 0.4
assert np.isclose(nnn.ubin_size, 0.03)
assert np.isclose(nnn.vbin_size, 0.07)
np.testing.assert_almost_equal(nnn.b, 0.1)
np.testing.assert_almost_equal(nnn.bu, 0.03)
np.testing.assert_almost_equal(nnn.bv, 0.07)
np.testing.assert_almost_equal(nnn.bin_slop, 0.25)
# Separately for each of the three parameters
nnn = treecorr.NNNCorrelation(min_sep=5, nbins=14, bin_size=0.05,
min_u=0., max_u=0.9, ubin_size=0.3,
min_v=0., max_v=0.17, vbin_size=0.17)
#print(nnn.bin_size,nnn.bin_slop,nnn.b)
#print(nnn.ubin_size,nnn.bu)
#print(nnn.vbin_size,nnn.bv)
assert nnn.bin_size == 0.05
assert np.isclose(nnn.ubin_size, 0.3)
assert np.isclose(nnn.vbin_size, 0.17)
np.testing.assert_almost_equal(nnn.b, 0.05)
np.testing.assert_almost_equal(nnn.bu, 0.1)
np.testing.assert_almost_equal(nnn.bv, 0.1)
np.testing.assert_almost_equal(nnn.bin_slop, 1.0) # The stored bin_slop is just for lnr
def is_ccw(x1,y1, x2,y2, x3,y3):
# Calculate the cross product of 1->2 with 1->3
x2 -= x1
x3 -= x1
y2 -= y1
y3 -= y1
return x2*y3-x3*y2 > 0.
def test_direct_count_auto():
# If the catalogs are small enough, we can do a direct count of the number of triangles
# to see if comes out right. This should exactly match the treecorr code if bin_slop=0.
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) )
cat = treecorr.Catalog(x=x, y=y)
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
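    # LogRUV triangle parametrisation used below: r = d2 with log-spaced bins, u = d3/d2,
    # and v = (d1-d2)/d3 with d1 >= d2 >= d3; v is negated for clockwise-ordered triangles.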
for i in range(ngal):
for j in range(i+1,ngal):
for k in range(j+1,ngal):
dij = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2)
dik = np.sqrt((x[i]-x[k])**2 + (y[i]-y[k])**2)
djk = np.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
ccw = True
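                # Sort the three side lengths so that d1 >= d2 >= d3 and record the
                # winding direction of the corresponding vertex ordering.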
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk;
ccw = is_ccw(x[i],y[i],x[j],y[j],x[k],y[k])
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik;
ccw = is_ccw(x[j],y[j],x[i],y[i],x[k],y[k])
else:
d3 = djk; d2 = dij; d1 = dik;
ccw = is_ccw(x[j],y[j],x[k],y[k],x[i],y[i])
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk;
ccw = is_ccw(x[i],y[i],x[k],y[k],x[j],y[j])
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij;
ccw = is_ccw(x[k],y[k],x[i],y[i],x[j],y[j])
else:
d3 = djk; d2 = dik; d1 = dij;
ccw = is_ccw(x[k],y[k],x[j],y[j],x[i],y[i])
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
nz = np.where((ddd.ntri > 0) | (true_ntri > 0))
print('non-zero at:')
print(nz)
print('d1 = ',ddd.meand1[nz])
print('d2 = ',ddd.meand2[nz])
print('d3 = ',ddd.meand3[nz])
print('rnom = ',ddd.rnom[nz])
print('u = ',ddd.u[nz])
print('v = ',ddd.v[nz])
print('ddd.ntri = ',ddd.ntri[nz])
print('true_ntri = ',true_ntri[nz])
print('diff = ',ddd.ntri[nz] - true_ntri[nz])
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# Check that running via the corr3 script works correctly.
file_name = os.path.join('data','nnn_direct_data.dat')
with open(file_name, 'w') as fid:
for i in range(ngal):
fid.write(('%.20f %.20f\n')%(x[i],y[i]))
L = 10*s
nrand = ngal
rx = (rng.random_sample(nrand)-0.5) * L
ry = (rng.random_sample(nrand)-0.5) * L
rcat = treecorr.Catalog(x=rx, y=ry)
rand_file_name = os.path.join('data','nnn_direct_rand.dat')
with open(rand_file_name, 'w') as fid:
for i in range(nrand):
fid.write(('%.20f %.20f\n')%(rx[i],ry[i]))
rrr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=0)
rrr.process(rcat)
zeta, varzeta = ddd.calculateZeta(rrr)
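    # With only the random-random counts supplied, calculateZeta uses the simple
    # (DDD - RRR) / RRR estimator.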
# First do this via the corr3 function.
config = treecorr.config.read_config('configs/nnn_direct.yaml')
logger = treecorr.config.setup_logger(0)
treecorr.corr3(config, logger)
corr3_output = np.genfromtxt(os.path.join('output','nnn_direct.out'), names=True,
skip_header=1)
print('corr3_output = ',corr3_output)
print('corr3_output.dtype = ',corr3_output.dtype)
print('rnom = ',ddd.rnom.flatten())
print(' ',corr3_output['r_nom'])
np.testing.assert_allclose(corr3_output['r_nom'], ddd.rnom.flatten(), rtol=1.e-3)
print('unom = ',ddd.u.flatten())
print(' ',corr3_output['u_nom'])
np.testing.assert_allclose(corr3_output['u_nom'], ddd.u.flatten(), rtol=1.e-3)
print('vnom = ',ddd.v.flatten())
print(' ',corr3_output['v_nom'])
np.testing.assert_allclose(corr3_output['v_nom'], ddd.v.flatten(), rtol=1.e-3)
print('DDD = ',ddd.ntri.flatten())
print(' ',corr3_output['DDD'])
np.testing.assert_allclose(corr3_output['DDD'], ddd.ntri.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['ntri'], ddd.ntri.flatten(), rtol=1.e-3)
print('RRR = ',rrr.ntri.flatten())
print(' ',corr3_output['RRR'])
np.testing.assert_allclose(corr3_output['RRR'], rrr.ntri.flatten(), rtol=1.e-3)
print('zeta = ',zeta.flatten())
print('from corr3 output = ',corr3_output['zeta'])
print('diff = ',corr3_output['zeta']-zeta.flatten())
diff_index = np.where(np.abs(corr3_output['zeta']-zeta.flatten()) > 1.e-5)[0]
print('different at ',diff_index)
print('zeta[diffs] = ',zeta.flatten()[diff_index])
print('corr3.zeta[diffs] = ',corr3_output['zeta'][diff_index])
print('diff[diffs] = ',zeta.flatten()[diff_index] - corr3_output['zeta'][diff_index])
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)
# Now calling out to the external corr3 executable.
# This is the only time we test the corr3 executable. All other tests use corr3 function.
import subprocess
corr3_exe = get_script_name('corr3')
p = subprocess.Popen( [corr3_exe,"configs/nnn_direct.yaml","verbose=0"] )
p.communicate()
corr3_output = np.genfromtxt(os.path.join('output','nnn_direct.out'), names=True,
skip_header=1)
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)
    # Repeat with bin_slop = 0, since the code flow is different from brute=True
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# And again with no top-level recursion
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# This should be equivalent to processing a cross correlation with each catalog being
# the same thing.
ddd.clear()
ddd.process(cat,cat,cat, num_threads=2)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# Invalid to omit file_name
config['verbose'] = 0
del config['file_name']
with assert_raises(TypeError):
treecorr.corr3(config)
config['file_name'] = 'data/nnn_direct_data.dat'
# OK to not have rand_file_name
# Also, check the automatic setting of output_dots=True when verbose=2.
# It's not too annoying if we also set max_top = 0.
del config['rand_file_name']
config['verbose'] = 2
config['max_top'] = 0
treecorr.corr3(config)
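    # Without a rand_file_name, corr3 can only write the raw triangle counts,
    # so the output should have an ntri column but no zeta column.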
data = np.genfromtxt(config['nnn_file_name'], names=True, skip_header=1)
np.testing.assert_array_equal(data['ntri'], true_ntri.flatten())
assert 'zeta' not in data.dtype.names
    # Check a few basic operations with an NNNCorrelation object.
do_pickle(ddd)
ddd2 = ddd.copy()
ddd2 += ddd
np.testing.assert_allclose(ddd2.ntri, 2*ddd.ntri)
np.testing.assert_allclose(ddd2.weight, 2*ddd.weight)
np.testing.assert_allclose(ddd2.meand1, 2*ddd.meand1)
np.testing.assert_allclose(ddd2.meand2, 2*ddd.meand2)
np.testing.assert_allclose(ddd2.meand3, 2*ddd.meand3)
np.testing.assert_allclose(ddd2.meanlogd1, 2*ddd.meanlogd1)
np.testing.assert_allclose(ddd2.meanlogd2, 2*ddd.meanlogd2)
np.testing.assert_allclose(ddd2.meanlogd3, 2*ddd.meanlogd3)
np.testing.assert_allclose(ddd2.meanu, 2*ddd.meanu)
np.testing.assert_allclose(ddd2.meanv, 2*ddd.meanv)
ddd2.clear()
ddd2 += ddd
np.testing.assert_allclose(ddd2.ntri, ddd.ntri)
np.testing.assert_allclose(ddd2.weight, ddd.weight)
np.testing.assert_allclose(ddd2.meand1, ddd.meand1)
np.testing.assert_allclose(ddd2.meand2, ddd.meand2)
np.testing.assert_allclose(ddd2.meand3, ddd.meand3)
np.testing.assert_allclose(ddd2.meanlogd1, ddd.meanlogd1)
np.testing.assert_allclose(ddd2.meanlogd2, ddd.meanlogd2)
np.testing.assert_allclose(ddd2.meanlogd3, ddd.meanlogd3)
np.testing.assert_allclose(ddd2.meanu, ddd.meanu)
np.testing.assert_allclose(ddd2.meanv, ddd.meanv)
ascii_name = 'output/nnn_ascii.txt'
ddd.write(ascii_name, precision=16)
ddd3 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
ddd3.read(ascii_name)
np.testing.assert_allclose(ddd3.ntri, ddd.ntri)
np.testing.assert_allclose(ddd3.weight, ddd.weight)
np.testing.assert_allclose(ddd3.meand1, ddd.meand1)
np.testing.assert_allclose(ddd3.meand2, ddd.meand2)
np.testing.assert_allclose(ddd3.meand3, ddd.meand3)
np.testing.assert_allclose(ddd3.meanlogd1, ddd.meanlogd1)
np.testing.assert_allclose(ddd3.meanlogd2, ddd.meanlogd2)
np.testing.assert_allclose(ddd3.meanlogd3, ddd.meanlogd3)
np.testing.assert_allclose(ddd3.meanu, ddd.meanu)
np.testing.assert_allclose(ddd3.meanv, ddd.meanv)
with assert_raises(TypeError):
ddd2 += config
ddd4 = treecorr.NNNCorrelation(min_sep=min_sep/2, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd4
ddd5 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep*2, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd5
ddd6 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins*2,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd6
ddd7 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u-0.1, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd7
ddd8 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u+0.1, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd8
ddd9 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins*2,
min_v=min_v, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd9
ddd10 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v-0.1, max_v=max_v, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd10
ddd11 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v+0.1, nvbins=nvbins)
with assert_raises(ValueError):
ddd2 += ddd11
ddd12 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins*2)
with assert_raises(ValueError):
ddd2 += ddd12
# Check that adding results with different coords or metric emits a warning.
cat2 = treecorr.Catalog(x=x, y=y, z=x)
with CaptureLog() as cl:
ddd13 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
logger=cl.logger)
ddd13.process_auto(cat2)
ddd13 += ddd2
print(cl.output)
assert "Detected a change in catalog coordinate systems" in cl.output
with CaptureLog() as cl:
ddd14 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
logger=cl.logger)
ddd14.process_auto(cat2, metric='Arc')
ddd14 += ddd2
assert "Detected a change in metric" in cl.output
# Check fits I/O
try:
import fitsio
except ImportError:
print('Skipping FITS tests, since fitsio is not installed')
return
fits_name = 'output/nnn_fits.fits'
ddd.write(fits_name)
ddd15 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins)
ddd15.read(fits_name)
np.testing.assert_allclose(ddd15.ntri, ddd.ntri)
np.testing.assert_allclose(ddd15.weight, ddd.weight)
np.testing.assert_allclose(ddd15.meand1, ddd.meand1)
np.testing.assert_allclose(ddd15.meand2, ddd.meand2)
np.testing.assert_allclose(ddd15.meand3, ddd.meand3)
np.testing.assert_allclose(ddd15.meanlogd1, ddd.meanlogd1)
np.testing.assert_allclose(ddd15.meanlogd2, ddd.meanlogd2)
np.testing.assert_allclose(ddd15.meanlogd3, ddd.meanlogd3)
np.testing.assert_allclose(ddd15.meanu, ddd.meanu)
np.testing.assert_allclose(ddd15.meanv, ddd.meanv)
def test_direct_count_cross():
# If the catalogs are small enough, we can do a direct count of the number of triangles
    # to see if it comes out right.  This should exactly match the treecorr code if brute=True.
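    # The brute-force check below loops over all ordered triples (i,j,k), keeps only
    # triangles with d1 >= d2 >= d3, and bins them by r = d2, u = d3/d2, and
    # v = +-(d1-d2)/d3, where the sign of v records the orientation (ccw vs cw).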
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) )
cat1 = treecorr.Catalog(x=x1, y=y1)
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) )
cat2 = treecorr.Catalog(x=x2, y=y2)
x3 = rng.normal(0,s, (ngal,) )
y3 = rng.normal(0,s, (ngal,) )
cat3 = treecorr.Catalog(x=x3, y=y3)
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat1, cat2, cat3)
#print('ddd.ntri = ',ddd.ntri)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(ngal):
for k in range(ngal):
d3 = np.sqrt((x1[i]-x2[j])**2 + (y1[i]-y2[j])**2)
d2 = np.sqrt((x1[i]-x3[k])**2 + (y1[i]-y3[k])**2)
d1 = np.sqrt((x2[j]-x3[k])**2 + (y2[j]-y3[k])**2)
if d3 == 0.: continue
if d2 == 0.: continue
if d1 == 0.: continue
if d1 < d2 or d2 < d3: continue;
ccw = is_ccw(x1[i],y1[i],x2[j],y2[j],x3[k],y3[k])
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
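                # v is signed: counter-clockwise triangles (v > 0) fill bins
                # [nvbins, 2*nvbins), while clockwise ones (v < 0) fill bins
                # [0, nvbins), measured upward from -max_v.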
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
#print('true_ntri = ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# Repeat with binslop = 0
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1)
ddd.process(cat1, cat2, cat3)
#print('binslop > 0: ddd.ntri = ',ddd.ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# And again with no top-level recursion
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
ddd.process(cat1, cat2, cat3)
#print('max_top = 0: ddd.ntri = ',ddd.ntri)
#print('true_ntri = ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# Check that running via the corr3 script works correctly.
config = treecorr.config.read_config('configs/nnn_direct_cross.yaml')
cat1.write(config['file_name'])
cat2.write(config['file_name2'])
cat3.write(config['file_name3'])
L = 10*s
nrand = ngal
for rname in ['rand_file_name', 'rand_file_name2', 'rand_file_name3']:
rx = (rng.random_sample(nrand)-0.5) * L
ry = (rng.random_sample(nrand)-0.5) * L
rcat = treecorr.Catalog(x=rx, y=ry)
rcat.write(config[rname])
config = treecorr.config.read_config('configs/nnn_direct_cross.yaml')
config['verbose'] = 0
treecorr.corr3(config)
corr3_output = np.genfromtxt(os.path.join('output','nnn_direct_cross.out'), names=True,
skip_header=1)
print('corr3_output = ',corr3_output)
print('corr3_output.dtype = ',corr3_output.dtype)
print('rnom = ',ddd.rnom.flatten())
print(' ',corr3_output['r_nom'])
np.testing.assert_allclose(corr3_output['r_nom'], ddd.rnom.flatten(), rtol=1.e-3)
print('unom = ',ddd.u.flatten())
print(' ',corr3_output['u_nom'])
np.testing.assert_allclose(corr3_output['u_nom'], ddd.u.flatten(), rtol=1.e-3)
print('vnom = ',ddd.v.flatten())
print(' ',corr3_output['v_nom'])
np.testing.assert_allclose(corr3_output['v_nom'], ddd.v.flatten(), rtol=1.e-3)
print('DDD = ',ddd.ntri.flatten())
print(' ',corr3_output['DDD'])
np.testing.assert_allclose(corr3_output['DDD'], ddd.ntri.flatten(), rtol=1.e-3)
np.testing.assert_allclose(corr3_output['ntri'], ddd.ntri.flatten(), rtol=1.e-3)
# Invalid to have rand_file_name2 but not file_name2
del config['file_name2']
with assert_raises(TypeError):
treecorr.corr3(config)
config['file_name2'] = 'data/nnn_direct_cross_data2.dat'
# Invalid to have rand_file_name3 but not file_name3
del config['file_name3']
with assert_raises(TypeError):
treecorr.corr3(config)
config['file_name3'] = 'data/nnn_direct_cross_data3.dat'
# Invalid when doing rands, to be missing one rand_file_name2
del config['rand_file_name']
with assert_raises(TypeError):
treecorr.corr3(config)
config['rand_file_name'] = 'data/nnn_direct_cross_rand1.dat'
del config['rand_file_name2']
with assert_raises(TypeError):
treecorr.corr3(config)
config['rand_file_name2'] = 'data/nnn_direct_cross_rand2.dat'
del config['rand_file_name3']
with assert_raises(TypeError):
treecorr.corr3(config)
config['rand_file_name3'] = 'data/nnn_direct_cross_rand3.dat'
# Currently not implemented to only have cat2 or cat3
with assert_raises(NotImplementedError):
ddd.process(cat1, cat2=cat2)
with assert_raises(NotImplementedError):
ddd.process(cat1, cat3=cat3)
with assert_raises(NotImplementedError):
ddd.process_cross21(cat1, cat2)
del config['rand_file_name3']
del config['file_name3']
print('config = ',config)
with assert_raises(NotImplementedError):
treecorr.corr3(config)
config['file_name3'] = 'data/nnn_direct_cross_data3.dat'
config['rand_file_name3'] = 'data/nnn_direct_cross_rand3.dat'
del config['rand_file_name2']
del config['file_name2']
print('config = ',config)
with assert_raises(NotImplementedError):
treecorr.corr3(config)
config['file_name2'] = 'data/nnn_direct_cross_data2.dat'
config['rand_file_name2'] = 'data/nnn_direct_cross_rand2.dat'
def test_direct_spherical():
# Repeat in spherical coords
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 200 # Put everything at large y, so small angle on sky
z = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal)
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w)
min_sep = 1.
bin_size = 0.2
nrbins = 10
nubins = 5
nvbins = 5
max_sep = min_sep * np.exp(nrbins * bin_size)
ddd = treecorr.NNNCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
sep_units='deg', brute=True)
ddd.process(cat, num_threads=2)
r = np.sqrt(x**2 + y**2 + z**2)
x /= r; y /= r; z /= r
north_pole = coord.CelestialCoord(0*coord.radians, 90*coord.degrees)
true_ntri = np.zeros((nrbins, nubins, 2*nvbins), dtype=int)
true_weight = np.zeros((nrbins, nubins, 2*nvbins), dtype=float)
rad_min_sep = min_sep * coord.degrees / coord.radians
rad_max_sep = max_sep * coord.degrees / coord.radians
c = [coord.CelestialCoord(r*coord.radians, d*coord.radians) for (r,d) in zip(ra, dec)]
for i in range(ngal):
for j in range(i+1,ngal):
for k in range(j+1,ngal):
d12 = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2 + (z[i]-z[j])**2)
d23 = np.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2 + (z[j]-z[k])**2)
d31 = np.sqrt((x[k]-x[i])**2 + (y[k]-y[i])**2 + (z[k]-z[i])**2)
d3, d2, d1 = sorted([d12, d23, d31])
rindex = np.floor(np.log(d2/rad_min_sep) / bin_size).astype(int)
if rindex < 0 or rindex >= nrbins: continue
if [d1, d2, d3] == [d23, d31, d12]: ii,jj,kk = i,j,k
elif [d1, d2, d3] == [d23, d12, d31]: ii,jj,kk = i,k,j
elif [d1, d2, d3] == [d31, d12, d23]: ii,jj,kk = j,k,i
elif [d1, d2, d3] == [d31, d23, d12]: ii,jj,kk = j,i,k
elif [d1, d2, d3] == [d12, d23, d31]: ii,jj,kk = k,i,j
elif [d1, d2, d3] == [d12, d31, d23]: ii,jj,kk = k,j,i
else: assert False
# Now use ii, jj, kk rather than i,j,k, to get the indices
# that correspond to the points in the right order.
u = d3/d2
v = (d1-d2)/d3
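                # The condition below is the triple product
                # ((p_j - p_i) x (p_k - p_i)) . p_i written out by components; its sign
                # gives the winding direction of the triangle on the sphere, and v is
                # flipped negative for one of the two orientations to match the sign
                # convention of the code being tested.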
if ( ((x[jj]-x[ii])*(y[kk]-y[ii]) - (x[kk]-x[ii])*(y[jj]-y[ii])) * z[ii] +
((y[jj]-y[ii])*(z[kk]-z[ii]) - (y[kk]-y[ii])*(z[jj]-z[ii])) * x[ii] +
((z[jj]-z[ii])*(x[kk]-x[ii]) - (z[kk]-z[ii])*(x[jj]-x[ii])) * y[ii] ) > 0:
v = -v
uindex = np.floor(u / bin_size).astype(int)
assert 0 <= uindex < nubins
vindex = np.floor((v+1) / bin_size).astype(int)
assert 0 <= vindex < 2*nvbins
www = w[i] * w[j] * w[k]
true_ntri[rindex,uindex,vindex] += 1
true_weight[rindex,uindex,vindex] += www
np.testing.assert_array_equal(ddd.ntri, true_ntri)
np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8)
try:
import fitsio
except ImportError:
print('Skipping FITS tests, since fitsio is not installed')
return
# Check that running via the corr3 script works correctly.
config = treecorr.config.read_config('configs/nnn_direct_spherical.yaml')
cat.write(config['file_name'])
treecorr.corr3(config)
data = fitsio.read(config['nnn_file_name'])
np.testing.assert_allclose(data['r_nom'], ddd.rnom.flatten())
np.testing.assert_allclose(data['u_nom'], ddd.u.flatten())
np.testing.assert_allclose(data['v_nom'], ddd.v.flatten())
np.testing.assert_allclose(data['ntri'], ddd.ntri.flatten())
np.testing.assert_allclose(data['DDD'], ddd.weight.flatten())
# Repeat with binslop = 0
# And don't do any top-level recursion so we actually test not going to the leaves.
ddd = treecorr.NNNCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
sep_units='deg', bin_slop=0, max_top=0)
ddd.process(cat)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8)
def test_direct_arc():
# Repeat the spherical test with metric='Arc'
ngal = 5
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) ) + 200 # Large angles this time.
z = rng.normal(0,s, (ngal,) )
w = rng.random_sample(ngal)
ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w)
min_sep = 1.
max_sep = 180.
nrbins = 50
nubins = 5
nvbins = 5
bin_size = np.log((max_sep / min_sep)) / nrbins
ubin_size = 0.2
vbin_size = 0.2
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins,
nubins=nubins, ubin_size=ubin_size,
nvbins=nvbins, vbin_size=vbin_size,
sep_units='deg', brute=True)
ddd.process(cat, metric='Arc')
r = np.sqrt(x**2 + y**2 + z**2)
x /= r; y /= r; z /= r
north_pole = coord.CelestialCoord(0*coord.radians, 90*coord.degrees)
true_ntri = np.zeros((nrbins, nubins, 2*nvbins), dtype=int)
true_weight = np.zeros((nrbins, nubins, 2*nvbins), dtype=float)
c = [coord.CelestialCoord(r*coord.radians, d*coord.radians) for (r,d) in zip(ra, dec)]
for i in range(ngal):
for j in range(i+1,ngal):
for k in range(j+1,ngal):
d12 = c[i].distanceTo(c[j]) / coord.degrees
d23 = c[j].distanceTo(c[k]) / coord.degrees
d31 = c[k].distanceTo(c[i]) / coord.degrees
d3, d2, d1 = sorted([d12, d23, d31])
rindex = np.floor(np.log(d2/min_sep) / bin_size).astype(int)
if rindex < 0 or rindex >= nrbins: continue
if [d1, d2, d3] == [d23, d31, d12]: ii,jj,kk = i,j,k
elif [d1, d2, d3] == [d23, d12, d31]: ii,jj,kk = i,k,j
elif [d1, d2, d3] == [d31, d12, d23]: ii,jj,kk = j,k,i
elif [d1, d2, d3] == [d31, d23, d12]: ii,jj,kk = j,i,k
elif [d1, d2, d3] == [d12, d23, d31]: ii,jj,kk = k,i,j
elif [d1, d2, d3] == [d12, d31, d23]: ii,jj,kk = k,j,i
else: assert False
# Now use ii, jj, kk rather than i,j,k, to get the indices
# that correspond to the points in the right order.
u = d3/d2
v = (d1-d2)/d3
if ( ((x[jj]-x[ii])*(y[kk]-y[ii]) - (x[kk]-x[ii])*(y[jj]-y[ii])) * z[ii] +
((y[jj]-y[ii])*(z[kk]-z[ii]) - (y[kk]-y[ii])*(z[jj]-z[ii])) * x[ii] +
((z[jj]-z[ii])*(x[kk]-x[ii]) - (z[kk]-z[ii])*(x[jj]-x[ii])) * y[ii] ) > 0:
v = -v
uindex = np.floor(u / ubin_size).astype(int)
assert 0 <= uindex < nubins
vindex = np.floor((v+1) / vbin_size).astype(int)
assert 0 <= vindex < 2*nvbins
www = w[i] * w[j] * w[k]
true_ntri[rindex,uindex,vindex] += 1
true_weight[rindex,uindex,vindex] += www
np.testing.assert_array_equal(ddd.ntri, true_ntri)
np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8)
try:
import fitsio
except ImportError:
print('Skipping FITS tests, since fitsio is not installed')
return
# Check that running via the corr3 script works correctly.
config = treecorr.config.read_config('configs/nnn_direct_arc.yaml')
cat.write(config['file_name'])
treecorr.corr3(config)
data = fitsio.read(config['nnn_file_name'])
np.testing.assert_allclose(data['r_nom'], ddd.rnom.flatten())
np.testing.assert_allclose(data['u_nom'], ddd.u.flatten())
np.testing.assert_allclose(data['v_nom'], ddd.v.flatten())
np.testing.assert_allclose(data['ntri'], ddd.ntri.flatten())
np.testing.assert_allclose(data['DDD'], ddd.weight.flatten())
# Repeat with binslop = 0
# And don't do any top-level recursion so we actually test not going to the leaves.
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nrbins,
nubins=nubins, ubin_size=ubin_size,
nvbins=nvbins, vbin_size=vbin_size,
sep_units='deg', bin_slop=0, max_top=0)
ddd.process(cat)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
np.testing.assert_allclose(ddd.weight, true_weight, rtol=1.e-5, atol=1.e-8)
def test_direct_partial():
# Test the two ways to only use parts of a catalog:
ngal = 100
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(0,s, (ngal,) )
y1 = rng.normal(0,s, (ngal,) )
cat1a = treecorr.Catalog(x=x1, y=y1, first_row=28, last_row=84)
x2 = rng.normal(0,s, (ngal,) )
y2 = rng.normal(0,s, (ngal,) )
cat2a = treecorr.Catalog(x=x2, y=y2, first_row=48, last_row=99)
x3 = rng.normal(0,s, (ngal,) )
y3 = rng.normal(0,s, (ngal,) )
cat3a = treecorr.Catalog(x=x3, y=y3, first_row=22, last_row=67)
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddda = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True)
ddda.process(cat1a, cat2a, cat3a)
#print('ddda.ntri = ',ddda.ntri)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(27,84):
for j in range(47,99):
for k in range(21,67):
d3 = np.sqrt((x1[i]-x2[j])**2 + (y1[i]-y2[j])**2)
d2 = np.sqrt((x1[i]-x3[k])**2 + (y1[i]-y3[k])**2)
d1 = np.sqrt((x2[j]-x3[k])**2 + (y2[j]-y3[k])**2)
if d3 == 0.: continue
if d2 == 0.: continue
if d1 == 0.: continue
if d1 < d2 or d2 < d3: continue;
ccw = is_ccw(x1[i],y1[i],x2[j],y2[j],x3[k],y3[k])
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
print('true_ntri = ',true_ntri)
print('diff = ',ddda.ntri - true_ntri)
np.testing.assert_array_equal(ddda.ntri, true_ntri)
# Now check that we get the same thing with all the points, but with w=0 for the ones
# we don't want.
w1 = np.zeros(ngal)
w1[27:84] = 1.
w2 = np.zeros(ngal)
w2[47:99] = 1.
w3 = np.zeros(ngal)
w3[21:67] = 1.
cat1b = treecorr.Catalog(x=x1, y=y1, w=w1)
cat2b = treecorr.Catalog(x=x2, y=y2, w=w2)
cat3b = treecorr.Catalog(x=x3, y=y3, w=w3)
dddb = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True)
dddb.process(cat1b, cat2b, cat3b)
#print('dddb.ntri = ',dddb.ntri)
#print('diff = ',dddb.ntri - true_ntri)
np.testing.assert_array_equal(dddb.ntri, true_ntri)
def is_ccw_3d(x1,y1,z1, x2,y2,z2, x3,y3,z3):
# Calculate the cross product of 1->2 with 1->3
x2 -= x1
x3 -= x1
y2 -= y1
y3 -= y1
z2 -= z1
z3 -= z1
# The cross product:
x = y2*z3-y3*z2
y = z2*x3-z3*x2
z = x2*y3-x3*y2
# ccw if the cross product is in the opposite direction of (x1,y1,z1) from (0,0,0)
return x*x1 + y*y1 + z*z1 < 0.
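# Note: "ccw" here means counter-clockwise as seen from the origin looking outward
# (the cross product points back toward the origin).  As an illustrative check,
#   is_ccw_3d(0,0,1, 1,0,1, 0,1,1)
# returns False: that triangle is counter-clockwise viewed from +z (i.e. from
# outside), hence clockwise under this convention.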
def test_direct_3d_auto():
# This is the same as the above test, but using the 3d correlations
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x = rng.normal(312, s, (ngal,) )
y = rng.normal(728, s, (ngal,) )
z = rng.normal(-932, s, (ngal,) )
r = np.sqrt( x*x + y*y + z*z )
dec = np.arcsin(z/r)
ra = np.arctan2(y,x)
cat = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad')
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(i+1,ngal):
for k in range(j+1,ngal):
dij = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2 + (z[i]-z[j])**2)
dik = np.sqrt((x[i]-x[k])**2 + (y[i]-y[k])**2 + (z[i]-z[k])**2)
djk = np.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2 + (z[j]-z[k])**2)
if dij == 0.: continue
if dik == 0.: continue
if djk == 0.: continue
ccw = True
if dij < dik:
if dik < djk:
d3 = dij; d2 = dik; d1 = djk;
ccw = is_ccw_3d(x[i],y[i],z[i],x[j],y[j],z[j],x[k],y[k],z[k])
elif dij < djk:
d3 = dij; d2 = djk; d1 = dik;
ccw = is_ccw_3d(x[j],y[j],z[j],x[i],y[i],z[i],x[k],y[k],z[k])
else:
d3 = djk; d2 = dij; d1 = dik;
ccw = is_ccw_3d(x[j],y[j],z[j],x[k],y[k],z[k],x[i],y[i],z[i])
else:
if dij < djk:
d3 = dik; d2 = dij; d1 = djk;
ccw = is_ccw_3d(x[i],y[i],z[i],x[k],y[k],z[k],x[j],y[j],z[j])
elif dik < djk:
d3 = dik; d2 = djk; d1 = dij;
ccw = is_ccw_3d(x[k],y[k],z[k],x[i],y[i],z[i],x[j],y[j],z[j])
else:
d3 = djk; d2 = dik; d1 = dij;
ccw = is_ccw_3d(x[k],y[k],z[k],x[j],y[j],z[j],x[i],y[i],z[i])
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# Repeat with binslop = 0
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# And again with no top-level recursion
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# And compare to the cross correlation
ddd.clear()
ddd.process(cat,cat,cat)
#print('ddd.ntri = ',ddd.ntri)
#print('true_ntri => ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# Also compare to using x,y,z rather than ra,dec,r
cat = treecorr.Catalog(x=x, y=y, z=z)
ddd.process(cat)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
def test_direct_3d_cross():
# This is the same as the above test, but using the 3d correlations
ngal = 50
s = 10.
rng = np.random.RandomState(8675309)
x1 = rng.normal(312, s, (ngal,) )
y1 = rng.normal(728, s, (ngal,) )
z1 = rng.normal(-932, s, (ngal,) )
r1 = np.sqrt( x1*x1 + y1*y1 + z1*z1 )
dec1 = np.arcsin(z1/r1)
ra1 = np.arctan2(y1,x1)
cat1 = treecorr.Catalog(ra=ra1, dec=dec1, r=r1, ra_units='rad', dec_units='rad')
x2 = rng.normal(312, s, (ngal,) )
y2 = rng.normal(728, s, (ngal,) )
z2 = rng.normal(-932, s, (ngal,) )
r2 = np.sqrt( x2*x2 + y2*y2 + z2*z2 )
dec2 = np.arcsin(z2/r2)
ra2 = np.arctan2(y2,x2)
cat2 = treecorr.Catalog(ra=ra2, dec=dec2, r=r2, ra_units='rad', dec_units='rad')
x3 = rng.normal(312, s, (ngal,) )
y3 = rng.normal(728, s, (ngal,) )
z3 = rng.normal(-932, s, (ngal,) )
r3 = np.sqrt( x3*x3 + y3*y3 + z3*z3 )
dec3 = np.arcsin(z3/r3)
ra3 = np.arctan2(y3,x3)
cat3 = treecorr.Catalog(ra=ra3, dec=dec3, r=r3, ra_units='rad', dec_units='rad')
min_sep = 1.
max_sep = 50.
nbins = 50
min_u = 0.13
max_u = 0.89
nubins = 10
min_v = 0.13
max_v = 0.59
nvbins = 10
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
brute=True, verbose=1)
ddd.process(cat1, cat2, cat3)
#print('ddd.ntri = ',ddd.ntri)
log_min_sep = np.log(min_sep)
log_max_sep = np.log(max_sep)
true_ntri = np.zeros( (nbins, nubins, 2*nvbins) )
bin_size = (log_max_sep - log_min_sep) / nbins
ubin_size = (max_u-min_u) / nubins
vbin_size = (max_v-min_v) / nvbins
for i in range(ngal):
for j in range(ngal):
for k in range(ngal):
d1sq = (x2[j]-x3[k])**2 + (y2[j]-y3[k])**2 + (z2[j]-z3[k])**2
d2sq = (x1[i]-x3[k])**2 + (y1[i]-y3[k])**2 + (z1[i]-z3[k])**2
d3sq = (x1[i]-x2[j])**2 + (y1[i]-y2[j])**2 + (z1[i]-z2[j])**2
d1 = np.sqrt(d1sq)
d2 = np.sqrt(d2sq)
d3 = np.sqrt(d3sq)
if d3 == 0.: continue
if d2 == 0.: continue
if d1 == 0.: continue
if d1 < d2 or d2 < d3: continue;
ccw = is_ccw_3d(x1[i],y1[i],z1[i],x2[j],y2[j],z2[j],x3[k],y3[k],z3[k])
r = d2
u = d3/d2
v = (d1-d2)/d3
if r < min_sep or r >= max_sep: continue
if u < min_u or u >= max_u: continue
if v < min_v or v >= max_v: continue
if not ccw:
v = -v
kr = int(np.floor( (np.log(r)-log_min_sep) / bin_size ))
ku = int(np.floor( (u-min_u) / ubin_size ))
if v > 0:
kv = int(np.floor( (v-min_v) / vbin_size )) + nvbins
else:
kv = int(np.floor( (v-(-max_v)) / vbin_size ))
assert 0 <= kr < nbins
assert 0 <= ku < nubins
assert 0 <= kv < 2*nvbins
true_ntri[kr,ku,kv] += 1
#print('true_ntri = ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# Repeat with binslop = 0
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1)
ddd.process(cat1, cat2, cat3)
#print('binslop > 0: ddd.ntri = ',ddd.ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# And again with no top-level recursion
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, nubins=nubins,
min_v=min_v, max_v=max_v, nvbins=nvbins,
bin_slop=0, verbose=1, max_top=0)
ddd.process(cat1, cat2, cat3)
#print('max_top = 0: ddd.ntri = ',ddd.ntri)
#print('true_ntri = ',true_ntri)
#print('diff = ',ddd.ntri - true_ntri)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
# Also compare to using x,y,z rather than ra,dec,r
cat1 = treecorr.Catalog(x=x1, y=y1, z=z1)
cat2 = treecorr.Catalog(x=x2, y=y2, z=z2)
cat3 = treecorr.Catalog(x=x3, y=y3, z=z3)
ddd.process(cat1, cat2, cat3)
np.testing.assert_array_equal(ddd.ntri, true_ntri)
def test_nnn():
# Use a simple probability distribution for the galaxies:
#
# n(r) = (2pi s^2)^-1 exp(-r^2/2s^2)
#
# The Fourier transform is: n~(k) = exp(-s^2 k^2/2)
# B(k1,k2) = <n~(k1) n~(k2) n~(-k1-k2)>
# = exp(-s^2 (|k1|^2 + |k2|^2 - k1.k2))
# = exp(-s^2 (|k1|^2 + |k2|^2 + |k3|^2)/2)
#
# zeta(r1,r2) = (1/2pi)^4 int(d^2k1 int(d^2k2 exp(ik1.x1) exp(ik2.x2) B(k1,k2) ))
# = exp(-(x1^2 + y1^2 + x2^2 + y2^2 - x1x2 - y1y2)/3s^2) / 12 pi^2 s^4
# = exp(-(d1^2 + d2^2 + d3^2)/6s^2) / 12 pi^2 s^4
#
# This is also derivable as:
# zeta(r1,r2) = int(dx int(dy n(x,y) n(x+x1,y+y1) n(x+x2,y+y2)))
# which is also analytically integrable and gives the same answer.
#
# However, we need to correct for the uniform density background, so the real result
# is this minus 1/L^4 divided by 1/L^4. So:
#
# zeta(r1,r2) = 1/(12 pi^2) (L/s)^4 exp(-(d1^2+d2^2+d3^2)/6s^2) - 1
# Doing the full correlation function takes a long time. Here, we just test a small range
# of separations and a moderate range for u, v, which gives us a variety of triangle lengths.
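    # The prediction above is evaluated below at the measured mean side lengths as
    #   true_zeta = (1./(12.*np.pi**2)) * (L/s)**4 * np.exp(-(d1**2+d2**2+d3**2)/(6.*s**2)) - 1.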
s = 10.
if __name__ == "__main__":
ngal = 20000
nrand = 2 * ngal
L = 50. * s # Not infinity, so this introduces some error. Our integrals were to infinity.
tol_factor = 1
else:
ngal = 2000
nrand = ngal
L = 20. * s
tol_factor = 5
rng = np.random.RandomState(8675309)
x = rng.normal(0,s, (ngal,) )
y = rng.normal(0,s, (ngal,) )
min_sep = 11.
max_sep = 13.
nbins = 2
min_u = 0.6
max_u = 0.9
nubins = 3
min_v = 0.5
max_v = 0.9
nvbins = 5
cat = treecorr.Catalog(x=x, y=y, x_units='arcmin', y_units='arcmin')
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
nubins=nubins, nvbins=nvbins,
sep_units='arcmin', verbose=1)
ddd.process(cat)
#print('ddd.ntri = ',ddd.ntri)
# log(<d>) != <logd>, but it should be close:
print('meanlogd1 - log(meand1) = ',ddd.meanlogd1 - np.log(ddd.meand1))
print('meanlogd2 - log(meand2) = ',ddd.meanlogd2 - np.log(ddd.meand2))
print('meanlogd3 - log(meand3) = ',ddd.meanlogd3 - np.log(ddd.meand3))
print('meand3 / meand2 = ',ddd.meand3 / ddd.meand2)
print('meanu = ',ddd.meanu)
print('max diff = ',np.max(np.abs(ddd.meand3/ddd.meand2 -ddd.meanu)))
print('max rel diff = ',np.max(np.abs((ddd.meand3/ddd.meand2 -ddd.meanu)/ddd.meanu)))
print('(meand1 - meand2)/meand3 = ',(ddd.meand1-ddd.meand2) / ddd.meand3)
print('meanv = ',ddd.meanv)
print('max diff = ',np.max(np.abs((ddd.meand1-ddd.meand2)/ddd.meand3 -np.abs(ddd.meanv))))
print('max rel diff = ',np.max(np.abs(((ddd.meand1-ddd.meand2)/ddd.meand3-np.abs(ddd.meanv))/ddd.meanv)))
np.testing.assert_allclose(ddd.meanlogd1, np.log(ddd.meand1), rtol=1.e-3)
np.testing.assert_allclose(ddd.meanlogd2, np.log(ddd.meand2), rtol=1.e-3)
np.testing.assert_allclose(ddd.meanlogd3, np.log(ddd.meand3), rtol=1.e-3)
np.testing.assert_allclose(ddd.meand3/ddd.meand2, ddd.meanu, rtol=1.e-5 * tol_factor)
np.testing.assert_allclose((ddd.meand1-ddd.meand2)/ddd.meand3, np.abs(ddd.meanv),
rtol=1.e-5 * tol_factor, atol=1.e-5 * tol_factor)
np.testing.assert_allclose(ddd.meanlogd3-ddd.meanlogd2, np.log(ddd.meanu),
atol=1.e-3 * tol_factor)
np.testing.assert_allclose(np.log(ddd.meand1-ddd.meand2)-ddd.meanlogd3,
np.log(np.abs(ddd.meanv)), atol=2.e-3 * tol_factor)
rx = (rng.random_sample(nrand)-0.5) * L
ry = (rng.random_sample(nrand)-0.5) * L
rand = treecorr.Catalog(x=rx,y=ry, x_units='arcmin', y_units='arcmin')
rrr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
nubins=nubins, nvbins=nvbins,
sep_units='arcmin', verbose=1)
rrr.process(rand)
#print('rrr.ntri = ',rrr.ntri)
d1 = ddd.meand1
d2 = ddd.meand2
d3 = ddd.meand3
#print('rnom = ',np.exp(ddd.logr))
#print('unom = ',ddd.u)
#print('vnom = ',ddd.v)
#print('d1 = ',d1)
#print('d2 = ',d2)
#print('d3 = ',d3)
true_zeta = (1./(12.*np.pi**2)) * (L/s)**4 * np.exp(-(d1**2+d2**2+d3**2)/(6.*s**2)) - 1.
zeta, varzeta = ddd.calculateZeta(rrr)
print('zeta = ',zeta)
print('true_zeta = ',true_zeta)
print('ratio = ',zeta / true_zeta)
print('diff = ',zeta - true_zeta)
print('max rel diff = ',np.max(np.abs((zeta - true_zeta)/true_zeta)))
np.testing.assert_allclose(zeta, true_zeta, rtol=0.1*tol_factor)
np.testing.assert_allclose(np.log(np.abs(zeta)), np.log(np.abs(true_zeta)),
atol=0.1*tol_factor)
# Check that we get the same result using the corr3 function
cat.write(os.path.join('data','nnn_data.dat'))
rand.write(os.path.join('data','nnn_rand.dat'))
config = treecorr.config.read_config('configs/nnn.yaml')
config['verbose'] = 0
treecorr.corr3(config)
corr3_output = np.genfromtxt(os.path.join('output','nnn.out'), names=True, skip_header=1)
print('zeta = ',zeta)
print('from corr3 output = ',corr3_output['zeta'])
print('ratio = ',corr3_output['zeta']/zeta.flatten())
print('diff = ',corr3_output['zeta']-zeta.flatten())
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)
try:
import fitsio
except ImportError:
print('Skipping FITS tests, since fitsio is not installed')
return
# Check the fits write option
out_file_name1 = os.path.join('output','nnn_out1.fits')
ddd.write(out_file_name1)
data = fitsio.read(out_file_name1)
np.testing.assert_almost_equal(data['r_nom'], np.exp(ddd.logr).flatten())
np.testing.assert_almost_equal(data['u_nom'], ddd.u.flatten())
np.testing.assert_almost_equal(data['v_nom'], ddd.v.flatten())
np.testing.assert_almost_equal(data['meand1'], ddd.meand1.flatten())
np.testing.assert_almost_equal(data['meanlogd1'], ddd.meanlogd1.flatten())
np.testing.assert_almost_equal(data['meand2'], ddd.meand2.flatten())
np.testing.assert_almost_equal(data['meanlogd2'], ddd.meanlogd2.flatten())
np.testing.assert_almost_equal(data['meand3'], ddd.meand3.flatten())
np.testing.assert_almost_equal(data['meanlogd3'], ddd.meanlogd3.flatten())
np.testing.assert_almost_equal(data['meanu'], ddd.meanu.flatten())
np.testing.assert_almost_equal(data['meanv'], ddd.meanv.flatten())
np.testing.assert_almost_equal(data['ntri'], ddd.ntri.flatten())
header = fitsio.read_header(out_file_name1, 1)
np.testing.assert_almost_equal(header['tot']/ddd.tot, 1.)
out_file_name2 = os.path.join('output','nnn_out2.fits')
ddd.write(out_file_name2, rrr)
data = fitsio.read(out_file_name2)
np.testing.assert_almost_equal(data['r_nom'], np.exp(ddd.logr).flatten())
np.testing.assert_almost_equal(data['u_nom'], ddd.u.flatten())
np.testing.assert_almost_equal(data['v_nom'], ddd.v.flatten())
np.testing.assert_almost_equal(data['meand1'], ddd.meand1.flatten())
np.testing.assert_almost_equal(data['meanlogd1'], ddd.meanlogd1.flatten())
np.testing.assert_almost_equal(data['meand2'], ddd.meand2.flatten())
np.testing.assert_almost_equal(data['meanlogd2'], ddd.meanlogd2.flatten())
np.testing.assert_almost_equal(data['meand3'], ddd.meand3.flatten())
np.testing.assert_almost_equal(data['meanlogd3'], ddd.meanlogd3.flatten())
np.testing.assert_almost_equal(data['meanu'], ddd.meanu.flatten())
np.testing.assert_almost_equal(data['meanv'], ddd.meanv.flatten())
np.testing.assert_almost_equal(data['zeta'], zeta.flatten())
np.testing.assert_almost_equal(data['sigma_zeta'], np.sqrt(varzeta).flatten())
np.testing.assert_almost_equal(data['DDD'], ddd.ntri.flatten())
np.testing.assert_almost_equal(data['RRR'], rrr.ntri.flatten() * (ddd.tot / rrr.tot))
header = fitsio.read_header(out_file_name2, 1)
np.testing.assert_almost_equal(header['tot']/ddd.tot, 1.)
# Check the read function
# Note: These don't need the flatten. The read function should reshape them to the right shape.
ddd2 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
nubins=nubins, nvbins=nvbins,
sep_units='arcmin', verbose=1)
ddd2.read(out_file_name1)
np.testing.assert_almost_equal(ddd2.logr, ddd.logr)
np.testing.assert_almost_equal(ddd2.u, ddd.u)
np.testing.assert_almost_equal(ddd2.v, ddd.v)
np.testing.assert_almost_equal(ddd2.meand1, ddd.meand1)
np.testing.assert_almost_equal(ddd2.meanlogd1, ddd.meanlogd1)
np.testing.assert_almost_equal(ddd2.meand2, ddd.meand2)
np.testing.assert_almost_equal(ddd2.meanlogd2, ddd.meanlogd2)
np.testing.assert_almost_equal(ddd2.meand3, ddd.meand3)
np.testing.assert_almost_equal(ddd2.meanlogd3, ddd.meanlogd3)
np.testing.assert_almost_equal(ddd2.meanu, ddd.meanu)
np.testing.assert_almost_equal(ddd2.meanv, ddd.meanv)
np.testing.assert_almost_equal(ddd2.ntri, ddd.ntri)
np.testing.assert_almost_equal(ddd2.tot/ddd.tot, 1.)
assert ddd2.coords == ddd.coords
assert ddd2.metric == ddd.metric
assert ddd2.sep_units == ddd.sep_units
assert ddd2.bin_type == ddd.bin_type
ddd2.read(out_file_name2)
np.testing.assert_almost_equal(ddd2.logr, ddd.logr)
np.testing.assert_almost_equal(ddd2.u, ddd.u)
np.testing.assert_almost_equal(ddd2.v, ddd.v)
np.testing.assert_almost_equal(ddd2.meand1, ddd.meand1)
np.testing.assert_almost_equal(ddd2.meanlogd1, ddd.meanlogd1)
np.testing.assert_almost_equal(ddd2.meand2, ddd.meand2)
np.testing.assert_almost_equal(ddd2.meanlogd2, ddd.meanlogd2)
np.testing.assert_almost_equal(ddd2.meand3, ddd.meand3)
np.testing.assert_almost_equal(ddd2.meanlogd3, ddd.meanlogd3)
np.testing.assert_almost_equal(ddd2.meanu, ddd.meanu)
np.testing.assert_almost_equal(ddd2.meanv, ddd.meanv)
np.testing.assert_almost_equal(ddd2.ntri, ddd.ntri)
np.testing.assert_almost_equal(ddd2.tot/ddd.tot, 1.)
assert ddd2.coords == ddd.coords
assert ddd2.metric == ddd.metric
assert ddd2.sep_units == ddd.sep_units
assert ddd2.bin_type == ddd.bin_type
# Test compensated zeta
# First just check the mechanics.
# If we don't actually do all the cross terms, then compensated is the same as simple.
zeta2, varzeta2 = ddd.calculateZeta(rrr,drr=rrr,rdr=rrr,rrd=rrr,ddr=rrr,drd=rrr,rdd=rrr)
print('fake compensated zeta = ',zeta2)
np.testing.assert_allclose(zeta2, zeta)
np.testing.assert_allclose(varzeta2, varzeta)
with assert_raises(TypeError):
ddd.calculateZeta(drr=rrr,rdr=rrr,rrd=rrr,ddr=rrr,drd=rrr,rdd=rrr)
with assert_raises(TypeError):
ddd.calculateZeta(rrr,rdr=rrr,rrd=rrr,ddr=rrr,drd=rrr,rdd=rrr)
with assert_raises(TypeError):
ddd.calculateZeta(rrr,drr=rrr,rrd=rrr,ddr=rrr,drd=rrr,rdd=rrr)
with assert_raises(TypeError):
ddd.calculateZeta(rrr,drr=rrr,rdr=rrr,ddr=rrr,drd=rrr,rdd=rrr)
with assert_raises(TypeError):
ddd.calculateZeta(rrr,drr=rrr,rdr=rrr,rrd=rrr,drd=rrr,rdd=rrr)
with assert_raises(TypeError):
ddd.calculateZeta(rrr,drr=rrr,rdr=rrr,rrd=rrr,ddr=rrr,rdd=rrr)
with assert_raises(TypeError):
ddd.calculateZeta(rrr,drr=rrr,rdr=rrr,rrd=rrr,ddr=rrr,drd=rrr)
rrr2 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
nubins=nubins, nvbins=nvbins, sep_units='arcmin')
with assert_raises(ValueError):
ddd.calculateZeta(rrr2,drr=rrr,rdr=rrr,rrd=rrr,ddr=rrr,drd=rrr,rdd=rrr)
with assert_raises(ValueError):
ddd.calculateZeta(rrr,drr=rrr2,rdr=rrr,rrd=rrr,ddr=rrr,drd=rrr,rdd=rrr)
with assert_raises(ValueError):
ddd.calculateZeta(rrr,drr=rrr,rdr=rrr2,rrd=rrr,ddr=rrr,drd=rrr,rdd=rrr)
with assert_raises(ValueError):
ddd.calculateZeta(rrr,drr=rrr,rdr=rrr,rrd=rrr2,ddr=rrr,drd=rrr,rdd=rrr)
with assert_raises(ValueError):
ddd.calculateZeta(rrr,drr=rrr,rdr=rrr,rrd=rrr,ddr=rrr2,drd=rrr,rdd=rrr)
with assert_raises(ValueError):
ddd.calculateZeta(rrr,drr=rrr,rdr=rrr,rrd=rrr,ddr=rrr,drd=rrr2,rdd=rrr)
with assert_raises(ValueError):
ddd.calculateZeta(rrr,drr=rrr,rdr=rrr,rrd=rrr,ddr=rrr,drd=rrr,rdd=rrr2)
out_file_name3 = os.path.join('output','nnn_out3.fits')
with assert_raises(TypeError):
ddd.write(out_file_name3,drr=rrr,rdr=rrr,rrd=rrr,ddr=rrr,drd=rrr,rdd=rrr)
with assert_raises(TypeError):
ddd.write(out_file_name3,rrr=rrr,rdr=rrr,rrd=rrr,ddr=rrr,drd=rrr,rdd=rrr)
with assert_raises(TypeError):
ddd.write(out_file_name3,rrr=rrr,drr=rrr,rrd=rrr,ddr=rrr,drd=rrr,rdd=rrr)
with assert_raises(TypeError):
ddd.write(out_file_name3,rrr=rrr,drr=rrr,rdr=rrr,ddr=rrr,drd=rrr,rdd=rrr)
with assert_raises(TypeError):
ddd.write(out_file_name3,rrr=rrr,drr=rrr,rdr=rrr,rrd=rrr,drd=rrr,rdd=rrr)
with assert_raises(TypeError):
ddd.write(out_file_name3,rrr=rrr,drr=rrr,rdr=rrr,rrd=rrr,ddr=rrr,rdd=rrr)
with assert_raises(TypeError):
ddd.write(out_file_name3,rrr=rrr,drr=rrr,rdr=rrr,rrd=rrr,ddr=rrr,drd=rrr)
# It's too slow to test the real calculation in nosetests runs, so we stop here if not main.
if __name__ != '__main__':
return
# This version computes the three-point function after subtracting off the appropriate
# two-point functions xi(d1) + xi(d2) + xi(d3), where [cf. test_nn() in test_nn.py]
# xi(r) = 1/4pi (L/s)^2 exp(-r^2/4s^2) - 1
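    # All six data/random cross terms are needed for the compensated estimator,
    # which is essentially
    #   zeta = (DDD - DDR - DRD - RDD + DRR + RDR + RRD - RRR) / RRR
    # (each term rescaled to a common total; see calculateZeta for the exact form).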
ddr = ddd.copy()
drd = ddd.copy()
rdd = ddd.copy()
drr = ddd.copy()
rdr = ddd.copy()
rrd = ddd.copy()
ddr.process(cat,cat,rand)
drd.process(cat,rand,cat)
rdd.process(rand,cat,cat)
drr.process(cat,rand,rand)
rdr.process(rand,cat,rand)
rrd.process(rand,rand,cat)
zeta, varzeta = ddd.calculateZeta(rrr,drr,rdr,rrd,ddr,drd,rdd)
print('compensated zeta = ',zeta)
xi1 = (1./(4.*np.pi)) * (L/s)**2 * np.exp(-d1**2/(4.*s**2)) - 1.
xi2 = (1./(4.*np.pi)) * (L/s)**2 * np.exp(-d2**2/(4.*s**2)) - 1.
xi3 = (1./(4.*np.pi)) * (L/s)**2 * np.exp(-d3**2/(4.*s**2)) - 1.
print('xi1 = ',xi1)
print('xi2 = ',xi2)
print('xi3 = ',xi3)
print('true_zeta + xi1 + xi2 + xi3 = ',true_zeta)
true_zeta -= xi1 + xi2 + xi3
print('true_zeta => ',true_zeta)
print('ratio = ',zeta / true_zeta)
print('diff = ',zeta - true_zeta)
print('max rel diff = ',np.max(np.abs((zeta - true_zeta)/true_zeta)))
np.testing.assert_allclose(zeta, true_zeta, rtol=0.1*tol_factor)
np.testing.assert_allclose(np.log(np.abs(zeta)), np.log(np.abs(true_zeta)), atol=0.1*tol_factor)
try:
import fitsio
except ImportError:
print('Skipping FITS tests, since fitsio is not installed')
return
out_file_name3 = os.path.join('output','nnn_out3.fits')
ddd.write(out_file_name3, rrr,drr,rdr,rrd,ddr,drd,rdd)
data = fitsio.read(out_file_name3)
np.testing.assert_almost_equal(data['r_nom'], np.exp(ddd.logr).flatten())
np.testing.assert_almost_equal(data['u_nom'], ddd.u.flatten())
np.testing.assert_almost_equal(data['v_nom'], ddd.v.flatten())
np.testing.assert_almost_equal(data['meand1'], ddd.meand1.flatten())
np.testing.assert_almost_equal(data['meanlogd1'], ddd.meanlogd1.flatten())
np.testing.assert_almost_equal(data['meand2'], ddd.meand2.flatten())
np.testing.assert_almost_equal(data['meanlogd2'], ddd.meanlogd2.flatten())
np.testing.assert_almost_equal(data['meand3'], ddd.meand3.flatten())
np.testing.assert_almost_equal(data['meanlogd3'], ddd.meanlogd3.flatten())
np.testing.assert_almost_equal(data['meanu'], ddd.meanu.flatten())
np.testing.assert_almost_equal(data['meanv'], ddd.meanv.flatten())
np.testing.assert_almost_equal(data['zeta'], zeta.flatten())
np.testing.assert_almost_equal(data['sigma_zeta'], np.sqrt(varzeta).flatten())
np.testing.assert_almost_equal(data['DDD'], ddd.ntri.flatten())
np.testing.assert_almost_equal(data['RRR'], rrr.ntri.flatten() * (ddd.tot / rrr.tot))
np.testing.assert_almost_equal(data['DRR'], drr.ntri.flatten() * (ddd.tot / drr.tot))
np.testing.assert_almost_equal(data['RDR'], rdr.ntri.flatten() * (ddd.tot / rdr.tot))
np.testing.assert_almost_equal(data['RRD'], rrd.ntri.flatten() * (ddd.tot / rrd.tot))
np.testing.assert_almost_equal(data['DDR'], ddr.ntri.flatten() * (ddd.tot / ddr.tot))
np.testing.assert_almost_equal(data['DRD'], drd.ntri.flatten() * (ddd.tot / drd.tot))
np.testing.assert_almost_equal(data['RDD'], rdd.ntri.flatten() * (ddd.tot / rdd.tot))
header = fitsio.read_header(out_file_name3, 1)
np.testing.assert_almost_equal(header['tot']/ddd.tot, 1.)
ddd2.read(out_file_name3)
np.testing.assert_almost_equal(ddd2.logr, ddd.logr)
np.testing.assert_almost_equal(ddd2.u, ddd.u)
np.testing.assert_almost_equal(ddd2.v, ddd.v)
np.testing.assert_almost_equal(ddd2.meand1, ddd.meand1)
np.testing.assert_almost_equal(ddd2.meanlogd1, ddd.meanlogd1)
np.testing.assert_almost_equal(ddd2.meand2, ddd.meand2)
np.testing.assert_almost_equal(ddd2.meanlogd2, ddd.meanlogd2)
np.testing.assert_almost_equal(ddd2.meand3, ddd.meand3)
np.testing.assert_almost_equal(ddd2.meanlogd3, ddd.meanlogd3)
np.testing.assert_almost_equal(ddd2.meanu, ddd.meanu)
np.testing.assert_almost_equal(ddd2.meanv, ddd.meanv)
np.testing.assert_almost_equal(ddd2.ntri, ddd.ntri)
np.testing.assert_almost_equal(ddd2.tot/ddd.tot, 1.)
assert ddd2.coords == ddd.coords
assert ddd2.metric == ddd.metric
assert ddd2.sep_units == ddd.sep_units
assert ddd2.bin_type == ddd.bin_type
config = treecorr.config.read_config('configs/nnn_compensated.yaml')
config['verbose'] = 0
treecorr.corr3(config)
corr3_outfile = os.path.join('output','nnn_compensated.fits')
corr3_output = fitsio.read(corr3_outfile)
print('zeta = ',zeta)
print('from corr3 output = ',corr3_output['zeta'])
print('ratio = ',corr3_output['zeta']/zeta.flatten())
print('diff = ',corr3_output['zeta']-zeta.flatten())
np.testing.assert_almost_equal(corr3_output['r_nom'], np.exp(ddd.logr).flatten())
np.testing.assert_almost_equal(corr3_output['u_nom'], ddd.u.flatten())
np.testing.assert_almost_equal(corr3_output['v_nom'], ddd.v.flatten())
np.testing.assert_almost_equal(corr3_output['meand1'], ddd.meand1.flatten())
np.testing.assert_almost_equal(corr3_output['meanlogd1'], ddd.meanlogd1.flatten())
np.testing.assert_almost_equal(corr3_output['meand2'], ddd.meand2.flatten())
np.testing.assert_almost_equal(corr3_output['meanlogd2'], ddd.meanlogd2.flatten())
np.testing.assert_almost_equal(corr3_output['meand3'], ddd.meand3.flatten())
np.testing.assert_almost_equal(corr3_output['meanlogd3'], ddd.meanlogd3.flatten())
np.testing.assert_almost_equal(corr3_output['meanu'], ddd.meanu.flatten())
np.testing.assert_almost_equal(corr3_output['meanv'], ddd.meanv.flatten())
np.testing.assert_almost_equal(corr3_output['zeta'], zeta.flatten())
np.testing.assert_almost_equal(corr3_output['sigma_zeta'], np.sqrt(varzeta).flatten())
np.testing.assert_almost_equal(corr3_output['DDD'], ddd.ntri.flatten())
np.testing.assert_almost_equal(corr3_output['RRR'], rrr.ntri.flatten() * (ddd.tot / rrr.tot))
np.testing.assert_almost_equal(corr3_output['DRR'], drr.ntri.flatten() * (ddd.tot / drr.tot))
np.testing.assert_almost_equal(corr3_output['RDR'], rdr.ntri.flatten() * (ddd.tot / rdr.tot))
np.testing.assert_almost_equal(corr3_output['RRD'], rrd.ntri.flatten() * (ddd.tot / rrd.tot))
np.testing.assert_almost_equal(corr3_output['DDR'], ddr.ntri.flatten() * (ddd.tot / ddr.tot))
np.testing.assert_almost_equal(corr3_output['DRD'], drd.ntri.flatten() * (ddd.tot / drd.tot))
np.testing.assert_almost_equal(corr3_output['RDD'], rdd.ntri.flatten() * (ddd.tot / rdd.tot))
header = fitsio.read_header(corr3_outfile, 1)
np.testing.assert_almost_equal(header['tot']/ddd.tot, 1.)
def test_3d():
# For this one, build a Gaussian cloud around some random point in 3D space and do the
# correlation function in 3D.
#
# The 3D Fourier transform is: n~(k) = exp(-s^2 k^2/2)
# B(k1,k2) = <n~(k1) n~(k2) n~(-k1-k2)>
# = exp(-s^2 (|k1|^2 + |k2|^2 - k1.k2))
# = exp(-s^2 (|k1|^2 + |k2|^2 + |k3|^2)/2)
# as before, except now k1,k2 are 3d vectors, not 2d.
#
    # zeta(r1,r2) = (1/2pi)^6 int(d^3k1 int(d^3k2 exp(ik1.x1) exp(ik2.x2) B(k1,k2) ))
    #             = exp(-(x1^2+y1^2+z1^2 + x2^2+y2^2+z2^2 - x1x2 - y1y2 - z1z2)/3s^2) / 24 sqrt(3) pi^3 s^6
    #             = exp(-(d1^2 + d2^2 + d3^2)/6s^2) / 24 sqrt(3) pi^3 s^6
#
# And again, this is also derivable as:
# zeta(r1,r2) = int(dx int(dy int(dz n(x,y,z) n(x+x1,y+y1,z+z1) n(x+x2,y+y2,z+z2)))
# which is also analytically integrable and gives the same answer.
#
# However, we need to correct for the uniform density background, so the real result
# is this minus 1/L^6 divided by 1/L^6. So:
#
    # zeta(r1,r2) = 1/(24 sqrt(3) pi^3) (L/s)^6 exp(-(d1^2+d2^2+d3^2)/6s^2) - 1
# Doing the full correlation function takes a long time. Here, we just test a small range
# of separations and a moderate range for u, v, which gives us a variety of triangle lengths.
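    # The prediction above is evaluated below at the measured mean side lengths as
    #   true_zeta = (1./(24.*np.sqrt(3)*np.pi**3)) * (L/s)**6 * np.exp(-(d1**2+d2**2+d3**2)/(6.*s**2)) - 1.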
xcen = 823 # Mpc maybe?
ycen = 342
zcen = -672
s = 10.
if __name__ == "__main__":
ngal = 5000
nrand = 20 * ngal
L = 50. * s
tol_factor = 1
else:
ngal = 1000
nrand = 5 * ngal
L = 20. * s
tol_factor = 5
rng = np.random.RandomState(8675309)
x = rng.normal(xcen, s, (ngal,) )
y = rng.normal(ycen, s, (ngal,) )
z = rng.normal(zcen, s, (ngal,) )
r = np.sqrt(x*x+y*y+z*z)
dec = np.arcsin(z/r) * (coord.radians / coord.degrees)
ra = np.arctan2(y,x) * (coord.radians / coord.degrees)
min_sep = 10.
max_sep = 20.
nbins = 8
min_u = 0.9
max_u = 1.0
nubins = 1
min_v = 0.
max_v = 0.05
nvbins = 1
cat = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='deg', dec_units='deg')
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
nubins=nubins, nvbins=nvbins, verbose=1)
ddd.process(cat)
print('ddd.ntri = ',ddd.ntri.flatten())
rx = (rng.random_sample(nrand)-0.5) * L + xcen
ry = (rng.random_sample(nrand)-0.5) * L + ycen
rz = (rng.random_sample(nrand)-0.5) * L + zcen
rr = np.sqrt(rx*rx+ry*ry+rz*rz)
rdec = np.arcsin(rz/rr) * (coord.radians / coord.degrees)
rra = np.arctan2(ry,rx) * (coord.radians / coord.degrees)
rand = treecorr.Catalog(ra=rra, dec=rdec, r=rr, ra_units='deg', dec_units='deg')
rrr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
nubins=nubins, nvbins=nvbins, verbose=1)
rrr.process(rand)
print('rrr.ntri = ',rrr.ntri.flatten())
d1 = ddd.meand1
d2 = ddd.meand2
d3 = ddd.meand3
print('rnom = ',np.exp(ddd.logr).flatten())
print('unom = ',ddd.u.flatten())
print('vnom = ',ddd.v.flatten())
print('d1 = ',d1.flatten())
print('d2 = ',d2.flatten())
print('d3 = ',d3.flatten())
true_zeta = ((1./(24.*np.sqrt(3)*np.pi**3)) * (L/s)**6 *
np.exp(-(d1**2+d2**2+d3**2)/(6.*s**2)) - 1.)
zeta, varzeta = ddd.calculateZeta(rrr)
print('zeta = ',zeta.flatten())
print('true_zeta = ',true_zeta.flatten())
print('ratio = ',(zeta / true_zeta).flatten())
print('diff = ',(zeta - true_zeta).flatten())
print('max rel diff = ',np.max(np.abs((zeta - true_zeta)/true_zeta)))
np.testing.assert_allclose(zeta, true_zeta, rtol=0.1*tol_factor)
np.testing.assert_allclose(np.log(np.abs(zeta)), np.log(np.abs(true_zeta)),
atol=0.1*tol_factor)
    # Check that we get the same result using the corr3 function:
cat.write(os.path.join('data','nnn_3d_data.dat'))
rand.write(os.path.join('data','nnn_3d_rand.dat'))
config = treecorr.config.read_config('configs/nnn_3d.yaml')
config['verbose'] = 0
treecorr.corr3(config)
corr3_output = np.genfromtxt(os.path.join('output','nnn_3d.out'), names=True, skip_header=1)
print('zeta = ',zeta.flatten())
print('from corr3 output = ',corr3_output['zeta'])
print('ratio = ',corr3_output['zeta']/zeta.flatten())
print('diff = ',corr3_output['zeta']-zeta.flatten())
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)
# Check that we get the same thing when using x,y,z rather than ra,dec,r
cat = treecorr.Catalog(x=x, y=y, z=z)
rand = treecorr.Catalog(x=rx, y=ry, z=rz)
ddd.process(cat)
rrr.process(rand)
zeta, varzeta = ddd.calculateZeta(rrr)
np.testing.assert_allclose(zeta, true_zeta, rtol=0.1*tol_factor)
np.testing.assert_allclose(np.log(np.abs(zeta)), np.log(np.abs(true_zeta)),
atol=0.1*tol_factor)
def test_list():
# Test that we can use a list of files for either data or rand or both.
data_cats = []
rand_cats = []
ncats = 3
ngal = 100
nrand = 2 * ngal
s = 10.
L = 50. * s
rng = np.random.RandomState(8675309)
min_sep = 30.
max_sep = 50.
nbins = 3
min_u = 0
max_u = 0.2
nubins = 2
min_v = 0.5
max_v = 0.9
nvbins = 2
x = rng.normal(0,s, (ngal,ncats) )
y = rng.normal(0,s, (ngal,ncats) )
data_cats = [ treecorr.Catalog(x=x[:,k], y=y[:,k]) for k in range(ncats) ]
rx = (rng.random_sample((nrand,ncats))-0.5) * L
ry = (rng.random_sample((nrand,ncats))-0.5) * L
rand_cats = [ treecorr.Catalog(x=rx[:,k], y=ry[:,k]) for k in range(ncats) ]
ddd = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
nubins=nubins, nvbins=nvbins, bin_slop=0.1, verbose=1)
ddd.process(data_cats)
print('From multiple catalogs: ddd.ntri = ',ddd.ntri)
# Now do the same thing with one big catalog
dddx = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
nubins=nubins, nvbins=nvbins, bin_slop=0.1, verbose=1)
data_catx = treecorr.Catalog(x=x.reshape( (ngal*ncats,) ), y=y.reshape( (ngal*ncats,) ))
dddx.process(data_catx)
print('From single catalog: dddx.ntri = ',dddx.ntri)
    # Only test to rtol=0.1, since there are now differences between the auto and cross
    # calculations related to how they characterize triangles, especially when d1 ~= d2 or d2 ~= d3.
np.testing.assert_allclose(ddd.ntri, dddx.ntri, rtol=0.1)
rrr = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
nubins=nubins, nvbins=nvbins, bin_slop=0.1, verbose=1)
rrr.process(rand_cats)
print('rrr.ntri = ',rrr.ntri)
rrrx = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
nubins=nubins, nvbins=nvbins, bin_slop=0.1, verbose=1)
rand_catx = treecorr.Catalog(x=rx.reshape( (nrand*ncats,) ), y=ry.reshape( (nrand*ncats,) ))
rrrx.process(rand_catx)
print('rrrx.ntri = ',rrrx.ntri)
np.testing.assert_allclose(ddd.ntri, dddx.ntri, rtol=0.1)
zeta, varzeta = ddd.calculateZeta(rrr)
zetax, varzetax = dddx.calculateZeta(rrrx)
print('zeta = ',zeta)
print('zetax = ',zetax)
#print('ratio = ',zeta/zetax)
#print('diff = ',zeta-zetax)
np.testing.assert_allclose(zeta, zetax, rtol=0.1)
# Check that we get the same result using the corr3 function:
file_list = []
rand_file_list = []
for k in range(ncats):
file_name = os.path.join('data','nnn_list_data%d.dat'%k)
data_cats[k].write(file_name)
file_list.append(file_name)
rand_file_name = os.path.join('data','nnn_list_rand%d.dat'%k)
rand_cats[k].write(rand_file_name)
rand_file_list.append(rand_file_name)
list_name = os.path.join('data','nnn_list_data_files.txt')
with open(list_name, 'w') as fid:
for file_name in file_list:
fid.write('%s\n'%file_name)
rand_list_name = os.path.join('data','nnn_list_rand_files.txt')
with open(rand_list_name, 'w') as fid:
for file_name in rand_file_list:
fid.write('%s\n'%file_name)
file_namex = os.path.join('data','nnn_list_datax.dat')
data_catx.write(file_namex)
rand_file_namex = os.path.join('data','nnn_list_randx.dat')
rand_catx.write(rand_file_namex)
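    # Run the same check through corr3 with a YAML config.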
config = treecorr.config.read_config('configs/nnn_list1.yaml')
config['verbose'] = 0
config['bin_slop'] = 0.1
treecorr.corr3(config)
corr3_output = np.genfromtxt(os.path.join('output','nnn_list1.out'), names=True, skip_header=1)
print('zeta = ',zeta)
print('from corr3 output = ',corr3_output['zeta'])
print('ratio = ',corr3_output['zeta']/zeta.flatten())
print('diff = ',corr3_output['zeta']-zeta.flatten())
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)
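    # Same check with a JSON config.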
config = treecorr.config.read_config('configs/nnn_list2.json')
config['verbose'] = 0
config['bin_slop'] = 0.1
treecorr.corr3(config)
corr3_output = np.genfromtxt(os.path.join('output','nnn_list2.out'), names=True, skip_header=1)
print('zeta = ',zeta)
print('from corr3 output = ',corr3_output['zeta'])
print('ratio = ',corr3_output['zeta']/zeta.flatten())
print('diff = ',corr3_output['zeta']-zeta.flatten())
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=0.05)
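    # Same check with a .params config.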
config = treecorr.config.read_config('configs/nnn_list3.params')
config['verbose'] = 0
config['bin_slop'] = 0.1
treecorr.corr3(config)
corr3_output = np.genfromtxt(os.path.join('output','nnn_list3.out'), names=True, skip_header=1)
print('zeta = ',zeta)
print('from corr3 output = ',corr3_output['zeta'])
print('ratio = ',corr3_output['zeta']/zeta.flatten())
print('diff = ',corr3_output['zeta']-zeta.flatten())
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=0.05)
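    # Same check again, this time giving the config file type explicitly.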
config = treecorr.config.read_config('configs/nnn_list4.config', file_type='params')
config['verbose'] = 0
config['bin_slop'] = 0.1
treecorr.corr3(config)
corr3_output = np.genfromtxt(os.path.join('output','nnn_list4.out'), names=True, skip_header=1)
print('zeta = ',zeta)
print('from corr3 output = ',corr3_output['zeta'])
print('ratio = ',corr3_output['zeta']/zeta.flatten())
print('diff = ',corr3_output['zeta']-zeta.flatten())
np.testing.assert_allclose(corr3_output['zeta'], zeta.flatten(), rtol=1.e-3)


if __name__ == '__main__':
test_log_binning()
test_direct_count_auto()
test_direct_count_cross()
test_direct_spherical()
test_direct_arc()
test_direct_partial()
test_direct_3d_auto()
test_direct_3d_cross()
test_nnn()
test_3d()
test_list()
[
"test_helper.assert_raises",
"numpy.arctan2",
"numpy.abs",
"numpy.floor",
"treecorr.Catalog",
"fitsio.read",
"numpy.isclose",
"numpy.exp",
"os.path.join",
"numpy.testing.assert_almost_equal",
"test_helper.do_pickle",
"numpy.genfromtxt",
"numpy.random.RandomState",
"numpy.arcsin",
"numpy.testing.assert_equal",
"numpy.testing.assert_allclose",
"math.log",
"test_helper.get_script_name",
"fitsio.read_header",
"test_helper.CaptureLog",
"subprocess.Popen",
"treecorr.NNNCorrelation",
"numpy.ceil",
"numpy.testing.assert_array_equal",
"coord.CelestialCoord.xyz_to_radec",
"treecorr.config.read_config",
"numpy.log",
"coord.CelestialCoord",
"numpy.zeros",
"numpy.where",
"treecorr.config.setup_logger",
"treecorr.corr3",
"numpy.sqrt"
]
nubins=nubins, min_v=min_v, max_v=max_v,\n nvbins=nvbins, brute=True)\n', (57507, 57656), False, 'import treecorr\n'), ((57848, 57863), 'numpy.log', 'np.log', (['min_sep'], {}), '(min_sep)\n', (57854, 57863), True, 'import numpy as np\n'), ((57882, 57897), 'numpy.log', 'np.log', (['max_sep'], {}), '(max_sep)\n', (57888, 57897), True, 'import numpy as np\n'), ((57914, 57951), 'numpy.zeros', 'np.zeros', (['(nbins, nubins, 2 * nvbins)'], {}), '((nbins, nubins, 2 * nvbins))\n', (57922, 57951), True, 'import numpy as np\n'), ((59466, 59517), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['ddda.ntri', 'true_ntri'], {}), '(ddda.ntri, true_ntri)\n', (59495, 59517), True, 'import numpy as np\n'), ((59639, 59653), 'numpy.zeros', 'np.zeros', (['ngal'], {}), '(ngal)\n', (59647, 59653), True, 'import numpy as np\n'), ((59682, 59696), 'numpy.zeros', 'np.zeros', (['ngal'], {}), '(ngal)\n', (59690, 59696), True, 'import numpy as np\n'), ((59725, 59739), 'numpy.zeros', 'np.zeros', (['ngal'], {}), '(ngal)\n', (59733, 59739), True, 'import numpy as np\n'), ((59771, 59805), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'x1', 'y': 'y1', 'w': 'w1'}), '(x=x1, y=y1, w=w1)\n', (59787, 59805), False, 'import treecorr\n'), ((59818, 59852), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'x2', 'y': 'y2', 'w': 'w2'}), '(x=x2, y=y2, w=w2)\n', (59834, 59852), False, 'import treecorr\n'), ((59865, 59899), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'x3', 'y': 'y3', 'w': 'w3'}), '(x=x3, y=y3, w=w3)\n', (59881, 59899), False, 'import treecorr\n'), ((59911, 60083), 'treecorr.NNNCorrelation', 'treecorr.NNNCorrelation', ([], {'min_sep': 'min_sep', 'max_sep': 'max_sep', 'nbins': 'nbins', 'min_u': 'min_u', 'max_u': 'max_u', 'nubins': 'nubins', 'min_v': 'min_v', 'max_v': 'max_v', 'nvbins': 'nvbins', 'brute': '(True)'}), '(min_sep=min_sep, max_sep=max_sep, nbins=nbins,\n min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v,\n nvbins=nvbins, brute=True)\n', (59934, 60083), False, 'import treecorr\n'), ((60304, 60355), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['dddb.ntri', 'true_ntri'], {}), '(dddb.ntri, true_ntri)\n', (60333, 60355), True, 'import numpy as np\n'), ((60879, 60909), 'numpy.random.RandomState', 'np.random.RandomState', (['(8675309)'], {}), '(8675309)\n', (60900, 60909), True, 'import numpy as np\n'), ((61030, 61060), 'numpy.sqrt', 'np.sqrt', (['(x * x + y * y + z * z)'], {}), '(x * x + y * y + z * z)\n', (61037, 61060), True, 'import numpy as np\n'), ((61067, 61083), 'numpy.arcsin', 'np.arcsin', (['(z / r)'], {}), '(z / r)\n', (61076, 61083), True, 'import numpy as np\n'), ((61091, 61107), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (61101, 61107), True, 'import numpy as np\n'), ((61117, 61187), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'ra', 'dec': 'dec', 'r': 'r', 'ra_units': '"""rad"""', 'dec_units': '"""rad"""'}), "(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad')\n", (61133, 61187), False, 'import treecorr\n'), ((61349, 61532), 'treecorr.NNNCorrelation', 'treecorr.NNNCorrelation', ([], {'min_sep': 'min_sep', 'max_sep': 'max_sep', 'nbins': 'nbins', 'min_u': 'min_u', 'max_u': 'max_u', 'nubins': 'nubins', 'min_v': 'min_v', 'max_v': 'max_v', 'nvbins': 'nvbins', 'brute': '(True)', 'verbose': '(1)'}), '(min_sep=min_sep, max_sep=max_sep, nbins=nbins,\n min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v,\n nvbins=nvbins, brute=True, verbose=1)\n', (61372, 61532), False, 'import treecorr\n'), 
((61702, 61717), 'numpy.log', 'np.log', (['min_sep'], {}), '(min_sep)\n', (61708, 61717), True, 'import numpy as np\n'), ((61736, 61751), 'numpy.log', 'np.log', (['max_sep'], {}), '(max_sep)\n', (61742, 61751), True, 'import numpy as np\n'), ((61768, 61805), 'numpy.zeros', 'np.zeros', (['(nbins, nubins, 2 * nvbins)'], {}), '((nbins, nubins, 2 * nvbins))\n', (61776, 61805), True, 'import numpy as np\n'), ((64370, 64420), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['ddd.ntri', 'true_ntri'], {}), '(ddd.ntri, true_ntri)\n', (64399, 64420), True, 'import numpy as np\n'), ((64462, 64645), 'treecorr.NNNCorrelation', 'treecorr.NNNCorrelation', ([], {'min_sep': 'min_sep', 'max_sep': 'max_sep', 'nbins': 'nbins', 'min_u': 'min_u', 'max_u': 'max_u', 'nubins': 'nubins', 'min_v': 'min_v', 'max_v': 'max_v', 'nvbins': 'nvbins', 'bin_slop': '(0)', 'verbose': '(1)'}), '(min_sep=min_sep, max_sep=max_sep, nbins=nbins,\n min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v,\n nvbins=nvbins, bin_slop=0, verbose=1)\n', (64485, 64645), False, 'import treecorr\n'), ((64843, 64893), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['ddd.ntri', 'true_ntri'], {}), '(ddd.ntri, true_ntri)\n', (64872, 64893), True, 'import numpy as np\n'), ((64949, 65143), 'treecorr.NNNCorrelation', 'treecorr.NNNCorrelation', ([], {'min_sep': 'min_sep', 'max_sep': 'max_sep', 'nbins': 'nbins', 'min_u': 'min_u', 'max_u': 'max_u', 'nubins': 'nubins', 'min_v': 'min_v', 'max_v': 'max_v', 'nvbins': 'nvbins', 'bin_slop': '(0)', 'verbose': '(1)', 'max_top': '(0)'}), '(min_sep=min_sep, max_sep=max_sep, nbins=nbins,\n min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v,\n nvbins=nvbins, bin_slop=0, verbose=1, max_top=0)\n', (64972, 65143), False, 'import treecorr\n'), ((65379, 65429), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['ddd.ntri', 'true_ntri'], {}), '(ddd.ntri, true_ntri)\n', (65408, 65429), True, 'import numpy as np\n'), ((65639, 65689), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['ddd.ntri', 'true_ntri'], {}), '(ddd.ntri, true_ntri)\n', (65668, 65689), True, 'import numpy as np\n'), ((65756, 65787), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'x', 'y': 'y', 'z': 'z'}), '(x=x, y=y, z=z)\n', (65772, 65787), False, 'import treecorr\n'), ((65813, 65863), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['ddd.ntri', 'true_ntri'], {}), '(ddd.ntri, true_ntri)\n', (65842, 65863), True, 'import numpy as np\n'), ((66003, 66033), 'numpy.random.RandomState', 'np.random.RandomState', (['(8675309)'], {}), '(8675309)\n', (66024, 66033), True, 'import numpy as np\n'), ((66158, 66194), 'numpy.sqrt', 'np.sqrt', (['(x1 * x1 + y1 * y1 + z1 * z1)'], {}), '(x1 * x1 + y1 * y1 + z1 * z1)\n', (66165, 66194), True, 'import numpy as np\n'), ((66202, 66220), 'numpy.arcsin', 'np.arcsin', (['(z1 / r1)'], {}), '(z1 / r1)\n', (66211, 66220), True, 'import numpy as np\n'), ((66229, 66247), 'numpy.arctan2', 'np.arctan2', (['y1', 'x1'], {}), '(y1, x1)\n', (66239, 66247), True, 'import numpy as np\n'), ((66258, 66331), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'ra1', 'dec': 'dec1', 'r': 'r1', 'ra_units': '"""rad"""', 'dec_units': '"""rad"""'}), "(ra=ra1, dec=dec1, r=r1, ra_units='rad', dec_units='rad')\n", (66274, 66331), False, 'import treecorr\n'), ((66457, 66493), 'numpy.sqrt', 'np.sqrt', (['(x2 * x2 + y2 * y2 + z2 * z2)'], {}), '(x2 * x2 + y2 * y2 + z2 * z2)\n', (66464, 66493), True, 'import numpy as 
np\n'), ((66501, 66519), 'numpy.arcsin', 'np.arcsin', (['(z2 / r2)'], {}), '(z2 / r2)\n', (66510, 66519), True, 'import numpy as np\n'), ((66528, 66546), 'numpy.arctan2', 'np.arctan2', (['y2', 'x2'], {}), '(y2, x2)\n', (66538, 66546), True, 'import numpy as np\n'), ((66557, 66630), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'ra2', 'dec': 'dec2', 'r': 'r2', 'ra_units': '"""rad"""', 'dec_units': '"""rad"""'}), "(ra=ra2, dec=dec2, r=r2, ra_units='rad', dec_units='rad')\n", (66573, 66630), False, 'import treecorr\n'), ((66756, 66792), 'numpy.sqrt', 'np.sqrt', (['(x3 * x3 + y3 * y3 + z3 * z3)'], {}), '(x3 * x3 + y3 * y3 + z3 * z3)\n', (66763, 66792), True, 'import numpy as np\n'), ((66800, 66818), 'numpy.arcsin', 'np.arcsin', (['(z3 / r3)'], {}), '(z3 / r3)\n', (66809, 66818), True, 'import numpy as np\n'), ((66827, 66845), 'numpy.arctan2', 'np.arctan2', (['y3', 'x3'], {}), '(y3, x3)\n', (66837, 66845), True, 'import numpy as np\n'), ((66856, 66929), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'ra3', 'dec': 'dec3', 'r': 'r3', 'ra_units': '"""rad"""', 'dec_units': '"""rad"""'}), "(ra=ra3, dec=dec3, r=r3, ra_units='rad', dec_units='rad')\n", (66872, 66929), False, 'import treecorr\n'), ((67091, 67274), 'treecorr.NNNCorrelation', 'treecorr.NNNCorrelation', ([], {'min_sep': 'min_sep', 'max_sep': 'max_sep', 'nbins': 'nbins', 'min_u': 'min_u', 'max_u': 'max_u', 'nubins': 'nubins', 'min_v': 'min_v', 'max_v': 'max_v', 'nvbins': 'nvbins', 'brute': '(True)', 'verbose': '(1)'}), '(min_sep=min_sep, max_sep=max_sep, nbins=nbins,\n min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v,\n nvbins=nvbins, brute=True, verbose=1)\n', (67114, 67274), False, 'import treecorr\n'), ((67457, 67472), 'numpy.log', 'np.log', (['min_sep'], {}), '(min_sep)\n', (67463, 67472), True, 'import numpy as np\n'), ((67491, 67506), 'numpy.log', 'np.log', (['max_sep'], {}), '(max_sep)\n', (67497, 67506), True, 'import numpy as np\n'), ((67523, 67560), 'numpy.zeros', 'np.zeros', (['(nbins, nubins, 2 * nvbins)'], {}), '((nbins, nubins, 2 * nvbins))\n', (67531, 67560), True, 'import numpy as np\n'), ((69235, 69285), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['ddd.ntri', 'true_ntri'], {}), '(ddd.ntri, true_ntri)\n', (69264, 69285), True, 'import numpy as np\n'), ((69327, 69510), 'treecorr.NNNCorrelation', 'treecorr.NNNCorrelation', ([], {'min_sep': 'min_sep', 'max_sep': 'max_sep', 'nbins': 'nbins', 'min_u': 'min_u', 'max_u': 'max_u', 'nubins': 'nubins', 'min_v': 'min_v', 'max_v': 'max_v', 'nvbins': 'nvbins', 'bin_slop': '(0)', 'verbose': '(1)'}), '(min_sep=min_sep, max_sep=max_sep, nbins=nbins,\n min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v,\n nvbins=nvbins, bin_slop=0, verbose=1)\n', (69350, 69510), False, 'import treecorr\n'), ((69734, 69784), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['ddd.ntri', 'true_ntri'], {}), '(ddd.ntri, true_ntri)\n', (69763, 69784), True, 'import numpy as np\n'), ((69840, 70034), 'treecorr.NNNCorrelation', 'treecorr.NNNCorrelation', ([], {'min_sep': 'min_sep', 'max_sep': 'max_sep', 'nbins': 'nbins', 'min_u': 'min_u', 'max_u': 'max_u', 'nubins': 'nubins', 'min_v': 'min_v', 'max_v': 'max_v', 'nvbins': 'nvbins', 'bin_slop': '(0)', 'verbose': '(1)', 'max_top': '(0)'}), '(min_sep=min_sep, max_sep=max_sep, nbins=nbins,\n min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v,\n nvbins=nvbins, bin_slop=0, verbose=1, max_top=0)\n', (69863, 70034), False, 'import treecorr\n'), ((70295, 70345), 
'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['ddd.ntri', 'true_ntri'], {}), '(ddd.ntri, true_ntri)\n', (70324, 70345), True, 'import numpy as np\n'), ((70413, 70447), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'x1', 'y': 'y1', 'z': 'z1'}), '(x=x1, y=y1, z=z1)\n', (70429, 70447), False, 'import treecorr\n'), ((70459, 70493), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'x2', 'y': 'y2', 'z': 'z2'}), '(x=x2, y=y2, z=z2)\n', (70475, 70493), False, 'import treecorr\n'), ((70505, 70539), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'x3', 'y': 'y3', 'z': 'z3'}), '(x=x3, y=y3, z=z3)\n', (70521, 70539), False, 'import treecorr\n'), ((70578, 70628), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['ddd.ntri', 'true_ntri'], {}), '(ddd.ntri, true_ntri)\n', (70607, 70628), True, 'import numpy as np\n'), ((72128, 72158), 'numpy.random.RandomState', 'np.random.RandomState', (['(8675309)'], {}), '(8675309)\n', (72149, 72158), True, 'import numpy as np\n'), ((72382, 72444), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'x', 'y': 'y', 'x_units': '"""arcmin"""', 'y_units': '"""arcmin"""'}), "(x=x, y=y, x_units='arcmin', y_units='arcmin')\n", (72398, 72444), False, 'import treecorr\n'), ((72455, 72646), 'treecorr.NNNCorrelation', 'treecorr.NNNCorrelation', ([], {'min_sep': 'min_sep', 'max_sep': 'max_sep', 'nbins': 'nbins', 'min_u': 'min_u', 'max_u': 'max_u', 'min_v': 'min_v', 'max_v': 'max_v', 'nubins': 'nubins', 'nvbins': 'nvbins', 'sep_units': '"""arcmin"""', 'verbose': '(1)'}), "(min_sep=min_sep, max_sep=max_sep, nbins=nbins,\n min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins,\n nvbins=nvbins, sep_units='arcmin', verbose=1)\n", (72478, 72646), False, 'import treecorr\n'), ((73878, 73969), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(ddd.meand3 / ddd.meand2)', 'ddd.meanu'], {'rtol': '(1e-05 * tol_factor)'}), '(ddd.meand3 / ddd.meand2, ddd.meanu, rtol=1e-05 *\n tol_factor)\n', (73904, 73969), True, 'import numpy as np\n'), ((74534, 74598), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'rx', 'y': 'ry', 'x_units': '"""arcmin"""', 'y_units': '"""arcmin"""'}), "(x=rx, y=ry, x_units='arcmin', y_units='arcmin')\n", (74550, 74598), False, 'import treecorr\n'), ((74608, 74799), 'treecorr.NNNCorrelation', 'treecorr.NNNCorrelation', ([], {'min_sep': 'min_sep', 'max_sep': 'max_sep', 'nbins': 'nbins', 'min_u': 'min_u', 'max_u': 'max_u', 'min_v': 'min_v', 'max_v': 'max_v', 'nubins': 'nubins', 'nvbins': 'nvbins', 'sep_units': '"""arcmin"""', 'verbose': '(1)'}), "(min_sep=min_sep, max_sep=max_sep, nbins=nbins,\n min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins,\n nvbins=nvbins, sep_units='arcmin', verbose=1)\n", (74631, 74799), False, 'import treecorr\n'), ((75530, 75596), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['zeta', 'true_zeta'], {'rtol': '(0.1 * tol_factor)'}), '(zeta, true_zeta, rtol=0.1 * tol_factor)\n', (75556, 75596), True, 'import numpy as np\n'), ((75912, 75959), 'treecorr.config.read_config', 'treecorr.config.read_config', (['"""configs/nnn.yaml"""'], {}), "('configs/nnn.yaml')\n", (75939, 75959), False, 'import treecorr\n'), ((75990, 76012), 'treecorr.corr3', 'treecorr.corr3', (['config'], {}), '(config)\n', (76004, 76012), False, 'import treecorr\n'), ((76579, 76618), 'os.path.join', 'os.path.join', (['"""output"""', '"""nnn_out1.fits"""'], {}), "('output', 'nnn_out1.fits')\n", (76591, 76618), False, 'import os\n'), ((76659, 76686), 'fitsio.read', 
'fitsio.read', (['out_file_name1'], {}), '(out_file_name1)\n', (76670, 76686), False, 'import fitsio\n'), ((77579, 77616), 'fitsio.read_header', 'fitsio.read_header', (['out_file_name1', '(1)'], {}), '(out_file_name1, 1)\n', (77597, 77616), False, 'import fitsio\n'), ((77621, 77681), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (["(header['tot'] / ddd.tot)", '(1.0)'], {}), "(header['tot'] / ddd.tot, 1.0)\n", (77651, 77681), True, 'import numpy as np\n'), ((77701, 77740), 'os.path.join', 'os.path.join', (['"""output"""', '"""nnn_out2.fits"""'], {}), "('output', 'nnn_out2.fits')\n", (77713, 77740), False, 'import os\n'), ((77786, 77813), 'fitsio.read', 'fitsio.read', (['out_file_name2'], {}), '(out_file_name2)\n', (77797, 77813), False, 'import fitsio\n'), ((78943, 78980), 'fitsio.read_header', 'fitsio.read_header', (['out_file_name2', '(1)'], {}), '(out_file_name2, 1)\n', (78961, 78980), False, 'import fitsio\n'), ((78985, 79045), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (["(header['tot'] / ddd.tot)", '(1.0)'], {}), "(header['tot'] / ddd.tot, 1.0)\n", (79015, 79045), True, 'import numpy as np\n'), ((79185, 79376), 'treecorr.NNNCorrelation', 'treecorr.NNNCorrelation', ([], {'min_sep': 'min_sep', 'max_sep': 'max_sep', 'nbins': 'nbins', 'min_u': 'min_u', 'max_u': 'max_u', 'min_v': 'min_v', 'max_v': 'max_v', 'nubins': 'nubins', 'nvbins': 'nvbins', 'sep_units': '"""arcmin"""', 'verbose': '(1)'}), "(min_sep=min_sep, max_sep=max_sep, nbins=nbins,\n min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins,\n nvbins=nvbins, sep_units='arcmin', verbose=1)\n", (79208, 79376), False, 'import treecorr\n'), ((79508, 79559), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.logr', 'ddd.logr'], {}), '(ddd2.logr, ddd.logr)\n', (79538, 79559), True, 'import numpy as np\n'), ((79564, 79609), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.u', 'ddd.u'], {}), '(ddd2.u, ddd.u)\n', (79594, 79609), True, 'import numpy as np\n'), ((79614, 79659), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.v', 'ddd.v'], {}), '(ddd2.v, ddd.v)\n', (79644, 79659), True, 'import numpy as np\n'), ((79664, 79719), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meand1', 'ddd.meand1'], {}), '(ddd2.meand1, ddd.meand1)\n', (79694, 79719), True, 'import numpy as np\n'), ((79724, 79785), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meanlogd1', 'ddd.meanlogd1'], {}), '(ddd2.meanlogd1, ddd.meanlogd1)\n', (79754, 79785), True, 'import numpy as np\n'), ((79790, 79845), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meand2', 'ddd.meand2'], {}), '(ddd2.meand2, ddd.meand2)\n', (79820, 79845), True, 'import numpy as np\n'), ((79850, 79911), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meanlogd2', 'ddd.meanlogd2'], {}), '(ddd2.meanlogd2, ddd.meanlogd2)\n', (79880, 79911), True, 'import numpy as np\n'), ((79916, 79971), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meand3', 'ddd.meand3'], {}), '(ddd2.meand3, ddd.meand3)\n', (79946, 79971), True, 'import numpy as np\n'), ((79976, 80037), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meanlogd3', 'ddd.meanlogd3'], {}), '(ddd2.meanlogd3, ddd.meanlogd3)\n', (80006, 80037), True, 'import numpy as np\n'), ((80042, 80095), 'numpy.testing.assert_almost_equal', 
'np.testing.assert_almost_equal', (['ddd2.meanu', 'ddd.meanu'], {}), '(ddd2.meanu, ddd.meanu)\n', (80072, 80095), True, 'import numpy as np\n'), ((80100, 80153), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meanv', 'ddd.meanv'], {}), '(ddd2.meanv, ddd.meanv)\n', (80130, 80153), True, 'import numpy as np\n'), ((80158, 80209), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.ntri', 'ddd.ntri'], {}), '(ddd2.ntri, ddd.ntri)\n', (80188, 80209), True, 'import numpy as np\n'), ((80214, 80269), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['(ddd2.tot / ddd.tot)', '(1.0)'], {}), '(ddd2.tot / ddd.tot, 1.0)\n', (80244, 80269), True, 'import numpy as np\n'), ((80460, 80511), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.logr', 'ddd.logr'], {}), '(ddd2.logr, ddd.logr)\n', (80490, 80511), True, 'import numpy as np\n'), ((80516, 80561), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.u', 'ddd.u'], {}), '(ddd2.u, ddd.u)\n', (80546, 80561), True, 'import numpy as np\n'), ((80566, 80611), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.v', 'ddd.v'], {}), '(ddd2.v, ddd.v)\n', (80596, 80611), True, 'import numpy as np\n'), ((80616, 80671), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meand1', 'ddd.meand1'], {}), '(ddd2.meand1, ddd.meand1)\n', (80646, 80671), True, 'import numpy as np\n'), ((80676, 80737), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meanlogd1', 'ddd.meanlogd1'], {}), '(ddd2.meanlogd1, ddd.meanlogd1)\n', (80706, 80737), True, 'import numpy as np\n'), ((80742, 80797), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meand2', 'ddd.meand2'], {}), '(ddd2.meand2, ddd.meand2)\n', (80772, 80797), True, 'import numpy as np\n'), ((80802, 80863), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meanlogd2', 'ddd.meanlogd2'], {}), '(ddd2.meanlogd2, ddd.meanlogd2)\n', (80832, 80863), True, 'import numpy as np\n'), ((80868, 80923), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meand3', 'ddd.meand3'], {}), '(ddd2.meand3, ddd.meand3)\n', (80898, 80923), True, 'import numpy as np\n'), ((80928, 80989), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meanlogd3', 'ddd.meanlogd3'], {}), '(ddd2.meanlogd3, ddd.meanlogd3)\n', (80958, 80989), True, 'import numpy as np\n'), ((80994, 81047), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meanu', 'ddd.meanu'], {}), '(ddd2.meanu, ddd.meanu)\n', (81024, 81047), True, 'import numpy as np\n'), ((81052, 81105), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meanv', 'ddd.meanv'], {}), '(ddd2.meanv, ddd.meanv)\n', (81082, 81105), True, 'import numpy as np\n'), ((81110, 81161), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.ntri', 'ddd.ntri'], {}), '(ddd2.ntri, ddd.ntri)\n', (81140, 81161), True, 'import numpy as np\n'), ((81166, 81221), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['(ddd2.tot / ddd.tot)', '(1.0)'], {}), '(ddd2.tot / ddd.tot, 1.0)\n', (81196, 81221), True, 'import numpy as np\n'), ((81676, 81715), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['zeta2', 'zeta'], {}), '(zeta2, zeta)\n', (81702, 81715), True, 'import numpy as np\n'), ((81720, 81765), 
'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['varzeta2', 'varzeta'], {}), '(varzeta2, varzeta)\n', (81746, 81765), True, 'import numpy as np\n'), ((82524, 82704), 'treecorr.NNNCorrelation', 'treecorr.NNNCorrelation', ([], {'min_sep': 'min_sep', 'max_sep': 'max_sep', 'nbins': 'nbins', 'min_u': 'min_u', 'max_u': 'max_u', 'min_v': 'min_v', 'max_v': 'max_v', 'nubins': 'nubins', 'nvbins': 'nvbins', 'sep_units': '"""arcmin"""'}), "(min_sep=min_sep, max_sep=max_sep, nbins=nbins,\n min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins,\n nvbins=nvbins, sep_units='arcmin')\n", (82547, 82704), False, 'import treecorr\n'), ((83601, 83640), 'os.path.join', 'os.path.join', (['"""output"""', '"""nnn_out3.fits"""'], {}), "('output', 'nnn_out3.fits')\n", (83613, 83640), False, 'import os\n'), ((85805, 85871), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['zeta', 'true_zeta'], {'rtol': '(0.1 * tol_factor)'}), '(zeta, true_zeta, rtol=0.1 * tol_factor)\n', (85831, 85871), True, 'import numpy as np\n'), ((86132, 86171), 'os.path.join', 'os.path.join', (['"""output"""', '"""nnn_out3.fits"""'], {}), "('output', 'nnn_out3.fits')\n", (86144, 86171), False, 'import os\n'), ((86241, 86268), 'fitsio.read', 'fitsio.read', (['out_file_name3'], {}), '(out_file_name3)\n', (86252, 86268), False, 'import fitsio\n'), ((87938, 87975), 'fitsio.read_header', 'fitsio.read_header', (['out_file_name3', '(1)'], {}), '(out_file_name3, 1)\n', (87956, 87975), False, 'import fitsio\n'), ((87980, 88040), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (["(header['tot'] / ddd.tot)", '(1.0)'], {}), "(header['tot'] / ddd.tot, 1.0)\n", (88010, 88040), True, 'import numpy as np\n'), ((88073, 88124), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.logr', 'ddd.logr'], {}), '(ddd2.logr, ddd.logr)\n', (88103, 88124), True, 'import numpy as np\n'), ((88129, 88174), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.u', 'ddd.u'], {}), '(ddd2.u, ddd.u)\n', (88159, 88174), True, 'import numpy as np\n'), ((88179, 88224), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.v', 'ddd.v'], {}), '(ddd2.v, ddd.v)\n', (88209, 88224), True, 'import numpy as np\n'), ((88229, 88284), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meand1', 'ddd.meand1'], {}), '(ddd2.meand1, ddd.meand1)\n', (88259, 88284), True, 'import numpy as np\n'), ((88289, 88350), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meanlogd1', 'ddd.meanlogd1'], {}), '(ddd2.meanlogd1, ddd.meanlogd1)\n', (88319, 88350), True, 'import numpy as np\n'), ((88355, 88410), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meand2', 'ddd.meand2'], {}), '(ddd2.meand2, ddd.meand2)\n', (88385, 88410), True, 'import numpy as np\n'), ((88415, 88476), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meanlogd2', 'ddd.meanlogd2'], {}), '(ddd2.meanlogd2, ddd.meanlogd2)\n', (88445, 88476), True, 'import numpy as np\n'), ((88481, 88536), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meand3', 'ddd.meand3'], {}), '(ddd2.meand3, ddd.meand3)\n', (88511, 88536), True, 'import numpy as np\n'), ((88541, 88602), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meanlogd3', 'ddd.meanlogd3'], {}), '(ddd2.meanlogd3, ddd.meanlogd3)\n', (88571, 88602), True, 'import numpy as np\n'), 
((88607, 88660), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meanu', 'ddd.meanu'], {}), '(ddd2.meanu, ddd.meanu)\n', (88637, 88660), True, 'import numpy as np\n'), ((88665, 88718), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.meanv', 'ddd.meanv'], {}), '(ddd2.meanv, ddd.meanv)\n', (88695, 88718), True, 'import numpy as np\n'), ((88723, 88774), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['ddd2.ntri', 'ddd.ntri'], {}), '(ddd2.ntri, ddd.ntri)\n', (88753, 88774), True, 'import numpy as np\n'), ((88779, 88834), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['(ddd2.tot / ddd.tot)', '(1.0)'], {}), '(ddd2.tot / ddd.tot, 1.0)\n', (88809, 88834), True, 'import numpy as np\n'), ((89004, 89063), 'treecorr.config.read_config', 'treecorr.config.read_config', (['"""configs/nnn_compensated.yaml"""'], {}), "('configs/nnn_compensated.yaml')\n", (89031, 89063), False, 'import treecorr\n'), ((89094, 89116), 'treecorr.corr3', 'treecorr.corr3', (['config'], {}), '(config)\n', (89108, 89116), False, 'import treecorr\n'), ((89137, 89183), 'os.path.join', 'os.path.join', (['"""output"""', '"""nnn_compensated.fits"""'], {}), "('output', 'nnn_compensated.fits')\n", (89149, 89183), False, 'import os\n'), ((89202, 89228), 'fitsio.read', 'fitsio.read', (['corr3_outfile'], {}), '(corr3_outfile)\n', (89213, 89228), False, 'import fitsio\n'), ((91263, 91299), 'fitsio.read_header', 'fitsio.read_header', (['corr3_outfile', '(1)'], {}), '(corr3_outfile, 1)\n', (91281, 91299), False, 'import fitsio\n'), ((91304, 91364), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (["(header['tot'] / ddd.tot)", '(1.0)'], {}), "(header['tot'] / ddd.tot, 1.0)\n", (91334, 91364), True, 'import numpy as np\n'), ((92967, 92997), 'numpy.random.RandomState', 'np.random.RandomState', (['(8675309)'], {}), '(8675309)\n', (92988, 92997), True, 'import numpy as np\n'), ((93121, 93151), 'numpy.sqrt', 'np.sqrt', (['(x * x + y * y + z * z)'], {}), '(x * x + y * y + z * z)\n', (93128, 93151), True, 'import numpy as np\n'), ((93416, 93486), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'ra', 'dec': 'dec', 'r': 'r', 'ra_units': '"""deg"""', 'dec_units': '"""deg"""'}), "(ra=ra, dec=dec, r=r, ra_units='deg', dec_units='deg')\n", (93432, 93486), False, 'import treecorr\n'), ((93497, 93668), 'treecorr.NNNCorrelation', 'treecorr.NNNCorrelation', ([], {'min_sep': 'min_sep', 'max_sep': 'max_sep', 'nbins': 'nbins', 'min_u': 'min_u', 'max_u': 'max_u', 'min_v': 'min_v', 'max_v': 'max_v', 'nubins': 'nubins', 'nvbins': 'nvbins', 'verbose': '(1)'}), '(min_sep=min_sep, max_sep=max_sep, nbins=nbins,\n min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins,\n nvbins=nvbins, verbose=1)\n', (93520, 93668), False, 'import treecorr\n'), ((93957, 93993), 'numpy.sqrt', 'np.sqrt', (['(rx * rx + ry * ry + rz * rz)'], {}), '(rx * rx + ry * ry + rz * rz)\n', (93964, 93993), True, 'import numpy as np\n'), ((94120, 94193), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'ra': 'rra', 'dec': 'rdec', 'r': 'rr', 'ra_units': '"""deg"""', 'dec_units': '"""deg"""'}), "(ra=rra, dec=rdec, r=rr, ra_units='deg', dec_units='deg')\n", (94136, 94193), False, 'import treecorr\n'), ((94204, 94375), 'treecorr.NNNCorrelation', 'treecorr.NNNCorrelation', ([], {'min_sep': 'min_sep', 'max_sep': 'max_sep', 'nbins': 'nbins', 'min_u': 'min_u', 'max_u': 'max_u', 'min_v': 'min_v', 'max_v': 'max_v', 'nubins': 'nubins', 'nvbins': 'nvbins', 'verbose': 
'(1)'}), '(min_sep=min_sep, max_sep=max_sep, nbins=nbins,\n min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins,\n nvbins=nvbins, verbose=1)\n', (94227, 94375), False, 'import treecorr\n'), ((95209, 95275), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['zeta', 'true_zeta'], {'rtol': '(0.1 * tol_factor)'}), '(zeta, true_zeta, rtol=0.1 * tol_factor)\n', (95235, 95275), True, 'import numpy as np\n'), ((95597, 95647), 'treecorr.config.read_config', 'treecorr.config.read_config', (['"""configs/nnn_3d.yaml"""'], {}), "('configs/nnn_3d.yaml')\n", (95624, 95647), False, 'import treecorr\n'), ((95678, 95700), 'treecorr.corr3', 'treecorr.corr3', (['config'], {}), '(config)\n', (95692, 95700), False, 'import treecorr\n'), ((96173, 96204), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'x', 'y': 'y', 'z': 'z'}), '(x=x, y=y, z=z)\n', (96189, 96204), False, 'import treecorr\n'), ((96216, 96250), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'rx', 'y': 'ry', 'z': 'rz'}), '(x=rx, y=ry, z=rz)\n', (96232, 96250), False, 'import treecorr\n'), ((96341, 96407), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['zeta', 'true_zeta'], {'rtol': '(0.1 * tol_factor)'}), '(zeta, true_zeta, rtol=0.1 * tol_factor)\n', (96367, 96407), True, 'import numpy as np\n'), ((96763, 96793), 'numpy.random.RandomState', 'np.random.RandomState', (['(8675309)'], {}), '(8675309)\n', (96784, 96793), True, 'import numpy as np\n'), ((97291, 97476), 'treecorr.NNNCorrelation', 'treecorr.NNNCorrelation', ([], {'min_sep': 'min_sep', 'max_sep': 'max_sep', 'nbins': 'nbins', 'min_u': 'min_u', 'max_u': 'max_u', 'min_v': 'min_v', 'max_v': 'max_v', 'nubins': 'nubins', 'nvbins': 'nvbins', 'bin_slop': '(0.1)', 'verbose': '(1)'}), '(min_sep=min_sep, max_sep=max_sep, nbins=nbins,\n min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins,\n nvbins=nvbins, bin_slop=0.1, verbose=1)\n', (97314, 97476), False, 'import treecorr\n'), ((97683, 97868), 'treecorr.NNNCorrelation', 'treecorr.NNNCorrelation', ([], {'min_sep': 'min_sep', 'max_sep': 'max_sep', 'nbins': 'nbins', 'min_u': 'min_u', 'max_u': 'max_u', 'min_v': 'min_v', 'max_v': 'max_v', 'nubins': 'nubins', 'nvbins': 'nvbins', 'bin_slop': '(0.1)', 'verbose': '(1)'}), '(min_sep=min_sep, max_sep=max_sep, nbins=nbins,\n min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins,\n nvbins=nvbins, bin_slop=0.1, verbose=1)\n', (97706, 97868), False, 'import treecorr\n'), ((98288, 98345), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ddd.ntri', 'dddx.ntri'], {'rtol': '(0.1)'}), '(ddd.ntri, dddx.ntri, rtol=0.1)\n', (98314, 98345), True, 'import numpy as np\n'), ((98357, 98542), 'treecorr.NNNCorrelation', 'treecorr.NNNCorrelation', ([], {'min_sep': 'min_sep', 'max_sep': 'max_sep', 'nbins': 'nbins', 'min_u': 'min_u', 'max_u': 'max_u', 'min_v': 'min_v', 'max_v': 'max_v', 'nubins': 'nubins', 'nvbins': 'nvbins', 'bin_slop': '(0.1)', 'verbose': '(1)'}), '(min_sep=min_sep, max_sep=max_sep, nbins=nbins,\n min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins,\n nvbins=nvbins, bin_slop=0.1, verbose=1)\n', (98380, 98542), False, 'import treecorr\n'), ((98676, 98861), 'treecorr.NNNCorrelation', 'treecorr.NNNCorrelation', ([], {'min_sep': 'min_sep', 'max_sep': 'max_sep', 'nbins': 'nbins', 'min_u': 'min_u', 'max_u': 'max_u', 'min_v': 'min_v', 'max_v': 'max_v', 'nubins': 'nubins', 'nvbins': 'nvbins', 'bin_slop': '(0.1)', 'verbose': '(1)'}), '(min_sep=min_sep, max_sep=max_sep, nbins=nbins,\n min_u=min_u, max_u=max_u, 
min_v=min_v, max_v=max_v, nubins=nubins,\n nvbins=nvbins, bin_slop=0.1, verbose=1)\n', (98699, 98861), False, 'import treecorr\n'), ((99089, 99146), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ddd.ntri', 'dddx.ntri'], {'rtol': '(0.1)'}), '(ddd.ntri, dddx.ntri, rtol=0.1)\n', (99115, 99146), True, 'import numpy as np\n'), ((99363, 99412), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['zeta', 'zetax'], {'rtol': '(0.1)'}), '(zeta, zetax, rtol=0.1)\n', (99389, 99412), True, 'import numpy as np\n'), ((99866, 99913), 'os.path.join', 'os.path.join', (['"""data"""', '"""nnn_list_data_files.txt"""'], {}), "('data', 'nnn_list_data_files.txt')\n", (99878, 99913), False, 'import os\n'), ((100048, 100095), 'os.path.join', 'os.path.join', (['"""data"""', '"""nnn_list_rand_files.txt"""'], {}), "('data', 'nnn_list_rand_files.txt')\n", (100060, 100095), False, 'import os\n'), ((100237, 100279), 'os.path.join', 'os.path.join', (['"""data"""', '"""nnn_list_datax.dat"""'], {}), "('data', 'nnn_list_datax.dat')\n", (100249, 100279), False, 'import os\n'), ((100334, 100376), 'os.path.join', 'os.path.join', (['"""data"""', '"""nnn_list_randx.dat"""'], {}), "('data', 'nnn_list_randx.dat')\n", (100346, 100376), False, 'import os\n'), ((100427, 100480), 'treecorr.config.read_config', 'treecorr.config.read_config', (['"""configs/nnn_list1.yaml"""'], {}), "('configs/nnn_list1.yaml')\n", (100454, 100480), False, 'import treecorr\n'), ((100540, 100562), 'treecorr.corr3', 'treecorr.corr3', (['config'], {}), '(config)\n', (100554, 100562), False, 'import treecorr\n'), ((100954, 101007), 'treecorr.config.read_config', 'treecorr.config.read_config', (['"""configs/nnn_list2.json"""'], {}), "('configs/nnn_list2.json')\n", (100981, 101007), False, 'import treecorr\n'), ((101067, 101089), 'treecorr.corr3', 'treecorr.corr3', (['config'], {}), '(config)\n', (101081, 101089), False, 'import treecorr\n'), ((101480, 101535), 'treecorr.config.read_config', 'treecorr.config.read_config', (['"""configs/nnn_list3.params"""'], {}), "('configs/nnn_list3.params')\n", (101507, 101535), False, 'import treecorr\n'), ((101595, 101617), 'treecorr.corr3', 'treecorr.corr3', (['config'], {}), '(config)\n', (101609, 101617), False, 'import treecorr\n'), ((102008, 102083), 'treecorr.config.read_config', 'treecorr.config.read_config', (['"""configs/nnn_list4.config"""'], {'file_type': '"""params"""'}), "('configs/nnn_list4.config', file_type='params')\n", (102035, 102083), False, 'import treecorr\n'), ((102143, 102165), 'treecorr.corr3', 'treecorr.corr3', (['config'], {}), '(config)\n', (102157, 102165), False, 'import treecorr\n'), ((1005, 1091), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['(nnn.ubin_size * nnn.nubins)', '(nnn.max_u - nnn.min_u)'], {}), '(nnn.ubin_size * nnn.nubins, nnn.max_u - nnn.\n min_u)\n', (1035, 1091), True, 'import numpy as np\n'), ((1093, 1179), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['(nnn.vbin_size * nnn.nvbins)', '(nnn.max_v - nnn.min_v)'], {}), '(nnn.vbin_size * nnn.nvbins, nnn.max_v - nnn.\n min_v)\n', (1123, 1179), True, 'import numpy as np\n'), ((1218, 1273), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['nnn.logr1d.shape', '(nnn.nbins,)'], {}), '(nnn.logr1d.shape, (nnn.nbins,))\n', (1241, 1273), True, 'import numpy as np\n'), ((1476, 1561), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['nnn.logr.shape', '(nnn.nbins, nnn.nubins, 2 * nnn.nvbins)'], {}), '(nnn.logr.shape, (nnn.nbins, 
nnn.nubins, 2 * nnn.nvbins)\n )\n', (1499, 1561), True, 'import numpy as np\n'), ((1564, 1625), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['nnn.logr[:, 0, 0]', 'nnn.logr1d'], {}), '(nnn.logr[:, 0, 0], nnn.logr1d)\n', (1594, 1625), True, 'import numpy as np\n'), ((1632, 1695), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['nnn.logr[:, -1, -1]', 'nnn.logr1d'], {}), '(nnn.logr[:, -1, -1], nnn.logr1d)\n', (1662, 1695), True, 'import numpy as np\n'), ((1775, 1828), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['nnn.u1d.shape', '(nnn.nubins,)'], {}), '(nnn.u1d.shape, (nnn.nubins,))\n', (1798, 1828), True, 'import numpy as np\n'), ((1838, 1913), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['nnn.u1d[0]', '(nnn.min_u + 0.5 * nnn.ubin_size)'], {}), '(nnn.u1d[0], nnn.min_u + 0.5 * nnn.ubin_size)\n', (1868, 1913), True, 'import numpy as np\n'), ((1920, 1996), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['nnn.u1d[-1]', '(nnn.max_u - 0.5 * nnn.ubin_size)'], {}), '(nnn.u1d[-1], nnn.max_u - 0.5 * nnn.ubin_size)\n', (1950, 1996), True, 'import numpy as np\n'), ((2003, 2080), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['nnn.u.shape', '(nnn.nbins, nnn.nubins, 2 * nnn.nvbins)'], {}), '(nnn.u.shape, (nnn.nbins, nnn.nubins, 2 * nnn.nvbins))\n', (2026, 2080), True, 'import numpy as np\n'), ((2088, 2143), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['nnn.u[0, :, 0]', 'nnn.u1d'], {}), '(nnn.u[0, :, 0], nnn.u1d)\n', (2118, 2143), True, 'import numpy as np\n'), ((2150, 2207), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['nnn.u[-1, :, -1]', 'nnn.u1d'], {}), '(nnn.u[-1, :, -1], nnn.u1d)\n', (2180, 2207), True, 'import numpy as np\n'), ((2245, 2302), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['nnn.v1d.shape', '(2 * nnn.nvbins,)'], {}), '(nnn.v1d.shape, (2 * nnn.nvbins,))\n', (2268, 2302), True, 'import numpy as np\n'), ((2310, 2386), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['nnn.v1d[0]', '(-nnn.max_v + 0.5 * nnn.vbin_size)'], {}), '(nnn.v1d[0], -nnn.max_v + 0.5 * nnn.vbin_size)\n', (2340, 2386), True, 'import numpy as np\n'), ((2393, 2469), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['nnn.v1d[-1]', '(nnn.max_v - 0.5 * nnn.vbin_size)'], {}), '(nnn.v1d[-1], nnn.max_v - 0.5 * nnn.vbin_size)\n', (2423, 2469), True, 'import numpy as np\n'), ((2476, 2565), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['nnn.v1d[nnn.nvbins]', '(nnn.min_v + 0.5 * nnn.vbin_size)'], {}), '(nnn.v1d[nnn.nvbins], nnn.min_v + 0.5 * nnn.\n vbin_size)\n', (2506, 2565), True, 'import numpy as np\n'), ((2567, 2660), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['nnn.v1d[nnn.nvbins - 1]', '(-nnn.min_v - 0.5 * nnn.vbin_size)'], {}), '(nnn.v1d[nnn.nvbins - 1], -nnn.min_v - 0.5 *\n nnn.vbin_size)\n', (2597, 2660), True, 'import numpy as np\n'), ((2661, 2738), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['nnn.v.shape', '(nnn.nbins, nnn.nubins, 2 * nnn.nvbins)'], {}), '(nnn.v.shape, (nnn.nbins, nnn.nubins, 2 * nnn.nvbins))\n', (2684, 2738), True, 'import numpy as np\n'), ((2746, 2801), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['nnn.v[0, 0, :]', 'nnn.v1d'], {}), '(nnn.v[0, 0, :], nnn.v1d)\n', (2776, 2801), True, 'import numpy as np\n'), ((2808, 2865), 'numpy.testing.assert_almost_equal', 
'np.testing.assert_almost_equal', (['nnn.v[-1, -1, :]', 'nnn.v1d'], {}), '(nnn.v[-1, -1, :], nnn.v1d)\n', (2838, 2865), True, 'import numpy as np\n'), ((15560, 15595), 'math.log', 'math.log', (['(nnn.max_sep / nnn.min_sep)'], {}), '(nnn.max_sep / nnn.min_sep)\n', (15568, 15595), False, 'import math\n'), ((16490, 16525), 'math.log', 'math.log', (['(nnn.max_sep / nnn.min_sep)'], {}), '(nnn.max_sep / nnn.min_sep)\n', (16498, 16525), False, 'import math\n'), ((17354, 17389), 'math.log', 'math.log', (['(nnn.max_sep / nnn.min_sep)'], {}), '(nnn.max_sep / nnn.min_sep)\n', (17362, 17389), False, 'import math\n'), ((18212, 18247), 'math.log', 'math.log', (['(nnn.max_sep / nnn.min_sep)'], {}), '(nnn.max_sep / nnn.min_sep)\n', (18220, 18247), False, 'import math\n'), ((29030, 29070), 'os.path.join', 'os.path.join', (['"""output"""', '"""nnn_direct.out"""'], {}), "('output', 'nnn_direct.out')\n", (29042, 29070), False, 'import os\n'), ((31043, 31083), 'os.path.join', 'os.path.join', (['"""output"""', '"""nnn_direct.out"""'], {}), "('output', 'nnn_direct.out')\n", (31055, 31083), False, 'import os\n'), ((32765, 32789), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (32778, 32789), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((32799, 32821), 'treecorr.corr3', 'treecorr.corr3', (['config'], {}), '(config)\n', (32813, 32821), False, 'import treecorr\n'), ((35582, 35606), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (35595, 35606), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((35876, 35901), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (35889, 35901), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((36169, 36194), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (36182, 36194), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((36462, 36487), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (36475, 36487), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((36757, 36782), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (36770, 36782), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((37052, 37077), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (37065, 37077), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((37345, 37370), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (37358, 37370), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((37643, 37668), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (37656, 37668), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((37942, 37967), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (37955, 37967), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((38239, 38264), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (38252, 38264), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((38422, 
38434), 'test_helper.CaptureLog', 'CaptureLog', ([], {}), '()\n', (38432, 38434), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((38458, 38636), 'treecorr.NNNCorrelation', 'treecorr.NNNCorrelation', ([], {'min_sep': 'min_sep', 'max_sep': 'max_sep', 'nbins': 'nbins', 'min_u': 'min_u', 'max_u': 'max_u', 'nubins': 'nubins', 'min_v': 'min_v', 'max_v': 'max_v', 'nvbins': 'nvbins', 'logger': 'cl.logger'}), '(min_sep=min_sep, max_sep=max_sep, nbins=nbins,\n min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v,\n nvbins=nvbins, logger=cl.logger)\n', (38481, 38636), False, 'import treecorr\n'), ((38909, 38921), 'test_helper.CaptureLog', 'CaptureLog', ([], {}), '()\n', (38919, 38921), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((38945, 39123), 'treecorr.NNNCorrelation', 'treecorr.NNNCorrelation', ([], {'min_sep': 'min_sep', 'max_sep': 'max_sep', 'nbins': 'nbins', 'min_u': 'min_u', 'max_u': 'max_u', 'nubins': 'nubins', 'min_v': 'min_v', 'max_v': 'max_v', 'nvbins': 'nvbins', 'logger': 'cl.logger'}), '(min_sep=min_sep, max_sep=max_sep, nbins=nbins,\n min_u=min_u, max_u=max_u, nubins=nubins, min_v=min_v, max_v=max_v,\n nvbins=nvbins, logger=cl.logger)\n', (38968, 39123), False, 'import treecorr\n'), ((44759, 44787), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'rx', 'y': 'ry'}), '(x=rx, y=ry)\n', (44775, 44787), False, 'import treecorr\n'), ((44983, 45029), 'os.path.join', 'os.path.join', (['"""output"""', '"""nnn_direct_cross.out"""'], {}), "('output', 'nnn_direct_cross.out')\n", (44995, 45029), False, 'import os\n'), ((46028, 46052), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (46041, 46052), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((46062, 46084), 'treecorr.corr3', 'treecorr.corr3', (['config'], {}), '(config)\n', (46076, 46084), False, 'import treecorr\n'), ((46241, 46265), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (46254, 46265), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((46275, 46297), 'treecorr.corr3', 'treecorr.corr3', (['config'], {}), '(config)\n', (46289, 46297), False, 'import treecorr\n'), ((46468, 46492), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (46481, 46492), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((46502, 46524), 'treecorr.corr3', 'treecorr.corr3', (['config'], {}), '(config)\n', (46516, 46524), False, 'import treecorr\n'), ((46633, 46657), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (46646, 46657), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((46667, 46689), 'treecorr.corr3', 'treecorr.corr3', (['config'], {}), '(config)\n', (46681, 46689), False, 'import treecorr\n'), ((46799, 46823), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (46812, 46823), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((46833, 46855), 'treecorr.corr3', 'treecorr.corr3', (['config'], {}), '(config)\n', (46847, 46855), False, 'import treecorr\n'), ((46990, 47024), 'test_helper.assert_raises', 'assert_raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (47003, 47024), False, 'from test_helper import get_script_name, do_pickle, assert_raises, 
CaptureLog\n'), ((47072, 47106), 'test_helper.assert_raises', 'assert_raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (47085, 47106), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((47154, 47188), 'test_helper.assert_raises', 'assert_raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (47167, 47188), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((47332, 47366), 'test_helper.assert_raises', 'assert_raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (47345, 47366), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((47376, 47398), 'treecorr.corr3', 'treecorr.corr3', (['config'], {}), '(config)\n', (47390, 47398), False, 'import treecorr\n'), ((47628, 47662), 'test_helper.assert_raises', 'assert_raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (47641, 47662), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((47672, 47694), 'treecorr.corr3', 'treecorr.corr3', (['config'], {}), '(config)\n', (47686, 47694), False, 'import treecorr\n'), ((48390, 48415), 'numpy.exp', 'np.exp', (['(nrbins * bin_size)'], {}), '(nrbins * bin_size)\n', (48396, 48415), True, 'import numpy as np\n'), ((48997, 49055), 'coord.CelestialCoord', 'coord.CelestialCoord', (['(r * coord.radians)', '(d * coord.radians)'], {}), '(r * coord.radians, d * coord.radians)\n', (49017, 49055), False, 'import coord\n'), ((52773, 52798), 'numpy.log', 'np.log', (['(max_sep / min_sep)'], {}), '(max_sep / min_sep)\n', (52779, 52798), True, 'import numpy as np\n'), ((53452, 53510), 'coord.CelestialCoord', 'coord.CelestialCoord', (['(r * coord.radians)', '(d * coord.radians)'], {}), '(r * coord.radians, d * coord.radians)\n', (53472, 53510), False, 'import coord\n'), ((73686, 73704), 'numpy.log', 'np.log', (['ddd.meand1'], {}), '(ddd.meand1)\n', (73692, 73704), True, 'import numpy as np\n'), ((73764, 73782), 'numpy.log', 'np.log', (['ddd.meand2'], {}), '(ddd.meand2)\n', (73770, 73782), True, 'import numpy as np\n'), ((73842, 73860), 'numpy.log', 'np.log', (['ddd.meand3'], {}), '(ddd.meand3)\n', (73848, 73860), True, 'import numpy as np\n'), ((74031, 74048), 'numpy.abs', 'np.abs', (['ddd.meanv'], {}), '(ddd.meanv)\n', (74037, 74048), True, 'import numpy as np\n'), ((74194, 74211), 'numpy.log', 'np.log', (['ddd.meanu'], {}), '(ddd.meanu)\n', (74200, 74211), True, 'import numpy as np\n'), ((75810, 75846), 'os.path.join', 'os.path.join', (['"""data"""', '"""nnn_data.dat"""'], {}), "('data', 'nnn_data.dat')\n", (75822, 75846), False, 'import os\n'), ((75862, 75898), 'os.path.join', 'os.path.join', (['"""data"""', '"""nnn_rand.dat"""'], {}), "('data', 'nnn_rand.dat')\n", (75874, 75898), False, 'import os\n'), ((76046, 76079), 'os.path.join', 'os.path.join', (['"""output"""', '"""nnn.out"""'], {}), "('output', 'nnn.out')\n", (76058, 76079), False, 'import os\n'), ((81776, 81800), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (81789, 81800), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((81886, 81910), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (81899, 81910), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((81992, 82016), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (82005, 82016), False, 'from test_helper 
import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((82098, 82122), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (82111, 82122), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((82204, 82228), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (82217, 82228), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((82310, 82334), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (82323, 82334), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((82416, 82440), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (82429, 82440), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((82776, 82801), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (82789, 82801), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((82892, 82917), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (82905, 82917), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((83008, 83033), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (83021, 83033), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((83124, 83149), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (83137, 83149), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((83240, 83265), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (83253, 83265), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((83356, 83381), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (83369, 83381), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((83472, 83497), 'test_helper.assert_raises', 'assert_raises', (['ValueError'], {}), '(ValueError)\n', (83485, 83497), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((83649, 83673), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (83662, 83673), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((83766, 83790), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (83779, 83790), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((83883, 83907), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (83896, 83907), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((84000, 84024), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (84013, 84024), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((84117, 84141), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (84130, 84141), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((84234, 84258), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (84247, 84258), False, 'from test_helper 
import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((84351, 84375), 'test_helper.assert_raises', 'assert_raises', (['TypeError'], {}), '(TypeError)\n', (84364, 84375), False, 'from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog\n'), ((93152, 93168), 'numpy.arcsin', 'np.arcsin', (['(z / r)'], {}), '(z / r)\n', (93161, 93168), True, 'import numpy as np\n'), ((93210, 93226), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (93220, 93226), True, 'import numpy as np\n'), ((93995, 94013), 'numpy.arcsin', 'np.arcsin', (['(rz / rr)'], {}), '(rz / rr)\n', (94004, 94013), True, 'import numpy as np\n'), ((94056, 94074), 'numpy.arctan2', 'np.arctan2', (['ry', 'rx'], {}), '(ry, rx)\n', (94066, 94074), True, 'import numpy as np\n'), ((95489, 95528), 'os.path.join', 'os.path.join', (['"""data"""', '"""nnn_3d_data.dat"""'], {}), "('data', 'nnn_3d_data.dat')\n", (95501, 95528), False, 'import os\n'), ((95544, 95583), 'os.path.join', 'os.path.join', (['"""data"""', '"""nnn_3d_rand.dat"""'], {}), "('data', 'nnn_3d_rand.dat')\n", (95556, 95583), False, 'import os\n'), ((95734, 95770), 'os.path.join', 'os.path.join', (['"""output"""', '"""nnn_3d.out"""'], {}), "('output', 'nnn_3d.out')\n", (95746, 95770), False, 'import os\n'), ((97034, 97072), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'x[:, k]', 'y': 'y[:, k]'}), '(x=x[:, k], y=y[:, k])\n', (97050, 97072), False, 'import treecorr\n'), ((97217, 97257), 'treecorr.Catalog', 'treecorr.Catalog', ([], {'x': 'rx[:, k]', 'y': 'ry[:, k]'}), '(x=rx[:, k], y=ry[:, k])\n', (97233, 97257), False, 'import treecorr\n'), ((99570, 99617), 'os.path.join', 'os.path.join', (['"""data"""', "('nnn_list_data%d.dat' % k)"], {}), "('data', 'nnn_list_data%d.dat' % k)\n", (99582, 99617), False, 'import os\n'), ((99715, 99762), 'os.path.join', 'os.path.join', (['"""data"""', "('nnn_list_rand%d.dat' % k)"], {}), "('data', 'nnn_list_rand%d.dat' % k)\n", (99727, 99762), False, 'import os\n'), ((100596, 100635), 'os.path.join', 'os.path.join', (['"""output"""', '"""nnn_list1.out"""'], {}), "('output', 'nnn_list1.out')\n", (100608, 100635), False, 'import os\n'), ((101123, 101162), 'os.path.join', 'os.path.join', (['"""output"""', '"""nnn_list2.out"""'], {}), "('output', 'nnn_list2.out')\n", (101135, 101162), False, 'import os\n'), ((101651, 101690), 'os.path.join', 'os.path.join', (['"""output"""', '"""nnn_list3.out"""'], {}), "('output', 'nnn_list3.out')\n", (101663, 101690), False, 'import os\n'), ((102199, 102238), 'os.path.join', 'os.path.join', (['"""output"""', '"""nnn_list4.out"""'], {}), "('output', 'nnn_list4.out')\n", (102211, 102238), False, 'import os\n'), ((962, 997), 'math.log', 'math.log', (['(nnn.max_sep / nnn.min_sep)'], {}), '(nnn.max_sep / nnn.min_sep)\n', (970, 997), False, 'import math\n'), ((2986, 3014), 'numpy.ceil', 'np.ceil', (['(1.0 / nnn.ubin_size)'], {}), '(1.0 / nnn.ubin_size)\n', (2993, 3014), True, 'import numpy as np\n'), ((3103, 3131), 'numpy.ceil', 'np.ceil', (['(1.0 / nnn.vbin_size)'], {}), '(1.0 / nnn.vbin_size)\n', (3110, 3131), True, 'import numpy as np\n'), ((15705, 15716), 'math.log', 'math.log', (['(5)'], {}), '(5)\n', (15713, 15716), False, 'import math\n'), ((15786, 15798), 'math.log', 'math.log', (['(20)'], {}), '(20)\n', (15794, 15798), False, 'import math\n'), ((16573, 16584), 'math.log', 'math.log', (['(5)'], {}), '(5)\n', (16581, 16584), False, 'import math\n'), ((16654, 16666), 'math.log', 'math.log', (['(20)'], {}), '(20)\n', (16662, 16666), False, 'import math\n'), ((17437, 
17448), 'math.log', 'math.log', (['(5)'], {}), '(5)\n', (17445, 17448), False, 'import math\n'), ((17518, 17530), 'math.log', 'math.log', (['(20)'], {}), '(20)\n', (17526, 17530), False, 'import math\n'), ((18295, 18306), 'math.log', 'math.log', (['(5)'], {}), '(5)\n', (18303, 18306), False, 'import math\n'), ((18376, 18388), 'math.log', 'math.log', (['(20)'], {}), '(20)\n', (18384, 18388), False, 'import math\n'), ((72903, 72921), 'numpy.log', 'np.log', (['ddd.meand1'], {}), '(ddd.meand1)\n', (72909, 72921), True, 'import numpy as np\n'), ((72978, 72996), 'numpy.log', 'np.log', (['ddd.meand2'], {}), '(ddd.meand2)\n', (72984, 72996), True, 'import numpy as np\n'), ((73053, 73071), 'numpy.log', 'np.log', (['ddd.meand3'], {}), '(ddd.meand3)\n', (73059, 73071), True, 'import numpy as np\n'), ((73192, 73235), 'numpy.abs', 'np.abs', (['(ddd.meand3 / ddd.meand2 - ddd.meanu)'], {}), '(ddd.meand3 / ddd.meand2 - ddd.meanu)\n', (73198, 73235), True, 'import numpy as np\n'), ((73270, 73327), 'numpy.abs', 'np.abs', (['((ddd.meand3 / ddd.meand2 - ddd.meanu) / ddd.meanu)'], {}), '((ddd.meand3 / ddd.meand2 - ddd.meanu) / ddd.meanu)\n', (73276, 73327), True, 'import numpy as np\n'), ((74303, 74334), 'numpy.log', 'np.log', (['(ddd.meand1 - ddd.meand2)'], {}), '(ddd.meand1 - ddd.meand2)\n', (74309, 74334), True, 'import numpy as np\n'), ((74389, 74406), 'numpy.abs', 'np.abs', (['ddd.meanv'], {}), '(ddd.meanv)\n', (74395, 74406), True, 'import numpy as np\n'), ((75225, 75280), 'numpy.exp', 'np.exp', (['(-(d1 ** 2 + d2 ** 2 + d3 ** 2) / (6.0 * s ** 2))'], {}), '(-(d1 ** 2 + d2 ** 2 + d3 ** 2) / (6.0 * s ** 2))\n', (75231, 75280), True, 'import numpy as np\n'), ((75487, 75525), 'numpy.abs', 'np.abs', (['((zeta - true_zeta) / true_zeta)'], {}), '((zeta - true_zeta) / true_zeta)\n', (75493, 75525), True, 'import numpy as np\n'), ((75633, 75645), 'numpy.abs', 'np.abs', (['zeta'], {}), '(zeta)\n', (75639, 75645), True, 'import numpy as np\n'), ((75655, 75672), 'numpy.abs', 'np.abs', (['true_zeta'], {}), '(true_zeta)\n', (75661, 75672), True, 'import numpy as np\n'), ((85286, 85319), 'numpy.exp', 'np.exp', (['(-d1 ** 2 / (4.0 * s ** 2))'], {}), '(-d1 ** 2 / (4.0 * s ** 2))\n', (85292, 85319), True, 'import numpy as np\n'), ((85355, 85388), 'numpy.exp', 'np.exp', (['(-d2 ** 2 / (4.0 * s ** 2))'], {}), '(-d2 ** 2 / (4.0 * s ** 2))\n', (85361, 85388), True, 'import numpy as np\n'), ((85424, 85457), 'numpy.exp', 'np.exp', (['(-d3 ** 2 / (4.0 * s ** 2))'], {}), '(-d3 ** 2 / (4.0 * s ** 2))\n', (85430, 85457), True, 'import numpy as np\n'), ((85762, 85800), 'numpy.abs', 'np.abs', (['((zeta - true_zeta) / true_zeta)'], {}), '((zeta - true_zeta) / true_zeta)\n', (85768, 85800), True, 'import numpy as np\n'), ((85908, 85920), 'numpy.abs', 'np.abs', (['zeta'], {}), '(zeta)\n', (85914, 85920), True, 'import numpy as np\n'), ((85930, 85947), 'numpy.abs', 'np.abs', (['true_zeta'], {}), '(true_zeta)\n', (85936, 85947), True, 'import numpy as np\n'), ((94859, 94914), 'numpy.exp', 'np.exp', (['(-(d1 ** 2 + d2 ** 2 + d3 ** 2) / (6.0 * s ** 2))'], {}), '(-(d1 ** 2 + d2 ** 2 + d3 ** 2) / (6.0 * s ** 2))\n', (94865, 94914), True, 'import numpy as np\n'), ((95166, 95204), 'numpy.abs', 'np.abs', (['((zeta - true_zeta) / true_zeta)'], {}), '((zeta - true_zeta) / true_zeta)\n', (95172, 95204), True, 'import numpy as np\n'), ((95312, 95324), 'numpy.abs', 'np.abs', (['zeta'], {}), '(zeta)\n', (95318, 95324), True, 'import numpy as np\n'), ((95334, 95351), 'numpy.abs', 'np.abs', (['true_zeta'], {}), '(true_zeta)\n', (95340, 95351), True, 
'import numpy as np\n'), ((96444, 96456), 'numpy.abs', 'np.abs', (['zeta'], {}), '(zeta)\n', (96450, 96456), True, 'import numpy as np\n'), ((96466, 96483), 'numpy.abs', 'np.abs', (['true_zeta'], {}), '(true_zeta)\n', (96472, 96483), True, 'import numpy as np\n'), ((1329, 1350), 'math.log', 'math.log', (['nnn.min_sep'], {}), '(nnn.min_sep)\n', (1337, 1350), False, 'import math\n'), ((1426, 1447), 'math.log', 'math.log', (['nnn.max_sep'], {}), '(nnn.max_sep)\n', (1434, 1447), False, 'import math\n'), ((25319, 25367), 'numpy.sqrt', 'np.sqrt', (['((x[i] - x[j]) ** 2 + (y[i] - y[j]) ** 2)'], {}), '((x[i] - x[j]) ** 2 + (y[i] - y[j]) ** 2)\n', (25326, 25367), True, 'import numpy as np\n'), ((25382, 25430), 'numpy.sqrt', 'np.sqrt', (['((x[i] - x[k]) ** 2 + (y[i] - y[k]) ** 2)'], {}), '((x[i] - x[k]) ** 2 + (y[i] - y[k]) ** 2)\n', (25389, 25430), True, 'import numpy as np\n'), ((25445, 25493), 'numpy.sqrt', 'np.sqrt', (['((x[j] - x[k]) ** 2 + (y[j] - y[k]) ** 2)'], {}), '((x[j] - x[k]) ** 2 + (y[j] - y[k]) ** 2)\n', (25452, 25493), True, 'import numpy as np\n'), ((41912, 41964), 'numpy.sqrt', 'np.sqrt', (['((x1[i] - x2[j]) ** 2 + (y1[i] - y2[j]) ** 2)'], {}), '((x1[i] - x2[j]) ** 2 + (y1[i] - y2[j]) ** 2)\n', (41919, 41964), True, 'import numpy as np\n'), ((41978, 42030), 'numpy.sqrt', 'np.sqrt', (['((x1[i] - x3[k]) ** 2 + (y1[i] - y3[k]) ** 2)'], {}), '((x1[i] - x3[k]) ** 2 + (y1[i] - y3[k]) ** 2)\n', (41985, 42030), True, 'import numpy as np\n'), ((42044, 42096), 'numpy.sqrt', 'np.sqrt', (['((x2[j] - x3[k]) ** 2 + (y2[j] - y3[k]) ** 2)'], {}), '((x2[j] - x3[k]) ** 2 + (y2[j] - y3[k]) ** 2)\n', (42051, 42096), True, 'import numpy as np\n'), ((49199, 49268), 'numpy.sqrt', 'np.sqrt', (['((x[i] - x[j]) ** 2 + (y[i] - y[j]) ** 2 + (z[i] - z[j]) ** 2)'], {}), '((x[i] - x[j]) ** 2 + (y[i] - y[j]) ** 2 + (z[i] - z[j]) ** 2)\n', (49206, 49268), True, 'import numpy as np\n'), ((49279, 49348), 'numpy.sqrt', 'np.sqrt', (['((x[j] - x[k]) ** 2 + (y[j] - y[k]) ** 2 + (z[j] - z[k]) ** 2)'], {}), '((x[j] - x[k]) ** 2 + (y[j] - y[k]) ** 2 + (z[j] - z[k]) ** 2)\n', (49286, 49348), True, 'import numpy as np\n'), ((49359, 49428), 'numpy.sqrt', 'np.sqrt', (['((x[k] - x[i]) ** 2 + (y[k] - y[i]) ** 2 + (z[k] - z[i]) ** 2)'], {}), '((x[k] - x[i]) ** 2 + (y[k] - y[i]) ** 2 + (z[k] - z[i]) ** 2)\n', (49366, 49428), True, 'import numpy as np\n'), ((58195, 58247), 'numpy.sqrt', 'np.sqrt', (['((x1[i] - x2[j]) ** 2 + (y1[i] - y2[j]) ** 2)'], {}), '((x1[i] - x2[j]) ** 2 + (y1[i] - y2[j]) ** 2)\n', (58202, 58247), True, 'import numpy as np\n'), ((58261, 58313), 'numpy.sqrt', 'np.sqrt', (['((x1[i] - x3[k]) ** 2 + (y1[i] - y3[k]) ** 2)'], {}), '((x1[i] - x3[k]) ** 2 + (y1[i] - y3[k]) ** 2)\n', (58268, 58313), True, 'import numpy as np\n'), ((58327, 58379), 'numpy.sqrt', 'np.sqrt', (['((x2[j] - x3[k]) ** 2 + (y2[j] - y3[k]) ** 2)'], {}), '((x2[j] - x3[k]) ** 2 + (y2[j] - y3[k]) ** 2)\n', (58334, 58379), True, 'import numpy as np\n'), ((62055, 62124), 'numpy.sqrt', 'np.sqrt', (['((x[i] - x[j]) ** 2 + (y[i] - y[j]) ** 2 + (z[i] - z[j]) ** 2)'], {}), '((x[i] - x[j]) ** 2 + (y[i] - y[j]) ** 2 + (z[i] - z[j]) ** 2)\n', (62062, 62124), True, 'import numpy as np\n'), ((62135, 62204), 'numpy.sqrt', 'np.sqrt', (['((x[i] - x[k]) ** 2 + (y[i] - y[k]) ** 2 + (z[i] - z[k]) ** 2)'], {}), '((x[i] - x[k]) ** 2 + (y[i] - y[k]) ** 2 + (z[i] - z[k]) ** 2)\n', (62142, 62204), True, 'import numpy as np\n'), ((62215, 62284), 'numpy.sqrt', 'np.sqrt', (['((x[j] - x[k]) ** 2 + (y[j] - y[k]) ** 2 + (z[j] - z[k]) ** 2)'], {}), '((x[j] - x[k]) ** 2 + (y[j] 
- y[k]) ** 2 + (z[j] - z[k]) ** 2)\n', (62222, 62284), True, 'import numpy as np\n'), ((68035, 68048), 'numpy.sqrt', 'np.sqrt', (['d1sq'], {}), '(d1sq)\n', (68042, 68048), True, 'import numpy as np\n'), ((68070, 68083), 'numpy.sqrt', 'np.sqrt', (['d2sq'], {}), '(d2sq)\n', (68077, 68083), True, 'import numpy as np\n'), ((68105, 68118), 'numpy.sqrt', 'np.sqrt', (['d3sq'], {}), '(d3sq)\n', (68112, 68118), True, 'import numpy as np\n'), ((76737, 76753), 'numpy.exp', 'np.exp', (['ddd.logr'], {}), '(ddd.logr)\n', (76743, 76753), True, 'import numpy as np\n'), ((77864, 77880), 'numpy.exp', 'np.exp', (['ddd.logr'], {}), '(ddd.logr)\n', (77870, 77880), True, 'import numpy as np\n'), ((78744, 78760), 'numpy.sqrt', 'np.sqrt', (['varzeta'], {}), '(varzeta)\n', (78751, 78760), True, 'import numpy as np\n'), ((86319, 86335), 'numpy.exp', 'np.exp', (['ddd.logr'], {}), '(ddd.logr)\n', (86325, 86335), True, 'import numpy as np\n'), ((87199, 87215), 'numpy.sqrt', 'np.sqrt', (['varzeta'], {}), '(varzeta)\n', (87206, 87215), True, 'import numpy as np\n'), ((89484, 89500), 'numpy.exp', 'np.exp', (['ddd.logr'], {}), '(ddd.logr)\n', (89490, 89500), True, 'import numpy as np\n'), ((90460, 90476), 'numpy.sqrt', 'np.sqrt', (['varzeta'], {}), '(varzeta)\n', (90467, 90476), True, 'import numpy as np\n'), ((94583, 94599), 'numpy.exp', 'np.exp', (['ddd.logr'], {}), '(ddd.logr)\n', (94589, 94599), True, 'import numpy as np\n'), ((27003, 27036), 'numpy.floor', 'np.floor', (['((u - min_u) / ubin_size)'], {}), '((u - min_u) / ubin_size)\n', (27011, 27036), True, 'import numpy as np\n'), ((42714, 42747), 'numpy.floor', 'np.floor', (['((u - min_u) / ubin_size)'], {}), '((u - min_u) / ubin_size)\n', (42722, 42747), True, 'import numpy as np\n'), ((58997, 59030), 'numpy.floor', 'np.floor', (['((u - min_u) / ubin_size)'], {}), '((u - min_u) / ubin_size)\n', (59005, 59030), True, 'import numpy as np\n'), ((63899, 63932), 'numpy.floor', 'np.floor', (['((u - min_u) / ubin_size)'], {}), '((u - min_u) / ubin_size)\n', (63907, 63932), True, 'import numpy as np\n'), ((68765, 68798), 'numpy.floor', 'np.floor', (['((u - min_u) / ubin_size)'], {}), '((u - min_u) / ubin_size)\n', (68773, 68798), True, 'import numpy as np\n'), ((73509, 73526), 'numpy.abs', 'np.abs', (['ddd.meanv'], {}), '(ddd.meanv)\n', (73515, 73526), True, 'import numpy as np\n'), ((27188, 27222), 'numpy.floor', 'np.floor', (['((v - -max_v) / vbin_size)'], {}), '((v - -max_v) / vbin_size)\n', (27196, 27222), True, 'import numpy as np\n'), ((42899, 42933), 'numpy.floor', 'np.floor', (['((v - -max_v) / vbin_size)'], {}), '((v - -max_v) / vbin_size)\n', (42907, 42933), True, 'import numpy as np\n'), ((50604, 50626), 'numpy.floor', 'np.floor', (['(u / bin_size)'], {}), '(u / bin_size)\n', (50612, 50626), True, 'import numpy as np\n'), ((50708, 50736), 'numpy.floor', 'np.floor', (['((v + 1) / bin_size)'], {}), '((v + 1) / bin_size)\n', (50716, 50736), True, 'import numpy as np\n'), ((54995, 55018), 'numpy.floor', 'np.floor', (['(u / ubin_size)'], {}), '(u / ubin_size)\n', (55003, 55018), True, 'import numpy as np\n'), ((55100, 55129), 'numpy.floor', 'np.floor', (['((v + 1) / vbin_size)'], {}), '((v + 1) / vbin_size)\n', (55108, 55129), True, 'import numpy as np\n'), ((59182, 59216), 'numpy.floor', 'np.floor', (['((v - -max_v) / vbin_size)'], {}), '((v - -max_v) / vbin_size)\n', (59190, 59216), True, 'import numpy as np\n'), ((64084, 64118), 'numpy.floor', 'np.floor', (['((v - -max_v) / vbin_size)'], {}), '((v - -max_v) / vbin_size)\n', (64092, 64118), True, 'import numpy as 
np\n'), ((68950, 68984), 'numpy.floor', 'np.floor', (['((v - -max_v) / vbin_size)'], {}), '((v - -max_v) / vbin_size)\n', (68958, 68984), True, 'import numpy as np\n'), ((73608, 73625), 'numpy.abs', 'np.abs', (['ddd.meanv'], {}), '(ddd.meanv)\n', (73614, 73625), True, 'import numpy as np\n'), ((27093, 27126), 'numpy.floor', 'np.floor', (['((v - min_v) / vbin_size)'], {}), '((v - min_v) / vbin_size)\n', (27101, 27126), True, 'import numpy as np\n'), ((42804, 42837), 'numpy.floor', 'np.floor', (['((v - min_v) / vbin_size)'], {}), '((v - min_v) / vbin_size)\n', (42812, 42837), True, 'import numpy as np\n'), ((59087, 59120), 'numpy.floor', 'np.floor', (['((v - min_v) / vbin_size)'], {}), '((v - min_v) / vbin_size)\n', (59095, 59120), True, 'import numpy as np\n'), ((63989, 64022), 'numpy.floor', 'np.floor', (['((v - min_v) / vbin_size)'], {}), '((v - min_v) / vbin_size)\n', (63997, 64022), True, 'import numpy as np\n'), ((68855, 68888), 'numpy.floor', 'np.floor', (['((v - min_v) / vbin_size)'], {}), '((v - min_v) / vbin_size)\n', (68863, 68888), True, 'import numpy as np\n'), ((94807, 94817), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (94814, 94817), True, 'import numpy as np\n'), ((26941, 26950), 'numpy.log', 'np.log', (['r'], {}), '(r)\n', (26947, 26950), True, 'import numpy as np\n'), ((42652, 42661), 'numpy.log', 'np.log', (['r'], {}), '(r)\n', (42658, 42661), True, 'import numpy as np\n'), ((49505, 49529), 'numpy.log', 'np.log', (['(d2 / rad_min_sep)'], {}), '(d2 / rad_min_sep)\n', (49511, 49529), True, 'import numpy as np\n'), ((53900, 53920), 'numpy.log', 'np.log', (['(d2 / min_sep)'], {}), '(d2 / min_sep)\n', (53906, 53920), True, 'import numpy as np\n'), ((58935, 58944), 'numpy.log', 'np.log', (['r'], {}), '(r)\n', (58941, 58944), True, 'import numpy as np\n'), ((63837, 63846), 'numpy.log', 'np.log', (['r'], {}), '(r)\n', (63843, 63846), True, 'import numpy as np\n'), ((68703, 68712), 'numpy.log', 'np.log', (['r'], {}), '(r)\n', (68709, 68712), True, 'import numpy as np\n')]
|
import numpy as np
import shutil
import pytest
import pysixtrack
import CollimationToolKit as ctk
#-------------------------------------------------------------------------------
#--- basic foil class with default scatter function -------------------------
#------------ should act like LimitRect -------------------------------------
#-------------------------------------------------------------------------------
def test_foil_default():
rect_aperture = pysixtrack.elements.LimitRect(
min_x=-1e-2, max_x=2e-2, min_y=-0.5e-2, max_y=2.5e-2
)
foil_aperture = ctk.elements.LimitFoil(
min_x=-1e-2, max_x=2e-2, min_y=-0.5e-2, max_y=2.5e-2
)
p_foil = pysixtrack.Particles()
N_part = 10000
p_foil.x = np.random.uniform(low=-3e-2, high=3e-2, size=N_part)
p_foil.y = np.random.uniform(low=-3e-2, high=3e-2, size=N_part)
p_foil.state = np.ones_like(p_foil.x, dtype=np.int)
p_rect = p_foil.copy()
foil_aperture.track(p_foil)
assert not np.array_equal(p_foil.state, p_rect.state), "Particles not affected by tracking"
rect_aperture.track(p_rect)
# LimitFoil with default scatter function should act like LimitRect
assert np.array_equal(p_foil.state, p_rect.state), "Particles after tracking are not identical"
#-------------------------------------------------------------------------------
#--- basic foil class with testing scatter function -------------------------
#-------------------------------------------------------------------------------
foil_min_x = -0.11
def test_foil_testfunction():
stripperfoil_test = ctk.elements.LimitFoil(
min_x=foil_min_x,
scatter=ctk.elements.test_strip_ions)
p_testscatter = pysixtrack.Particles(q0=28, mass0 = 238.02891*931.49410242e6)
p_testscatter.x = -0.12
p_testscatter.y = 0.02
p_testscatter.state = 1
p_testscatter.Z = 92
assert p_testscatter.qratio == 1.0
stripperfoil_test.track(p_testscatter)
assert p_testscatter.qratio == (p_testscatter.Z-1)/p_testscatter.q0
assert p_testscatter.chi == p_testscatter.qratio
#-------------------------------------------------------------------------------
#--- Foil with GLOBAL charge exchange code as scatter function---------------
#-------------------------------------------------------------------------------
stripperfoil_GLOBAL = ctk.elements.LimitFoil(
min_x=foil_min_x,
scatter=ctk.ScatterFunctions.GLOBAL)
Ekin = 200.0e6*238
uranium_mass = 238.0507884 * 931.49410242e6
def test_GLOBAL():
if not shutil.which('global'):
pytest.skip("GLOBAL executable not found in PATH")
p_GLOBAL = pysixtrack.Particles(q0=28, mass0 = uranium_mass,
x = -0.12, y = 0.02,
p0c = np.sqrt(Ekin**2 + 2*Ekin*uranium_mass))
p_GLOBAL.state = 1
p_GLOBAL.Z = 92
p_GLOBAL.A = 238
assert p_GLOBAL.qratio == 1.0
assert p_GLOBAL.delta == 0.0
stripperfoil_GLOBAL.track(p_GLOBAL)
assert p_GLOBAL.qratio <= (92-0)/28
assert p_GLOBAL.qratio > (92-6)/28
assert p_GLOBAL.chi == p_GLOBAL.qratio
assert p_GLOBAL.delta < -0.07
#-------------------------------------------------------------------------------
#--- Foil with GLOBAL as scatter function for multiple particles-------------
#-------------------------------------------------------------------------------
def test_GLOBAL_vec():
if not shutil.which('global'):
pytest.skip("GLOBAL executable not found in PATH")
N_part = 200
p_vec = pysixtrack.Particles(q0=28, mass0 = uranium_mass,
p0c = np.sqrt(Ekin**2 + 2*Ekin*uranium_mass))
p_vec.x = np.random.uniform(low=-3e-1, high=3e-1, size=N_part)
p_vec.y = np.random.uniform(low=-3e-1, high=3e-1, size=N_part)
p_vec.state = np.ones_like(p_vec.x, dtype=np.int)
p_vec.qratio = np.ones_like(p_vec.x, dtype=np.float)
p_vec.delta = np.zeros_like(p_vec.x, dtype=np.float)
p_vec.Z = np.ones_like(p_vec.x, dtype=np.int) * 92
p_vec.A = np.ones_like(p_vec.x, dtype=np.int) * 238
stripperfoil_GLOBAL.track(p_vec)
for ii in range(len(p_vec.x)):
if p_vec.x[ii] <= foil_min_x:
assert p_vec.qratio[ii] <= (92-0)/28
assert p_vec.qratio[ii] > (92-6)/28
assert p_vec.delta[ii] < -0.07
else:
assert p_vec.qratio[ii] == 1.0
assert p_vec.delta[ii] == 0.0
assert np.array_equal(p_vec.chi, p_vec.qratio)
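# Usage note (illustrative only; the filename is assumed, not part of the package):
# with pytest installed, these tests would typically be run from the repository
# root with something like
#
#   pytest -q test_foil.py
#
# The GLOBAL-based tests skip themselves when the `global` executable is not
# found on PATH (see the shutil.which() checks above).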
|
[
"pysixtrack.Particles",
"numpy.random.uniform",
"numpy.zeros_like",
"numpy.ones_like",
"shutil.which",
"pytest.skip",
"CollimationToolKit.elements.LimitFoil",
"pysixtrack.elements.LimitRect",
"numpy.array_equal",
"numpy.sqrt"
] |
[((2371, 2448), 'CollimationToolKit.elements.LimitFoil', 'ctk.elements.LimitFoil', ([], {'min_x': 'foil_min_x', 'scatter': 'ctk.ScatterFunctions.GLOBAL'}), '(min_x=foil_min_x, scatter=ctk.ScatterFunctions.GLOBAL)\n', (2393, 2448), True, 'import CollimationToolKit as ctk\n'), ((463, 549), 'pysixtrack.elements.LimitRect', 'pysixtrack.elements.LimitRect', ([], {'min_x': '(-0.01)', 'max_x': '(0.02)', 'min_y': '(-0.005)', 'max_y': '(0.025)'}), '(min_x=-0.01, max_x=0.02, min_y=-0.005, max_y=\n 0.025)\n', (492, 549), False, 'import pysixtrack\n'), ((581, 655), 'CollimationToolKit.elements.LimitFoil', 'ctk.elements.LimitFoil', ([], {'min_x': '(-0.01)', 'max_x': '(0.02)', 'min_y': '(-0.005)', 'max_y': '(0.025)'}), '(min_x=-0.01, max_x=0.02, min_y=-0.005, max_y=0.025)\n', (603, 655), True, 'import CollimationToolKit as ctk\n'), ((686, 708), 'pysixtrack.Particles', 'pysixtrack.Particles', ([], {}), '()\n', (706, 708), False, 'import pysixtrack\n'), ((743, 795), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-0.03)', 'high': '(0.03)', 'size': 'N_part'}), '(low=-0.03, high=0.03, size=N_part)\n', (760, 795), True, 'import numpy as np\n'), ((811, 863), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-0.03)', 'high': '(0.03)', 'size': 'N_part'}), '(low=-0.03, high=0.03, size=N_part)\n', (828, 863), True, 'import numpy as np\n'), ((883, 919), 'numpy.ones_like', 'np.ones_like', (['p_foil.x'], {'dtype': 'np.int'}), '(p_foil.x, dtype=np.int)\n', (895, 919), True, 'import numpy as np\n'), ((1193, 1235), 'numpy.array_equal', 'np.array_equal', (['p_foil.state', 'p_rect.state'], {}), '(p_foil.state, p_rect.state)\n', (1207, 1235), True, 'import numpy as np\n'), ((1600, 1678), 'CollimationToolKit.elements.LimitFoil', 'ctk.elements.LimitFoil', ([], {'min_x': 'foil_min_x', 'scatter': 'ctk.elements.test_strip_ions'}), '(min_x=foil_min_x, scatter=ctk.elements.test_strip_ions)\n', (1622, 1678), True, 'import CollimationToolKit as ctk\n'), ((1725, 1784), 'pysixtrack.Particles', 'pysixtrack.Particles', ([], {'q0': '(28)', 'mass0': '(238.02891 * 931494102.42)'}), '(q0=28, mass0=238.02891 * 931494102.42)\n', (1745, 1784), False, 'import pysixtrack\n'), ((3768, 3818), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-0.3)', 'high': '(0.3)', 'size': 'N_part'}), '(low=-0.3, high=0.3, size=N_part)\n', (3785, 3818), True, 'import numpy as np\n'), ((3835, 3885), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-0.3)', 'high': '(0.3)', 'size': 'N_part'}), '(low=-0.3, high=0.3, size=N_part)\n', (3852, 3885), True, 'import numpy as np\n'), ((3906, 3941), 'numpy.ones_like', 'np.ones_like', (['p_vec.x'], {'dtype': 'np.int'}), '(p_vec.x, dtype=np.int)\n', (3918, 3941), True, 'import numpy as np\n'), ((3961, 3998), 'numpy.ones_like', 'np.ones_like', (['p_vec.x'], {'dtype': 'np.float'}), '(p_vec.x, dtype=np.float)\n', (3973, 3998), True, 'import numpy as np\n'), ((4017, 4055), 'numpy.zeros_like', 'np.zeros_like', (['p_vec.x'], {'dtype': 'np.float'}), '(p_vec.x, dtype=np.float)\n', (4030, 4055), True, 'import numpy as np\n'), ((4529, 4568), 'numpy.array_equal', 'np.array_equal', (['p_vec.chi', 'p_vec.qratio'], {}), '(p_vec.chi, p_vec.qratio)\n', (4543, 4568), True, 'import numpy as np\n'), ((996, 1038), 'numpy.array_equal', 'np.array_equal', (['p_foil.state', 'p_rect.state'], {}), '(p_foil.state, p_rect.state)\n', (1010, 1038), True, 'import numpy as np\n'), ((2563, 2585), 'shutil.which', 'shutil.which', (['"""global"""'], {}), "('global')\n", (2575, 2585), False, 'import shutil\n'), ((2595, 
2645), 'pytest.skip', 'pytest.skip', (['"""GLOBAL executable not found in PATH"""'], {}), "('GLOBAL executable not found in PATH')\n", (2606, 2645), False, 'import pytest\n'), ((3467, 3489), 'shutil.which', 'shutil.which', (['"""global"""'], {}), "('global')\n", (3479, 3489), False, 'import shutil\n'), ((3499, 3549), 'pytest.skip', 'pytest.skip', (['"""GLOBAL executable not found in PATH"""'], {}), "('GLOBAL executable not found in PATH')\n", (3510, 3549), False, 'import pytest\n'), ((4070, 4105), 'numpy.ones_like', 'np.ones_like', (['p_vec.x'], {'dtype': 'np.int'}), '(p_vec.x, dtype=np.int)\n', (4082, 4105), True, 'import numpy as np\n'), ((4125, 4160), 'numpy.ones_like', 'np.ones_like', (['p_vec.x'], {'dtype': 'np.int'}), '(p_vec.x, dtype=np.int)\n', (4137, 4160), True, 'import numpy as np\n'), ((2820, 2864), 'numpy.sqrt', 'np.sqrt', (['(Ekin ** 2 + 2 * Ekin * uranium_mass)'], {}), '(Ekin ** 2 + 2 * Ekin * uranium_mass)\n', (2827, 2864), True, 'import numpy as np\n'), ((3677, 3721), 'numpy.sqrt', 'np.sqrt', (['(Ekin ** 2 + 2 * Ekin * uranium_mass)'], {}), '(Ekin ** 2 + 2 * Ekin * uranium_mass)\n', (3684, 3721), True, 'import numpy as np\n')]
|
import logging
import os
import numpy as np
from flask import Flask, jsonify, request
from flask_cors import CORS
from tensorflow import keras
logging.basicConfig(level=logging.INFO)
base_path = os.path.abspath(os.path.dirname(__file__))
logging.info('base path: {}'.format(base_path))
app = Flask(__name__)
CORS(app)
def proprecess(figure):
predict_image = np.array(figure)
predict_image = np.expand_dims(predict_image, axis=0)
predict_image = predict_image.reshape(-1, 28 * 28) / 255.0
logging.info('image shape: {}'.format(predict_image.shape))
return predict_image
def predict(figure):
model = keras.models.load_model(os.path.join(base_path, 'mnist_model.h5'))
    logging.info('Model loaded...')
prediction = model.predict(proprecess(figure))
    logging.info('Prediction result: {}'.format(prediction))
return np.argmax(prediction[0])
@app.route('/', methods=['GET'])
def index():
return jsonify('this is mini mnist api.')
@app.route('/predict', methods=['GET', 'POST'])
def mnist():
figure = request.args.get('figure', '')
if figure != '':
figure = eval(figure)
figure = [v for v in figure['input'].values()]
result = predict(figure)
return jsonify(label=str(result))
return jsonify('no data.')
if __name__ == '__main__':
app.run(debug=True)
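# A minimal client-side sketch (assumptions: the service runs locally on Flask's
# default port 5000 and the `requests` package is available). The payload mirrors
# what the /predict handler above expects: a stringified dict with an 'input'
# mapping of 28*28 pixel values.
#
#   import requests
#   figure = {'input': {str(i): 0 for i in range(28 * 28)}}  # hypothetical all-zero image
#   resp = requests.get('http://127.0.0.1:5000/predict', params={'figure': str(figure)})
#   print(resp.json())  # e.g. {'label': '5'}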
|
[
"logging.basicConfig",
"numpy.argmax",
"flask_cors.CORS",
"flask.request.args.get",
"os.path.dirname",
"flask.Flask",
"numpy.expand_dims",
"logging.info",
"flask.jsonify",
"numpy.array",
"os.path.join"
] |
[((145, 184), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (164, 184), False, 'import logging\n'), ((295, 310), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (300, 310), False, 'from flask import Flask, jsonify, request\n'), ((311, 320), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (315, 320), False, 'from flask_cors import CORS\n'), ((213, 238), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (228, 238), False, 'import os\n'), ((367, 383), 'numpy.array', 'np.array', (['figure'], {}), '(figure)\n', (375, 383), True, 'import numpy as np\n'), ((404, 441), 'numpy.expand_dims', 'np.expand_dims', (['predict_image'], {'axis': '(0)'}), '(predict_image, axis=0)\n', (418, 441), True, 'import numpy as np\n'), ((701, 726), 'logging.info', 'logging.info', (['"""模型记载完成..."""'], {}), "('模型记载完成...')\n", (713, 726), False, 'import logging\n'), ((837, 861), 'numpy.argmax', 'np.argmax', (['prediction[0]'], {}), '(prediction[0])\n', (846, 861), True, 'import numpy as np\n'), ((921, 955), 'flask.jsonify', 'jsonify', (['"""this is mini mnist api."""'], {}), "('this is mini mnist api.')\n", (928, 955), False, 'from flask import Flask, jsonify, request\n'), ((1032, 1062), 'flask.request.args.get', 'request.args.get', (['"""figure"""', '""""""'], {}), "('figure', '')\n", (1048, 1062), False, 'from flask import Flask, jsonify, request\n'), ((1256, 1275), 'flask.jsonify', 'jsonify', (['"""no data."""'], {}), "('no data.')\n", (1263, 1275), False, 'from flask import Flask, jsonify, request\n'), ((654, 695), 'os.path.join', 'os.path.join', (['base_path', '"""mnist_model.h5"""'], {}), "(base_path, 'mnist_model.h5')\n", (666, 695), False, 'import os\n')]
|
import numpy as np
import operator
_DeBug_ = False
Smth = 2
BGloop= 10
PixNo = 25
PixNo_1_2 = 12
DirNo = 15
DirAgl= [i*(180//DirNo) for i in range(DirNo)]
RowNo = lambda row: row//PixNo
ColNo = lambda col: col//PixNo
def checkTri(angle, fac=0):
if (fac==0): # 0 +/- 30
if ( angle >=30 and angle < 90): #(60)
return 0
elif (angle >=90 and angle <150): #(120)
return 128
else:
return 255 #(0)
elif (fac==1): # 30 +/- 30
if ( angle >=0 and angle < 60): #(30)
return 0
elif (angle >=60 and angle <120): #(90)
return 128
else:
return 255 #(120)
elif (fac==2): # 10 +/- 30
if ( angle >=40 and angle < 100): #(70)
return 0
elif (angle >=100 and angle <160):#(130)
return 128
else:
return 255 #(10)
elif (fac==3): # 20 +/- 30
if ( angle >= 50 and angle < 110): #(80)
return 0
elif (angle >=110 and angle <170): #(140)
return 128
else:
return 255 #(20)
elif (fac==4): # 0 +/- 10 (60)
if ( angle >=50 and angle < 70): #(60)
return 0
elif (angle >=110 and angle <130): #(120)
return 128
elif ( angle <10 or angle > 170): #(0)
return 255
else:
return 192
elif (fac==5): # 40 +/- 20 (40)
if ( angle >=20 and angle < 60): #(40)
return 0
elif (angle >=80 and angle <120): #(100)
return 128
elif ( angle >=140 and angle < 180): #(160)
return 255
else:
return 192
elif (fac==6): #70 +/- 20 (70)
if ( angle >=50 and angle < 90): #(70)
return 0
elif (angle >=110 and angle <150): #(130)
return 128
elif ( angle < 20 ): #(10)
return 255
else:
return 192
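# Worked example for the default binning (fac=0): angles are degrees in [0, 180)
# and checkTri quantizes them into three grey levels centred on 60, 120 and 0
# degrees, e.g. checkTri(60) -> 0, checkTri(120) -> 128, checkTri(10) -> 255.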
def prepWeight():
wgt = np.empty(91) # delta is b/w 0, 90
for i in range(91): wgt[i] = (0.5+(np.cos(np.pi*((i/90)))/2))**6
y_TrainW = np.zeros((180,180))
for rw in range(180):
y_TrainW[rw, rw]=1.0
for i in range(1, 91):
y_TrainW[rw,rw-i] = y_TrainW[rw,(rw+i)%180] = wgt[i]
return y_TrainW
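# In prepWeight the fall-off follows wgt[delta] = ((1 + cos(pi*delta/90)) / 2)**6
# for an angular distance delta in [0, 90]; e.g. wgt[0] = 1.0, wgt[45] ~= 0.016,
# wgt[90] = 0.0, so each row of y_TrainW is a soft label centred on the true
# direction (0-179 degrees, wrapped modulo 180 via the two symmetric indices).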
def UT_Sobel(fgm, showData=False):
Gx=fgm[0,2]*255.+2*255.*255.*fgm[1,2]+fgm[2,2]*255.-fgm[0,0]*255.-2*255.*255.*fgm[1,0]-fgm[2,0]*255.
Gy=fgm[2,0]*255.+2*255.*255.*fgm[2,1]+fgm[2,2]*255.-fgm[0,0]*255.-2*255.*255.*fgm[0,1]-fgm[0,2]*255.
if showData:
print(fgm[0,:])
print(fgm[1,:])
print(fgm[2,:])
print(Gx, Gy)
return Gx, Gy
def FP_DirSobel(plt, fm, fbin, fpfg, showImg=False, dirNo = DirNo, pixNo=PixNo):
print("Calculating direction Sobel ({0}), Window {1}x{1} Shape:{2}".format(dirNo, pixNo, fm.shape))
height, width = fm.shape[0], fm.shape[1]
NoHeight, NoWidth = RowNo(height), ColNo(width)
    # axs must exist even when showImg is False because it is passed to UT_SetLine below
    axs = [[None for _ in range(NoWidth)] for _ in range(NoHeight)] if showImg else None
fpdirSbl=np.full((NoHeight,NoWidth), -1, dtype=float)
fpdirVGx=np.full((NoHeight,NoWidth), -1, dtype=float)
for i in range(NoHeight):
for j in range(NoWidth):
#print('i, j:', i, j, 'shape:', fpdir.shape)
a,b,c,d=i*PixNo,(i+1)*PixNo,j*PixNo,(j+1)*PixNo
Cx, Cy, tmpagl, newagl, ctr = (a+b)//2, (c+d)//2, [[0]]*PixNo, [[0]]*DirNo, 0
Gx, Gy, VGx, VGy = 0, 0, 0., 0.
            if fpfg[i][j]: # we only process foreground
#if i==3 and j==9: print(a,b,c,d)
for gx in range(a+1, b-1):
#print(gx, ':',end='')
for gy in range(c+1, d-1):
#print(gy, ' ')
Gx, Gy=UT_Sobel(fm[gx-1:gx+2,gy-1:gy+2,0], False)
VGx += 2*Gx*Gy
VGy += (Gx**2-Gy**2)
theta=(np.arctan(VGx/VGy))/2
#if (theta < 0 and VGx>0): theta+=np.pi
#if (theta < 0 and VGx<=0) or (theta>=0 and VGx>0): theta+=np.pi/2
#if i==3 and j==9: print('VGy/VGx=',VGy/VGx, 'arctan=', np.arctan2(VGy, VGx), 'theta=', theta)
fpdirSbl[i][j]=theta
fpdirVGx[i][j]=VGx
else:
fpdirSbl[i][j]=-1
if i==3 and j==9: print(' {:.2f}'.format(fpdirSbl[i][j]))
for i in range(NoHeight):
print(i,':')
for j in range(NoWidth):
print(' {:.1f}'.format(fpdirSbl[i][j]), end='')
print()
for i in range(NoHeight):
print(i,':')
for j in range(NoWidth):
if (fpdirSbl[i][j] < 0 and fpdirVGx[i][j]>0): fpdirSbl[i][j]+=np.pi
if (fpdirSbl[i][j] < 0 and fpdirVGx[i][j]<=0) or (fpdirSbl[i][j]>=0 and fpdirVGx[i][j]>0): fpdirSbl[i][j]+=np.pi/2
print(' {:.1f}'.format(fpdirSbl[i][j]), end='')
print()
    UT_SetLine(plt, fpfg, fpdirSbl, fm, axs, showImg, 'Red', 3)
if showImg: plt.imshow(fm, cmap=plt.cm.gray)
if showImg: plt.show()
print('==================')
return fpdirSbl
def UT_SetTri(plt, fpfg, fpdir, fmdir, noColor=3):  # NOTE: shadowed by the second UT_SetTri defined further below
# fpfg: foreground(true)
# fpdir: block direction
print('=Setting Tri-color Image=')
fmtri=fmdir.copy()
print(fpfg.shape, fpdir.shape, fmtri.shape)
height, width = fmtri.shape[0], fmtri.shape[1]
NoHeight, NoWidth = RowNo(height), ColNo(width)
agl_lst = [(x*180)//noColor for x in range(noColor)]
agl_d = 90//noColor
clr_lst = [(x*256)//(noColor-1) for x in range(noColor)]
clr_lst[-1]=clr_lst[-1]-1
clr_delta=256/(noColor-1)
for i in range(fpfg.shape[0]):
for j in range(fpfg.shape[1]):
a,b,c,d=i*PixNo,(i+1)*PixNo,j*PixNo,(j+1)*PixNo
if ( fpfg[i][j] ) :
#print(i, j, fm[a:b,c:d] )
for k in range(a,b):
for l in range(c,d):
fmtri[k,l]=192
#fmtri[a:b,c:d].fill(192)#=192
# print(fm[a:b,c:d])
#else :
# fm[a:b,c:d]=255
#fm[a:b,c:d]=clr
print('=Setting Tri-color Image....Done!')
return fmtri
def UT_PixTri(plt, fpfg, pixdir, pixtri, noColor=3, SWD=2, SMLoop=4):
print('=Setting Pix-Tri. Image=')
height, width = pixtri.shape[0], pixtri.shape[1]
#NoHeight, NoWidth = RowNo(height), ColNo(width)
#fpdirpn = np.full((NoHeight,NoWidth), -1, dtype=float)
print('pixdir.shape: ', pixdir.shape)
print('pixtri.shape: ', pixtri.shape)
pixsmt=pixtri.copy() # smooth base array
W12 = 12
for i in range(height):
for j in range(width):
if i < W12 or i>= height - W12 or j < W12 or j>= width-12:
pixtri[i][j], pixsmt[i][j]=192, -1
else:
if ( fpfg[i//PixNo][j//PixNo] ):
pd=pixdir[i-W12][j-W12]
pixtri[i-W12][j-W12] = pixsmt[i-W12][j-W12]=checkTri(pd,0)
#if ( pd >= 30 and pd < 90 ):
# pixtri[i-W12][j-W12], pixsmt[i-W12][j-W12]=0, 0
#elif ( pd >= 90 and pd < 150 ):
# pixtri[i-W12][j-W12], pixsmt[i-W12][j-W12]=128, 128
#else:
# pixtri[i-W12][j-W12], pixsmt[i-W12][j-W12]=255, 255
else:
pixtri[i-W12][j-W12], pixsmt[i-W12][j-W12]=192, 192
#SWD=2 # for 3x3 ; 5x5 needs 2
th_no = ((2*SWD+1)**2)//2 + 1
print('==== th_no =====' , th_no)
for sm_no in range(SMLoop):
pixbuf=pixsmt.copy()
for i in range(height):
for j in range(width):
if i < W12 or i>= height - W12 or j < W12 or j>= width-12: continue
zary = pixbuf[i-SWD,j-SWD:j+SWD+1].flatten()
for m in range(2*SWD+1):
if m == 0: continue
mm = m - SWD
for n in range(2*SWD+1):
nn = n - SWD
if mm == 0 and nn == 0: continue
zary = np.append(zary, [pixbuf[i+mm, j+nn]])
un, uc = np.unique(zary, return_counts=True)
undict = dict(zip(un,uc))
if undict[max(undict, key=undict.get)] >= th_no:
pixsmt[i,j]=max(undict, key=undict.get)
#plt.imshow(pixsmt, cmap=plt.cm.gray)
#plt.show()
print('=Setting Pix-Tri. Image....Done!')
return pixsmt
def UT_SetTri(plt, fpfg, fpdir, fm, r_fac=0, noColor=3):
print('=Setting Tri. Image=')
height, width = fm.shape[0], fm.shape[1]
NoHeight, NoWidth = RowNo(height), ColNo(width)
fpdirpn = np.full((NoHeight,NoWidth), -1, dtype=float)
for i in range(fpfg.shape[0]):
for j in range(fpfg.shape[1]):
a,b,c,d=i*PixNo,(i+1)*PixNo,j*PixNo,(j+1)*PixNo
if ( fpfg[i][j] ):
clr=checkTri(fpdir[i][j],0)
if ( clr == 0 ):
fpdirpn[i][j] = 0
elif (clr==128):
fpdirpn[i][j] = 1
elif (clr==255):
fpdirpn[i][j] = 2
else:
pass
#if ( fpfg[i][j] ):
# if ( fpdir[i][j] >= 30 and fpdir[i][j] < 90 ):
# clr, fpdirpn[i][j] = 0, 0
# elif ( fpdir[i][j] >= 90 and fpdir[i][j] < 150 ):
# clr, fpdirpn[i][j] = 128, 1
# else:
# clr, fpdirpn[i][j] = 255, 2
else:
clr, fpdirpn[i][j] =192, -1
#print('fm:', a, b, c, d)
fm[a:b,c:d]=clr
if r_fac != 0:
fm_fac = fm.copy()
for i in range(0,fpfg.shape[0],r_fac):
for j in range(0,fpfg.shape[1],r_fac):
a,b,c,d=i*PixNo,(i+r_fac)*PixNo,j*PixNo,(j+r_fac)*PixNo
zarry = np.zeros((noColor+1,), dtype=np.int)
for m in range(r_fac):
if i+m >= fpfg.shape[0]: continue
for n in range(r_fac):
if j+n >= fpfg.shape[1]: continue
clridx = int(fpdirpn[i+m][j+n])
if clridx>=0:
zarry[clridx]+=1
else:
zarry[noColor]+=1
#print (i, j, fpdirpn[i][j], fpdirpn[i][j+1],fpdirpn[i+1][j], fpdirpn[i+1][j+1], zarry, np.argmax(zarry) )
#print (i, j, zarry, zarry[-1],np.argmax(zarry) )
#if ( fpfg[i][j] ):
if ( zarry[-1]==zarry[np.argmax(zarry)] ):
clr=192
elif ( np.argmax(zarry)==0 ):
clr = 0
elif ( np.argmax(zarry)==1 ):
clr = 128
elif ( np.argmax(zarry)==2 ):
clr = 255
else:
clr =192
#print('fm_fac:', a, b, c, d)
fm_fac[a:b,c:d]=clr
print('=Setting Tri. Image....Done!')
if r_fac!=0:
return fm_fac
def UT_SetGray(plt, fpfg, fm, fpdir_prob):
print('=Setting Prob. Image=')
height, width = fm.shape[0], fm.shape[1]
NoHeight, NoWidth = RowNo(height), ColNo(width)
fpdirpn = np.full((NoHeight,NoWidth), -1, dtype=float)
cmax, cmin= 0, 255
for i in range(fpfg.shape[0]):
for j in range(fpfg.shape[1]):
a,b,c,d=i*PixNo,(i+1)*PixNo,j*PixNo,(j+1)*PixNo
#print('setting', i,j, '=',a,b,c,d,':', fpfg[i][j], end=' ')
clr=int(fpdir_prob[i][j]*255) if ( fpfg[i][j] ) else 192
fm[a:b,c:d]=clr
#print('Prob.',i,j,'....', fpdir_porg[i][j], fpdir_prob[i][j])
if ( fpfg[i][j] ):
fpdirpn[i][j] = int(fpdir_prob[i][j]*255)
print('{} {} {:3d} {:.3f}'.format(i, j, int(fpdir_prob[i][j]*255), fpdir_prob[i][j]))
    print('=Setting Prob. Image....Done!')
def UT_SetLine2(plt, fpfg, fpdir, fmS, showImg=False, Clr='white', Width=3):
print('=Setting Line Segment=', Clr, Clr=='Red')
print(fmS)
fm=np.full(fmS, 255, dtype=int)
CC = 255 if Clr == 'white' else 0
CW = 0
CB = int(128)
for i in range(fpfg.shape[0]):
for j in range(fpfg.shape[1]):
a,b,c,d=i*PixNo,(i+1)*PixNo,j*PixNo,(j+1)*PixNo
Cx, Cy, tmpagl, newagl, ctr = (a+b)//2, (c+d)//2, [[0]]*PixNo, [[0]]*DirNo, 0
CxX, CyY=0,0
if ( fpfg[i][j]):
if (fpdir[i][j]<=45 or fpdir[i][j] >=135):
CxOff = PixNo_1_2 * np.tan(fpdir[i][j]*np.pi/180)
# to point
CxX, CyY=int(Cx-CxOff), int(Cy+PixNo_1_2)
# from point
_CxX, _CyY = Cx*2-CxX,Cy*2-CyY
RGB = 0 if Clr=='Red' else 2 if Clr=='Blue' else 1
for _r in range(PixNo):
_x_, _y_ = int(_CxX-(_r * np.tan(fpdir[i][j]*np.pi/180))), _CyY+_r
if Width==5:
fm[_x_,_y_]=fm[_x_-1,_y_]=fm[_x_-2,_y_]=fm[_x_+1,_y_]=fm[_x_+2,_y_]=CC
else:
fm[_x_,_y_]=fm[_x_-1,_y_]=fm[_x_+1,_y_]=CC
else:
CyOff = PixNo_1_2 / np.tan(fpdir[i][j]*np.pi/180)
CxX, CyY=int(Cx-PixNo_1_2), int(Cy+CyOff)
_CxX, _CyY = Cx*2-CxX, Cy*2-CyY
RGB = 0 if Clr=='Red' else 2 if Clr=='Blue' else 1
for _r in range(PixNo):
_x_, _y_ = _CxX-_r, int(_CyY+ (_r/np.tan(fpdir[i][j]*np.pi/180)))
if Width==5:
fm[_x_,_y_]=fm[_x_,_y_-1]=fm[_x_,_y_-2]=fm[_x_,_y_+1]=fm[_x_,_y_+2]=CC
else:
fm[_x_,_y_]=fm[_x_,_y_-1]=fm[_x_,_y_+1]=CC
else:
fm[a:b,c:d]=int(192)
print('=Setting Line Segment....Done!')
return fm
def UT_SetLine(plt, fpfg, fpdir, fm, axs, showImg=False, Clr='white', Width=3):
print('=Setting Line Segment=', Clr, Clr=='Red')
CC = 255 if Clr == 'white' else 0
for i in range(fpdir.shape[0]):
for j in range(fpdir.shape[1]):
a,b,c,d=i*PixNo,(i+1)*PixNo,j*PixNo,(j+1)*PixNo
Cx, Cy, tmpagl, newagl, ctr = (a+b)//2, (c+d)//2, [[0]]*PixNo, [[0]]*DirNo, 0
CxX, CyY=0,0
if ( fpfg[i][j]):
if (fpdir[i][j]<=45 or fpdir[i][j] >=135):
CxOff = PixNo_1_2 * np.tan(fpdir[i][j]*np.pi/180)
# to point
CxX, CyY=int(Cx-CxOff), int(Cy+PixNo_1_2)
# from point
_CxX, _CyY = Cx*2-CxX,Cy*2-CyY
RGB = 0 if Clr=='Red' else 2 if Clr=='Blue' else 1
#print('i,j ({},{}), a,b ({},{}), from ({},{}) to ({},{}), dir({})'.format(i,j,a,b,_CxX, _CyY,CxX, CyY,fpdir[i][j]))
for _r in range(PixNo):
_x_, _y_ = int(_CxX-(_r * np.tan(fpdir[i][j]*np.pi/180))), _CyY+_r
#_x_, _y_ = int(a-(_r * np.tan(fpdir[i][j]*np.pi/180))), _CyY+_r
if Width==5:
fm[_x_,_y_]=fm[_x_-1,_y_]=fm[_x_-2,_y_]=fm[_x_+1,_y_]=fm[_x_+2,_y_]=CC
else:
fm[_x_,_y_]=fm[_x_-1,_y_]=fm[_x_+1,_y_]=CC
else:
CyOff = PixNo_1_2 / np.tan(fpdir[i][j]*np.pi/180)
CxX, CyY=int(Cx-PixNo_1_2), int(Cy+CyOff)
_CxX, _CyY = Cx*2-CxX, Cy*2-CyY
                    #from (CxX,CyY) -> (Cx*2-CxX,Cy*2-CyY), plot one red pixel
RGB = 0 if Clr=='Red' else 2 if Clr=='Blue' else 1
#print('i,j ({},{}), a,b ({},{}), from ({},{}) to ({},{}), dir({})'.format(i,j,a,b,_CxX, _CyY,CxX, CyY,fpdir[i][j]))
for _r in range(PixNo):
_x_, _y_ = _CxX-_r, int(_CyY+ (_r/np.tan(fpdir[i][j]*np.pi/180)))
if Width==5:
fm[_x_,_y_]=fm[_x_,_y_-1]=fm[_x_,_y_-2]=fm[_x_,_y_+1]=fm[_x_,_y_+2]=CC
else:
fm[_x_,_y_]=fm[_x_,_y_-1]=fm[_x_,_y_+1]=CC
else:
fm[a:b,c:d]=192
if showImg: axs[i][j]=plt.subplot2grid((fpdir.shape[0],fpdir.shape[1]),(i,j))
if showImg: axs[i][j].imshow(fm[a:b,c:d], cmap=plt.cm.gray)
#if showImg and fpdir[i][j]!=-1: axs[i][j].plot((Cx,Cy),(Cx*2-CxX,Cy*2-CyY), color="red", linewidth=2.0, linestyle="-" )
if showImg: axs[i][j].set_axis_off()
#print('i, j:', i, j, 'Dir:', fpdir[i])
#fpdir[i][j]=0
if showImg: plt.axis('off')
if showImg: plt.show()
print('=Setting Line Segment....Done!')
UT_WaveFrq_buffer=np.full((DirNo,3), 0, dtype=float)
def UT_WaveFrq(fYs, showData=False):
res=0
for i in range(fYs.shape[0]):
UT_WaveFrq_buffer[i][0], UT_WaveFrq_buffer[i][1]=np.average(fYs[i]), np.std(fYs[i])
UT_WaveFrq_buffer[0,2]=(UT_WaveFrq_buffer[DirNo-1,1]+UT_WaveFrq_buffer[0,1]+UT_WaveFrq_buffer[1,1])/3
for i in range(1, fYs.shape[0]-1):
UT_WaveFrq_buffer[i,2]=np.average(UT_WaveFrq_buffer[i-1:i+2,1])
UT_WaveFrq_buffer[DirNo-1,2]=(UT_WaveFrq_buffer[DirNo-1,1]+UT_WaveFrq_buffer[0,1]+UT_WaveFrq_buffer[DirNo-2,1])/3
#a = np.array(UT_WaveFrq_buffer[:,2]) # smooth std does not help
a = np.array(UT_WaveFrq_buffer[:,1])
if ( showData ) : print('===>', a, '\n', UT_WaveFrq_buffer)
return np.where(a == a.min())[0][0]
def FP_FindDir(plt, fm, fbin, fpfg, showImg=False, dirNo = DirNo, pixNo=PixNo):
print("Calculating direction Frequency ({0}), Window {1}x{1} Shape:{2}".format(dirNo, pixNo, fm.shape))
height, width = fm.shape[0], fm.shape[1]
NoHeight, NoWidth = RowNo(height), ColNo(width)
    # axs must exist even when showImg is False because it is passed to UT_SetLine below
    axs = [[None for _ in range(NoWidth)] for _ in range(NoHeight)] if showImg else None
fpdir=np.full((NoHeight,NoWidth), -1, dtype=float)
fYs =np.full((DirNo,PixNo), 0, dtype=float)
#print('anx:', axs, '\n', DirNo, ':', DirAgl, '\n', fpdir, 'H/W', NoHeight, NoWidth)
for i in range(NoHeight):
for j in range(NoWidth):
#print('i, j:', i, j, 'shape:', fpdir.shape)
a,b,c,d=i*PixNo,(i+1)*PixNo,j*PixNo,(j+1)*PixNo
Cx, Cy, tmpagl, newagl, ctr = (a+b)//2, (c+d)//2, [[0]]*PixNo, [[0]]*DirNo, 0
            if fpfg[i][j]: # we only process foreground
for agl in DirAgl:
newagl[ctr] = [[0]]*PixNo
if ( agl<=45 or agl >=135):
ta = np.tan(agl*np.pi/180)
tmpagl=[(x, int(x*ta)) for x in range(PixNo)]
CxOff, CyOff= -tmpagl[PixNo_1_2][0], -tmpagl[PixNo_1_2][1]
for x in range(PixNo):
newagl[ctr][x]=[Cx-(tmpagl[x][1]+CyOff), Cy+(tmpagl[x][0]+CxOff)]
else:
ta = np.tan(agl*np.pi/180)
tmpagl=[(int(y/ta),y) for y in range(PixNo)]
CxOff, CyOff= -tmpagl[PixNo_1_2][0], -tmpagl[PixNo_1_2][1]
for y in range(PixNo):
newagl[ctr][y]=[Cx-(tmpagl[y][1]+CyOff), Cy+(tmpagl[y][0]+CxOff)]
# for every angle place it's fY[i]
for idx in range(PixNo):
ia, ib = newagl[ctr][idx][0], newagl[ctr][idx][1]
fYs[ctr][idx] = fm [ia] [ib] [0]
ctr+=1
fpdir[i][j] = DirAgl[UT_WaveFrq(fYs)]
    UT_SetLine(plt, fpfg, fpdir, fm, axs, showImg, 'Red', 5)
return fpdir
def FP_FindBG(plt, fm, showImg=False, pixNo=PixNo):
#fdir=np.copy(fm)
print("Finding foreground image, Window {0}x{0} Shape:{1}".format(pixNo, fm.shape))
height, width = fm.shape[0], fm.shape[1]
NoHeight, NoWidth = RowNo(height), ColNo(width)
if showImg: axs=[[None for _ in range(NoWidth)]]*NoHeight
aavg, astd =np.average(fm), np.std(fm)
if _DeBug_: print("...average{} std{}".format(aavg,astd))
# foreground image
fpfg=np.ndarray((NoHeight,NoWidth), dtype=bool)
for i in range(NoHeight):
if _DeBug_: print("i:({:2d})".format(i),end='')
for j in range(NoWidth):
a,b,c,d=i*PixNo,(i+1)*PixNo,j*PixNo,(j+1)*PixNo
ravg=np.average(fm[a:b,c:d])
rstd=np.std(fm[a:b,c:d])
if _DeBug_: print(",{:2d},{:2d}".format(int(ravg*100),int(rstd*100)),end='')
fpfg[i][j] = False if ravg > aavg + astd/4 else True
if not fpfg[i][j]:
for m in range(a,b):
for n in range(c,d):
fm[m][n]=0
if showImg: axs[i][j]=plt.subplot2grid((NoHeight,NoWidth),(i,j))
if showImg: axs[i][j].imshow(fm[a:b,c:d], cmap=plt.cm.gray)
if showImg: axs[i][j].set_axis_off()
if _DeBug_: print('')
# remove island
for k in range(BGloop):
for i in range(NoHeight):
for j in range(NoWidth):
# check if this blk is really a forground
if fpfg[i][j]:
count=0;
for m in range(i-1,i+2):
for n in range(j-1,j+2):
if m==i and n==j: continue
if m<0 or m==NoHeight:
count-=1.1
continue
elif n<0 or n == NoWidth:
count-=1.1
continue
else:
count=count+1 if fpfg[m][n] else count-1
if count<0:
fpfg[i][j]=False
a,b,c,d=i*PixNo,(i+1)*PixNo,j*PixNo,(j+1)*PixNo
for m in range(a,b):
for n in range(c,d):
fm[m][n]=1
if showImg: plt.axis('off')
if showImg: plt.show()
if showImg: plt.imshow(fm,cmap=plt.cm.gray)
if showImg: plt.show()
return fpfg
def FP_Binary(plt, fm, showImg=False):
global fbin
fbin=np.copy(fm)
print("Binarizing gray scale image ", fm.shape, " window", PixNo)
avg=np.average(fm[:,:])
std=np.std(fm[:,:])
height, width = fm.shape[0], fm.shape[1]
NoHeight, NoWidth = RowNo(height), ColNo(width)
if showImg: axs=[[None for _ in range(NoWidth)]]*NoHeight
#plt.axis('off')
th=np.empty([NoHeight,NoWidth], dtype = float)
for i in range(NoHeight):
#print(i,'= ', end='')
for j in range(NoWidth):
#if fpfg[i][j]:
a,b,c,d=i*PixNo,(i+1)*PixNo,j*PixNo,(j+1)*PixNo
ravg=np.average(fm[a:b,c:d])
rstd=np.std(fm[a:b,c:d])
w=0
if ravg > avg and rstd < 0.1:
b_avg,w = avg + std/3, 1
elif ravg > avg and rstd >= 0.1:
if ravg - avg < std/3:
if rstd < 0.15:
b_avg, w = avg + std/4, 2.1
else:
b_avg, w = ravg - std, 2.2 # + std/4 + 0.1, 2.2
else:
if ravg > 0.75:
if ( rstd > 0.15 ) :
b_avg, w = avg - std/3 , 2.3
else:
b_avg, w = avg + std/4 , 2.4
else:
b_avg, w = avg , 2.5
elif ravg > (avg - std) :
b_avg, w = (ravg + (avg - std/2) )/2, 3
else:
b_avg, w = (ravg + (avg - std) )/2, 4
th[i][j]=b_avg
for i in range(NoHeight):
for j in range(NoWidth):
a,b,c,d=i*PixNo,(i+1)*PixNo,j*PixNo,(j+1)*PixNo
if showImg:axs[i][j]=plt.subplot2grid((NoHeight,NoWidth),(i,j))
for m in range(a,b):
for n in range(c,d):
fbin[m][n]=0 if fm[m][n] < th[i][j] else 1
if showImg: axs[i][j].imshow(fbin[a:b,c:d], cmap=plt.cm.gray)
if showImg: axs[i][j].set_axis_off()
if showImg: plt.imshow(fbin)
if showImg: plt.axis('off')
if showImg: plt.show()
return plt, fbin
def FP_Smooth(fdata):
global fm
fm=np.copy(fdata)
print("Smoothing gray scale image ", fm.shape, " window", PixNo)
height, width = fdata.shape[0], fdata.shape[1]
for r in range(Smth, height-Smth):
for c in range(Smth, width-Smth):
total= 0
## do smooth here
for x in range(r-Smth, r+Smth+1):
for y in range(c-Smth,c+Smth+1):
total+=fdata[x][y];
fm[r][c]=total/((Smth*2+1)**2)
return fm
|
[
"numpy.full",
"numpy.average",
"numpy.copy",
"numpy.argmax",
"numpy.std",
"numpy.empty",
"numpy.zeros",
"numpy.append",
"numpy.tan",
"numpy.array",
"numpy.cos",
"numpy.arctan",
"numpy.ndarray",
"numpy.unique"
] |
[((14876, 14911), 'numpy.full', 'np.full', (['(DirNo, 3)', '(0)'], {'dtype': 'float'}), '((DirNo, 3), 0, dtype=float)\n', (14883, 14911), True, 'import numpy as np\n'), ((1695, 1707), 'numpy.empty', 'np.empty', (['(91)'], {}), '(91)\n', (1703, 1707), True, 'import numpy as np\n'), ((1814, 1834), 'numpy.zeros', 'np.zeros', (['(180, 180)'], {}), '((180, 180))\n', (1822, 1834), True, 'import numpy as np\n'), ((2699, 2744), 'numpy.full', 'np.full', (['(NoHeight, NoWidth)', '(-1)'], {'dtype': 'float'}), '((NoHeight, NoWidth), -1, dtype=float)\n', (2706, 2744), True, 'import numpy as np\n'), ((2755, 2800), 'numpy.full', 'np.full', (['(NoHeight, NoWidth)', '(-1)'], {'dtype': 'float'}), '((NoHeight, NoWidth), -1, dtype=float)\n', (2762, 2800), True, 'import numpy as np\n'), ((7754, 7799), 'numpy.full', 'np.full', (['(NoHeight, NoWidth)', '(-1)'], {'dtype': 'float'}), '((NoHeight, NoWidth), -1, dtype=float)\n', (7761, 7799), True, 'import numpy as np\n'), ((9922, 9967), 'numpy.full', 'np.full', (['(NoHeight, NoWidth)', '(-1)'], {'dtype': 'float'}), '((NoHeight, NoWidth), -1, dtype=float)\n', (9929, 9967), True, 'import numpy as np\n'), ((10724, 10752), 'numpy.full', 'np.full', (['fmS', '(255)'], {'dtype': 'int'}), '(fmS, 255, dtype=int)\n', (10731, 10752), True, 'import numpy as np\n'), ((15479, 15512), 'numpy.array', 'np.array', (['UT_WaveFrq_buffer[:, 1]'], {}), '(UT_WaveFrq_buffer[:, 1])\n', (15487, 15512), True, 'import numpy as np\n'), ((15962, 16007), 'numpy.full', 'np.full', (['(NoHeight, NoWidth)', '(-1)'], {'dtype': 'float'}), '((NoHeight, NoWidth), -1, dtype=float)\n', (15969, 16007), True, 'import numpy as np\n'), ((16015, 16054), 'numpy.full', 'np.full', (['(DirNo, PixNo)', '(0)'], {'dtype': 'float'}), '((DirNo, PixNo), 0, dtype=float)\n', (16022, 16054), True, 'import numpy as np\n'), ((17948, 17991), 'numpy.ndarray', 'np.ndarray', (['(NoHeight, NoWidth)'], {'dtype': 'bool'}), '((NoHeight, NoWidth), dtype=bool)\n', (17958, 17991), True, 'import numpy as np\n'), ((19635, 19646), 'numpy.copy', 'np.copy', (['fm'], {}), '(fm)\n', (19642, 19646), True, 'import numpy as np\n'), ((19721, 19741), 'numpy.average', 'np.average', (['fm[:, :]'], {}), '(fm[:, :])\n', (19731, 19741), True, 'import numpy as np\n'), ((19747, 19763), 'numpy.std', 'np.std', (['fm[:, :]'], {}), '(fm[:, :])\n', (19753, 19763), True, 'import numpy as np\n'), ((19941, 19983), 'numpy.empty', 'np.empty', (['[NoHeight, NoWidth]'], {'dtype': 'float'}), '([NoHeight, NoWidth], dtype=float)\n', (19949, 19983), True, 'import numpy as np\n'), ((21515, 21529), 'numpy.copy', 'np.copy', (['fdata'], {}), '(fdata)\n', (21522, 21529), True, 'import numpy as np\n'), ((15248, 15293), 'numpy.average', 'np.average', (['UT_WaveFrq_buffer[i - 1:i + 2, 1]'], {}), '(UT_WaveFrq_buffer[i - 1:i + 2, 1])\n', (15258, 15293), True, 'import numpy as np\n'), ((17832, 17846), 'numpy.average', 'np.average', (['fm'], {}), '(fm)\n', (17842, 17846), True, 'import numpy as np\n'), ((17848, 17858), 'numpy.std', 'np.std', (['fm'], {}), '(fm)\n', (17854, 17858), True, 'import numpy as np\n'), ((15043, 15061), 'numpy.average', 'np.average', (['fYs[i]'], {}), '(fYs[i])\n', (15053, 15061), True, 'import numpy as np\n'), ((15063, 15077), 'numpy.std', 'np.std', (['fYs[i]'], {}), '(fYs[i])\n', (15069, 15077), True, 'import numpy as np\n'), ((18170, 18194), 'numpy.average', 'np.average', (['fm[a:b, c:d]'], {}), '(fm[a:b, c:d])\n', (18180, 18194), True, 'import numpy as np\n'), ((18207, 18227), 'numpy.std', 'np.std', (['fm[a:b, c:d]'], {}), '(fm[a:b, c:d])\n', (18213, 
18227), True, 'import numpy as np\n'), ((20163, 20187), 'numpy.average', 'np.average', (['fm[a:b, c:d]'], {}), '(fm[a:b, c:d])\n', (20173, 20187), True, 'import numpy as np\n'), ((20200, 20220), 'numpy.std', 'np.std', (['fm[a:b, c:d]'], {}), '(fm[a:b, c:d])\n', (20206, 20220), True, 'import numpy as np\n'), ((7260, 7295), 'numpy.unique', 'np.unique', (['zary'], {'return_counts': '(True)'}), '(zary, return_counts=True)\n', (7269, 7295), True, 'import numpy as np\n'), ((8814, 8852), 'numpy.zeros', 'np.zeros', (['(noColor + 1,)'], {'dtype': 'np.int'}), '((noColor + 1,), dtype=np.int)\n', (8822, 8852), True, 'import numpy as np\n'), ((1769, 1793), 'numpy.cos', 'np.cos', (['(np.pi * (i / 90))'], {}), '(np.pi * (i / 90))\n', (1775, 1793), True, 'import numpy as np\n'), ((3447, 3467), 'numpy.arctan', 'np.arctan', (['(VGx / VGy)'], {}), '(VGx / VGy)\n', (3456, 3467), True, 'import numpy as np\n'), ((7206, 7247), 'numpy.append', 'np.append', (['zary', '[pixbuf[i + mm, j + nn]]'], {}), '(zary, [pixbuf[i + mm, j + nn]])\n', (7215, 7247), True, 'import numpy as np\n'), ((9355, 9371), 'numpy.argmax', 'np.argmax', (['zarry'], {}), '(zarry)\n', (9364, 9371), True, 'import numpy as np\n'), ((9407, 9423), 'numpy.argmax', 'np.argmax', (['zarry'], {}), '(zarry)\n', (9416, 9423), True, 'import numpy as np\n'), ((11160, 11193), 'numpy.tan', 'np.tan', (['(fpdir[i][j] * np.pi / 180)'], {}), '(fpdir[i][j] * np.pi / 180)\n', (11166, 11193), True, 'import numpy as np\n'), ((11747, 11780), 'numpy.tan', 'np.tan', (['(fpdir[i][j] * np.pi / 180)'], {}), '(fpdir[i][j] * np.pi / 180)\n', (11753, 11780), True, 'import numpy as np\n'), ((12850, 12883), 'numpy.tan', 'np.tan', (['(fpdir[i][j] * np.pi / 180)'], {}), '(fpdir[i][j] * np.pi / 180)\n', (12856, 12883), True, 'import numpy as np\n'), ((13642, 13675), 'numpy.tan', 'np.tan', (['(fpdir[i][j] * np.pi / 180)'], {}), '(fpdir[i][j] * np.pi / 180)\n', (13648, 13675), True, 'import numpy as np\n'), ((16567, 16592), 'numpy.tan', 'np.tan', (['(agl * np.pi / 180)'], {}), '(agl * np.pi / 180)\n', (16573, 16592), True, 'import numpy as np\n'), ((16872, 16897), 'numpy.tan', 'np.tan', (['(agl * np.pi / 180)'], {}), '(agl * np.pi / 180)\n', (16878, 16897), True, 'import numpy as np\n'), ((9461, 9477), 'numpy.argmax', 'np.argmax', (['zarry'], {}), '(zarry)\n', (9470, 9477), True, 'import numpy as np\n'), ((9517, 9533), 'numpy.argmax', 'np.argmax', (['zarry'], {}), '(zarry)\n', (9526, 9533), True, 'import numpy as np\n'), ((11471, 11504), 'numpy.tan', 'np.tan', (['(fpdir[i][j] * np.pi / 180)'], {}), '(fpdir[i][j] * np.pi / 180)\n', (11477, 11504), True, 'import numpy as np\n'), ((12013, 12046), 'numpy.tan', 'np.tan', (['(fpdir[i][j] * np.pi / 180)'], {}), '(fpdir[i][j] * np.pi / 180)\n', (12019, 12046), True, 'import numpy as np\n'), ((13288, 13321), 'numpy.tan', 'np.tan', (['(fpdir[i][j] * np.pi / 180)'], {}), '(fpdir[i][j] * np.pi / 180)\n', (13294, 13321), True, 'import numpy as np\n'), ((14105, 14138), 'numpy.tan', 'np.tan', (['(fpdir[i][j] * np.pi / 180)'], {}), '(fpdir[i][j] * np.pi / 180)\n', (14111, 14138), True, 'import numpy as np\n')]
|
# You can run this example via
#
# $ civis-compute submit iris.py
# $ <JOBID>
# $ civis-compute status
# $ civis-compute get <JOBID>
#
import os
import pickle
import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
# Civis Platform container configuration.
#CIVIS name=my iris example
#CIVIS required_resources={'cpu': 1024, 'memory': 8192, 'disk_space': 10.0}
# Load and shuffle data.
iris = load_iris()
X = iris.data
y = iris.target
# Shuffle the data.
idx = np.arange(X.shape[0])
np.random.seed(45687)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Fit and score.
rf = RandomForestClassifier(n_estimators=10)
clf = rf.fit(X, y)
score = clf.score(X, y)
print("score:", score)
# Now let's save the results.
# Just write the data to the location given by the environment
# variable CIVIS_JOB_DATA
with open(os.path.expandvars(
os.path.join('${CIVIS_JOB_DATA}', 'iris.pkl')), 'wb') as fp:
pickle.dump(rf, fp)
# This data will get tar-gzipped, put in the files endpoint and then attached to
# the job state. You can get it by running civis-compute get {scriptid}.
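# A hedged sketch of reading the model back after `civis-compute get <JOBID>`
# (assumes the downloaded archive was extracted into the working directory):
#
#   import pickle
#   with open('iris.pkl', 'rb') as fp:
#       rf = pickle.load(fp)
#   print(rf.predict([[5.1, 3.5, 1.4, 0.2]]))  # classify one illustrative iris sample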
|
[
"sklearn.ensemble.RandomForestClassifier",
"sklearn.datasets.load_iris",
"pickle.dump",
"numpy.random.seed",
"numpy.arange",
"os.path.join",
"numpy.random.shuffle"
] |
[((462, 473), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (471, 473), False, 'from sklearn.datasets import load_iris\n'), ((531, 552), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (540, 552), True, 'import numpy as np\n'), ((553, 574), 'numpy.random.seed', 'np.random.seed', (['(45687)'], {}), '(45687)\n', (567, 574), True, 'import numpy as np\n'), ((575, 597), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (592, 597), True, 'import numpy as np\n'), ((643, 682), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(10)'}), '(n_estimators=10)\n', (665, 682), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((971, 990), 'pickle.dump', 'pickle.dump', (['rf', 'fp'], {}), '(rf, fp)\n', (982, 990), False, 'import pickle\n'), ((906, 951), 'os.path.join', 'os.path.join', (['"""${CIVIS_JOB_DATA}"""', '"""iris.pkl"""'], {}), "('${CIVIS_JOB_DATA}', 'iris.pkl')\n", (918, 951), False, 'import os\n')]
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_data.ipynb (unless otherwise specified).
__all__ = ['RegionST', 'extract_region', 'coords2bbox', 'split_region', 'merge_tifs', 'filter_region', 'filter_cloudy',
'n_least_cloudy', 'download_topography_data', 'download_data', 'download_data_ts', 'get_event_data']
# Cell
import ee
import os
import requests
import rasterio
import pandas as pd
import numpy as np
import zipfile
import json
from IPython.core.debugger import set_trace
from pathlib import Path
import warnings
from fastprogress.fastprogress import progress_bar
from banet.geo import open_tif, merge, Region
from banet.geo import downsample
# Cell
class RegionST(Region):
"Defines a region in space and time with a name, a bounding box and the pixel size."
def __init__(self, name:str, bbox:list, pixel_size:float=None, scale_meters:int=None,
time_start:str=None, time_end:str=None, time_freq:str='D', time_margin:int=0,
shape:tuple=None, epsg=4326):
if scale_meters is not None and pixel_size is not None:
raise Exception('Either pixel_size or scale_meters must be set to None.')
self.name = name
self.bbox = rasterio.coords.BoundingBox(*bbox) # left, bottom, right, top
if pixel_size is not None:
self.pixel_size = pixel_size
else:
self.pixel_size = scale_meters/111000
self.epsg = epsg
self.scale_meters = scale_meters
self._shape = shape
self.time_start = pd.Timestamp(str(time_start))
self.time_end = pd.Timestamp(str(time_end))
self.time_margin = time_margin
self.time_freq = time_freq
@property
def shape(self):
"Shape of the region (height, width)"
if self._shape is None:
return (self.height, self.width)
else: return self._shape
@property
def times(self):
"Property that computes the date_range for the region."
tstart = self.time_start - pd.Timedelta(days=self.time_margin)
tend = self.time_end + pd.Timedelta(days=self.time_margin)
return pd.date_range(tstart, tend, freq=self.time_freq)
@classmethod
def load(cls, file, time_start=None, time_end=None):
"Loads region information from json file"
with open(file, 'r') as f:
args = json.load(f)
if time_start is None:
time_start = args['time_start']
if time_end is None:
time_end = args['time_end']
return cls(args['name'], args['bbox'], args['pixel_size'],
time_start=time_start, time_end=time_end)
def extract_region(df_row, cls=Region):
"Create Region object from a row of the metadata dataframe."
if issubclass(cls, RegionST):
return cls(df_row.event_id, df_row.bbox, df_row.pixel_size,
df_row.time_start, df_row.time_end)
elif issubclass(cls, Region):
return cls(df_row.event_id, df_row.bbox, df_row.pixel_size)
    else: raise NotImplementedError('cls must be one of the following [Region, RegionST]')
# Cell
def coords2bbox(lon, lat, pixel_size):
return [lon.min(), lat.min(), lon.max()+pixel_size, lat.max()+pixel_size]
def split_region(region:RegionST, size:int, cls=Region):
lon, lat = region.coords()
Nlon = (len(lon)//size)*size
Nlat = (len(lat)//size)*size
lons = [*lon[:Nlon].reshape(-1, size), lon[Nlon:][None]]
lats = [*lat[:Nlat].reshape(-1, size), lat[Nlat:][None]]
if len(lats[-1].reshape(-1)) == 0 and len(lons[-1].reshape(-1)) == 0:
lons = lons[:-1]
lats = lats[:-1]
#lons = lon.reshape(-1, size)
#lats = lat.reshape(-1, size)
if issubclass(cls, RegionST):
return [cls('', coords2bbox(ilon, ilat, region.pixel_size),
pixel_size=region.pixel_size, time_start=region.time_start,
time_end=region.time_end, time_freq=region.time_freq,
time_margin=region.time_margin) for ilon in lons for ilat in lats]
elif issubclass(cls, Region):
return [cls('', coords2bbox(ilon, ilat, region.pixel_size), pixel_size=region.pixel_size)
for ilon in lons for ilat in lats]
    else: raise NotImplementedError('cls must be one of the following [Region, RegionST]')
return
def merge_tifs(files:list, fname:str, delete=False):
data, tfm = merge([open_tif(str(f)) for f in files])
data = data.squeeze()
fname = Path(files[0]).parent/fname
profile = open_tif(str(files[0])).profile
with rasterio.Env():
height, width = data.shape
profile.update(width=width, height=height, transform=tfm, compress='lzw')
with rasterio.open(str(fname), 'w', **profile) as dst:
dst.write(data, 1)
if delete:
for f in files: os.remove(f)
# Cell
def filter_region(image_collection:ee.ImageCollection, region:RegionST, times:tuple, bands=None):
image_collection = image_collection.filterDate(times[0], times[1])
geometry = ee.Geometry.Rectangle(region.bbox)
image_collection = image_collection.filterBounds(geometry)
if bands is not None:
image_collection = image_collection.select(bands)
return image_collection
def filter_cloudy(image_collection:ee.ImageCollection, max_cloud_fraction=0.2):
return image_collection.filterMetadata(
'CLOUDY_PIXEL_PERCENTAGE', 'not_greater_than', max_cloud_fraction)
def n_least_cloudy(image_collection:ee.ImageCollection, n=5):
image_collection = image_collection.sort(prop='CLOUDY_PIXEL_PERCENTAGE')
image_collection = image_collection.toList(image_collection.size())
colsize = image_collection.size().getInfo()
if colsize < n:
warnings.warn(f'Total number of images in the collection {colsize} less than n={n}. Setting n={colsize}')
n = colsize
image_collection = ee.ImageCollection([ee.Image(image_collection.get(i)) for i in range(n)])
return image_collection
def download_topography_data(R:RegionST, path_save=Path('.'), scale=None,
download_crop_size=1000, show_progress=False):
if scale is None: scale = R.scale_meters
ee.Initialize()
image = ee.Image('srtm90_v4')
path_save.mkdir(exist_ok=True, parents=True)
sR = [R] if min(R.shape) <= download_crop_size else split_region(R, size=download_crop_size)
if not (path_save/'srtm90_v4.elevation.tif').is_file():
files = []
loop = enumerate(sR) if not show_progress else progress_bar(enumerate(sR),total=len(sR))
for j, R in loop:
region = (f"[[{R.bbox.left}, {R.bbox.bottom}], [{R.bbox.right}, {R.bbox.bottom}], " +
f"[{R.bbox.right}, {R.bbox.top}], [{R.bbox.left}, {R.bbox.top}]]")
url = image.getDownloadUrl(
{'scale': scale, 'crs': 'EPSG:4326', 'region': f'{region}'})
r = requests.get(url)
with open(str(path_save/'data.zip'), 'wb') as f:
f.write(r.content)
with zipfile.ZipFile(str(path_save/'data.zip'), 'r') as f:
f.extractall(str(path_save))
os.rename(str(path_save/'srtm90_v4.elevation.tif'),
str(path_save/f'srtm90_v4.elevation_{j}.tif'))
files.append(str(path_save/f'srtm90_v4.elevation_{j}.tif'))
os.remove(str(path_save/'data.zip'))
merge_tifs(files, 'srtm90_v4.elevation.tif', delete=True)
def download_data(R:RegionST, times, products, bands, path_save, scale=None, max_cloud_fraction=None,
use_least_cloudy=None, download_crop_size=1000, show_progress=False):
if scale is None: scale = R.scale_meters
ee.Initialize()
path_save.mkdir(exist_ok=True, parents=True)
if not ((path_save/f'download.{bands[0]}.tif').is_file() and
(path_save/f'download.{bands[1]}.tif').is_file() and
(path_save/f'download.{bands[2]}.tif').is_file()):
sR = [R] if min(R.shape) <= download_crop_size else split_region(R, size=download_crop_size, cls=RegionST)
fsaves = []
#for j, R in tqdm(enumerate(sR), total=len(sR)):
loop = enumerate(sR) if not show_progress else progress_bar(enumerate(sR),total=len(sR))
for j, R in loop:
region = (f"[[{R.bbox.left}, {R.bbox.bottom}], [{R.bbox.right}, {R.bbox.bottom}], " +
f"[{R.bbox.right}, {R.bbox.top}], [{R.bbox.left}, {R.bbox.top}]]")
if not ((path_save/f'download.{bands[0]}_{j}.tif').is_file() and
(path_save/f'download.{bands[1]}_{j}.tif').is_file() and
(path_save/f'download.{bands[2]}_{j}.tif').is_file()):
# Merge products to single image collection
imCol = ee.ImageCollection(products[0])
for i in range(1, len(products)):
imCol = imCol.merge(ee.ImageCollection(products[i]))
imCol = filter_region(imCol, R, times=times, bands=bands)
if max_cloud_fraction is not None:
imCol = filter_cloudy(imCol, max_cloud_fraction=max_cloud_fraction)
if use_least_cloudy is not None:
imCol = n_least_cloudy(imCol, n=use_least_cloudy)
im = imCol.median()
imCol = ee.ImageCollection([im])
colList = imCol.toList(imCol.size())
# info = colList.getInfo()
# data_times = [pd.to_datetime(o['properties']['system:time_start'], unit='ms') for o in info]
# data_cloudy = [o['properties']['CLOUDY_PIXEL_PERCENTAGE'] for o in info]
# Download each image
for i in range(colList.size().getInfo()):
image = ee.Image(colList.get(i))
fname = 'download'
#fname = image.get('system:id').getInfo().split('/')[-1]
fnames_full = [f'{fname}.{b}.tif' for b in bands]
fnames_partial0 = [f'{fname}.{b}_{j}.tif' for b in bands]
fnames_full = all([(path_save/f).is_file() for f in fnames_full])
fnames_partial = all([(path_save/f).is_file() for f in fnames_partial0])
if not fnames_full:
fsaves.append([path_save/f for f in fnames_partial0])
if not fnames_partial:
zip_error = True
for i in range(10): # Try 10 times
if zip_error:
try:
url = image.getDownloadURL(
{'scale': scale, 'crs': 'EPSG:4326',
'region': f'{region}'})
r = requests.get(url)
with open(str(path_save/'data.zip'), 'wb') as f:
f.write(r.content)
with zipfile.ZipFile(str(path_save/'data.zip'), 'r') as f:
files = f.namelist()
f.extractall(str(path_save))
os.remove(str(path_save/'data.zip'))
zip_error = False
except:
zip_error = True
os.remove(str(path_save/'data.zip'))
time.sleep(10)
if zip_error: raise Exception(f'Failed to process {url}')
for f in files:
f = path_save/f
os.rename(str(f), str(path_save/f'{f.stem}_{j}{f.suffix}'))
# Merge files
suffix = '.tif'
files = path_save.ls(include=[suffix])
#files = np.unique(fsaves)
files = [o.stem for o in files]
ref = np.unique(['_'.join(o.split('_')[:-1])
for o in files if len(o.split('_')[-1]) < 6])
ids = np.unique([int(o.split('_')[-1])
for o in files if len(o.split('_')[-1]) < 6])
#file_groups = [[path_save/f'{r}_{i}{suffix}' for i in ids] for r in ref]
file_groups = [[path_save/f'{r}_{i}{suffix}' for i in ids
if f'{r}_{i}' in files] for r in ref]
for fs in file_groups:
if len(fs) < 500:
fsave = '_'.join(fs[0].stem.split('_')[:-1]) + suffix
merge_tifs(fs, fsave, delete=True)
else:
fs_break = np.array(fs)[:(len(fs)//500)*500].reshape(len(fs)//500,-1).tolist()
if len(fs[(len(fs)//500)*500:]) > 0:
fs_break.append(fs[(len(fs)//500)*500:])
for fsi, fs2 in enumerate(fs_break):
fsave = '_'.join(fs2[0].stem.split('_')[:-1]) + f'_break{fsi}' + suffix
merge_tifs(fs2, fsave, delete=True)
files = path_save.ls(include=[suffix, '_break'])
files = [o.stem for o in files]
ref = np.unique(['_'.join(o.split('_')[:-1])
for o in files if len(o.split('_')[-1]) < 11])
ids = np.unique([o.split('_')[-1]
for o in files if len(o.split('_')[-1]) < 11])
#file_groups = [[path_save/f'{r}_{i}{suffix}' for i in ids] for r in ref]
file_groups = [[path_save/f'{r}_{i}{suffix}' for i in ids
if f'{r}_{i}' in files] for r in ref]
for fs in file_groups:
fsave = '_'.join(fs[0].stem.split('_')[:-1]) + suffix
merge_tifs(fs, fsave, delete=True)
def download_data_ts(R:RegionST, products, bands, path_save, scale=None,
download_crop_size=1000, show_progress=False):
if scale is None: scale = R.scale_meters
ee.Initialize()
times = (R.times[0], R.times[-1])
path_save.mkdir(exist_ok=True, parents=True)
sR = [R] if min(R.shape) <= download_crop_size else split_region(R, size=download_crop_size, cls=RegionST)
loop = enumerate(sR) if not show_progress else progress_bar(enumerate(sR),total=len(sR))
for j, R in loop:
region = (f"[[{R.bbox.left}, {R.bbox.bottom}], [{R.bbox.right}, {R.bbox.bottom}], " +
f"[{R.bbox.right}, {R.bbox.top}], [{R.bbox.left}, {R.bbox.top}]]")
# Merge products to single image collection
imCol = ee.ImageCollection(products[0])
for i in range(1, len(products)):
imCol = imCol.merge(ee.ImageCollection(products[i]))
imCol = filter_region(imCol, R, times=times, bands=bands)
imCol = ee.ImageCollection(imCol)
colList = imCol.toList(imCol.size())
# Download each image
for i in range(colList.size().getInfo()):
image = ee.Image(colList.get(i))
zip_error = True
for i in range(10): # Try 10 times
if zip_error:
try:
url = image.getDownloadURL(
{'scale': scale, 'crs': 'EPSG:4326',
'region': f'{region}'})
r = requests.get(url)
with open(str(path_save/'data.zip'), 'wb') as f:
f.write(r.content)
with zipfile.ZipFile(str(path_save/'data.zip'), 'r') as f:
files = f.namelist()
f.extractall(str(path_save))
os.remove(str(path_save/'data.zip'))
zip_error = False
except:
zip_error = True
os.remove(str(path_save/'data.zip'))
time.sleep(10)
if zip_error: raise Exception(f'Failed to process {url}')
for f in files:
f = path_save/f
os.rename(str(f), str(path_save/f'{f.stem}_{j}{f.suffix}'))
# Merge files
suffix = '.tif'
files = path_save.ls(include=[suffix])
files = [o.stem for o in files]
ref = np.unique(['_'.join(o.split('_')[:-1])
for o in files if len(o.split('_')[-1]) < 6])
ids = np.unique([int(o.split('_')[-1])
for o in files if len(o.split('_')[-1]) < 6])
file_groups = [[path_save/f'{r}_{i}{suffix}' for i in ids
if f'{r}_{i}' in files] for r in ref]
for fs in file_groups:
if len(fs) < 500:
fsave = '_'.join(fs[0].stem.split('_')[:-1]) + suffix
merge_tifs(fs, fsave, delete=True)
else:
fs_break = np.array(fs)[:(len(fs)//500)*500].reshape(len(fs)//500,-1).tolist()
if len(fs[(len(fs)//500)*500:]) > 0:
fs_break.append(fs[(len(fs)//500)*500:])
for fsi, fs2 in enumerate(fs_break):
fsave = '_'.join(fs2[0].stem.split('_')[:-1]) + f'_break{fsi}' + suffix
merge_tifs(fs2, fsave, delete=True)
files = path_save.ls(include=[suffix, '_break'])
files = [o.stem for o in files]
ref = np.unique(['_'.join(o.split('_')[:-1])
for o in files if len(o.split('_')[-1]) < 11])
ids = np.unique([o.split('_')[-1]
for o in files if len(o.split('_')[-1]) < 11])
file_groups = [[path_save/f'{r}_{i}{suffix}' for i in ids
if f'{r}_{i}' in files] for r in ref]
for fs in file_groups:
fsave = '_'.join(fs[0].stem.split('_')[:-1]) + suffix
merge_tifs(fs, fsave, delete=True)
# Cell
def get_event_data(event_id, year, coarse_mask_file, path=Path('.'),
coarse_mask_doy_layer=2, products=['COPERNICUS/S2'],
bands=['B4', 'B8', 'B12'], scale_factor=1e-4, composite_days=[60,60],
max_cloud_fraction=None, use_least_cloudy=None, scale=10,
topography=False, banet_pixel_size=0.001):
rst_ba100 = open_tif(coarse_mask_file)
doys = rst_ba100.read(coarse_mask_doy_layer).astype(np.float16)
doys[doys==0] = np.nan
doy_start, doy_end = np.nanmin(doys), np.nanmax(doys)
del doys
time_start = pd.Timestamp(f'{year}-01-01') + pd.Timedelta(days=doy_start-1)
time_end = pd.Timestamp(f'{year}-01-01') + pd.Timedelta(days=doy_end-1)
print('Event time_start:', str(time_start))
print('Event time_end:', str(time_end))
R = RegionST(event_id, list(rst_ba100.bounds), scale_meters=scale,
time_start=time_start, time_end=time_end, time_margin=1)
R_banet = R.new(pixel_size=banet_pixel_size)
before = (R.times[0]-pd.Timedelta(days=composite_days[0]), R.times[0])
after = (R.times[-1], R.times[-1]+pd.Timedelta(days=composite_days[1]))
for mode, time_window in zip(['before', 'after'], [before, after]):
path_save = path/R.name/mode
print('Downloading GEE median composite for:', ' to '.join([str(o) for o in time_window]))
download_data(R, time_window, products, bands, path_save,
max_cloud_fraction=max_cloud_fraction, use_least_cloudy=use_least_cloudy,
scale=scale)
if topography:
print('Downloading topography data.')
download_topography_data(R, path/event_id/'topography', scale=scale)
rst_ba100 = rst_ba100.read(coarse_mask_doy_layer)
s10before_files = np.array((path/R.name/'before').ls(exclude=['.xml']))[[1,2,0]].tolist()
s10after_files = np.array((path/R.name/'after').ls(exclude=['.xml']))[[1,2,0]].tolist()
transform = rasterio.open(str(s10before_files[0])).transform
crs = rasterio.open(str(s10before_files[0])).crs
rst_s10before = np.concatenate(
[rasterio.open(str(f)).read() for f in s10before_files]).astype(np.float16)*scale_factor
rst_s10after = np.concatenate(
[rasterio.open(str(f)).read() for f in s10after_files]).astype(np.float16)*scale_factor
rst_ba100 = downsample(rst_ba100, src_tfm=R_banet.transform, dst_tfm=transform,
dst_shape=(1, *rst_s10before.shape[-2:]), resampling='bilinear').astype(np.float32)
im = np.concatenate([rst_s10before, rst_s10after, rst_ba100], axis=0).transpose(1,2,0)
return im, transform, crs
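# Cell
# A small usage sketch (an assumption, not part of the original notebook): build a
# RegionST by hand and inspect the daily time steps it generates. The bounding box
# and dates below are arbitrary placeholder values.
if __name__ == '__main__':
    demo = RegionST('demo', bbox=[-8.0, 39.0, -7.0, 40.0], pixel_size=0.001,
                    time_start='2017-06-17', time_end='2017-06-24', time_margin=1)
    print(demo.times[0], '->', demo.times[-1], f'({len(demo.times)} daily steps)')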
|
[
"os.remove",
"banet.geo.downsample",
"banet.geo.open_tif",
"pathlib.Path",
"rasterio.coords.BoundingBox",
"requests.get",
"pandas.Timedelta",
"ee.Initialize",
"pandas.date_range",
"rasterio.Env",
"numpy.concatenate",
"numpy.nanmax",
"pandas.Timestamp",
"json.load",
"ee.ImageCollection",
"ee.Image",
"numpy.nanmin",
"numpy.array",
"ee.Geometry.Rectangle",
"warnings.warn"
] |
[((5042, 5076), 'ee.Geometry.Rectangle', 'ee.Geometry.Rectangle', (['region.bbox'], {}), '(region.bbox)\n', (5063, 5076), False, 'import ee\n'), ((6043, 6052), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (6047, 6052), False, 'from pathlib import Path\n'), ((6191, 6206), 'ee.Initialize', 'ee.Initialize', ([], {}), '()\n', (6204, 6206), False, 'import ee\n'), ((6219, 6240), 'ee.Image', 'ee.Image', (['"""srtm90_v4"""'], {}), "('srtm90_v4')\n", (6227, 6240), False, 'import ee\n'), ((7703, 7718), 'ee.Initialize', 'ee.Initialize', ([], {}), '()\n', (7716, 7718), False, 'import ee\n'), ((14007, 14022), 'ee.Initialize', 'ee.Initialize', ([], {}), '()\n', (14020, 14022), False, 'import ee\n'), ((17790, 17799), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (17794, 17799), False, 'from pathlib import Path\n'), ((18117, 18143), 'banet.geo.open_tif', 'open_tif', (['coarse_mask_file'], {}), '(coarse_mask_file)\n', (18125, 18143), False, 'from banet.geo import open_tif, merge, Region\n'), ((1214, 1248), 'rasterio.coords.BoundingBox', 'rasterio.coords.BoundingBox', (['*bbox'], {}), '(*bbox)\n', (1241, 1248), False, 'import rasterio\n'), ((2161, 2209), 'pandas.date_range', 'pd.date_range', (['tstart', 'tend'], {'freq': 'self.time_freq'}), '(tstart, tend, freq=self.time_freq)\n', (2174, 2209), True, 'import pandas as pd\n'), ((4571, 4585), 'rasterio.Env', 'rasterio.Env', ([], {}), '()\n', (4583, 4585), False, 'import rasterio\n'), ((5740, 5855), 'warnings.warn', 'warnings.warn', (['f"""Total number of images in the collection {colsize} less than n={n}. Setting n={colsize}"""'], {}), "(\n f'Total number of images in the collection {colsize} less than n={n}. Setting n={colsize}'\n )\n", (5753, 5855), False, 'import warnings\n'), ((14585, 14616), 'ee.ImageCollection', 'ee.ImageCollection', (['products[0]'], {}), '(products[0])\n', (14603, 14616), False, 'import ee\n'), ((14806, 14831), 'ee.ImageCollection', 'ee.ImageCollection', (['imCol'], {}), '(imCol)\n', (14824, 14831), False, 'import ee\n'), ((18264, 18279), 'numpy.nanmin', 'np.nanmin', (['doys'], {}), '(doys)\n', (18273, 18279), True, 'import numpy as np\n'), ((18281, 18296), 'numpy.nanmax', 'np.nanmax', (['doys'], {}), '(doys)\n', (18290, 18296), True, 'import numpy as np\n'), ((18327, 18356), 'pandas.Timestamp', 'pd.Timestamp', (['f"""{year}-01-01"""'], {}), "(f'{year}-01-01')\n", (18339, 18356), True, 'import pandas as pd\n'), ((18359, 18391), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(doy_start - 1)'}), '(days=doy_start - 1)\n', (18371, 18391), True, 'import pandas as pd\n'), ((18407, 18436), 'pandas.Timestamp', 'pd.Timestamp', (['f"""{year}-01-01"""'], {}), "(f'{year}-01-01')\n", (18419, 18436), True, 'import pandas as pd\n'), ((18439, 18469), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(doy_end - 1)'}), '(days=doy_end - 1)\n', (18451, 18469), True, 'import pandas as pd\n'), ((2043, 2078), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': 'self.time_margin'}), '(days=self.time_margin)\n', (2055, 2078), True, 'import pandas as pd\n'), ((2110, 2145), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': 'self.time_margin'}), '(days=self.time_margin)\n', (2122, 2145), True, 'import pandas as pd\n'), ((2389, 2401), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2398, 2401), False, 'import json\n'), ((4488, 4502), 'pathlib.Path', 'Path', (['files[0]'], {}), '(files[0])\n', (4492, 4502), False, 'from pathlib import Path\n'), ((4837, 4849), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (4846, 4849), False, 'import os\n'), 
((6901, 6918), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (6913, 6918), False, 'import requests\n'), ((18779, 18815), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': 'composite_days[0]'}), '(days=composite_days[0])\n', (18791, 18815), True, 'import pandas as pd\n'), ((18868, 18904), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': 'composite_days[1]'}), '(days=composite_days[1])\n', (18880, 18904), True, 'import pandas as pd\n'), ((20092, 20228), 'banet.geo.downsample', 'downsample', (['rst_ba100'], {'src_tfm': 'R_banet.transform', 'dst_tfm': 'transform', 'dst_shape': '(1, *rst_s10before.shape[-2:])', 'resampling': '"""bilinear"""'}), "(rst_ba100, src_tfm=R_banet.transform, dst_tfm=transform,\n dst_shape=(1, *rst_s10before.shape[-2:]), resampling='bilinear')\n", (20102, 20228), False, 'from banet.geo import downsample\n'), ((20279, 20343), 'numpy.concatenate', 'np.concatenate', (['[rst_s10before, rst_s10after, rst_ba100]'], {'axis': '(0)'}), '([rst_s10before, rst_s10after, rst_ba100], axis=0)\n', (20293, 20343), True, 'import numpy as np\n'), ((8774, 8805), 'ee.ImageCollection', 'ee.ImageCollection', (['products[0]'], {}), '(products[0])\n', (8792, 8805), False, 'import ee\n'), ((9321, 9345), 'ee.ImageCollection', 'ee.ImageCollection', (['[im]'], {}), '([im])\n', (9339, 9345), False, 'import ee\n'), ((14691, 14722), 'ee.ImageCollection', 'ee.ImageCollection', (['products[i]'], {}), '(products[i])\n', (14709, 14722), False, 'import ee\n'), ((8896, 8927), 'ee.ImageCollection', 'ee.ImageCollection', (['products[i]'], {}), '(products[i])\n', (8914, 8927), False, 'import ee\n'), ((15332, 15349), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (15344, 15349), False, 'import requests\n'), ((16800, 16812), 'numpy.array', 'np.array', (['fs'], {}), '(fs)\n', (16808, 16812), True, 'import numpy as np\n'), ((12746, 12758), 'numpy.array', 'np.array', (['fs'], {}), '(fs)\n', (12754, 12758), True, 'import numpy as np\n'), ((10858, 10875), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (10870, 10875), False, 'import requests\n')]
|
#https://towardsdatascience.com/outlier-detection-with-isolation-forest-3d190448d45e
#reference link
# importing libaries ----
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pylab import savefig
from sklearn.ensemble import IsolationForest
# Generating data ----
rng = np.random.RandomState(42)
# Generating training data
X_train = 0.2 * rng.randn(1000, 2)
X_train = np.r_[X_train + 3, X_train]
X_train = pd.DataFrame(X_train, columns = ['x1', 'x2'])
# Generating new, 'normal' observation
X_test = 0.2 * rng.randn(200, 2)
X_test = np.r_[X_test + 3, X_test]
X_test = pd.DataFrame(X_test, columns = ['x1', 'x2'])
# Generating outliers
X_outliers = rng.uniform(low=-1, high=5, size=(50, 2))
X_outliers = pd.DataFrame(X_outliers, columns = ['x1', 'x2'])
# Isolation Forest ----
# training the model
clf = IsolationForest(max_samples=100, random_state=rng)
clf.fit(X_train)
# predictions
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
# new, 'normal' observations ----
print("Accuracy:", list(y_pred_test).count(1)/y_pred_test.shape[0])
# Accuracy: 0.93
# outliers ----
print("Accuracy:", list(y_pred_outliers).count(-1)/y_pred_outliers.shape[0])
# Accuracy: 0.96
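# Optional extension (not from the reference post above): inspect the continuous
# anomaly scores behind the +1/-1 predictions. decision_function returns negative
# values for points the forest considers anomalous.
scores_test = clf.decision_function(X_test)
scores_outliers = clf.decision_function(X_outliers)
print("Mean score, normal observations:", scores_test.mean())
print("Mean score, outliers:", scores_outliers.mean())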
|
[
"pandas.DataFrame",
"sklearn.ensemble.IsolationForest",
"numpy.random.RandomState"
] |
[((300, 325), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (321, 325), True, 'import numpy as np\n'), ((438, 481), 'pandas.DataFrame', 'pd.DataFrame', (['X_train'], {'columns': "['x1', 'x2']"}), "(X_train, columns=['x1', 'x2'])\n", (450, 481), True, 'import pandas as pd\n'), ((601, 643), 'pandas.DataFrame', 'pd.DataFrame', (['X_test'], {'columns': "['x1', 'x2']"}), "(X_test, columns=['x1', 'x2'])\n", (613, 643), True, 'import pandas as pd\n'), ((737, 783), 'pandas.DataFrame', 'pd.DataFrame', (['X_outliers'], {'columns': "['x1', 'x2']"}), "(X_outliers, columns=['x1', 'x2'])\n", (749, 783), True, 'import pandas as pd\n'), ((840, 890), 'sklearn.ensemble.IsolationForest', 'IsolationForest', ([], {'max_samples': '(100)', 'random_state': 'rng'}), '(max_samples=100, random_state=rng)\n', (855, 890), False, 'from sklearn.ensemble import IsolationForest\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 22 16:37:22 2017
@author: dzhaojie
"""
#import HelpFunctionsForCellTracking as HFCT
import os
import numpy as np
from skimage import io
def extract_intensity(num_of_field):
F=io.imread('C:/Users/Desktop/AVG_flatfield-5x-590 nm LED.tif')
F=F.astype(np.float64)
mga=16844.556
C=F/mga
def cmask(index,radius,array):
a,b = index
nx,ny = array.shape
y,x = np.ogrid[-a:nx-a,-b:ny-b]
mask = x*x + y*y <= radius*radius
return(sum(array[mask]))
for field_of_view in num_of_field:
#HFCT.clear_all
try:
images_name=os.listdir('C:/Users/data-'+str(field_of_view))
except:
continue
xyl=np.load('C:/Users/data-results/local maxium separate cell/'+str(field_of_view)+'/xyl.npy')
uniquexyl=np.unique(xyl[:,4])
cell_cell_dis=np.zeros((xyl.shape[0],1))
for ui in range (uniquexyl.shape[0]-1):
num_of_cell=uniquexyl[ui].astype(np.int32)
try:
X=np.load('C:/Users/data-results/local maxium separate cell/'+str(field_of_view)+'/'+str(num_of_cell)+'cell/X.npy')
Y=np.load('C:/Users/data-results/local maxium separate cell/'+str(field_of_view)+'/'+str(num_of_cell)+'cell/Y.npy')
except:
continue
r=np.zeros((X.shape[0],1))
g=np.zeros((X.shape[0],X.shape[1]))
for i in range (X.shape[1]):
for k in range (xyl.shape[0]):
cell_cell_dis[k]=np.sqrt( np.square(X[0,i]-xyl[k,0]) + np.square(Y[0,i]-xyl[k,1]) )
diss=np.sort(cell_cell_dis,axis=0)
if diss[1]>=8:
r[i]=4
else:
r[i]=np.floor(diss[1]/2)
for j in range (X.shape[0]):
AO=io.imread(os.path.join('C:/Users/data-'+str(field_of_view),images_name[j]))
A=AO.astype(np.float64)
A=A/C
for i in range (X.shape[1]):
g[j,i]=cmask((X[j,i],Y[j,i]),r[i],A)
np.save('C:/Users/data-results/local maxium separate cell/'+str(field_of_view)+'/'+str(num_of_cell)+'cell/g.npy',g)
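# A standalone sketch (not part of the original script) of the circular-mask sum
# used inside extract_intensity: integrate the pixels of a small test image that
# fall within a given radius of a centre pixel.
def _cmask_demo():
    arr = np.ones((9, 9))
    a, b, radius = 4, 4, 2            # centre row, centre column, disc radius
    nrows, ncols = arr.shape
    y, x = np.ogrid[-a:nrows - a, -b:ncols - b]
    mask = x * x + y * y <= radius * radius
    return arr[mask].sum()            # 13 pixels lie inside a radius-2 disc

if __name__ == '__main__':
    print(_cmask_demo())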
|
[
"skimage.io.imread",
"numpy.floor",
"numpy.square",
"numpy.zeros",
"numpy.sort",
"numpy.unique"
] |
[((246, 307), 'skimage.io.imread', 'io.imread', (['"""C:/Users/Desktop/AVG_flatfield-5x-590 nm LED.tif"""'], {}), "('C:/Users/Desktop/AVG_flatfield-5x-590 nm LED.tif')\n", (255, 307), False, 'from skimage import io\n'), ((918, 938), 'numpy.unique', 'np.unique', (['xyl[:, 4]'], {}), '(xyl[:, 4])\n', (927, 938), True, 'import numpy as np\n'), ((961, 988), 'numpy.zeros', 'np.zeros', (['(xyl.shape[0], 1)'], {}), '((xyl.shape[0], 1))\n', (969, 988), True, 'import numpy as np\n'), ((1483, 1508), 'numpy.zeros', 'np.zeros', (['(X.shape[0], 1)'], {}), '((X.shape[0], 1))\n', (1491, 1508), True, 'import numpy as np\n'), ((1523, 1557), 'numpy.zeros', 'np.zeros', (['(X.shape[0], X.shape[1])'], {}), '((X.shape[0], X.shape[1]))\n', (1531, 1557), True, 'import numpy as np\n'), ((1778, 1808), 'numpy.sort', 'np.sort', (['cell_cell_dis'], {'axis': '(0)'}), '(cell_cell_dis, axis=0)\n', (1785, 1808), True, 'import numpy as np\n'), ((1917, 1938), 'numpy.floor', 'np.floor', (['(diss[1] / 2)'], {}), '(diss[1] / 2)\n', (1925, 1938), True, 'import numpy as np\n'), ((1695, 1725), 'numpy.square', 'np.square', (['(X[0, i] - xyl[k, 0])'], {}), '(X[0, i] - xyl[k, 0])\n', (1704, 1725), True, 'import numpy as np\n'), ((1726, 1756), 'numpy.square', 'np.square', (['(Y[0, i] - xyl[k, 1])'], {}), '(Y[0, i] - xyl[k, 1])\n', (1735, 1756), True, 'import numpy as np\n')]
|
import numpy as np
import copy
from correlations import *
from normalizations import *
# equal weighting
def equal_weighting(X):
N = np.shape(X)[1]
return np.ones(N) / N
# entropy weighting
def entropy_weighting(X):
# normalization for profit criteria
criteria_type = np.ones(np.shape(X)[1])
pij = sum_normalization(X, criteria_type)
m, n = np.shape(pij)
H = np.zeros((m, n))
for j in range(n):
for i in range(m):
if pij[i, j] != 0:
H[i, j] = pij[i, j] * np.log(pij[i, j])
h = np.sum(H, axis = 0) * (-1 * ((np.log(m)) ** (-1)))
d = 1 - h
w = d / (np.sum(d))
return w
# standard deviation weighting
def std_weighting(X):
stdv = np.std(X, axis = 0)
return stdv / np.sum(stdv)
# CRITIC weighting
def critic_weighting(X):
# normalization for profit criteria
criteria_type = np.ones(np.shape(X)[1])
x_norm = minmax_normalization(X, criteria_type)
std = np.std(x_norm, axis = 0)
n = np.shape(x_norm)[1]
correlations = np.zeros((n, n))
for i in range(0, n):
for j in range(0, n):
correlations[i, j] = pearson_coeff(x_norm[:, i], x_norm[:, j])
difference = 1 - correlations
suma = np.sum(difference, axis = 0)
C = std * suma
w = C / (np.sum(C, axis = 0))
return w
# Equal distribution of main weights on the hierarchical structure of the model criteria
def structured_equal_weights(modules, main_weights):
flag_begin = True
crit_list = []
num_of_modules = len(modules)
for g, module in enumerate(modules):
num_of_submodules = len(module)
for submodule in module:
num_of_elements = len(submodule)
subweights = np.ones(num_of_elements) * ((main_weights[g] / num_of_submodules) / num_of_elements)
if flag_begin:
old_subweights = copy.deepcopy(subweights)
flag_begin = False
else:
old_subweights = np.hstack((old_subweights, subweights))
for sub in submodule:
crit_list.append(sub)
return old_subweights, crit_list
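# A short usage sketch (an assumption, not part of the original module): apply the
# self-contained weighting methods to a small decision matrix with 3 alternatives and
# 4 criteria. entropy_weighting and critic_weighting are left out here because they
# depend on the normalization helpers imported from `normalizations`.
if __name__ == '__main__':
    X = np.array([[7.0, 312.0, 4.5, 1.2],
                  [6.0, 284.0, 5.1, 1.0],
                  [8.0, 301.0, 4.8, 1.4]])
    print('equal weights:', equal_weighting(X))
    print('std weights:', std_weighting(X))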
|
[
"numpy.sum",
"numpy.log",
"numpy.std",
"numpy.zeros",
"numpy.ones",
"numpy.hstack",
"numpy.shape"
] |
[((357, 370), 'numpy.shape', 'np.shape', (['pij'], {}), '(pij)\n', (365, 370), True, 'import numpy as np\n'), ((380, 396), 'numpy.zeros', 'np.zeros', (['(m, n)'], {}), '((m, n))\n', (388, 396), True, 'import numpy as np\n'), ((712, 729), 'numpy.std', 'np.std', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (718, 729), True, 'import numpy as np\n'), ((955, 977), 'numpy.std', 'np.std', (['x_norm'], {'axis': '(0)'}), '(x_norm, axis=0)\n', (961, 977), True, 'import numpy as np\n'), ((1027, 1043), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (1035, 1043), True, 'import numpy as np\n'), ((1221, 1247), 'numpy.sum', 'np.sum', (['difference'], {'axis': '(0)'}), '(difference, axis=0)\n', (1227, 1247), True, 'import numpy as np\n'), ((127, 138), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (135, 138), True, 'import numpy as np\n'), ((153, 163), 'numpy.ones', 'np.ones', (['N'], {}), '(N)\n', (160, 163), True, 'import numpy as np\n'), ((543, 560), 'numpy.sum', 'np.sum', (['H'], {'axis': '(0)'}), '(H, axis=0)\n', (549, 560), True, 'import numpy as np\n'), ((621, 630), 'numpy.sum', 'np.sum', (['d'], {}), '(d)\n', (627, 630), True, 'import numpy as np\n'), ((750, 762), 'numpy.sum', 'np.sum', (['stdv'], {}), '(stdv)\n', (756, 762), True, 'import numpy as np\n'), ((988, 1004), 'numpy.shape', 'np.shape', (['x_norm'], {}), '(x_norm)\n', (996, 1004), True, 'import numpy as np\n'), ((1282, 1299), 'numpy.sum', 'np.sum', (['C'], {'axis': '(0)'}), '(C, axis=0)\n', (1288, 1299), True, 'import numpy as np\n'), ((284, 295), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (292, 295), True, 'import numpy as np\n'), ((877, 888), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (885, 888), True, 'import numpy as np\n'), ((573, 582), 'numpy.log', 'np.log', (['m'], {}), '(m)\n', (579, 582), True, 'import numpy as np\n'), ((1719, 1743), 'numpy.ones', 'np.ones', (['num_of_elements'], {}), '(num_of_elements)\n', (1726, 1743), True, 'import numpy as np\n'), ((1989, 2028), 'numpy.hstack', 'np.hstack', (['(old_subweights, subweights)'], {}), '((old_subweights, subweights))\n', (1998, 2028), True, 'import numpy as np\n'), ((516, 533), 'numpy.log', 'np.log', (['pij[i, j]'], {}), '(pij[i, j])\n', (522, 533), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import time
from pydmd import HODMD
def myfunc(x):
return np.cos(x)*np.sin(np.cos(x)) + np.cos(x*.2)
x = np.linspace(0, 10, 64)
y = myfunc(x)
snapshots = y
plt.plot(x, snapshots, '.')
plt.show()
hodmd = HODMD(svd_rank=0, exact=True, opt=True, d=30).fit(snapshots)
hodmd.reconstructed_data.shape
hodmd.plot_eigs()
hodmd.original_time['dt'] = hodmd.dmd_time['dt'] = x[1] - x[0]
hodmd.original_time['t0'] = hodmd.dmd_time['t0'] = x[0]
hodmd.original_time['tend'] = hodmd.dmd_time['tend'] = x[-1]
plt.plot(hodmd.original_timesteps, snapshots, '.', label='snapshots')
plt.plot(hodmd.original_timesteps, y, '-', label='original function')
plt.plot(hodmd.dmd_timesteps, hodmd.reconstructed_data[0].real, '--', label='DMD output')
plt.legend()
plt.show()
hodmd.dmd_time['tend'] = 50
fig = plt.figure(figsize=(15, 5))
plt.plot(hodmd.original_timesteps, snapshots, '.', label='snapshots')
plt.plot(np.linspace(0, 50, 128), myfunc(np.linspace(0, 50, 128)), '-', label='original function')
plt.plot(hodmd.dmd_timesteps, hodmd.reconstructed_data[0].real, '--', label='DMD output')
plt.legend()
plt.show()
noise_range = [.01, .05, .1, .2]
fig = plt.figure(figsize=(15, 10))
future = 20
for id_plot, i in enumerate(noise_range, start=1):
snapshots = y + np.random.uniform(-i, i, size=y.shape)
hodmd = HODMD(svd_rank=0, exact=True, opt=True, d=30).fit(snapshots)
hodmd.original_time['dt'] = hodmd.dmd_time['dt'] = x[1] - x[0]
hodmd.original_time['t0'] = hodmd.dmd_time['t0'] = x[0]
hodmd.original_time['tend'] = hodmd.dmd_time['tend'] = x[-1]
hodmd.dmd_time['tend'] = 20
plt.subplot(2, 2, id_plot)
plt.plot(hodmd.original_timesteps, snapshots, '.', label='snapshots')
plt.plot(np.linspace(0, future, 128), myfunc(np.linspace(0, future, 128)), '-', label='original function')
plt.plot(hodmd.dmd_timesteps, hodmd.reconstructed_data[0].real, '--', label='DMD output')
plt.legend()
plt.title('Noise [{} - {}]'.format(-i, i))
plt.show()
|
[
"matplotlib.pyplot.subplot",
"numpy.random.uniform",
"matplotlib.pyplot.show",
"pydmd.HODMD",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.cos"
] |
[((165, 187), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(64)'], {}), '(0, 10, 64)\n', (176, 187), True, 'import numpy as np\n'), ((216, 243), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'snapshots', '"""."""'], {}), "(x, snapshots, '.')\n", (224, 243), True, 'import matplotlib.pyplot as plt\n'), ((244, 254), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (252, 254), True, 'import matplotlib.pyplot as plt\n'), ((556, 625), 'matplotlib.pyplot.plot', 'plt.plot', (['hodmd.original_timesteps', 'snapshots', '"""."""'], {'label': '"""snapshots"""'}), "(hodmd.original_timesteps, snapshots, '.', label='snapshots')\n", (564, 625), True, 'import matplotlib.pyplot as plt\n'), ((626, 695), 'matplotlib.pyplot.plot', 'plt.plot', (['hodmd.original_timesteps', 'y', '"""-"""'], {'label': '"""original function"""'}), "(hodmd.original_timesteps, y, '-', label='original function')\n", (634, 695), True, 'import matplotlib.pyplot as plt\n'), ((696, 790), 'matplotlib.pyplot.plot', 'plt.plot', (['hodmd.dmd_timesteps', 'hodmd.reconstructed_data[0].real', '"""--"""'], {'label': '"""DMD output"""'}), "(hodmd.dmd_timesteps, hodmd.reconstructed_data[0].real, '--', label\n ='DMD output')\n", (704, 790), True, 'import matplotlib.pyplot as plt\n'), ((786, 798), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (796, 798), True, 'import matplotlib.pyplot as plt\n'), ((799, 809), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (807, 809), True, 'import matplotlib.pyplot as plt\n'), ((846, 873), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (856, 873), True, 'import matplotlib.pyplot as plt\n'), ((874, 943), 'matplotlib.pyplot.plot', 'plt.plot', (['hodmd.original_timesteps', 'snapshots', '"""."""'], {'label': '"""snapshots"""'}), "(hodmd.original_timesteps, snapshots, '.', label='snapshots')\n", (882, 943), True, 'import matplotlib.pyplot as plt\n'), ((1043, 1137), 'matplotlib.pyplot.plot', 'plt.plot', (['hodmd.dmd_timesteps', 'hodmd.reconstructed_data[0].real', '"""--"""'], {'label': '"""DMD output"""'}), "(hodmd.dmd_timesteps, hodmd.reconstructed_data[0].real, '--', label\n ='DMD output')\n", (1051, 1137), True, 'import matplotlib.pyplot as plt\n'), ((1133, 1145), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1143, 1145), True, 'import matplotlib.pyplot as plt\n'), ((1146, 1156), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1154, 1156), True, 'import matplotlib.pyplot as plt\n'), ((1197, 1225), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (1207, 1225), True, 'import matplotlib.pyplot as plt\n'), ((2021, 2031), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2029, 2031), True, 'import matplotlib.pyplot as plt\n'), ((953, 976), 'numpy.linspace', 'np.linspace', (['(0)', '(50)', '(128)'], {}), '(0, 50, 128)\n', (964, 976), True, 'import numpy as np\n'), ((1651, 1677), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', 'id_plot'], {}), '(2, 2, id_plot)\n', (1662, 1677), True, 'import matplotlib.pyplot as plt\n'), ((1682, 1751), 'matplotlib.pyplot.plot', 'plt.plot', (['hodmd.original_timesteps', 'snapshots', '"""."""'], {'label': '"""snapshots"""'}), "(hodmd.original_timesteps, snapshots, '.', label='snapshots')\n", (1690, 1751), True, 'import matplotlib.pyplot as plt\n'), ((1867, 1961), 'matplotlib.pyplot.plot', 'plt.plot', (['hodmd.dmd_timesteps', 'hodmd.reconstructed_data[0].real', '"""--"""'], {'label': '"""DMD output"""'}), 
"(hodmd.dmd_timesteps, hodmd.reconstructed_data[0].real, '--', label\n ='DMD output')\n", (1875, 1961), True, 'import matplotlib.pyplot as plt\n'), ((1961, 1973), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1971, 1973), True, 'import matplotlib.pyplot as plt\n'), ((146, 161), 'numpy.cos', 'np.cos', (['(x * 0.2)'], {}), '(x * 0.2)\n', (152, 161), True, 'import numpy as np\n'), ((264, 309), 'pydmd.HODMD', 'HODMD', ([], {'svd_rank': '(0)', 'exact': '(True)', 'opt': '(True)', 'd': '(30)'}), '(svd_rank=0, exact=True, opt=True, d=30)\n', (269, 309), False, 'from pydmd import HODMD\n'), ((985, 1008), 'numpy.linspace', 'np.linspace', (['(0)', '(50)', '(128)'], {}), '(0, 50, 128)\n', (996, 1008), True, 'import numpy as np\n'), ((1310, 1348), 'numpy.random.uniform', 'np.random.uniform', (['(-i)', 'i'], {'size': 'y.shape'}), '(-i, i, size=y.shape)\n', (1327, 1348), True, 'import numpy as np\n'), ((1765, 1792), 'numpy.linspace', 'np.linspace', (['(0)', 'future', '(128)'], {}), '(0, future, 128)\n', (1776, 1792), True, 'import numpy as np\n'), ((116, 125), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (122, 125), True, 'import numpy as np\n'), ((1361, 1406), 'pydmd.HODMD', 'HODMD', ([], {'svd_rank': '(0)', 'exact': '(True)', 'opt': '(True)', 'd': '(30)'}), '(svd_rank=0, exact=True, opt=True, d=30)\n', (1366, 1406), False, 'from pydmd import HODMD\n'), ((1801, 1828), 'numpy.linspace', 'np.linspace', (['(0)', 'future', '(128)'], {}), '(0, future, 128)\n', (1812, 1828), True, 'import numpy as np\n'), ((133, 142), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (139, 142), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 16 17:02:17 2020
@author: rowe1
"""
from __future__ import print_function
import numpy as np
import os
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
def scale(arr,minimum=-2,maximum=2):
''' Scale a np.random.rand array to range from minimum to maximum'''
return (arr-0.5)*(maximum-minimum)
def reporter(history, plot=True, savefile='./ga_snake_history/'):
'''Prints statistics about the most recent population to monitor growth'''
print('\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
print('~~~~~~~~~~~~~~GENERATION: '+str(len(history['best'])+1)+'~~~~~~~~~~~~~~~~~~')
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n')
print('Best:',str(round(history['best'][-1],2)))
print('Average:',str(round(history['average'][-1],2)))
print('Standard Deviation:',str(round(history['std'][-1],2)))
print('Run Time:',str(round(history['run_time'][-1],2)))
if plot:
generations=np.linspace(1,len(history['best']),len(history['best']))
best=history['best']
average=history['average']
std=history['std']
average_std_over=[a+s for (a,s) in zip(average,std)]
average_std_under=[a-s for (a,s) in zip(average,std)]
plt.plot(generations,best,'r-',label='Best',lw=2)
plt.plot(generations,average,'b-',label='Average',lw=2)
plt.plot(generations,average_std_over,'g--',label='+1 STD')
plt.plot(generations,average_std_under,'g--',label='-1 STD')
plt.xlabel('Generation')
plt.ylabel('Fitness')
plt.legend(['Best','Average','+1 STD','-1 STD'],loc=2)
plt.savefig(savefile+'progress_plot.png')
def mutate(nets, mutation_type='gaussian', mutation_range=[-2,2], mutation_rate=0.03, nn_shape=[24,30,30,4], activation_functions=['tanh','tanh','tanh','softmax']):
if mutation_rate==0:
return nets
mutated_nets=[]
for net in nets:
#Flatten neural network to 1D list
net = np.array(flatten_net(net))
#use a list of booleans to denote whether a gene will be mutated
mutate = np.random.rand(len(net)) <= mutation_rate
if mutation_type=='gaussian':
#Add a random value
gaussian_mutations = np.random.normal(size=len(net))
net[mutate] += gaussian_mutations[mutate]
else:
#replace value with a random value
for idx,result in enumerate(mutate):
if result:
net[idx]=scale(np.random.rand(),minimum=mutation_range[0],maximum=mutation_range[1])
#Rebuild the neural_network model from the flattened child net
connection_weights,bias_weights = rebuild_net(net,nn_shape)
mutated_net = make_nets(connection_weights,bias_weights,activation_functions)
mutated_nets.append(mutated_net)
return mutated_nets
def make_nets(connection_weights,bias_weights,activation_functions):
''' Each layer after the initial input layer of a densly connected FFNN
will have connection weights in the form of numpy array with the shape of
connection_weight_shape=(number_of_previous_layers_nodes,number_of_current_layers_nodes)
there will also be one bias weight for each node in a layer with the shape of
bias_weight_shape=(number_of_nodes_in_current_layer, )
A densly connected NN will be made given weights for each connection and bias
activation_functions should be given as a list where acceptable values are:
'sigmoid','tanh','relu','softmax'
Provide one array of connection_weights, one of bias_weights, and one activation
function for each layer beyond the initial layer:
i.e. for two hidden layers with 20 inputs, 12 hidden nodes, 8 hidden nodes, 4 output nodes:
make_nets([np.random.rand(20,12),np.random.rand(12,8),np.random.rand(8,4)],
[np.random.rand(12,), np.random.rand(8,), np.random.rand(4,)],
['tanh','tanh','softmax'])
note, this is only for the first guess at the neural net weights. After which,
use the genetic algorithm to choose weights instead of using np.random.rand
'''
connections=[conn for conn in connection_weights]
biases=[bias for bias in bias_weights]
activations=[fcn for fcn in activation_functions]
model=keras.models.Sequential([keras.layers.Input(shape=(connections[0].shape[0],))])
for (c,b,a) in zip(connections,biases,activations):
model.add(keras.layers.Dense(c.shape[1],weights=[c,b],activation=a))
return model
def selection(nets,fitness, survival_fraction):
'''Returns a zipped list of the top {survival_fraction} percent of neural
networks based on their fitness'''
agents=zip(nets,fitness)
agents=sorted(agents, key=lambda agent: agent[1], reverse=True)
    #Return the top survival_fraction share of most fit agents to move on and breed
return agents[:int(survival_fraction*len(agents))]
def relu(x):
    '''Clamp non-positive fitness values to small positive numbers so they can serve as selection probabilities during crossover'''
return x if x>0 else max(0.01, x+0.8)
def crossover(agents,nn_shape,activation_functions,population):
child_nets=[]
temp_fitness=[agent[1] for agent in agents]
#Set all negative values to 0 in fitness
temp_fitness=[relu(fit) for fit in temp_fitness]
sum_fit=np.sum(temp_fitness)
normalized_fitness=[fit/sum_fit for fit in temp_fitness]
for i in range(int(0.5*(population-len(agents)))):
#create one child each loop, until len(nets)+len(child_nets)=population
#Select two parents giving higher probability of selection to the more fit snakes
agent_index_1 = np.random.choice(len(agents),p=normalized_fitness)
agent_index_2 = np.random.choice(len(agents),p=normalized_fitness)
#Make sure the parents are not identical
while agent_index_1 == agent_index_2:
agent_index_2 = np.random.choice(len(agents),p=normalized_fitness)
#Flatten parents neural_net weights (both connection and bias weights) to a 1D list for crossover
parent_1 = flatten_net(agents[agent_index_1][0])
parent_2 = flatten_net(agents[agent_index_2][0])
#Fitness of each parent
fitness_1 = agents[agent_index_1][1]
fitness_2 = agents[agent_index_2][1]
#Randomly select which parent the child gets its gene on while giving
#a higher probability to the more fit parents genes
try:
probability_threshold = fitness_1 / (fitness_2 + fitness_1)
except:
#in the case that fitness_1+fitness_2=0
probability_threshold = 0.5
#If p1_genes is true, the child gets that gene from parent 1
p1_genes = np.random.rand(len(parent_1)) <= probability_threshold
        #Use float arrays so the parents' real-valued weights are not truncated to integers
        child_1=np.zeros(len(parent_1))
        child_2=np.zeros(len(parent_1))
child_gene_index=0
for p1,p2,p1_gene in zip(parent_1,parent_2,p1_genes):
if p1_gene:
child_1[child_gene_index]=p1
child_2[child_gene_index]=p2
else:
child_2[child_gene_index]=p1
child_1[child_gene_index]=p2
child_gene_index+=1
#Rebuild the neural_network model from the flattened child net CHILD 1
connection_weights, bias_weights = rebuild_net(child_1, nn_shape)
child_net = make_nets(connection_weights, bias_weights, activation_functions)
child_nets.append(child_net)
#Rebuild the neural_network model from the flattened child net CHILD 2
connection_weights, bias_weights = rebuild_net(child_2, nn_shape)
child_net = make_nets(connection_weights, bias_weights, activation_functions)
child_nets.append(child_net)
return child_nets
def flatten_net(net):
#Extract Numpy arrays of connection and bias weights from model
layers=[layer.numpy() for layer in net.weights]
#Convert each array to 1 dimension along the x-axis
flat_layers=[np.reshape(layer,(-1,1)) for layer in layers]
#Collect all connection andn bias weights into a list
flattened_net=[]
for layer in flat_layers:
flattened_net.extend(layer)
#convert all values to floats
flattened_net=[float(weight) for weight in flattened_net]
return flattened_net
def rebuild_net(flattened_net,nn_shape):
'''
Takes a list of the connection and bias weights in 1D form:
List of all node connection weights for hidden layer 1
List of all bias weights for hidden layer 1
        List of all node connection weights for hidden layer 2
...
List of all node connection weights for output layer
List of all bias weights for output layer
Restructures the flattened_net into arrays where each node layer
has a 1D bias array and each connection layer has a 2D connection weight array
the shape of each bias array is (number_of_nodes_in_layer,1)
the shape of each connection weight array is (number_of_nodes_in_previous_layer,number_of_nodes_in_current_layer)
i.e.: for a model with 3 input, 1 hidden layer of 2 nodes, and 1 output:
connection_weights layer 1: 0.5, 0.7, -0.3, 0.4, 0.8, -0.6
bias_weights layer 1: 0, 0
connection_weights output layers: 0.8, -0.4
bias_weights output layer: 0
In : rebuild_net([0.5,0.7,-0.3,0.4,0.8,-0.6,0,0,0.8,-0.4,0])
Out: ( list of connection weight numpy arrays, list of bias weight numpy arrays )
( [[[0.5,0.7,-0.3],[0.4,0.8,-0.6]], [0.8,-0.4]], [[0,0], [0]] )
'''
connection_weights=[]
bias_weights=[]
start_idx=0
for idx in range(1,len(nn_shape)):
#Add a reshaped layer to the connection_weights list
end_idx=int(start_idx+nn_shape[idx-1]*nn_shape[idx])
connection_weights.append(np.reshape(flattened_net[start_idx:end_idx],(nn_shape[idx-1], nn_shape[idx])))
start_idx=end_idx
#Add reshaped bias weights
end_idx=int(start_idx+nn_shape[idx])
bias_weights.append(np.reshape(flattened_net[start_idx:end_idx],(nn_shape[idx],)))
start_idx=end_idx
return (connection_weights,bias_weights)
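# A minimal round-trip sketch (an assumption, not part of the original script): build a
# small network, flatten its weights and rebuild them, checking that the flatten/rebuild
# pair used by crossover and mutation preserves every connection and bias weight.
if __name__ == '__main__':
    shape = [24, 30, 30, 4]
    activations = ['tanh', 'tanh', 'softmax']
    conns = [scale(np.random.rand(i, j)) for i, j in zip(shape[:-1], shape[1:])]
    biases = [scale(np.random.rand(j)) for j in shape[1:]]
    net = make_nets(conns, biases, activations)
    flat = flatten_net(net)
    rebuilt_conns, rebuilt_biases = rebuild_net(flat, shape)
    ok = (all(np.allclose(c, rc) for c, rc in zip(conns, rebuilt_conns)) and
          all(np.allclose(b, rb) for b, rb in zip(biases, rebuilt_biases)))
    print('flatten/rebuild round trip ok:', ok)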
|
[
"numpy.sum",
"matplotlib.pyplot.plot",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.legend",
"numpy.reshape",
"tensorflow.keras.layers.Input",
"numpy.random.rand",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((5691, 5711), 'numpy.sum', 'np.sum', (['temp_fitness'], {}), '(temp_fitness)\n', (5697, 5711), True, 'import numpy as np\n'), ((1440, 1493), 'matplotlib.pyplot.plot', 'plt.plot', (['generations', 'best', '"""r-"""'], {'label': '"""Best"""', 'lw': '(2)'}), "(generations, best, 'r-', label='Best', lw=2)\n", (1448, 1493), True, 'import matplotlib.pyplot as plt\n'), ((1498, 1557), 'matplotlib.pyplot.plot', 'plt.plot', (['generations', 'average', '"""b-"""'], {'label': '"""Average"""', 'lw': '(2)'}), "(generations, average, 'b-', label='Average', lw=2)\n", (1506, 1557), True, 'import matplotlib.pyplot as plt\n'), ((1562, 1624), 'matplotlib.pyplot.plot', 'plt.plot', (['generations', 'average_std_over', '"""g--"""'], {'label': '"""+1 STD"""'}), "(generations, average_std_over, 'g--', label='+1 STD')\n", (1570, 1624), True, 'import matplotlib.pyplot as plt\n'), ((1630, 1693), 'matplotlib.pyplot.plot', 'plt.plot', (['generations', 'average_std_under', '"""g--"""'], {'label': '"""-1 STD"""'}), "(generations, average_std_under, 'g--', label='-1 STD')\n", (1638, 1693), True, 'import matplotlib.pyplot as plt\n'), ((1699, 1723), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Generation"""'], {}), "('Generation')\n", (1709, 1723), True, 'import matplotlib.pyplot as plt\n'), ((1732, 1753), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Fitness"""'], {}), "('Fitness')\n", (1742, 1753), True, 'import matplotlib.pyplot as plt\n'), ((1762, 1820), 'matplotlib.pyplot.legend', 'plt.legend', (["['Best', 'Average', '+1 STD', '-1 STD']"], {'loc': '(2)'}), "(['Best', 'Average', '+1 STD', '-1 STD'], loc=2)\n", (1772, 1820), True, 'import matplotlib.pyplot as plt\n'), ((1825, 1868), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(savefile + 'progress_plot.png')"], {}), "(savefile + 'progress_plot.png')\n", (1836, 1868), True, 'import matplotlib.pyplot as plt\n'), ((8491, 8517), 'numpy.reshape', 'np.reshape', (['layer', '(-1, 1)'], {}), '(layer, (-1, 1))\n', (8501, 8517), True, 'import numpy as np\n'), ((4699, 4751), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(connections[0].shape[0],)'}), '(shape=(connections[0].shape[0],))\n', (4717, 4751), False, 'from tensorflow import keras\n'), ((4833, 4893), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['c.shape[1]'], {'weights': '[c, b]', 'activation': 'a'}), '(c.shape[1], weights=[c, b], activation=a)\n', (4851, 4893), False, 'from tensorflow import keras\n'), ((10396, 10481), 'numpy.reshape', 'np.reshape', (['flattened_net[start_idx:end_idx]', '(nn_shape[idx - 1], nn_shape[idx])'], {}), '(flattened_net[start_idx:end_idx], (nn_shape[idx - 1], nn_shape[idx])\n )\n', (10406, 10481), True, 'import numpy as np\n'), ((10618, 10680), 'numpy.reshape', 'np.reshape', (['flattened_net[start_idx:end_idx]', '(nn_shape[idx],)'], {}), '(flattened_net[start_idx:end_idx], (nn_shape[idx],))\n', (10628, 10680), True, 'import numpy as np\n'), ((2739, 2755), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2753, 2755), True, 'import numpy as np\n')]
|
# Data-enriching GAN (DeGAN)/ DCGAN for retrieving images from a trained classifier
from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.nn.functional as F
from tensorboardX import SummaryWriter
import numpy as np
from dcgan_model import Generator, Discriminator
#from gdppgan32 import Generator, Discriminator
from alexnet import AlexNet
from torch.autograd import Variable
writer = SummaryWriter()
# CUDA_VISIBLE_DEVICES=0 python dfgan.py --dataroot ../../../datasets --imageSize 32 --cuda --outf out_cifar --manualSeed 108 --niter 200 --batchSize 2048
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataroot', required=True, help='path to dataset')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)
parser.add_argument('--batchSize', type=int, default=64, help='input batch size')
parser.add_argument('--imageSize', type=int, default=64, help='the height / width of the input image to network')
parser.add_argument('--randomcrop', type=int, default=32, help='the height / width of the input image patch to discriminator network')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--niter', type=int, default=25, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')
parser.add_argument('--manualSeed', type=int, help='manual seed')
opt = parser.parse_args()
print(opt)
try:
os.makedirs(opt.outf)
except OSError:
pass
if opt.manualSeed is None:
opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
dataset = dset.CIFAR10(root=opt.dataroot, download=True,
transform=transforms.Compose([
transforms.Scale(opt.imageSize),
transforms.RandomCrop(opt.randomcrop),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]))
nc=3
assert dataset
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
shuffle=True, num_workers=int(opt.workers))
device = torch.device("cuda:0" if opt.cuda else "cpu")
ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
# custom weights initialization called on netG and netD
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
#TODO
# Functions for random cropping in a batch
def randncrop(input_tensor, patch_size):
h = input_tensor.size(2)
w = input_tensor.size(3)
top = np.random.randint(0, h-patch_size)
left = np.random.randint(0, w - patch_size)
out_tensor = input_tensor[:,: , top:top+patch_size , left:left + patch_size]
return out_tensor
def compute_gdpp(phi_fake, phi_real):
def compute_diversity(phi):
phi = F.normalize(phi, p=2, dim=1)
S_B = torch.mm(phi, phi.t())
eig_vals, eig_vecs = torch.eig(S_B, eigenvectors=True)
return Variable(eig_vals[:, 0]), Variable(eig_vecs)
def normalize_min_max(eig_vals):
min_v, max_v = torch.min(eig_vals), torch.max(eig_vals)
return (eig_vals - min_v) / (max_v - min_v)
fake_eig_vals, fake_eig_vecs = compute_diversity(phi_fake)
real_eig_vals, real_eig_vecs = compute_diversity(phi_real)
# Scaling factor to make the two losses operating in comparable ranges.
magnitude_loss = 0.0001 * F.mse_loss(target=real_eig_vals, input=fake_eig_vals)
structure_loss = -torch.sum(torch.mul(fake_eig_vecs, real_eig_vecs), 0)
normalized_real_eig_vals = normalize_min_max(real_eig_vals)
weighted_structure_loss = torch.sum(torch.mul(normalized_real_eig_vals, structure_loss))
return magnitude_loss + weighted_structure_loss
netG = Generator(ngpu).to(device)
netG.apply(weights_init)
if opt.netG != '':
netG.load_state_dict(torch.load(opt.netG))
print(netG)
netC = AlexNet(ngpu).to(device)
netC.load_state_dict(torch.load('./best_model.pth'))
print(netC)
netC.eval()
netD = Discriminator(ngpu).to(device)
netD.apply(weights_init)
if opt.netD != '':
netD.load_state_dict(torch.load(opt.netD))
print(netD)
criterion = nn.BCELoss()
criterion_sum = nn.BCELoss(reduction = 'sum')
    fixed_noise = torch.randn(opt.batchSize, 100, 1, 1, device=device)
real_label = 1
fake_label = 0
# setup optimizer
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
threshold = []
for epoch in range(opt.niter):
num_greater_thresh = 0
count_class = [0]*10
count_class_less = [0]*10
count_class_hist = [0]*10
count_class_less_hist = [0]*10
classification_loss_sum = 0
errD_real_sum = 0
errD_fake_sum = 0
errD_sum = 0
errG_adv_sum = 0
data_size = 0
accD_real_sum = 0
accD_fake_sum = 0
accG_sum = 0
accD_sum = 0
div_loss_sum = 0
gdpp_loss_sum = 0
gdpp_check_sum = 0
for i, data in enumerate(dataloader, 0):
netD.zero_grad()
real_cpu = data[0].to(device)
batch_size = real_cpu.size(0)
#print(batch_size)
data_size = data_size + batch_size
label = torch.full((batch_size,), real_label, device=device)
output , h_real = netD(real_cpu)
output = output.view(output.size(0))
errD_real = criterion(output, label)
errD_real_sum = errD_real_sum + (criterion_sum(output,label)).cpu().data.numpy()
accD_real = (label[output>0.5]).shape[0]
accD_real_sum = accD_real_sum + float(accD_real)
errD_real.backward()
D_x = output.mean().item()
# train with fake
noise = torch.randn(batch_size, 100,1,1, device=device)
fake = netG(noise)
#TODO
#fake.size()= [batch_size, 3, 32, 32]
if opt.randomcrop == fake.size(2):
fake_patch = fake
else:
fake_patch = randncrop(fake, opt.randomcrop)
fake_class = netC(fake)
sm_fake_class = F.softmax(fake_class, dim=1)
class_max = fake_class.max(1,keepdim=True)[0]
class_argmax = fake_class.max(1,keepdim=True)[1]
# Classification loss
classification_loss = torch.mean(torch.sum(-sm_fake_class*torch.log(sm_fake_class+1e-5),dim=1))
classification_loss_add = torch.sum(-sm_fake_class*torch.log(sm_fake_class+1e-5))
classification_loss_sum = classification_loss_sum + (classification_loss_add).cpu().data.numpy()
sm_batch_mean = torch.mean(sm_fake_class,dim=0)
div_loss = torch.sum(sm_batch_mean*torch.log(sm_batch_mean)) # Maximize entropy across batch
div_loss_sum = div_loss_sum + div_loss*batch_size
label.fill_(fake_label)
#print(label.size())
output , h_fake = netD(fake_patch.detach())
output = output.view(output.size(0))
errD_fake = criterion(output, label)
errD_fake_sum = errD_fake_sum + (criterion_sum(output, label)).cpu().data.numpy()
accD_fake = (label[output<=0.5]).shape[0]
accD_fake_sum = accD_fake_sum + float(accD_fake)
errD_fake.backward()
D_G_z1 = output.mean().item()
errD = errD_real + errD_fake
errD_sum = errD_real_sum + errD_fake_sum
accD = accD_real + accD_fake
accD_sum = accD_real_sum + accD_fake_sum
optimizerD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
netG.zero_grad()
label.fill_(real_label) # fake labels are real for generator cost
output, h_fake = netD(fake_patch)
gdpp_loss = compute_gdpp(h_fake, h_real)
gdpp_loss_sum += gdpp_loss
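        # GDPP of the real features against themselves, logged purely as a
        # reference/baseline value (not added to the generator loss).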
gdpp_check = compute_gdpp(h_real, h_real)
gdpp_check_sum = gdpp_check_sum + gdpp_check
output = output.view(output.size(0))
c_l = 0 # Hyperparameter to weigh entropy loss
d_l = 5 # Hyperparameter to weigh the diversity loss
g_l = 1
errG_adv = criterion(output, label)
errG_adv_sum = errG_adv_sum + (criterion_sum(output, label)).cpu().data.numpy()
accG = (label[output>0.5]).shape[0]
accG_sum = accG_sum + float(accG)
errG = errG_adv + c_l * classification_loss + d_l * div_loss + g_l*gdpp_loss
errG_sum = errG_adv_sum + c_l * classification_loss_sum + d_l * div_loss_sum + g_l* gdpp_loss_sum
errG.backward()
D_G_z2 = output.mean().item()
optimizerG.step()
print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
% (epoch, opt.niter, i, len(dataloader),
errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
pred_class = F.softmax(fake_class,dim=1).max(1, keepdim=True)[0]
pred_class_argmax = F.softmax(fake_class,dim=1).max(1, keepdim=True)[1]
num_greater_thresh = num_greater_thresh + (torch.sum(pred_class > 0.9).cpu().data.numpy())
for argmax, val in zip(pred_class_argmax, pred_class):
if val > 0.9:
count_class_hist.append(argmax)
count_class[argmax] = count_class[argmax] + 1
else:
count_class_less_hist.append(argmax)
count_class_less[argmax] = count_class_less[argmax] + 1
if i % 100 == 0:
writer.add_image("Gen Imgs Training", (fake+1)/2, epoch)
# do checkpointing
if (epoch+1)%50 == 0:
torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outf, epoch))
torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, epoch))
# Generate fake samples for visualization
test_size = 1000
noise_test = torch.randn(test_size, 100, 1, 1, device=device)
fake_test = netG(noise_test)
fake_test_class = netC(fake_test)
pred_test_class_max = F.softmax(fake_test_class,dim=1).max(1, keepdim=True)[0]
pred_test_class_argmax = F.softmax(fake_test_class,dim=1).max(1, keepdim=True)[1]
for i in range(10):
print("Score>0.9: Class",i,":",torch.sum(((pred_test_class_argmax.view(test_size)==i) & (pred_test_class_max.view(test_size)>0.9)).float()))
print("Score<0.9: Class",i,":",torch.sum(((pred_test_class_argmax.view(test_size)==i) & (pred_test_class_max.view(test_size)<0.9)).float()))
if fake_test[pred_test_class_argmax.view(test_size)==0].shape[0] > 0:
writer.add_image("Gen Imgs Test: Airplane", (fake_test[pred_test_class_argmax.view(test_size)==0]+1)/2, epoch)
if fake_test[pred_test_class_argmax.view(test_size)==1].shape[0] > 0:
writer.add_image("Gen Imgs Test: Automobile", (fake_test[pred_test_class_argmax.view(test_size)==1]+1)/2, epoch)
if fake_test[pred_test_class_argmax.view(test_size)==2].shape[0] > 0:
writer.add_image("Gen Imgs Test: Bird", (fake_test[pred_test_class_argmax.view(test_size)==2]+1)/2, epoch)
if fake_test[pred_test_class_argmax.view(test_size)==3].shape[0] > 0:
writer.add_image("Gen Imgs Test: Cat", (fake_test[pred_test_class_argmax.view(test_size)==3]+1)/2, epoch)
if fake_test[pred_test_class_argmax.view(test_size)==4].shape[0] > 0:
writer.add_image("Gen Imgs Test: Deer", (fake_test[pred_test_class_argmax.view(test_size)==4]+1)/2, epoch)
if fake_test[pred_test_class_argmax.view(test_size)==5].shape[0] > 0:
writer.add_image("Gen Imgs Test: Dog", (fake_test[pred_test_class_argmax.view(test_size)==5]+1)/2, epoch)
if fake_test[pred_test_class_argmax.view(test_size)==6].shape[0] > 0:
writer.add_image("Gen Imgs Test: Frog", (fake_test[pred_test_class_argmax.view(test_size)==6]+1)/2, epoch)
if fake_test[pred_test_class_argmax.view(test_size)==7].shape[0] > 0:
writer.add_image("Gen Imgs Test: Horse", (fake_test[pred_test_class_argmax.view(test_size)==7]+1)/2, epoch)
if fake_test[pred_test_class_argmax.view(test_size)==8].shape[0] > 0:
writer.add_image("Gen Imgs Test: Ship", (fake_test[pred_test_class_argmax.view(test_size)==8]+1)/2, epoch)
if fake_test[pred_test_class_argmax.view(test_size)==9].shape[0] > 0:
writer.add_image("Gen Imgs Test: Truck", (fake_test[pred_test_class_argmax.view(test_size)==9]+1)/2, epoch)
print(count_class , "Above 0.9")
print(count_class_less, "Below 0.9")
writer.add_histogram("above 0.9", np.asarray(count_class), epoch, bins=10)
writer.add_histogram("above 0.9", np.asarray(count_class), epoch, bins=10)
threshold.append(num_greater_thresh)
writer.add_scalar("1 Train Discriminator accuracy(all)", accD_sum/ (2*data_size), epoch)
writer.add_scalar("2 Train Discriminator accuracy(fake)", accD_fake_sum/ data_size, epoch)
writer.add_scalar("3 Train Discriminator accuracy(real)", accD_real_sum/ data_size, epoch)
writer.add_scalar("4 Train Generator accuracy(fake)", accG_sum/ data_size, epoch)
writer.add_scalar("5 Train Discriminator loss (real)", errD_real_sum/ data_size, epoch)
writer.add_scalar("6 Train Discriminator loss (fake)", errD_fake_sum/ data_size, epoch)
writer.add_scalar("7 Train Discriminator loss (all)", errD_sum/(2* data_size), epoch)
writer.add_scalar("8 Train Generator loss (adv)", errG_adv_sum/ data_size, epoch)
writer.add_scalar("9 Train Generator loss (classification)", classification_loss_sum/ data_size, epoch)
writer.add_scalar("10 Train Generator loss (diversity)", div_loss_sum/ data_size, epoch)
writer.add_scalar("11 Train Generator loss (gdpploss)", gdpp_loss_sum/ data_size, epoch)
writer.add_scalar("12 Train Generator loss (gdppcheck)", gdpp_check_sum/ data_size, epoch)
writer.add_scalar("13 Train Generator loss (all)", errG_sum/ data_size, epoch)
writer.export_scalars_to_json("./all_scalars.json")
writer.close()
|
[
"argparse.ArgumentParser",
"dcgan_model.Generator",
"torch.randn",
"torch.full",
"numpy.random.randint",
"torch.device",
"torchvision.transforms.Normalize",
"torch.nn.functional.normalize",
"alexnet.AlexNet",
"torch.nn.BCELoss",
"random.randint",
"torchvision.transforms.Scale",
"torch.load",
"random.seed",
"torch.log",
"torch.mean",
"dcgan_model.Discriminator",
"torch.manual_seed",
"numpy.asarray",
"torch.nn.functional.mse_loss",
"torch.autograd.Variable",
"torch.mul",
"torch.cuda.is_available",
"torch.max",
"torch.sum",
"torch.min",
"torchvision.transforms.RandomCrop",
"tensorboardX.SummaryWriter",
"os.makedirs",
"torch.nn.functional.softmax",
"torch.eig",
"torchvision.transforms.ToTensor"
] |
[((652, 667), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (665, 667), False, 'from tensorboardX import SummaryWriter\n'), ((870, 895), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (893, 895), False, 'import argparse\n'), ((2663, 2690), 'random.seed', 'random.seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (2674, 2690), False, 'import random\n'), ((2695, 2728), 'torch.manual_seed', 'torch.manual_seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (2712, 2728), False, 'import torch\n'), ((3545, 3590), 'torch.device', 'torch.device', (["('cuda:0' if opt.cuda else 'cpu')"], {}), "('cuda:0' if opt.cuda else 'cpu')\n", (3557, 3590), False, 'import torch\n'), ((5955, 5967), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (5965, 5967), True, 'import torch.nn as nn\n'), ((5988, 6015), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (5998, 6015), True, 'import torch.nn as nn\n'), ((6037, 6089), 'torch.randn', 'torch.randn', (['opt.batchSize', '(100)', '(1)', '(1)'], {'device': 'device'}), '(opt.batchSize, 100, 1, 1, device=device)\n', (6048, 6089), False, 'import torch\n'), ((2479, 2500), 'os.makedirs', 'os.makedirs', (['opt.outf'], {}), '(opt.outf)\n', (2490, 2500), False, 'import os\n'), ((2591, 2615), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (2605, 2615), False, 'import random\n'), ((2765, 2790), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2788, 2790), False, 'import torch\n'), ((4205, 4241), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h - patch_size)'], {}), '(0, h - patch_size)\n', (4222, 4241), True, 'import numpy as np\n'), ((4255, 4291), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w - patch_size)'], {}), '(0, w - patch_size)\n', (4272, 4291), True, 'import numpy as np\n'), ((5712, 5742), 'torch.load', 'torch.load', (['"""./best_model.pth"""'], {}), "('./best_model.pth')\n", (5722, 5742), False, 'import torch\n'), ((12105, 12153), 'torch.randn', 'torch.randn', (['test_size', '(100)', '(1)', '(1)'], {'device': 'device'}), '(test_size, 100, 1, 1, device=device)\n', (12116, 12153), False, 'import torch\n'), ((4517, 4545), 'torch.nn.functional.normalize', 'F.normalize', (['phi'], {'p': '(2)', 'dim': '(1)'}), '(phi, p=2, dim=1)\n', (4528, 4545), True, 'import torch.nn.functional as F\n'), ((4620, 4653), 'torch.eig', 'torch.eig', (['S_B'], {'eigenvectors': '(True)'}), '(S_B, eigenvectors=True)\n', (4629, 4653), False, 'import torch\n'), ((5133, 5186), 'torch.nn.functional.mse_loss', 'F.mse_loss', ([], {'target': 'real_eig_vals', 'input': 'fake_eig_vals'}), '(target=real_eig_vals, input=fake_eig_vals)\n', (5143, 5186), True, 'import torch.nn.functional as F\n'), ((5379, 5430), 'torch.mul', 'torch.mul', (['normalized_real_eig_vals', 'structure_loss'], {}), '(normalized_real_eig_vals, structure_loss)\n', (5388, 5430), False, 'import torch\n'), ((5500, 5515), 'dcgan_model.Generator', 'Generator', (['ngpu'], {}), '(ngpu)\n', (5509, 5515), False, 'from dcgan_model import Generator, Discriminator\n'), ((5608, 5628), 'torch.load', 'torch.load', (['opt.netG'], {}), '(opt.netG)\n', (5618, 5628), False, 'import torch\n'), ((5662, 5675), 'alexnet.AlexNet', 'AlexNet', (['ngpu'], {}), '(ngpu)\n', (5669, 5675), False, 'from alexnet import AlexNet\n'), ((5788, 5807), 'dcgan_model.Discriminator', 'Discriminator', (['ngpu'], {}), '(ngpu)\n', (5801, 5807), False, 'from dcgan_model import Generator, Discriminator\n'), ((5900, 
5920), 'torch.load', 'torch.load', (['opt.netD'], {}), '(opt.netD)\n', (5910, 5920), False, 'import torch\n'), ((7154, 7206), 'torch.full', 'torch.full', (['(batch_size,)', 'real_label'], {'device': 'device'}), '((batch_size,), real_label, device=device)\n', (7164, 7206), False, 'import torch\n'), ((7708, 7757), 'torch.randn', 'torch.randn', (['batch_size', '(100)', '(1)', '(1)'], {'device': 'device'}), '(batch_size, 100, 1, 1, device=device)\n', (7719, 7757), False, 'import torch\n'), ((8084, 8112), 'torch.nn.functional.softmax', 'F.softmax', (['fake_class'], {'dim': '(1)'}), '(fake_class, dim=1)\n', (8093, 8112), True, 'import torch.nn.functional as F\n'), ((8645, 8677), 'torch.mean', 'torch.mean', (['sm_fake_class'], {'dim': '(0)'}), '(sm_fake_class, dim=0)\n', (8655, 8677), False, 'import torch\n'), ((14868, 14891), 'numpy.asarray', 'np.asarray', (['count_class'], {}), '(count_class)\n', (14878, 14891), True, 'import numpy as np\n'), ((14951, 14974), 'numpy.asarray', 'np.asarray', (['count_class'], {}), '(count_class)\n', (14961, 14974), True, 'import numpy as np\n'), ((4673, 4697), 'torch.autograd.Variable', 'Variable', (['eig_vals[:, 0]'], {}), '(eig_vals[:, 0])\n', (4681, 4697), False, 'from torch.autograd import Variable\n'), ((4699, 4717), 'torch.autograd.Variable', 'Variable', (['eig_vecs'], {}), '(eig_vecs)\n', (4707, 4717), False, 'from torch.autograd import Variable\n'), ((4787, 4806), 'torch.min', 'torch.min', (['eig_vals'], {}), '(eig_vals)\n', (4796, 4806), False, 'import torch\n'), ((4808, 4827), 'torch.max', 'torch.max', (['eig_vals'], {}), '(eig_vals)\n', (4817, 4827), False, 'import torch\n'), ((5223, 5262), 'torch.mul', 'torch.mul', (['fake_eig_vecs', 'real_eig_vecs'], {}), '(fake_eig_vecs, real_eig_vecs)\n', (5232, 5262), False, 'import torch\n'), ((3050, 3081), 'torchvision.transforms.Scale', 'transforms.Scale', (['opt.imageSize'], {}), '(opt.imageSize)\n', (3066, 3081), True, 'import torchvision.transforms as transforms\n'), ((3114, 3151), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['opt.randomcrop'], {}), '(opt.randomcrop)\n', (3135, 3151), True, 'import torchvision.transforms as transforms\n'), ((3184, 3205), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3203, 3205), True, 'import torchvision.transforms as transforms\n'), ((3238, 3292), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (3258, 3292), True, 'import torchvision.transforms as transforms\n'), ((8463, 8495), 'torch.log', 'torch.log', (['(sm_fake_class + 1e-05)'], {}), '(sm_fake_class + 1e-05)\n', (8472, 8495), False, 'import torch\n'), ((8724, 8748), 'torch.log', 'torch.log', (['sm_batch_mean'], {}), '(sm_batch_mean)\n', (8733, 8748), False, 'import torch\n'), ((12263, 12296), 'torch.nn.functional.softmax', 'F.softmax', (['fake_test_class'], {'dim': '(1)'}), '(fake_test_class, dim=1)\n', (12272, 12296), True, 'import torch.nn.functional as F\n'), ((12353, 12386), 'torch.nn.functional.softmax', 'F.softmax', (['fake_test_class'], {'dim': '(1)'}), '(fake_test_class, dim=1)\n', (12362, 12386), True, 'import torch.nn.functional as F\n'), ((8362, 8394), 'torch.log', 'torch.log', (['(sm_fake_class + 1e-05)'], {}), '(sm_fake_class + 1e-05)\n', (8371, 8394), False, 'import torch\n'), ((11051, 11079), 'torch.nn.functional.softmax', 'F.softmax', (['fake_class'], {'dim': '(1)'}), '(fake_class, dim=1)\n', (11060, 11079), True, 'import torch.nn.functional as F\n'), 
((11135, 11163), 'torch.nn.functional.softmax', 'F.softmax', (['fake_class'], {'dim': '(1)'}), '(fake_class, dim=1)\n', (11144, 11163), True, 'import torch.nn.functional as F\n'), ((11242, 11269), 'torch.sum', 'torch.sum', (['(pred_class > 0.9)'], {}), '(pred_class > 0.9)\n', (11251, 11269), False, 'import torch\n')]
|
from tensorflow.keras.preprocessing import image
import numpy as np
from .augment_and_mix import augment_and_mix
import albumentations
def segmentation_alb(input_image, label, mean, std, augmentation_dict):
transforms = get_aug(augmentation_dict)
if len(transforms) > 0:
aug = albumentations.Compose(transforms, p=1)
augmented = aug(image=input_image, mask=label)
return augmented['image'], augmented["mask"]
else:
return input_image, label
def get_aug(augmentation_dict):
transforms = list()
for aug_command, aug_param in augmentation_dict.items():
if aug_command.startswith("OneOf"):
augs = get_aug(aug_param)
augmentation = albumentations.OneOf(augs, aug_param['p'])
transforms.append(augmentation)
elif aug_command == 'p':
continue
else:
if aug_param is None:
augmentation = getattr(albumentations, aug_command)()
else:
aug_list = sorted(aug_param.items(), key=lambda x: x[0])
new_param = dict()
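                # Keys of the form "name-1", "name-2" are merged into a single
                # tuple parameter "name"; the sort above ensures "-1" is seen first.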
for k, v in aug_list:
if "-" in k:
tuple_name, tuple_id = k.split("-")
if int(tuple_id) == 1:
new_param[tuple_name] = (v,)
else:
new_param[tuple_name] += (v,)
else:
new_param[k] = v
augmentation = getattr(
albumentations, aug_command)(**new_param)
transforms.append(augmentation)
return transforms
def segmentation_aug(input_image, label, mean, std, augmentation_dict):
"""apply augmentation to one image respectively
"""
# For Keras ImageDataGenerator
data_gen_args = dict()
data_gen_args["fill_mode"] = "constant" # cvalの値で埋める
data_gen_args["cval"] = 0 # 黒で埋める
# (H,W[,C]) => (N,H,W,C)
input_image = input_image[np.newaxis]
label = label[np.newaxis, ..., np.newaxis]
image_datagen = image.ImageDataGenerator(**data_gen_args)
mask_datagen = image.ImageDataGenerator(**data_gen_args)
seed = np.random.randint(100)
image_datagen.fit(input_image, augment=True, seed=seed)
mask_datagen.fit(label, augment=True, seed=seed)
image_gen = image_datagen.flow(input_image, batch_size=1, seed=seed)
mask_gen = mask_datagen.flow(label, batch_size=1, seed=seed)
# combine generators into one which yields image and masks
gen = zip(image_gen, mask_gen)
img_batches, mask_batches = next(gen)
    input_image_processed = img_batches.squeeze()  # drop the batch dimension
    label_processed = mask_batches.squeeze()  # drop the batch and channel dimensions
# Not Keras ImageDataGenerator
if "augmix" in augmentation_dict and augmentation_dict["augmix"] is True:
"""AugMix: to Improve Robustness and Uncertainty
        AugMix is applied last.
        TODO: hard labels for now.
        Should soft labels be used once affine-type transforms have been applied?
"""
input_image_processed = augment_and_mix(
input_image_processed,
mean, std,
)
return input_image_processed, label_processed
|
[
"albumentations.Compose",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"numpy.random.randint",
"albumentations.OneOf"
] |
[((2096, 2137), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'image.ImageDataGenerator', ([], {}), '(**data_gen_args)\n', (2120, 2137), False, 'from tensorflow.keras.preprocessing import image\n'), ((2157, 2198), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'image.ImageDataGenerator', ([], {}), '(**data_gen_args)\n', (2181, 2198), False, 'from tensorflow.keras.preprocessing import image\n'), ((2211, 2233), 'numpy.random.randint', 'np.random.randint', (['(100)'], {}), '(100)\n', (2228, 2233), True, 'import numpy as np\n'), ((296, 335), 'albumentations.Compose', 'albumentations.Compose', (['transforms'], {'p': '(1)'}), '(transforms, p=1)\n', (318, 335), False, 'import albumentations\n'), ((717, 759), 'albumentations.OneOf', 'albumentations.OneOf', (['augs', "aug_param['p']"], {}), "(augs, aug_param['p'])\n", (737, 759), False, 'import albumentations\n')]
|
import os
import cv2
import gym
import torch
import random
import numpy as np
from six import iteritems
from datetime import datetime
def seed(seed):
torch.cuda.manual_seed(seed)
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
def evaluate_policy(env, policy, eval_episodes=10, max_timesteps=500, ppo_agent=False):
avg_reward = 0.
for _ in range(eval_episodes):
obs = env.reset()
done = False
step = 0
while not done and step < max_timesteps:
if ppo_agent:
_, _, action = policy.predict(np.array(obs), is_training=False)
else:
action = policy.predict(np.array(obs), is_training=False)
obs, reward, done, _ = env.step(action)
avg_reward += reward
step += 1
avg_reward /= eval_episodes
return avg_reward
def evaluate_atari_policy(env, policy, eval_episodes=10, ppo_agent=False):
avg_reward = 0.
for _ in range(eval_episodes):
obs = env.reset()
done = False
while not done:
if ppo_agent:
_, action = policy.predict(np.array(obs), is_training=False)
else:
action = policy.predict(np.array(obs), is_training=False)
obs, reward, done, _ = env.step(action)
avg_reward += reward
avg_reward /= eval_episodes
return avg_reward
# show the mask
def show_mask(obs, mask, env_name="duckietown"):
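    # Overlay the saliency mask on the observation: masked_obs = obs * mask,
    # heatmap = JET-colormapped masked_obs blended 50/50 with the observation.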
if env_name == "duckietown":
obs = np.uint8(255*obs).transpose(1, 2, 0)
obs = obs[:, :, ::-1]
elif env_name == "atari":
# for gray image
# obs = cv2.cvtColor(np.uint8(255*obs), cv2.COLOR_GRAY2BGR)
# for rgb image
obs = cv2.resize(obs, (84, 84), interpolation=cv2.INTER_AREA)
obs = np.array(obs*255, dtype=np.uint8)
else:
raise NotImplementedError
mask = np.tile(np.expand_dims(mask, axis=-1), (1,1,3))
masked_obs = np.uint8(np.multiply(obs, mask))
heatmap = cv2.applyColorMap(masked_obs, cv2.COLORMAP_JET)
heatmap = cv2.addWeighted(obs, 0.5, heatmap, 0.5, 0)
return heatmap, masked_obs
def get_dirs(dir_type):
# current_time = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
model_dir = './logs/{}/model'.format(dir_type)
data_dir = './logs/{}/data'.format(dir_type)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
if not os.path.exists(data_dir):
os.makedirs(data_dir)
print('Model directory: %s' % model_dir)
print('Data directory: %s' % data_dir)
return model_dir, data_dir
def write_arguments(args, filename):
with open(filename, 'w') as f:
for key, value in iteritems(vars(args)):
f.write('%s: %s\n' % (key, str(value)))
|
[
"numpy.uint8",
"numpy.random.seed",
"numpy.multiply",
"os.makedirs",
"torch.manual_seed",
"torch.cuda.manual_seed",
"numpy.expand_dims",
"os.path.exists",
"cv2.addWeighted",
"random.seed",
"numpy.array",
"cv2.applyColorMap",
"cv2.resize"
] |
[((156, 184), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (178, 184), False, 'import torch\n'), ((189, 212), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (206, 212), False, 'import torch\n'), ((217, 237), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (231, 237), True, 'import numpy as np\n'), ((242, 259), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (253, 259), False, 'import random\n'), ((2035, 2082), 'cv2.applyColorMap', 'cv2.applyColorMap', (['masked_obs', 'cv2.COLORMAP_JET'], {}), '(masked_obs, cv2.COLORMAP_JET)\n', (2052, 2082), False, 'import cv2\n'), ((2097, 2139), 'cv2.addWeighted', 'cv2.addWeighted', (['obs', '(0.5)', 'heatmap', '(0.5)', '(0)'], {}), '(obs, 0.5, heatmap, 0.5, 0)\n', (2112, 2139), False, 'import cv2\n'), ((1926, 1955), 'numpy.expand_dims', 'np.expand_dims', (['mask'], {'axis': '(-1)'}), '(mask, axis=-1)\n', (1940, 1955), True, 'import numpy as np\n'), ((1992, 2014), 'numpy.multiply', 'np.multiply', (['obs', 'mask'], {}), '(obs, mask)\n', (2003, 2014), True, 'import numpy as np\n'), ((2385, 2410), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (2399, 2410), False, 'import os\n'), ((2420, 2442), 'os.makedirs', 'os.makedirs', (['model_dir'], {}), '(model_dir)\n', (2431, 2442), False, 'import os\n'), ((2454, 2478), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (2468, 2478), False, 'import os\n'), ((2488, 2509), 'os.makedirs', 'os.makedirs', (['data_dir'], {}), '(data_dir)\n', (2499, 2509), False, 'import os\n'), ((1758, 1813), 'cv2.resize', 'cv2.resize', (['obs', '(84, 84)'], {'interpolation': 'cv2.INTER_AREA'}), '(obs, (84, 84), interpolation=cv2.INTER_AREA)\n', (1768, 1813), False, 'import cv2\n'), ((1828, 1863), 'numpy.array', 'np.array', (['(obs * 255)'], {'dtype': 'np.uint8'}), '(obs * 255, dtype=np.uint8)\n', (1836, 1863), True, 'import numpy as np\n'), ((1530, 1549), 'numpy.uint8', 'np.uint8', (['(255 * obs)'], {}), '(255 * obs)\n', (1538, 1549), True, 'import numpy as np\n'), ((590, 603), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (598, 603), True, 'import numpy as np\n'), ((682, 695), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (690, 695), True, 'import numpy as np\n'), ((1151, 1164), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (1159, 1164), True, 'import numpy as np\n'), ((1243, 1256), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (1251, 1256), True, 'import numpy as np\n')]
|
from unittest import mock
import chainer
import numpy as np
import pytest
from deep_sentinel.models.dnn.model.layers import mid
chainer.global_config.train = False
chainer.global_config.enable_backprop = False
@pytest.fixture
def activate_func():
m = mock.MagicMock()
m.side_effect = lambda x: x
return m
@pytest.fixture
def dropout_func():
m = mock.MagicMock()
m.side_effect = lambda x: x
return m
@pytest.mark.parametrize(
"data, n_units", [
(
# Batch=2, window=2, feature=2
[[[0, 0], [0, 0]], [[0, 0], [0, 0]]],
5,
),
(
# Batch=2, window=4, feature=1
[[[0], [0], [0], [0]], [[0], [0], [0], [0]]],
4,
),
(
# Batch=1, window=2, feature=3
[[[0, 0, 0], [0, 0, 0]]],
3,
),
]
)
def test_mid_layer(data, n_units, activate_func, dropout_func):
given = chainer.Variable(np.array(data).astype(np.float32))
b, w, f = given.shape
hidden = chainer.Variable(
np.arange(b * w * n_units)
.reshape((b, w, n_units))
.astype(np.float32)
)
mid_layer = mid.MidLayer(n_units, dropout_func, activate_func, f)
actual = mid_layer(given, hidden)
assert actual.shape == (b, w, n_units)
assert activate_func.call_count == 1
assert dropout_func.call_count == 1
|
[
"unittest.mock.MagicMock",
"deep_sentinel.models.dnn.model.layers.mid.MidLayer",
"numpy.arange",
"numpy.array",
"pytest.mark.parametrize"
] |
[((433, 610), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data, n_units"""', '[([[[0, 0], [0, 0]], [[0, 0], [0, 0]]], 5), ([[[0], [0], [0], [0]], [[0], [\n 0], [0], [0]]], 4), ([[[0, 0, 0], [0, 0, 0]]], 3)]'], {}), "('data, n_units', [([[[0, 0], [0, 0]], [[0, 0], [0, \n 0]]], 5), ([[[0], [0], [0], [0]], [[0], [0], [0], [0]]], 4), ([[[0, 0, \n 0], [0, 0, 0]]], 3)])\n", (456, 610), False, 'import pytest\n'), ((260, 276), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (274, 276), False, 'from unittest import mock\n'), ((368, 384), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (382, 384), False, 'from unittest import mock\n'), ((1220, 1273), 'deep_sentinel.models.dnn.model.layers.mid.MidLayer', 'mid.MidLayer', (['n_units', 'dropout_func', 'activate_func', 'f'], {}), '(n_units, dropout_func, activate_func, f)\n', (1232, 1273), False, 'from deep_sentinel.models.dnn.model.layers import mid\n'), ((1001, 1015), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1009, 1015), True, 'import numpy as np\n'), ((1101, 1127), 'numpy.arange', 'np.arange', (['(b * w * n_units)'], {}), '(b * w * n_units)\n', (1110, 1127), True, 'import numpy as np\n')]
|
#!/usr/bin python3
""" Stats functions for the GUI """
import time
import os
import warnings
from math import ceil, sqrt
import numpy as np
from lib.Serializer import PickleSerializer
class SavedSessions(object):
""" Saved Training Session """
def __init__(self, sessions_data):
self.serializer = PickleSerializer
self.sessions = self.load_sessions(sessions_data)
def load_sessions(self, filename):
""" Load previously saved sessions """
stats = list()
if os.path.isfile(filename):
with open(filename, self.serializer.roptions) as sessions:
stats = self.serializer.unmarshal(sessions.read())
return stats
def save_sessions(self, filename):
""" Save the session file """
with open(filename, self.serializer.woptions) as session:
session.write(self.serializer.marshal(self.sessions))
print("Saved session stats to: {}".format(filename))
class CurrentSession(object):
""" The current training session """
def __init__(self):
self.stats = {"iterations": 0,
"batchsize": None, # Set and reset by wrapper
"timestamps": [],
"loss": [],
"losskeys": []}
self.timestats = {"start": None,
"elapsed": None}
self.modeldir = None # Set and reset by wrapper
self.filename = None
self.historical = None
def initialise_session(self, currentloss):
""" Initialise the training session """
self.load_historical()
for item in currentloss:
self.stats["losskeys"].append(item[0])
self.stats["loss"].append(list())
self.timestats["start"] = time.time()
def load_historical(self):
""" Load historical data and add current session to the end """
self.filename = os.path.join(self.modeldir, "trainingstats.fss")
self.historical = SavedSessions(self.filename)
self.historical.sessions.append(self.stats)
def add_loss(self, currentloss):
""" Add a loss item from the training process """
if self.stats["iterations"] == 0:
self.initialise_session(currentloss)
self.stats["iterations"] += 1
self.add_timestats()
for idx, item in enumerate(currentloss):
self.stats["loss"][idx].append(float(item[1]))
def add_timestats(self):
""" Add timestats to loss dict and timestats """
now = time.time()
self.stats["timestamps"].append(now)
elapsed_time = now - self.timestats["start"]
self.timestats["elapsed"] = time.strftime("%H:%M:%S",
time.gmtime(elapsed_time))
def save_session(self):
""" Save the session file to the modeldir """
if self.stats["iterations"] > 0:
print("Saving session stats...")
self.historical.save_sessions(self.filename)
class SessionsTotals(object):
""" The compiled totals of all saved sessions """
def __init__(self, all_sessions):
self.stats = {"split": [],
"iterations": 0,
"batchsize": [],
"timestamps": [],
"loss": [],
"losskeys": []}
self.initiate(all_sessions)
self.compile(all_sessions)
def initiate(self, sessions):
""" Initiate correct losskey titles and number of loss lists """
for losskey in sessions[0]["losskeys"]:
self.stats["losskeys"].append(losskey)
self.stats["loss"].append(list())
def compile(self, sessions):
""" Compile all of the sessions into totals """
current_split = 0
for session in sessions:
iterations = session["iterations"]
current_split += iterations
self.stats["split"].append(current_split)
self.stats["iterations"] += iterations
self.stats["timestamps"].extend(session["timestamps"])
self.stats["batchsize"].append(session["batchsize"])
self.add_loss(session["loss"])
def add_loss(self, session_loss):
""" Add loss vals to each of their respective lists """
for idx, loss in enumerate(session_loss):
self.stats["loss"][idx].extend(loss)
class SessionsSummary(object):
""" Calculations for analysis summary stats """
def __init__(self, raw_data):
self.summary = list()
self.summary_stats_compile(raw_data)
def summary_stats_compile(self, raw_data):
""" Compile summary stats """
raw_summaries = list()
for idx, session in enumerate(raw_data):
raw_summaries.append(self.summarise_session(idx, session))
totals_summary = self.summarise_totals(raw_summaries)
raw_summaries.append(totals_summary)
self.format_summaries(raw_summaries)
# Compile Session Summaries
@staticmethod
def summarise_session(idx, session):
""" Compile stats for session passed in """
starttime = session["timestamps"][0]
endtime = session["timestamps"][-1]
elapsed = endtime - starttime
# Bump elapsed to 0.1s if no time is recorded
# to hack around div by zero error
elapsed = 0.1 if elapsed == 0 else elapsed
rate = (session["batchsize"] * session["iterations"]) / elapsed
return {"session": idx + 1,
"start": starttime,
"end": endtime,
"elapsed": elapsed,
"rate": rate,
"batch": session["batchsize"],
"iterations": session["iterations"]}
@staticmethod
def summarise_totals(raw_summaries):
""" Compile the stats for all sessions combined """
elapsed = 0
rate = 0
batchset = set()
iterations = 0
total_summaries = len(raw_summaries)
for idx, summary in enumerate(raw_summaries):
if idx == 0:
starttime = summary["start"]
if idx == total_summaries - 1:
endtime = summary["end"]
elapsed += summary["elapsed"]
rate += summary["rate"]
batchset.add(summary["batch"])
iterations += summary["iterations"]
batch = ",".join(str(bs) for bs in batchset)
return {"session": "Total",
"start": starttime,
"end": endtime,
"elapsed": elapsed,
"rate": rate / total_summaries,
"batch": batch,
"iterations": iterations}
def format_summaries(self, raw_summaries):
""" Format the summaries nicely for display """
for summary in raw_summaries:
summary["start"] = time.strftime("%x %X",
time.gmtime(summary["start"]))
summary["end"] = time.strftime("%x %X",
time.gmtime(summary["end"]))
summary["elapsed"] = time.strftime("%H:%M:%S",
time.gmtime(summary["elapsed"]))
summary["rate"] = "{0:.1f}".format(summary["rate"])
self.summary = raw_summaries
class Calculations(object):
""" Class to hold calculations against raw session data """
def __init__(self,
session,
display="loss",
selections=["raw"],
avg_samples=10,
flatten_outliers=False,
is_totals=False):
warnings.simplefilter("ignore", np.RankWarning)
self.session = session
if display.lower() == "loss":
display = self.session["losskeys"]
else:
display = [display]
self.args = {"display": display,
"selections": selections,
"avg_samples": int(avg_samples),
"flatten_outliers": flatten_outliers,
"is_totals": is_totals}
self.iterations = 0
self.stats = None
self.refresh()
def refresh(self):
""" Refresh the stats """
self.iterations = 0
self.stats = self.get_raw()
self.get_calculations()
self.remove_raw()
def get_raw(self):
""" Add raw data to stats dict """
raw = dict()
for idx, item in enumerate(self.args["display"]):
if item.lower() == "rate":
data = self.calc_rate(self.session)
else:
data = self.session["loss"][idx][:]
if self.args["flatten_outliers"]:
data = self.flatten_outliers(data)
if self.iterations == 0:
self.iterations = len(data)
raw["raw_{}".format(item)] = data
return raw
def remove_raw(self):
""" Remove raw values from stats if not requested """
if "raw" in self.args["selections"]:
return
for key in list(self.stats.keys()):
if key.startswith("raw"):
del self.stats[key]
def calc_rate(self, data):
""" Calculate rate per iteration
NB: For totals, gaps between sessions can be large
            so time difference has to be reset for each session's
rate calculation """
batchsize = data["batchsize"]
if self.args["is_totals"]:
split = data["split"]
else:
batchsize = [batchsize]
split = [len(data["timestamps"])]
prev_split = 0
rate = list()
for idx, current_split in enumerate(split):
prev_time = data["timestamps"][prev_split]
timestamp_chunk = data["timestamps"][prev_split:current_split]
for item in timestamp_chunk:
current_time = item
timediff = current_time - prev_time
iter_rate = 0 if timediff == 0 else batchsize[idx] / timediff
rate.append(iter_rate)
prev_time = current_time
prev_split = current_split
if self.args["flatten_outliers"]:
rate = self.flatten_outliers(rate)
return rate
@staticmethod
def flatten_outliers(data):
""" Remove the outliers from a provided list """
retdata = list()
samples = len(data)
mean = (sum(data) / samples)
limit = sqrt(sum([(item - mean)**2 for item in data]) / samples)
for item in data:
if (mean - limit) <= item <= (mean + limit):
retdata.append(item)
else:
retdata.append(mean)
return retdata
def get_calculations(self):
""" Perform the required calculations """
for selection in self.get_selections():
if selection[0] == "raw":
continue
method = getattr(self, "calc_{}".format(selection[0]))
key = "{}_{}".format(selection[0], selection[1])
raw = self.stats["raw_{}".format(selection[1])]
self.stats[key] = method(raw)
def get_selections(self):
""" Compile a list of data to be calculated """
for summary in self.args["selections"]:
for item in self.args["display"]:
yield summary, item
def calc_avg(self, data):
""" Calculate rolling average """
avgs = list()
presample = ceil(self.args["avg_samples"] / 2)
postsample = self.args["avg_samples"] - presample
datapoints = len(data)
if datapoints <= (self.args["avg_samples"] * 2):
print("Not enough data to compile rolling average")
return avgs
for idx in range(0, datapoints):
if idx < presample or idx >= datapoints - postsample:
avgs.append(None)
continue
else:
avg = sum(data[idx - presample:idx + postsample]) \
/ self.args["avg_samples"]
avgs.append(avg)
return avgs
@staticmethod
def calc_trend(data):
""" Compile trend data """
points = len(data)
if points < 10:
dummy = [None for i in range(points)]
return dummy
x_range = range(points)
fit = np.polyfit(x_range, data, 3)
poly = np.poly1d(fit)
trend = poly(x_range)
return trend
|
[
"numpy.poly1d",
"warnings.simplefilter",
"math.ceil",
"numpy.polyfit",
"time.gmtime",
"time.time",
"os.path.isfile",
"os.path.join"
] |
[((515, 539), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (529, 539), False, 'import os\n'), ((1785, 1796), 'time.time', 'time.time', ([], {}), '()\n', (1794, 1796), False, 'import time\n'), ((1925, 1973), 'os.path.join', 'os.path.join', (['self.modeldir', '"""trainingstats.fss"""'], {}), "(self.modeldir, 'trainingstats.fss')\n", (1937, 1973), False, 'import os\n'), ((2546, 2557), 'time.time', 'time.time', ([], {}), '()\n', (2555, 2557), False, 'import time\n'), ((7670, 7717), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'np.RankWarning'], {}), "('ignore', np.RankWarning)\n", (7691, 7717), False, 'import warnings\n'), ((11536, 11570), 'math.ceil', 'ceil', (["(self.args['avg_samples'] / 2)"], {}), "(self.args['avg_samples'] / 2)\n", (11540, 11570), False, 'from math import ceil, sqrt\n'), ((12415, 12443), 'numpy.polyfit', 'np.polyfit', (['x_range', 'data', '(3)'], {}), '(x_range, data, 3)\n', (12425, 12443), True, 'import numpy as np\n'), ((12459, 12473), 'numpy.poly1d', 'np.poly1d', (['fit'], {}), '(fit)\n', (12468, 12473), True, 'import numpy as np\n'), ((2768, 2793), 'time.gmtime', 'time.gmtime', (['elapsed_time'], {}), '(elapsed_time)\n', (2779, 2793), False, 'import time\n'), ((6944, 6973), 'time.gmtime', 'time.gmtime', (["summary['start']"], {}), "(summary['start'])\n", (6955, 6973), False, 'import time\n'), ((7070, 7097), 'time.gmtime', 'time.gmtime', (["summary['end']"], {}), "(summary['end'])\n", (7081, 7097), False, 'import time\n'), ((7205, 7236), 'time.gmtime', 'time.gmtime', (["summary['elapsed']"], {}), "(summary['elapsed'])\n", (7216, 7236), False, 'import time\n')]
|
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
def images_from_samples(samples, dimensions=(5, 5), epoch=None, save=True):
# Remove channel dimension if present
if samples.ndim > 3 and samples.shape[-1] == 1:
samples = samples.squeeze(axis=3)
fig = plt.figure(figsize=dimensions)
for i in range(samples.shape[0]):
plt.subplot(dimensions[0], dimensions[1], i+1)
plt.imshow(samples[i], interpolation='nearest', cmap='gray_r')
plt.axis('off')
# Approximate top-center for title text
epoch and fig.text(0.42, 0.93, 'Epoch {}'.format(epoch), fontsize=12)
if save:
plt.savefig('output/images/generated-{}.png'.format(epoch))
plt.savefig('output/images/generated-latest.png')
plt.show()
def create_summary_helper(sess, output_path):
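    # Build TensorBoard summaries (latest loss, mean loss, loss histogram) for
    # the generator and discriminator, and return a function that writes them
    # for a given epoch.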
with tf.name_scope('generator'):
generator_loss_history = tf.placeholder(
tf.float32,
[ None ],
name='loss_history_placeholder'
)
generator_mean_loss = tf.reduce_mean(
generator_loss_history,
name='mean_loss_placeholder'
)
generator_summary = tf.summary.merge([
tf.summary.scalar('loss', generator_loss_history[-1]),
tf.summary.scalar('mean_loss', generator_mean_loss),
tf.summary.histogram('loss_history', generator_loss_history)
])
with tf.name_scope('discriminator'):
discriminator_loss_history = tf.placeholder(
tf.float32,
[ None ],
name='loss_history_placeholder'
)
discriminator_mean_loss = tf.reduce_mean(
discriminator_loss_history,
name='mean_loss_placeholder'
)
discriminator_summary = tf.summary.merge([
tf.summary.scalar('loss', discriminator_loss_history[-1]),
tf.summary.scalar('mean_loss', discriminator_mean_loss),
tf.summary.histogram('loss_history', discriminator_loss_history)
])
g_writer = tf.summary.FileWriter(
output_path + '/generator',
sess.graph
)
d_writer = tf.summary.FileWriter(
output_path + '/discriminator',
#sess.graph
)
def add_summaries(epoch, accumulate_losses):
g_writer.add_summary(sess.run(
generator_summary,
feed_dict={
generator_loss_history: accumulate_losses.T[0]
}),
epoch
)
d_writer.add_summary(sess.run(
discriminator_summary,
feed_dict={
discriminator_loss_history: accumulate_losses.T[1]
}),
epoch
)
return add_summaries
def create_train_helper(
sample_count=25,
sample_nth=10,
sample_save=True,
summaries=True,
**summary_args):
# Summary helper for Tensorboard
add_summary = lambda *a: None
if summaries:
add_summary = create_summary_helper(**summary_args)
def train_helper(epoch, state):
sess, losses, (generator_input, generator_output, noise_sampler) = state
# NOTE: Feel free to plot losses, or use Tensorboard with summaries
# losses
# Predefined noise vector for comparison
if train_helper.noise is None:
train_helper.noise = noise_sampler(sample_count)
# Generate some samples and save as images
if epoch == 1 or epoch % sample_nth == 0:
print('Info: Generating sample images...')
grid_size = int(np.sqrt(sample_count))
images_from_samples(
epoch=epoch,
save=sample_save,
dimensions=(grid_size, grid_size),
samples=sess.run(generator_output, feed_dict={
generator_input: train_helper.noise
})
)
add_summary(epoch, losses)
print('Training: epoch {} losses => generator={:.6f}, discriminator={:.6f}'.format(
epoch,
losses.T[0][-1],
losses.T[1][-1]
))
train_helper.noise = None
return train_helper
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"tensorflow.summary.scalar",
"matplotlib.pyplot.imshow",
"tensorflow.reduce_mean",
"matplotlib.pyplot.axis",
"tensorflow.placeholder",
"matplotlib.pyplot.figure",
"tensorflow.summary.FileWriter",
"tensorflow.summary.histogram",
"tensorflow.name_scope",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((299, 329), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'dimensions'}), '(figsize=dimensions)\n', (309, 329), True, 'import matplotlib.pyplot as plt\n'), ((781, 791), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (789, 791), True, 'import matplotlib.pyplot as plt\n'), ((2053, 2114), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(output_path + '/generator')", 'sess.graph'], {}), "(output_path + '/generator', sess.graph)\n", (2074, 2114), True, 'import tensorflow as tf\n'), ((2152, 2205), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["(output_path + '/discriminator')"], {}), "(output_path + '/discriminator')\n", (2173, 2205), True, 'import tensorflow as tf\n'), ((376, 424), 'matplotlib.pyplot.subplot', 'plt.subplot', (['dimensions[0]', 'dimensions[1]', '(i + 1)'], {}), '(dimensions[0], dimensions[1], i + 1)\n', (387, 424), True, 'import matplotlib.pyplot as plt\n'), ((431, 493), 'matplotlib.pyplot.imshow', 'plt.imshow', (['samples[i]'], {'interpolation': '"""nearest"""', 'cmap': '"""gray_r"""'}), "(samples[i], interpolation='nearest', cmap='gray_r')\n", (441, 493), True, 'import matplotlib.pyplot as plt\n'), ((502, 517), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (510, 517), True, 'import matplotlib.pyplot as plt\n'), ((727, 776), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""output/images/generated-latest.png"""'], {}), "('output/images/generated-latest.png')\n", (738, 776), True, 'import matplotlib.pyplot as plt\n'), ((849, 875), 'tensorflow.name_scope', 'tf.name_scope', (['"""generator"""'], {}), "('generator')\n", (862, 875), True, 'import tensorflow as tf\n'), ((910, 977), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {'name': '"""loss_history_placeholder"""'}), "(tf.float32, [None], name='loss_history_placeholder')\n", (924, 977), True, 'import tensorflow as tf\n'), ((1056, 1124), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['generator_loss_history'], {'name': '"""mean_loss_placeholder"""'}), "(generator_loss_history, name='mean_loss_placeholder')\n", (1070, 1124), True, 'import tensorflow as tf\n'), ((1432, 1462), 'tensorflow.name_scope', 'tf.name_scope', (['"""discriminator"""'], {}), "('discriminator')\n", (1445, 1462), True, 'import tensorflow as tf\n'), ((1501, 1568), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {'name': '"""loss_history_placeholder"""'}), "(tf.float32, [None], name='loss_history_placeholder')\n", (1515, 1568), True, 'import tensorflow as tf\n'), ((1651, 1723), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['discriminator_loss_history'], {'name': '"""mean_loss_placeholder"""'}), "(discriminator_loss_history, name='mean_loss_placeholder')\n", (1665, 1723), True, 'import tensorflow as tf\n'), ((1218, 1271), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'generator_loss_history[-1]'], {}), "('loss', generator_loss_history[-1])\n", (1235, 1271), True, 'import tensorflow as tf\n'), ((1285, 1336), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mean_loss"""', 'generator_mean_loss'], {}), "('mean_loss', generator_mean_loss)\n", (1302, 1336), True, 'import tensorflow as tf\n'), ((1350, 1410), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""loss_history"""', 'generator_loss_history'], {}), "('loss_history', generator_loss_history)\n", (1370, 1410), True, 'import tensorflow as tf\n'), ((1821, 1878), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 
'discriminator_loss_history[-1]'], {}), "('loss', discriminator_loss_history[-1])\n", (1838, 1878), True, 'import tensorflow as tf\n'), ((1892, 1947), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mean_loss"""', 'discriminator_mean_loss'], {}), "('mean_loss', discriminator_mean_loss)\n", (1909, 1947), True, 'import tensorflow as tf\n'), ((1961, 2025), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""loss_history"""', 'discriminator_loss_history'], {}), "('loss_history', discriminator_loss_history)\n", (1981, 2025), True, 'import tensorflow as tf\n'), ((3553, 3574), 'numpy.sqrt', 'np.sqrt', (['sample_count'], {}), '(sample_count)\n', (3560, 3574), True, 'import numpy as np\n')]
|
from __future__ import division
from io import BytesIO
import os
import os.path as op
import numpy as np
from PIL import Image
from traits.api import String, Tuple, provides
from .cacheing_decorators import lru_cache
from .i_tile_manager import ITileManager
from .tile_manager import TileManager
@provides(ITileManager)
class ImageTileManager(TileManager):
lod_dir = String
_level_dimensions = Tuple
@lru_cache(maxsize=256)
def get_tile(self, zoom, row, col):
if zoom >= len(self._level_dimensions):
return None
# Flip the Y axis
num_rows, _ = self.get_data_dimensions(zoom)
row = num_rows - 1 - row
zoom_dir = op.join(self.lod_dir, str(int(zoom)))
tile_path = op.join(zoom_dir, '{}.{}.npy'.format(row, col))
if not op.exists(tile_path):
return None
tile = np.load(tile_path)
img = Image.fromarray(tile, mode='RGB')
data = BytesIO()
img.save(data, format='png')
return self.process_raw(data.getvalue())
def get_tile_size(self):
return 256
def get_data_dimensions(self, zoom):
num_levels = len(self._level_dimensions)
if zoom >= num_levels:
return 0, 0
return self._level_dimensions[zoom]
def get_wrap_flags(self):
return False, False
def convert_to_tilenum(self, x, y, zoom):
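        # Convert pixel coordinates into (zoom, row, col) tile indices,
        # wrapping by the number of tiles (2 ** zoom) along each axis.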
n = 2 ** zoom
size = self.get_tile_size()
col = x // size % n
row = y // size % n
return zoom, row, col
def _lod_dir_changed(self, new):
self.get_tile.clear()
level_dimensions = _get_lod_dir_details(new)
self._level_dimensions = level_dimensions
self.min_level = 0
self.max_level = len(level_dimensions)
def _get_lod_dir_details(path):
level_count = 0
level_dimensions = []
for fn in os.listdir(path):
if op.isdir(op.join(path, fn)):
level_count += 1
for i in range(level_count):
level_dirpath = op.join(path, str(i))
if op.exists(level_dirpath) and op.isdir(level_dirpath):
rows, cols = 0, 0
for fn in os.listdir(level_dirpath):
try:
r, c = fn.split('.', 2)[:2]
rows, cols = max(rows, int(r)), max(cols, int(c))
except ValueError:
# Ignore bad filenames (like .DS_Store...)
pass
level_dimensions.append((rows + 1, cols + 1))
else:
# Missing level directory! Bail out.
return ()
return tuple(level_dimensions)
|
[
"io.BytesIO",
"numpy.load",
"os.path.isdir",
"traits.api.provides",
"os.path.exists",
"PIL.Image.fromarray",
"os.path.join",
"os.listdir"
] |
[((303, 325), 'traits.api.provides', 'provides', (['ITileManager'], {}), '(ITileManager)\n', (311, 325), False, 'from traits.api import String, Tuple, provides\n'), ((1880, 1896), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1890, 1896), False, 'import os\n'), ((872, 890), 'numpy.load', 'np.load', (['tile_path'], {}), '(tile_path)\n', (879, 890), True, 'import numpy as np\n'), ((905, 938), 'PIL.Image.fromarray', 'Image.fromarray', (['tile'], {'mode': '"""RGB"""'}), "(tile, mode='RGB')\n", (920, 938), False, 'from PIL import Image\n'), ((954, 963), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (961, 963), False, 'from io import BytesIO\n'), ((810, 830), 'os.path.exists', 'op.exists', (['tile_path'], {}), '(tile_path)\n', (819, 830), True, 'import os.path as op\n'), ((1918, 1935), 'os.path.join', 'op.join', (['path', 'fn'], {}), '(path, fn)\n', (1925, 1935), True, 'import os.path as op\n'), ((2058, 2082), 'os.path.exists', 'op.exists', (['level_dirpath'], {}), '(level_dirpath)\n', (2067, 2082), True, 'import os.path as op\n'), ((2087, 2110), 'os.path.isdir', 'op.isdir', (['level_dirpath'], {}), '(level_dirpath)\n', (2095, 2110), True, 'import os.path as op\n'), ((2164, 2189), 'os.listdir', 'os.listdir', (['level_dirpath'], {}), '(level_dirpath)\n', (2174, 2189), False, 'import os\n')]
|
import numpy as np
from time import time
from keras.datasets import mnist
from tmu.tsetlin_machine import TMCoalescedClassifier
import copy
clauses = 64
T = int(clauses*0.75)
s = 5.0
patch_size = 3
resolution = 8
number_of_state_bits = 8
(X_train_org, Y_train), (X_test_org, Y_test) = mnist.load_data()
Y_train=Y_train.reshape(Y_train.shape[0])
Y_test=Y_test.reshape(Y_test.shape[0])
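# Thermometer-encode each grayscale pixel into `resolution` binary threshold planes.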
X_train = np.empty((X_train_org.shape[0], X_train_org.shape[1], X_train_org.shape[2], resolution), dtype=np.uint8)
for z in range(resolution):
X_train[:,:,:,z] = X_train_org[:,:,:] >= (z+1)*255/(resolution+1)
X_test = np.empty((X_test_org.shape[0], X_test_org.shape[1], X_test_org.shape[2], resolution), dtype=np.uint8)
for z in range(resolution):
X_test[:,:,:,z] = X_test_org[:,:,:] >= (z+1)*255/(resolution+1)
X_train = X_train.reshape((X_train_org.shape[0], X_train_org.shape[1], X_train_org.shape[2], resolution))
X_test = X_test.reshape((X_test_org.shape[0], X_test_org.shape[1], X_test_org.shape[2], resolution))
tm = TMCoalescedClassifier(clauses, T, s, platform='CUDA', patch_dim=(3, 3), weighted_clauses=True)
print("\nAccuracy over 10 epochs:\n")
for i in range(1):
start_training = time()
tm.fit(X_train, Y_train)
stop_training = time()
start_testing = time()
result = 100*(tm.predict(X_test) == Y_test).mean()
stop_testing = time()
print("#%d Accuracy: %.2f%% Training: %.2fs Testing: %.2fs" % (i+1, result, stop_training-start_training, stop_testing-start_testing))
print("\nTransforming datasets")
start_transformation = time()
X_train_transformed = tm.transform_patchwise(X_train)
print(X_train_transformed.shape)
X_test_transformed = tm.transform_patchwise(X_test)
stop_transformation = time()
print("Transformation time: %.fs" % (stop_transformation - start_transformation))
print("Saving transformed datasets")
np.savez_compressed("X_train_transformed.npz", X_train_transformed)
np.savez_compressed("X_test_transformed.npz", X_test_transformed)
|
[
"tmu.tsetlin_machine.TMCoalescedClassifier",
"keras.datasets.mnist.load_data",
"numpy.empty",
"time.time",
"numpy.savez_compressed"
] |
[((290, 307), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (305, 307), False, 'from keras.datasets import mnist\n'), ((401, 509), 'numpy.empty', 'np.empty', (['(X_train_org.shape[0], X_train_org.shape[1], X_train_org.shape[2], resolution)'], {'dtype': 'np.uint8'}), '((X_train_org.shape[0], X_train_org.shape[1], X_train_org.shape[2],\n resolution), dtype=np.uint8)\n', (409, 509), True, 'import numpy as np\n'), ((619, 724), 'numpy.empty', 'np.empty', (['(X_test_org.shape[0], X_test_org.shape[1], X_test_org.shape[2], resolution)'], {'dtype': 'np.uint8'}), '((X_test_org.shape[0], X_test_org.shape[1], X_test_org.shape[2],\n resolution), dtype=np.uint8)\n', (627, 724), True, 'import numpy as np\n'), ((1036, 1134), 'tmu.tsetlin_machine.TMCoalescedClassifier', 'TMCoalescedClassifier', (['clauses', 'T', 's'], {'platform': '"""CUDA"""', 'patch_dim': '(3, 3)', 'weighted_clauses': '(True)'}), "(clauses, T, s, platform='CUDA', patch_dim=(3, 3),\n weighted_clauses=True)\n", (1057, 1134), False, 'from tmu.tsetlin_machine import TMCoalescedClassifier\n'), ((1558, 1564), 'time.time', 'time', ([], {}), '()\n', (1562, 1564), False, 'from time import time\n'), ((1726, 1732), 'time.time', 'time', ([], {}), '()\n', (1730, 1732), False, 'from time import time\n'), ((1853, 1920), 'numpy.savez_compressed', 'np.savez_compressed', (['"""X_train_transformed.npz"""', 'X_train_transformed'], {}), "('X_train_transformed.npz', X_train_transformed)\n", (1872, 1920), True, 'import numpy as np\n'), ((1921, 1986), 'numpy.savez_compressed', 'np.savez_compressed', (['"""X_test_transformed.npz"""', 'X_test_transformed'], {}), "('X_test_transformed.npz', X_test_transformed)\n", (1940, 1986), True, 'import numpy as np\n'), ((1207, 1213), 'time.time', 'time', ([], {}), '()\n', (1211, 1213), False, 'from time import time\n'), ((1257, 1263), 'time.time', 'time', ([], {}), '()\n', (1261, 1263), False, 'from time import time\n'), ((1283, 1289), 'time.time', 'time', ([], {}), '()\n', (1287, 1289), False, 'from time import time\n'), ((1358, 1364), 'time.time', 'time', ([], {}), '()\n', (1362, 1364), False, 'from time import time\n')]
|
import unittest
import numpy as np
import scipy.stats as st
from ..analysis import LinearRegression
from ..analysis.exc import MinimumSizeError, NoDataError
from ..data import UnequalVectorLengthError, Vector
class MyTestCase(unittest.TestCase):
def test_350_LinRegress_corr(self):
"""Test the Linear Regression class for correlation"""
np.random.seed(987654321)
x_input_array = range(1, 101)
y_input_array = [x * 3 for x in x_input_array]
alpha = 0.05
output = """
Linear Regression
-----------------
n = 100
Slope = 3.0000
Intercept = 0.0000
r = 1.0000
r^2 = 1.0000
Std Err = 0.0000
p value = 0.0000
"""
self.assertLess(LinearRegression(x_input_array, y_input_array, alpha=alpha, display=False).p_value, alpha,
"FAIL: Linear Regression Type II error")
self.assertEqual(str(LinearRegression(x_input_array, y_input_array, alpha=alpha, display=False)), output)
def test_351_LinRegress_no_corr(self):
"""Test the Linear Regression class for uncorrelated data"""
np.random.seed(987654321)
alpha = 0.05
x_input_array = st.norm.rvs(size=200)
y_input_array = st.norm.rvs(size=200)
self.assertGreater(LinearRegression(x_input_array, y_input_array, alpha=alpha, display=False).p_value, alpha,
"FAIL: Linear Regression Type I error")
def test_352_LinRegress_no_corr_slope(self):
"""Test the Linear Regression slope"""
np.random.seed(987654321)
alpha = 0.05
x_input_array = st.norm.rvs(size=200)
y_input_array = st.norm.rvs(size=200)
self.assertAlmostEqual(LinearRegression(x_input_array, y_input_array,
alpha=alpha,
display=False).slope, -0.0969, delta=0.0001,
msg="FAIL: Linear Regression slope")
def test_353_LinRegress_no_corr_intercept(self):
"""Test the Linear Regression intercept"""
np.random.seed(987654321)
alpha = 0.05
x_input_array = st.norm.rvs(size=200)
y_input_array = st.norm.rvs(size=200)
self.assertAlmostEqual(LinearRegression(x_input_array, y_input_array,
alpha=alpha,
display=False).intercept, -0.0397, delta=0.0001,
msg="FAIL: Linear Regression intercept")
def test_354_LinRegress_no_corr_r(self):
"""Test the Linear Regression r"""
np.random.seed(987654321)
alpha = 0.05
x_input_array = st.norm.rvs(size=200)
y_input_array = st.norm.rvs(size=200)
self.assertAlmostEqual(LinearRegression(x_input_array, y_input_array,
alpha=alpha,
display=False).r_value, -0.1029, delta=0.0001,
msg="FAIL: Linear Regression r")
def test_355_LinRegress_no_corr_r2(self):
"""Test the Linear Regression r^2"""
np.random.seed(987654321)
alpha = 0.05
x_input_array = st.norm.rvs(size=200)
y_input_array = st.norm.rvs(size=200)
self.assertAlmostEqual(LinearRegression(x_input_array, y_input_array,
alpha=alpha,
display=False).r_squared, 0.0105, delta=0.0001,
msg="FAIL: Linear Regression r^2")
def test_356_LinRegress_no_corr_std_err(self):
"""Test the Linear Regression std err"""
np.random.seed(987654321)
alpha = 0.05
x_input_array = st.norm.rvs(size=200)
y_input_array = st.norm.rvs(size=200)
self.assertAlmostEqual(LinearRegression(x_input_array, y_input_array,
alpha=alpha,
display=False).std_err, 0.0666, delta=0.0001,
msg="FAIL: Linear Regression std err")
def test_357_LinRegress_no_corr_just_above_min_size(self):
"""Test the Linear Regression class for uncorrelated data just above minimum size"""
np.random.seed(987654321)
alpha = 0.05
x_input_array = st.norm.rvs(size=4)
y_input_array = st.norm.rvs(size=4)
self.assertTrue(LinearRegression(x_input_array, y_input_array, alpha=alpha, display=False).p_value,
"FAIL: Linear Regression just above minimum size")
def test_358_LinRegress_no_corr_at_min_size(self):
"""Test the Linear Regression class for uncorrelated data at minimum size"""
np.random.seed(987654321)
alpha = 0.05
x_input_array = st.norm.rvs(size=3)
y_input_array = st.norm.rvs(size=3)
self.assertRaises(MinimumSizeError, lambda: LinearRegression(x_input_array, y_input_array,
alpha=alpha,
display=False).p_value)
def test_359_LinRegress_no_corr_unequal_vectors(self):
"""Test the Linear Regression class for uncorrelated data with unequal vectors"""
np.random.seed(987654321)
alpha = 0.05
x_input_array = st.norm.rvs(size=184)
y_input_array = st.norm.rvs(size=200)
self.assertRaises(UnequalVectorLengthError, lambda: LinearRegression(x_input_array, y_input_array,
alpha=alpha,
display=False).p_value)
def test_360_LinRegress_no_corr_empty_vector(self):
"""Test the Linear Regression class for uncorrelated data with an empty vector"""
np.random.seed(987654321)
alpha = 0.05
x_input_array = [float("nan"), "two", "three", "four", float("nan")]
y_input_array = st.norm.rvs(size=5)
self.assertRaises(NoDataError, lambda: LinearRegression(x_input_array, y_input_array,
alpha=alpha,
display=False).p_value)
def test_361_LinRegress_no_corr_two_empty_vectors(self):
"""Test the Linear Regression class for uncorrelated data with two empty vectors"""
alpha = 0.05
x_input_array = [float("nan"), "two", "three", "four", float("nan")]
y_input_array = ["one", "two", float("nan"), "four", float("nan")]
self.assertRaises(NoDataError, lambda: LinearRegression(x_input_array, y_input_array,
alpha=alpha,
display=False).p_value)
def test_362_LinRegress_no_corr_statistic(self):
"""Test the Linear Regression R^2"""
np.random.seed(987654321)
alpha = 0.05
x_input_array = st.norm.rvs(size=200)
y_input_array = st.norm.rvs(size=200)
self.assertAlmostEqual(LinearRegression(x_input_array, y_input_array,
alpha=alpha,
display=False).statistic, 0.0105, delta=0.0001,
msg="FAIL: Linear Regression statistic")
def test_363_LinRegress_vector(self):
"""Test the Linear Regression class with an input Vector."""
np.random.seed(987654321)
x_input_array = range(1, 101)
y_input_array = [x * 3 for x in x_input_array]
alpha = 0.05
output = """
Linear Regression
-----------------
n = 100
Slope = 3.0000
Intercept = 0.0000
r = 1.0000
r^2 = 1.0000
Std Err = 0.0000
p value = 0.0000
"""
exp = LinearRegression(Vector(x_input_array, other=y_input_array), alpha=alpha, display=False)
self.assertLess(exp.p_value, alpha, "FAIL: Linear Regression Type II error")
self.assertEqual(str(exp), output)
def test_364_LinRegress_missing_ydata(self):
"""Test the case where no ydata is given."""
np.random.seed(987654321)
x_input_array = range(1, 101)
self.assertRaises(AttributeError, lambda: LinearRegression(x_input_array))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"numpy.random.seed",
"scipy.stats.norm.rvs"
] |
[((8507, 8522), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8520, 8522), False, 'import unittest\n'), ((360, 385), 'numpy.random.seed', 'np.random.seed', (['(987654321)'], {}), '(987654321)\n', (374, 385), True, 'import numpy as np\n'), ((1115, 1140), 'numpy.random.seed', 'np.random.seed', (['(987654321)'], {}), '(987654321)\n', (1129, 1140), True, 'import numpy as np\n'), ((1186, 1207), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(200)'}), '(size=200)\n', (1197, 1207), True, 'import scipy.stats as st\n'), ((1232, 1253), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(200)'}), '(size=200)\n', (1243, 1253), True, 'import scipy.stats as st\n'), ((1544, 1569), 'numpy.random.seed', 'np.random.seed', (['(987654321)'], {}), '(987654321)\n', (1558, 1569), True, 'import numpy as np\n'), ((1615, 1636), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(200)'}), '(size=200)\n', (1626, 1636), True, 'import scipy.stats as st\n'), ((1661, 1682), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(200)'}), '(size=200)\n', (1672, 1682), True, 'import scipy.stats as st\n'), ((2096, 2121), 'numpy.random.seed', 'np.random.seed', (['(987654321)'], {}), '(987654321)\n', (2110, 2121), True, 'import numpy as np\n'), ((2167, 2188), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(200)'}), '(size=200)\n', (2178, 2188), True, 'import scipy.stats as st\n'), ((2213, 2234), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(200)'}), '(size=200)\n', (2224, 2234), True, 'import scipy.stats as st\n'), ((2640, 2665), 'numpy.random.seed', 'np.random.seed', (['(987654321)'], {}), '(987654321)\n', (2654, 2665), True, 'import numpy as np\n'), ((2711, 2732), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(200)'}), '(size=200)\n', (2722, 2732), True, 'import scipy.stats as st\n'), ((2757, 2778), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(200)'}), '(size=200)\n', (2768, 2778), True, 'import scipy.stats as st\n'), ((3177, 3202), 'numpy.random.seed', 'np.random.seed', (['(987654321)'], {}), '(987654321)\n', (3191, 3202), True, 'import numpy as np\n'), ((3248, 3269), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(200)'}), '(size=200)\n', (3259, 3269), True, 'import scipy.stats as st\n'), ((3294, 3315), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(200)'}), '(size=200)\n', (3305, 3315), True, 'import scipy.stats as st\n'), ((3726, 3751), 'numpy.random.seed', 'np.random.seed', (['(987654321)'], {}), '(987654321)\n', (3740, 3751), True, 'import numpy as np\n'), ((3797, 3818), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(200)'}), '(size=200)\n', (3808, 3818), True, 'import scipy.stats as st\n'), ((3843, 3864), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(200)'}), '(size=200)\n', (3854, 3864), True, 'import scipy.stats as st\n'), ((4333, 4358), 'numpy.random.seed', 'np.random.seed', (['(987654321)'], {}), '(987654321)\n', (4347, 4358), True, 'import numpy as np\n'), ((4404, 4423), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(4)'}), '(size=4)\n', (4415, 4423), True, 'import scipy.stats as st\n'), ((4448, 4467), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(4)'}), '(size=4)\n', (4459, 4467), True, 'import scipy.stats as st\n'), ((4800, 4825), 'numpy.random.seed', 'np.random.seed', (['(987654321)'], {}), '(987654321)\n', (4814, 4825), True, 'import numpy as np\n'), ((4871, 4890), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(3)'}), '(size=3)\n', (4882, 4890), True, 'import scipy.stats as st\n'), ((4915, 4934), 
'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(3)'}), '(size=3)\n', (4926, 4934), True, 'import scipy.stats as st\n'), ((5367, 5392), 'numpy.random.seed', 'np.random.seed', (['(987654321)'], {}), '(987654321)\n', (5381, 5392), True, 'import numpy as np\n'), ((5438, 5459), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(184)'}), '(size=184)\n', (5449, 5459), True, 'import scipy.stats as st\n'), ((5484, 5505), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(200)'}), '(size=200)\n', (5495, 5505), True, 'import scipy.stats as st\n'), ((5959, 5984), 'numpy.random.seed', 'np.random.seed', (['(987654321)'], {}), '(987654321)\n', (5973, 5984), True, 'import numpy as np\n'), ((6107, 6126), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(5)'}), '(size=5)\n', (6118, 6126), True, 'import scipy.stats as st\n'), ((7079, 7104), 'numpy.random.seed', 'np.random.seed', (['(987654321)'], {}), '(987654321)\n', (7093, 7104), True, 'import numpy as np\n'), ((7150, 7171), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(200)'}), '(size=200)\n', (7161, 7171), True, 'import scipy.stats as st\n'), ((7196, 7217), 'scipy.stats.norm.rvs', 'st.norm.rvs', ([], {'size': '(200)'}), '(size=200)\n', (7207, 7217), True, 'import scipy.stats as st\n'), ((7645, 7670), 'numpy.random.seed', 'np.random.seed', (['(987654321)'], {}), '(987654321)\n', (7659, 7670), True, 'import numpy as np\n'), ((8327, 8352), 'numpy.random.seed', 'np.random.seed', (['(987654321)'], {}), '(987654321)\n', (8341, 8352), True, 'import numpy as np\n')]
|
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM, GRU
from keras.utils.data_utils import get_file
from keras.optimizers import RMSprop
import numpy as np
import sys
import time
import random
import os
import re
from io import StringIO
# from LSTMPeephole import LSTMPeephole
BATCH_SIZE = 2 ** 13
def read_text_from_file(filename):
text = open(filename).read()
text = text.replace('%', ' percent ')
text = re.sub(r' +', ' ', text).lower()
text = re.sub(r'\n+', '\n', text).lower()
text = re.sub(r'[^a-zA-Z0-9\.\n\,\';\- ]+', '', text).lower()
return text
def make_char_lookup_table(text):
chars = set(text)
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
return char_indices, indices_char
def generate_text_stream(text, offset=0):
fp = StringIO(text)
fp.seek(offset)
while True:
val = fp.read(1)
if not val:
fp.seek(0)
continue
yield val
def onehot_encode(generator, char_indices):
char_count = len(char_indices)
for val in generator:
idx = char_indices[val]
v = np.zeros(char_count)
v[idx] = 1
yield v
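# generate_training_data keeps BATCH_SIZE independent character streams, each starting
# at a random offset into the corpus.  X holds the current one-hot character of every
# stream and y the character that follows it; after each yield the freshly produced y
# becomes the next X, so a stateful RNN sees each batch slot as one continuous stream.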
def generate_training_data(text, char_indices, batch_size=BATCH_SIZE):
char_count = len(char_indices)
X = np.zeros((batch_size, 1, char_count))
y = np.zeros((batch_size, char_count))
generators = []
for i in range(batch_size):
offset = random.randint(0, len(text))
g = onehot_encode(generate_text_stream(text, offset), char_indices)
generators.append(g)
for i in range(batch_size):
X[i] = next(generators[i]).reshape(X[i].shape)
indices_char = {v:k for (k,v) in list(char_indices.items())}
while True:
for i in range(batch_size):
y[i] = next(generators[i])
yield (X, y)
X[:,0,:] = y[:]
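# build_model stacks two stateful GRU layers over single-character timesteps.  Because
# stateful=True and batch_input_shape fixes the batch size, the hidden state of batch
# slot i carries over from one train_on_batch call to the next, which is what lets the
# one-character inputs above behave like long sequences.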
def build_model(char_count, batch_size=BATCH_SIZE):
model = Sequential()
model.add(GRU(1024, return_sequences=True, batch_input_shape=(batch_size, 1, char_count), stateful=True))
model.add(Dropout(0.2))
model.add(GRU(1024, return_sequences=False, stateful=True))
model.add(Dropout(0.2))
model.add(Dense(char_count))
model.add(Activation('softmax'))
learning_rate = .00001 * batch_size / (2.0 ** 10)
print(("Running with batch size {} learning rate {}".format(batch_size, learning_rate)))
optimizer = RMSprop(lr=learning_rate)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
return model
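# sample() draws a character index from the softmax output using temperature scaling:
# p_i -> exp(log(p_i) / T) / sum_j exp(log(p_j) / T).  T == 0 degenerates to argmax,
# T == 1 leaves the distribution unchanged, and larger T flattens it (more random text).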
def sample(a, temperature=.5):
# print(f'a={a}, temperature={temperature}')
if temperature == 0:
return np.argmax(a)
# helper function to sample an index from a probability array
a = np.log(a) / temperature
a = np.exp(a) / np.sum(np.exp(a))
a = np.array([min(max(float(p), 0.), 1.) for p in a])
a = a / a.sum()
# print(f'a={a}, np.sum(a)={np.sum(a)}')
return np.argmax(np.random.multinomial(1, a, 1))
def predict(model, current_char, char_indices, indices_to_char, batch_size=BATCH_SIZE, temperature=0.2):
# Ignore all but one value in the batch
X = np.zeros((batch_size, 1, len(char_indices)))
X[0, 0, char_indices[current_char]] = 1
preds = model.predict(X, batch_size=batch_size)[0]
char_idx = sample(preds, temperature=temperature)
return indices_to_char[char_idx]
def np_to_char(x, indices_char):
if not x.any():
return '?'
idx = np.nonzero(x)[0][0]
return indices_char[idx].replace('\n', 'NEWLINE')
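# build_visualization (only referenced from a commented-out call below) renders each
# selected weight matrix next to the absolute change since the previous iteration and
# saves the result as a PNG, giving a rough visual check that training updates weights.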
def build_visualization(layers, old_weights, run_name, iteration):
visualization_rows = []
for i, layer in enumerate(layers):
weights = layer.get_value()
print(("Weights layer {} max is {} min is {}".format(layer, weights.max(), weights.min())))
weight_update = np.abs(weights - old_weights[i])
weight_update *= 255.0 / 0.001
weights_normalized = weights * 128.0 / weights.max() + 128.0
visualization_rows.append(np.concatenate((weights_normalized, weight_update), axis=1))
old_weights[i] = weights
from PIL import Image
visualization = np.concatenate(visualization_rows)
Image.fromarray(visualization).convert('RGB').save('/tmp/visualization.png')
Image.fromarray(visualization).convert('RGB').save('/tmp/visualization_{}_iter{:04d}.png'.format(run_name, iteration))
def main(run_name, text):
chars = set(text)
print(('Found {} distinct characters: {}'.format(len(chars), ''.join(chars))))
char_indices, indices_char = make_char_lookup_table(text)
model = build_model(char_count=len(char_indices))
print("Building single-stream model...")
fast_model = build_model(char_count=len(char_indices), batch_size=1)
# train the model, output generated text after each iteration
layers = [layer for layer in model.trainable_weights if len(layer.get_value().shape) > 1 and layer.get_value().shape[1] == 512]
old_weights = [layer.get_value() for layer in layers]
generator = generate_training_data(text, char_indices)
start_time = time.time()
model.load_weights('models/bern.iter399.h5')
for iteration in range(1, 1000):
print(('-' * 50))
print(('Iteration {}'.format(iteration)))
model.reset_states()
        batches_per_minute = 2 ** 20 // BATCH_SIZE  # integer division so range() below receives an int
for i in range(batches_per_minute):
X, y = next(generator)
results = model.train_on_batch(X, y)
sys.stdout.write("\rBatch {} Loss: {}\t".format(i, results))
sys.stdout.flush()
sys.stdout.write('\n')
print(("Finished iteration {} after {:.2f} sec".format(iteration, time.time() - start_time)))
new_weights = [layer.get_value() for layer in layers]
# build_visualization(layers, old_weights, run_name, iteration)
old_weights = new_weights
# Copy weights to a light-weight version of the model used for prediction
for slow_layer, fast_layer in zip(model.layers, fast_model.layers):
fast_layer.set_weights(slow_layer.get_weights())
next_char = random.choice(list(char_indices.keys()))
for i in range(512 * 2):
next_char = predict(fast_model, next_char, char_indices, indices_char, batch_size=1)
sys.stdout.write(next_char)
sys.stdout.flush()
sys.stdout.write('\n')
# Save model parameters
model.save_weights('{}.iter{}.h5'.format(run_name, iteration), overwrite=True)
if __name__ == '__main__':
if len(sys.argv) < 3:
print(('Usage: {} text_corpus.txt run_name'.format(sys.argv[0])))
print('Text corpus should be at least 100k characters')
print('It is recommended to run this on a GPU')
exit()
filename = sys.argv[1]
run_name = sys.argv[2]
text = read_text_from_file(filename)
print(('Text length {} characters'.format(len(text))))
main(run_name, text)
|
[
"sys.stdout.write",
"numpy.abs",
"numpy.argmax",
"numpy.random.multinomial",
"sys.stdout.flush",
"numpy.exp",
"keras.layers.core.Activation",
"keras.layers.core.Dropout",
"re.sub",
"io.StringIO",
"keras.layers.recurrent.GRU",
"keras.layers.core.Dense",
"keras.optimizers.RMSprop",
"numpy.concatenate",
"numpy.log",
"numpy.zeros",
"time.time",
"numpy.nonzero",
"PIL.Image.fromarray",
"keras.models.Sequential"
] |
[((947, 961), 'io.StringIO', 'StringIO', (['text'], {}), '(text)\n', (955, 961), False, 'from io import StringIO\n'), ((1428, 1465), 'numpy.zeros', 'np.zeros', (['(batch_size, 1, char_count)'], {}), '((batch_size, 1, char_count))\n', (1436, 1465), True, 'import numpy as np\n'), ((1474, 1508), 'numpy.zeros', 'np.zeros', (['(batch_size, char_count)'], {}), '((batch_size, char_count))\n', (1482, 1508), True, 'import numpy as np\n'), ((2070, 2082), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2080, 2082), False, 'from keras.models import Sequential\n'), ((2546, 2571), 'keras.optimizers.RMSprop', 'RMSprop', ([], {'lr': 'learning_rate'}), '(lr=learning_rate)\n', (2553, 2571), False, 'from keras.optimizers import RMSprop\n'), ((4271, 4305), 'numpy.concatenate', 'np.concatenate', (['visualization_rows'], {}), '(visualization_rows)\n', (4285, 4305), True, 'import numpy as np\n'), ((5211, 5222), 'time.time', 'time.time', ([], {}), '()\n', (5220, 5222), False, 'import time\n'), ((1256, 1276), 'numpy.zeros', 'np.zeros', (['char_count'], {}), '(char_count)\n', (1264, 1276), True, 'import numpy as np\n'), ((2097, 2195), 'keras.layers.recurrent.GRU', 'GRU', (['(1024)'], {'return_sequences': '(True)', 'batch_input_shape': '(batch_size, 1, char_count)', 'stateful': '(True)'}), '(1024, return_sequences=True, batch_input_shape=(batch_size, 1,\n char_count), stateful=True)\n', (2100, 2195), False, 'from keras.layers.recurrent import LSTM, GRU\n'), ((2207, 2219), 'keras.layers.core.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2214, 2219), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((2235, 2283), 'keras.layers.recurrent.GRU', 'GRU', (['(1024)'], {'return_sequences': '(False)', 'stateful': '(True)'}), '(1024, return_sequences=False, stateful=True)\n', (2238, 2283), False, 'from keras.layers.recurrent import LSTM, GRU\n'), ((2299, 2311), 'keras.layers.core.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (2306, 2311), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((2327, 2344), 'keras.layers.core.Dense', 'Dense', (['char_count'], {}), '(char_count)\n', (2332, 2344), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((2360, 2381), 'keras.layers.core.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (2370, 2381), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((2783, 2795), 'numpy.argmax', 'np.argmax', (['a'], {}), '(a)\n', (2792, 2795), True, 'import numpy as np\n'), ((2870, 2879), 'numpy.log', 'np.log', (['a'], {}), '(a)\n', (2876, 2879), True, 'import numpy as np\n'), ((2902, 2911), 'numpy.exp', 'np.exp', (['a'], {}), '(a)\n', (2908, 2911), True, 'import numpy as np\n'), ((3076, 3106), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', 'a', '(1)'], {}), '(1, a, 1)\n', (3097, 3106), True, 'import numpy as np\n'), ((3956, 3988), 'numpy.abs', 'np.abs', (['(weights - old_weights[i])'], {}), '(weights - old_weights[i])\n', (3962, 3988), True, 'import numpy as np\n'), ((5707, 5729), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (5723, 5729), False, 'import sys\n'), ((6491, 6513), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (6507, 6513), False, 'import sys\n'), ((515, 538), 're.sub', 're.sub', (['""" +"""', '""" """', 'text'], {}), "(' +', ' ', text)\n", (521, 538), False, 'import re\n'), ((559, 585), 're.sub', 're.sub', (['"""\\\\n+"""', '"""\n"""', 'text'], {}), "('\\\\n+', '\\n', text)\n", (565, 585), False, 'import 
re\n'), ((605, 655), 're.sub', 're.sub', (['"""[^a-zA-Z0-9\\\\.\\\\n\\\\,\\\\\';\\\\- ]+"""', '""""""', 'text'], {}), '("[^a-zA-Z0-9\\\\.\\\\n\\\\,\\\\\';\\\\- ]+", \'\', text)\n', (611, 655), False, 'import re\n'), ((2921, 2930), 'numpy.exp', 'np.exp', (['a'], {}), '(a)\n', (2927, 2930), True, 'import numpy as np\n'), ((3586, 3599), 'numpy.nonzero', 'np.nonzero', (['x'], {}), '(x)\n', (3596, 3599), True, 'import numpy as np\n'), ((4131, 4190), 'numpy.concatenate', 'np.concatenate', (['(weights_normalized, weight_update)'], {'axis': '(1)'}), '((weights_normalized, weight_update), axis=1)\n', (4145, 4190), True, 'import numpy as np\n'), ((5680, 5698), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5696, 5698), False, 'import sys\n'), ((6424, 6451), 'sys.stdout.write', 'sys.stdout.write', (['next_char'], {}), '(next_char)\n', (6440, 6451), False, 'import sys\n'), ((6464, 6482), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6480, 6482), False, 'import sys\n'), ((4310, 4340), 'PIL.Image.fromarray', 'Image.fromarray', (['visualization'], {}), '(visualization)\n', (4325, 4340), False, 'from PIL import Image\n'), ((4391, 4421), 'PIL.Image.fromarray', 'Image.fromarray', (['visualization'], {}), '(visualization)\n', (4406, 4421), False, 'from PIL import Image\n'), ((5804, 5815), 'time.time', 'time.time', ([], {}), '()\n', (5813, 5815), False, 'import time\n')]
|
from Main.AlphaZero.DistributedSelfPlay import Constants as C
from Main.AlphaZero.Oracle import GraphOptimizer, OracleCommands
from Main import Hyperparameters, MachineSpecificSettings
# from keras import backend as K
# import tensorflow as tf
import numpy as np
ORACLE_PIPE = None
K = None
tf = None
def runPredictionOracle(model, selfPlayPool, toOraclePipe, kerasBackend, tensorflow):
global ORACLE_PIPE, K, tf
ORACLE_PIPE = toOraclePipe
K = kerasBackend
tf = tensorflow
if (MachineSpecificSettings.IS_UNIX_MACHINE):
_runOptimizedGraphOracle(model, selfPlayPool)
else:
_runNormalKerasOracle(model, selfPlayPool)
# _runUnbiasedOracle(selfPlayPool)
# ***** Main oracle loop, called from the pre-defined oracle entry points above. *****
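# Protocol sketch (as implemented below): a worker puts (EVAL_NEW_DATA, n, states, workerID)
# on the oracle queue; the oracle batches the states through the network and replies to that
# worker with (ORACLE_STATUS_RUNNING, n, predictions).  A SELF_PLAY_OVER message breaks the
# loop and is re-broadcast to the workers so none of them blocks on a read that would never
# be answered.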
def _oracleLoop(predictFunc, selfPlayPool):
predictionEvalHistory = []
amountOfGames = []
while (True):
message = ORACLE_PIPE.get()
if (message[0] == OracleCommands.EVAL_NEW_DATA):
_, amountOfStates, states, workerID = message
assert (amountOfStates == len(states))
predictions = predictFunc(states)
# DEBUG STATS
amountOfGames.append(amountOfStates)
predictionEvalHistory.extend(predictions[0])
outMsg = (OracleCommands.ORACLE_STATUS_RUNNING, len(predictions[0]), predictions)
selfPlayPool.sendMessageToProc(workerID, outMsg)
# If we set the ABORT_FLAG to True whilst the oracle is listening on the Q we will get stuck in an infinite read.
# Therefore we also send a message to the oracle when the cycle is over
elif (message[0] == C.LocalWorkerProtocol.SELF_PLAY_OVER):
_flushAndAbortLocalWorkers(selfPlayPool)
break
def _flushAndAbortLocalWorkers(selfPlayPool):
abortMsg = (C.LocalWorkerProtocol.SELF_PLAY_OVER, [])
selfPlayPool.broadcastMsg(abortMsg)
# ***** Prediction with a simple keras model... *****
def _runNormalKerasOracle(model, selfPlayPool):
global NORMAL_MODEL
NORMAL_MODEL = model
_oracleLoop(_predictWithNormalModel, selfPlayPool)
NORMAL_MODEL = None
def _predictWithNormalModel(states):
return NORMAL_MODEL.predict([states])
# ***** Prediction with unbiased values, AKA fake prediction without any network... *****
UNBIASED_EVAL = None
UNBIASED_POLICY = None
# ***** Prediction with a simple keras model... *****
def _runUnbiasedOracle(selfPlayPool):
global UNBIASED_EVAL, UNBIASED_POLICY
UNBIASED_EVAL = [[np.random.random()] for i in range(Hyperparameters.AMOUNT_OF_GAMES_PER_WORKER)]
UNBIASED_POLICY = np.array([[1, 1, 1, 1, 1, 1, 1]] * Hyperparameters.AMOUNT_OF_GAMES_PER_WORKER)
_oracleLoop(_predictWithUnbiasedValues, selfPlayPool)
def _predictWithUnbiasedValues(states):
amountOfStates = len(states)
return [UNBIASED_EVAL[:amountOfStates], UNBIASED_POLICY[:amountOfStates]]
# ***** Prediction with an optimized graph directly in tensorflow... *****
OPTIMIZED_GRAPH = None
INPUT_TENSOR = None
VALUE_OUT = None
POLICY_OUT = None
def _runOptimizedGraphOracle(model, selfPlayPool):
global OPTIMIZED_GRAPH, VALUE_OUT, POLICY_OUT, INPUT_TENSOR
optiGraph, outputs = GraphOptimizer.createOptimizedGraph(model, K.get_session(), tf)
graph = tf.Graph()
with graph.as_default():
with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=1))) as sess:
# read TensorRT model
trt_graph = optiGraph
# obtain the corresponding input-output tensor
tf.import_graph_def(trt_graph, name='')
INPUT_TENSOR = sess.graph.get_tensor_by_name('InputLayer:0')
VALUE_OUT = sess.graph.get_tensor_by_name('ValueOut/Sigmoid:0')
POLICY_OUT = sess.graph.get_tensor_by_name('PolicyOut/Softmax:0')
OPTIMIZED_GRAPH = sess
_oracleLoop(_predictWithOptimizedGraph, selfPlayPool)
def _predictWithOptimizedGraph(states):
return OPTIMIZED_GRAPH.run([VALUE_OUT, POLICY_OUT], feed_dict={INPUT_TENSOR: np.array(states)})
|
[
"numpy.random.random",
"numpy.array"
] |
[((2710, 2788), 'numpy.array', 'np.array', (['([[1, 1, 1, 1, 1, 1, 1]] * Hyperparameters.AMOUNT_OF_GAMES_PER_WORKER)'], {}), '([[1, 1, 1, 1, 1, 1, 1]] * Hyperparameters.AMOUNT_OF_GAMES_PER_WORKER)\n', (2718, 2788), True, 'import numpy as np\n'), ((2607, 2625), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2623, 2625), True, 'import numpy as np\n'), ((4203, 4219), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (4211, 4219), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# Original code (Python 2) from <NAME>; <NAME>; <NAME>; <NAME>; J.He;
# "Sentinel-2 MultiSpectral Instrument (MSI) data processing for aquatic science applications: Demonstrations and validations"
# suplementary data "Program for generating Sentinel-2's high-resolution angle coefficients"
# at https://www.sciencedirect.com/science/article/pii/S0034425717303991
import logging
import math
import numpy
import os
import xml.etree.ElementTree as ET
from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2
from pathlib import Path
import rasterio
from skimage.transform import resize
############################################################################
# Sudipta's addition to enable spatial subset
############################################################################
K0 = 0.9996
E = 0.00669438
E2 = E * E
E3 = E2 * E
E_P2 = E / (1.0 - E)
SQRT_E = math.sqrt(1 - E)
_E = (1 - SQRT_E) / (1 + SQRT_E)
_E2 = _E * _E
_E3 = _E2 * _E
_E4 = _E3 * _E
_E5 = _E4 * _E
M1 = (1 - E / 4 - 3 * E2 / 64 - 5 * E3 / 256)
M2 = (3 * E / 8 + 3 * E2 / 32 + 45 * E3 / 1024)
M3 = (15 * E2 / 256 + 45 * E3 / 1024)
M4 = (35 * E3 / 3072)
P2 = (3. / 2 * _E - 27. / 32 * _E3 + 269. / 512 * _E5)
P3 = (21. / 16 * _E2 - 55. / 32 * _E4)
P4 = (151. / 96 * _E3 - 417. / 128 * _E5)
P5 = (1097. / 512 * _E4)
R = 6378137
ZONE_LETTERS = "CDEFGHJKLMNPQRSTUVWXX"
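# from_latlon() is the standard UTM forward projection (series expansion on the WGS 84
# ellipsoid): it converts geographic latitude/longitude in degrees to UTM easting,
# northing, zone number and zone letter, e.g.
#   easting, northing, zone, letter = from_latlon(48.858, 2.294)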
def from_latlon(latitude, longitude, force_zone_number=None):
if not -80.0 <= latitude <= 84.0:
raise ValueError('latitude out of range (must be between 80 deg S and 84 deg N)')
if not -180.0 <= longitude <= 180.0:
raise ValueError('longitude out of range (must be between 180 deg W and 180 deg E)')
lat_rad = math.radians(latitude)
lat_sin = math.sin(lat_rad)
lat_cos = math.cos(lat_rad)
lat_tan = lat_sin / lat_cos
lat_tan2 = lat_tan * lat_tan
lat_tan4 = lat_tan2 * lat_tan2
if force_zone_number is None:
zone_number = latlon_to_zone_number(latitude, longitude)
else:
zone_number = force_zone_number
zone_letter = latitude_to_zone_letter(latitude)
lon_rad = math.radians(longitude)
central_lon = zone_number_to_central_longitude(zone_number)
central_lon_rad = math.radians(central_lon)
n = R / math.sqrt(1 - E * lat_sin**2)
c = E_P2 * lat_cos**2
a = lat_cos * (lon_rad - central_lon_rad)
a2 = a * a
a3 = a2 * a
a4 = a3 * a
a5 = a4 * a
a6 = a5 * a
m = R * (M1 * lat_rad -
M2 * math.sin(2 * lat_rad) +
M3 * math.sin(4 * lat_rad) -
M4 * math.sin(6 * lat_rad))
easting = K0 * n * (a +
a3 / 6 * (1 - lat_tan2 + c) +
a5 / 120 * (5 - 18 * lat_tan2 + lat_tan4 + 72 * c - 58 * E_P2)) + 500000
northing = K0 * (m + n * lat_tan * (a2 / 2 +
a4 / 24 * (5 - lat_tan2 + 9 * c + 4 * c**2) +
a6 / 720 * (61 - 58 * lat_tan2 + lat_tan4 + 600 * c - 330 * E_P2)))
if latitude < 0:
northing += 10000000
return easting, northing, zone_number, zone_letter
def latitude_to_zone_letter(latitude):
if -80 <= latitude <= 84:
return ZONE_LETTERS[int(latitude + 80) >> 3]
else:
return None
def latlon_to_zone_number(latitude, longitude):
if 56 <= latitude < 64 and 3 <= longitude < 12:
return 32
if 72 <= latitude <= 84 and longitude >= 0:
if longitude <= 9:
return 31
elif longitude <= 21:
return 33
elif longitude <= 33:
return 35
elif longitude <= 42:
return 37
return int((longitude + 180) / 6) + 1
def zone_number_to_central_longitude(zone_number):
return (zone_number - 1) * 6 - 180 + 3
############################################################################
# End Sudipta's addition
############################################################################
# Define constants
a = 6378137.0 # WGS 84 semi-major axis in meters
b = 6356752.314 # WGS 84 semi-minor axis in meters
ecc = 1.0 - b / a * b / a # WGS 84 ellipsoid eccentricity
todeg = 180.0 / pi # Converts radians to degrees
# Define functions used to construct image observations
def LOSVec(Lat, Lon, Zen, Az):
LSRx = (-sin(Lon), cos(Lon), 0.0)
LSRy = (-sin(Lat)*cos(Lon), -sin(Lat)*sin(Lon), cos(Lat))
LSRz = (cos(Lat)*cos(Lon), cos(Lat)*sin(Lon), sin(Lat))
LOS = (sin(Zen)*sin(Az), sin(Zen)*cos(Az), cos(Zen))
Sat = (LOS[0]*LSRx[0] + LOS[1]*LSRy[0] + LOS[2]*LSRz[0],
LOS[0]*LSRx[1] + LOS[1]*LSRy[1] + LOS[2]*LSRz[1],
LOS[0]*LSRx[2] + LOS[1]*LSRy[2] + LOS[2]*LSRz[2])
Rn = a / sqrt(1.0 - ecc *sin(Lat)*sin(Lat))
Gx = (Rn*cos(Lat)*cos(Lon),
Rn*cos(Lat)*sin(Lon),
Rn*(1-ecc)*sin(Lat))
return (Sat, Gx)
def GrndVec(Lat, Lon):
Rn = a / sqrt(1.0 - ecc *sin(Lat)*sin(Lat))
Gx = (Rn*cos(Lat)*cos(Lon),
Rn*cos(Lat)*sin(Lon),
Rn*(1-ecc)*sin(Lat))
return (Gx)
# Inverse (X/Y to lat/long) UTM projection
def utm_inv(Zone, X, Y, a=6378137.0, b=6356752.31414):
if Zone < 0 :
FNorth = 10000000.0 # Southern hemisphere False Northing
else:
FNorth = 0.0 # Northern hemisphere False Northing
FEast = 500000.0 # UTM False Easting
Scale = 0.9996 # Scale at CM (UTM parameter)
LatOrigin = 0.0 # Latitude origin (UTM parameter)
CMDeg = -177 + (abs(int(Zone))-1)*6
CM = float(CMDeg)*pi/180.0 # Central meridian (based on zone)
ecc = 1.0 - b/a*b/a
ep = ecc/(1.0-ecc)
M0 = a*((1.0-ecc*(0.25+ecc*(3.0/64.0+ecc*5.0/256.0)))*LatOrigin
-ecc*(0.375+ecc*(3.0/32.0+ecc*45.0/1024.0))*sin(2.0*LatOrigin)
+ecc*ecc*(15.0/256.0+ecc*45.0/1024.0)*sin(4.0*LatOrigin)
-ecc*ecc*ecc*35.0/3072.0*sin(6.0*LatOrigin))
M = M0+(Y-FNorth)/Scale
Mu = M/(a*(1.0-ecc*(0.25+ecc*(3.0/64.0+ecc*5.0/256.0))))
e1 = (1.0-sqrt(1-ecc))/(1.0+sqrt(1.0-ecc))
Phi1 = Mu+(e1*(1.5-27.0/32.0*e1*e1)*sin(2.0*Mu)
+e1*e1*(21.0/16.0-55.0/32.0*e1*e1)*sin(4.0*Mu)
+151.0/96.0*e1*e1*e1*sin(6.0*Mu)
+1097.0/512.0*e1*e1*e1*e1*sin(8.0*Mu))
slat = sin(Phi1)
clat = cos(Phi1)
Rn1 = a/sqrt(1.0-ecc*slat*slat)
T1 = slat*slat/clat/clat
C1 = ep*clat*clat
R1 = Rn1*(1.0-ecc)/(1.0-ecc*slat*slat)
D = (X-FEast)/Rn1/Scale
# Calculate Lat/Lon
Lat = Phi1 - (Rn1*slat/clat/R1*(D*D/2.0
-(5.0+3.0*T1+10.0*C1-4.0*C1*C1-9.0*ep)*D*D*D*D/24.0
+(61.0+90.0*T1+298.0*C1+45.0*T1*T1-252.0*ep-3.0*C1*C1)*D*D*D*D*D*D/720.0))
Lon = CM + (D-(1.0+2.0*T1+C1)*D*D*D/6.0+(5.0-2.0*C1+28.0*T1-3.0*C1*C1+8.0*ep+24.0*T1*T1)
*D*D*D*D*D/120.0)/clat
return (Lat, Lon)
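# get_angleobs() parses the tile metadata XML: it reads the tile geocoding (UTM zone,
# 60 m grid size and upper-left corner) and the per-band / per-detector viewing-angle
# grids, converts every valid zenith/azimuth sample into a satellite line-of-sight and
# ground vector via LOSVec(), and returns the tile ID plus the observation list.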
def get_angleobs(XML_File):
# Parse the XML file
tree = ET.parse(XML_File)
root = tree.getroot()
# Find the angles
for child in root:
if child.tag[-12:] == 'General_Info':
geninfo = child
if child.tag[-14:] == 'Geometric_Info':
geoinfo = child
for segment in geninfo:
if segment.tag == 'TILE_ID':
tile_id = segment.text.strip()
for segment in geoinfo:
if segment.tag == 'Tile_Geocoding':
frame = segment
if segment.tag == 'Tile_Angles':
angles = segment
for box in frame:
if box.tag == 'HORIZONTAL_CS_NAME':
czone = box.text.strip()[-3:]
hemis = czone[-1:]
zone = int(czone[:-1])
if box.tag == 'Size' and box.attrib['resolution'] == '60':
for field in box:
if field.tag == 'NROWS':
nrows = int(field.text)
if field.tag == 'NCOLS':
ncols = int(field.text)
if box.tag == 'Geoposition' and box.attrib['resolution'] == '60':
for field in box:
if field.tag == 'ULX':
ulx = float(field.text)
if field.tag == 'ULY':
uly = float(field.text)
if hemis == 'S':
lzone = -zone
else:
lzone = zone
AngleObs = { 'zone' : zone, 'hemis' : hemis, 'nrows' : nrows, 'ncols' : ncols, 'ul_x' : ulx, 'ul_y' : uly, 'obs' : [] }
for angle in angles:
if angle.tag == 'Viewing_Incidence_Angles_Grids':
bandId = int(angle.attrib['bandId'])
detectorId = int(angle.attrib['detectorId'])
for bset in angle:
if bset.tag == 'Zenith':
zenith = bset
if bset.tag == 'Azimuth':
azimuth = bset
for field in zenith:
if field.tag == 'COL_STEP':
col_step = int(field.text)
if field.tag == 'ROW_STEP':
row_step = int(field.text)
if field.tag == 'Values_List':
zvallist = field
for field in azimuth:
if field.tag == 'Values_List':
avallist = field
for rindex in range(len(zvallist)):
zvalrow = zvallist[rindex]
avalrow = avallist[rindex]
zvalues = zvalrow.text.split(' ')
avalues = avalrow.text.split(' ')
values = list(zip(zvalues, avalues))
ycoord = uly - rindex * row_step
for cindex in range(len(values)):
xcoord = ulx + cindex * col_step
(lat, lon) = utm_inv(lzone, xcoord, ycoord)
if (values[cindex][0] != 'NaN' and values[cindex][1] != 'NaN'):
zen = float(values[cindex][0]) / todeg
az = float(values[cindex][1]) / todeg
(Sat, Gx) = LOSVec(lat, lon, zen, az)
observe = [bandId, detectorId, xcoord, ycoord, Sat, Gx]
AngleObs['obs'].append(observe)
return (tile_id, AngleObs)
def get_detfootprint(XML_File):
# Extract the directory
Foot_Dir = os.path.dirname(XML_File)
Foot_Dir += '/QI_DATA/'
# Parse the XML file
tree = ET.parse(XML_File)
root = tree.getroot()
# Find the detector footprint files
footprints = []
for child in root:
if child.tag[-23:] == 'Quality_Indicators_Info':
qualinfo = child
for segment in qualinfo:
if segment.tag == 'Pixel_Level_QI':
pixlevel = segment
for qifile in pixlevel:
if qifile.tag == 'MASK_FILENAME':
if qifile.attrib['type'] == 'MSK_DETFOO':
bandId = int(qifile.attrib['bandId'])
qifname = Foot_Dir + os.path.basename(qifile.text.strip())
footprints.append((bandId, qifname))
bandfoot = []
for foot in footprints:
bandId = int(foot[0])
tree2 = ET.parse(foot[1])
root2 = tree2.getroot()
for child in root2:
if child.tag[-11:] == 'maskMembers':
thismember = child
for feature in thismember:
if feature.tag[-11:] == 'MaskFeature':
for thisattribute in feature.attrib:
if thisattribute[-2:] == 'id':
detId = int (feature.attrib[thisattribute].split('-')[2])
bandName = feature.attrib[thisattribute].split('-')[1]
thisband = { 'detId' : detId , 'bandId' : bandId, 'bandName' : bandName, 'coords' : [] }
thisfeature = feature
for extent in thisfeature:
if extent.tag[-8:] == 'extentOf':
thisextent = extent
for polygon in thisextent:
if polygon.tag[-7:] == 'Polygon':
thispolygon = polygon
for exterior in thispolygon:
if exterior.tag[-8:] == 'exterior':
thisexterior = exterior
for ring in thisexterior:
if ring.tag[-10:] == 'LinearRing':
thisring = ring
for poslist in thisring:
if poslist.tag[-7:] == 'posList':
ncoord = int(poslist.attrib['srsDimension'])
fields = poslist.text.split(' ')
index = 0
for field in fields:
if index == 0:
x = float(field)
elif index == 1:
y = float(field)
thisband['coords'].append((x,y))
index = (index + 1) % ncoord
bandfoot.append(thisband)
return bandfoot
# Define functions used to construct image observations
def Magnitude(A):
return sqrt(A[0]*A[0] + A[1]*A[1] + A[2]*A[2])
def Dot(A, B):
return A[0]*B[0] + A[1]*B[1] + A[2]*B[2]
def CalcObs(obs, Orbit, Omega0, Lon0):
Vx = [0.0, 0.0, 0.0]
ltime = obs[6]
Sat = obs[4]
Gx = obs[5]
cta = Omega0 - 2*pi*ltime/Orbit[4]
gclat = asin(sin(cta) * sin(Orbit[3]))
gclon = Lon0 + asin(tan(gclat) / -tan(Orbit[3])) - 2*pi*ltime/86400
Vx[0] = Orbit[2] * cos(gclat) * cos(gclon) - Gx[0]
Vx[1] = Orbit[2] * cos(gclat) * sin(gclon) - Gx[1]
Vx[2] = Orbit[2] * sin(gclat) - Gx[2]
Vdist = Magnitude(Vx)
Vx[0] = Vx[0] / Vdist - Sat[0]
Vx[1] = Vx[1] / Vdist - Sat[1]
Vx[2] = Vx[2] / Vdist - Sat[2]
return Vx
def Partial_O(obs, Orbit):
P0 = numpy.zeros((3,4))
Omega0 = asin(sin(Orbit[0]) / sin(Orbit[3]))
Lon0 = Orbit[1] - asin(tan(Orbit[0]) / -tan(Orbit[3]))
Dx = CalcObs(obs, Orbit, Omega0, Lon0)
POrb = Orbit
Pert = [0.00001, 0.00001, 10.0, 0.0001] # Perturbations to Lat, Lon, Radius, and Inclination
for index in range(len(Pert)):
POrb[index] += Pert[index]
Omega0 = asin(sin(POrb[0]) / sin(POrb[3]))
Lon0 = POrb[1] - asin(tan(POrb[0]) / -tan(POrb[3]))
Dp = CalcObs(obs, POrb, Omega0, Lon0)
P0[0, index] = (Dp[0] - Dx[0])/Pert[index]
P0[1, index] = (Dp[1] - Dx[1])/Pert[index]
P0[2, index] = (Dp[2] - Dx[2])/Pert[index]
POrb[index] -= Pert[index]
return P0
def Partial_T(obs, Orbit):
P1 = [0.0, 0.0, 0.0]
Omega0 = asin(sin(Orbit[0]) / sin(Orbit[3]))
Lon0 = Orbit[1] - asin(tan(Orbit[0]) / -tan(Orbit[3]))
Dx = CalcObs(obs, Orbit, Omega0, Lon0)
Pobs = obs
Pert = 0.1 # Time perturbation
Pobs[6] += Pert
Dp = CalcObs(Pobs, Orbit, Omega0, Lon0)
P1[0] = (Dp[0] - Dx[0])/Pert
P1[1] = (Dp[1] - Dx[1])/Pert
P1[2] = (Dp[2] - Dx[2])/Pert
Pobs[6] -= Pert
return P1
def Fit_Time(ul_x, ul_y, Obs):
Time_Parms = []
for band in range(13):
TParm_List = []
for sca in range(13):
A0 = numpy.matrix(numpy.zeros((4, 4)))
L0 = numpy.matrix(numpy.zeros((4, 1)))
X0 = numpy.matrix(numpy.zeros((4, 1)))
for los in Obs:
if los[0] == band and (sca == 0 or los[1] == sca):
dx = los[2] - ul_x
dy = ul_y - los[3]
A0[0,0] += 1.0
A0[0,1] += dx
A0[0,2] += dy
A0[0,3] += dx*dy
L0[0,0] += los[6]
A0[1,0] += dx
A0[1,1] += dx*dx
A0[1,2] += dx*dy
A0[1,3] += dx*dx*dy
L0[1,0] += dx*los[6]
A0[2,0] += dy
A0[2,1] += dy*dx
A0[2,2] += dy*dy
A0[2,3] += dy*dx*dy
L0[2,0] += dy*los[6]
A0[3,0] += dx*dy
A0[3,1] += dx*dy*dx
A0[3,2] += dx*dy*dy
A0[3,3] += dx*dy*dx*dy
L0[3,0] += dx*dy*los[6]
# Detector 0 is the band average solution which is used to strengthen detectors with few points
if sca == 0:
A0all = A0
L0all = L0
# Make sure we have a valid solution for this detector
try:
A0inv = A0**-1
            except:
                # Bring in the band average data for the rate terms
if A0[0,0] < 1.0:
A0[0,0] = 1.0
A0[1,1] = A0all[1,1]
A0[1,2] = A0all[1,2]
A0[1,3] = A0all[1,3]
A0[2,1] = A0all[2,1]
A0[2,2] = A0all[2,2]
A0[2,3] = A0all[2,3]
A0[3,1] = A0all[3,1]
A0[3,2] = A0all[3,2]
A0[3,3] = A0all[3,3]
L0[1,0] = L0all[1,0]
L0[2,0] = L0all[2,0]
L0[3,0] = L0all[3,0]
A0inv = A0**-1
X0 = A0inv * L0
TParm_List.append(list(X0))
this_time = { 'band' : band, 'tmodel' : TParm_List }
Time_Parms.append(this_time)
# Calculate fit statistic
rmsfit = 0
numobs = 0
coeffs = TParm_List
for los in Obs:
if los[0] == band:
det = los[1]
dx = los[2] - ul_x
dy = ul_y - los[3]
dt = coeffs[det][0] + coeffs[det][1]*dx + coeffs[det][2]*dy + coeffs[det][3]*dx*dy - los[6]
numobs += 1
rmsfit += dt*dt
if numobs > 0:
rmsfit = sqrt(rmsfit / numobs)
logging.info('Time fit for band ',band,' RMS = ',rmsfit,' seconds')
return Time_Parms
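# Fit_Orbit() reconstructs a circular orbit (reference lat/lon, radius, inclination)
# from the view-angle observations by iterative least squares, simultaneously solving
# for a time-of-observation correction per sample, and then fits per-detector bilinear
# observation-time models over the tile with Fit_Time().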
def Fit_Orbit(AngleObs):
# Initialize the orbit parameters
Orbit = [0.0, 0.0, 7169868.175, 98.62/todeg, 6041.958] # Reference Lat, Reference Lon, Radius, Inclination, Period
Orbit0 = [0.0, 0.0, 7169868.175, 98.62/todeg, 6041.958] # Reference Lat, Reference Lon, Radius, Inclination, Period
# Load the angle records
ul_x = AngleObs['ul_x']
ul_y = AngleObs['ul_y']
Obs = []
numobs = 0
for viewrec in AngleObs['obs']:
numobs += 1
# Construct observation record
Sat = [viewrec[4][0], viewrec[4][1], viewrec[4][2]]
Gx = viewrec[5]
Obs.append([viewrec[0], viewrec[1], viewrec[2], viewrec[3], viewrec[4], viewrec[5], 0.0])
# Project the view vector out to the satellite orbital radius
Gmag = Magnitude(Gx)
Vdot = Dot(Sat, Gx)
Vdist = sqrt(Orbit[2]*Orbit[2] + Vdot*Vdot - Gmag*Gmag)
Px = [Gx[0]+Sat[0]*Vdist, Gx[1]+Sat[1]*Vdist, Gx[2]+Sat[2]*Vdist]
Orbit[1] += atan2(Px[1], Px[0])
Orbit[0] += atan(Px[2] / sqrt(Px[0]*Px[0] + Px[1]*Px[1]))
Orbit[1] /= numobs
Orbit[0] /= numobs
Orbit0[0] = Orbit[0]
Orbit0[1] = Orbit[1]
#Iterate solution for orbital parameters and observation times
convtol = 0.001 # 1 millisecond RMS time correction
rmstime = 15.0
orbtol = 1.0
orbrss = 1000.0
first_iter = 0
logging.info('Reconstructing Orbit from View Angles')
while rmstime > convtol or orbrss > orbtol:
AngResid = 0.0
Omega0 = asin(sin(Orbit[0]) / sin(Orbit[3]))
Lon0 = Orbit[1] - asin(tan(Orbit[0]) / -tan(Orbit[3]))
A0 = numpy.matrix(numpy.zeros((4, 4)))
L0 = numpy.matrix(numpy.zeros((4, 1)))
X0 = numpy.matrix(numpy.zeros((4, 1)))
M1 = numpy.matrix(numpy.zeros((4, 1)))
BackSub = []
for los in Obs:
Vx = CalcObs(los, Orbit, Omega0, Lon0)
AngResid += Dot(Vx, Vx)
V0 = numpy.matrix(numpy.array(Vx)).reshape(3,1)
# Calculate the partial derivatives w.r.t. the orbit parameters
P0 = Partial_O(los, Orbit)
P0t = numpy.matrix(numpy.transpose(P0))
P0 = numpy.matrix(P0)
A0 = A0 + P0t * P0
L0 = L0 + P0t * V0
P1 = Partial_T(los, Orbit)
M1 = P0t * numpy.matrix(numpy.array(P1)).reshape(3,1)
A1 = 1.0 / Dot(P1, P1)
L1 = Dot(P1, Vx) * A1
M1t = A1 * M1.reshape(1,4)
A0 = A0 + M1 * M1t
L0 = L0 + M1 * L1
BackSub.append([L1, M1 * A1])
# Solve for Orbital parameter corrections
if first_iter == 0:
X0 = numpy.matrix(numpy.zeros((4, 1)))
first_iter = 1
else:
X0 = (A0**-1) * L0
# Back Substitute for Time Corrections
rmstime = 0.0
for index in range(len(Obs)):
dtime = BackSub[index][0] - Dot(BackSub[index][1], X0)
rmstime += dtime * dtime
Obs[index][6] -= dtime
# Update Orbit Parameters
Orbit[0] -= X0[0,0]
Orbit[1] -= X0[1,0]
Orbit[2] -= X0[2,0]
Orbit[3] -= X0[3,0]
# Evaluate Observation Residual RMS
AngResid = sqrt(AngResid / numobs)
# Evaluate Convergence
rmstime = sqrt(rmstime / numobs)
# Orbit Convergence
X0[0,0] *= 6378137.0
X0[1,0] *= 6378137.0
X0[3,0] *= Orbit[2]
orbrss = sqrt(X0[0,0]*X0[0,0] + X0[1,0]*X0[1,0] + X0[2,0]*X0[2,0] + X0[3,0]*X0[3,0])
logging.info('Lat = ', Orbit[0]*todeg)
logging.info('Lon = ', Orbit[1]*todeg)
logging.info('Radius = ', Orbit[2])
logging.info('Incl = ', Orbit[3]*todeg)
logging.info('RMS Orbit Fit (meters): ', orbrss)
logging.info('RMS Time Fit (seconds): ', rmstime)
logging.info('RMS LOS Residual: ', AngResid)
logging.info('Fitting Tile Observation Times')
Time_Parms = Fit_Time(ul_x, ul_y, Obs)
return (Orbit, Time_Parms)
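# CalcOrbit() evaluates the fitted orbit at elapsed time ltime: the central travel angle
# advances with the orbital period, Earth rotation is removed from the longitude, and the
# satellite position is returned as an ECEF vector of length Orbit[2].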
def CalcOrbit(ltime, Orbit):
cta = Orbit[5] - 2*pi*ltime/Orbit[4]
gclat = asin(sin(cta) * sin(Orbit[3]))
gclon = Orbit[6] + asin(tan(gclat) / -tan(Orbit[3])) - 2*pi*ltime/86400
Px = [Orbit[2]*cos(gclat)*cos(gclon), Orbit[2]*cos(gclat)*sin(gclon), Orbit[2]*sin(gclat)]
return Px
#def CalcGroundVectors(AngleObs, gsd, subsamp, nrows, ncols):
# sudipta changed above to support spatial subset
def CalcGroundVectors(AngleObs, gsd, subsamp, start_row, end_row, start_col, end_col, out_rows, out_cols):
GVecs = numpy.zeros((int(out_rows), int(out_cols), 3))
ul_x = AngleObs['ul_x']
ul_y = AngleObs['ul_y']
zone = AngleObs['zone']
if AngleObs['hemis'] == 'S':
zone *= -1
#for row in range(nrows):
# sudipta changed above to support spatial subset
for row in range(int(start_row), int(end_row)):
y = ul_y - float(row * gsd * subsamp) - gsd/2.0
#for col in range(ncols):
# sudipta changed above to support spatial subset
for col in range(start_col, end_col):
x = ul_x + float(col * gsd * subsamp) + gsd/2.0
(lat, lon) = utm_inv(zone, x, y)
Gx = GrndVec(lat, lon)
GVecs[row,col,0] = Gx[0]
GVecs[row,col,1] = Gx[1]
GVecs[row,col,2] = Gx[2]
return GVecs
def WriteHeader(Out_File, out_rows, out_cols, ul_x, ul_y, gsd, zone, n_or_s):
Hdr_File = Out_File + '.hdr'
if n_or_s == 'S':
hemis = 'South'
else:
hemis = 'North'
ofile = open(Hdr_File, 'w')
ofile.write('ENVI\n')
ofile.write('description = { S2 View Angle Band File }\n')
ofile.write('lines = %d\n' % out_rows)
ofile.write('samples = %d\n' % out_cols)
ofile.write('bands = 2\n')
ofile.write('header offset = 0\n')
ofile.write('file type = ENVI Standard\n')
ofile.write('data type = 2\n')
ofile.write('interleave = bsq\n')
ofile.write('byte order = 0\n')
ofile.write('x start = 0\n')
ofile.write('y start = 0\n')
ofile.write('map info = {UTM, 1.0, 1.0, %6.3lf, %6.3lf, %6.3lf, %6.3lf, %d, %s, WGS-84, units=Meters}\n' % (ul_x, ul_y, gsd, gsd, zone, hemis))
ofile.write('band names = {Azimuth, Zenith}\n')
ofile.close()
return Hdr_File
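# s2_sensor_angs() is the driver: it rebuilds the orbit from the metadata angle grids,
# then for the selected band scans each detector footprint, interpolates the observation
# time of every sub-sampled pixel, recomputes the satellite position and the per-pixel
# view zenith/azimuth (stored in hundredths of a degree), and finally resizes and writes
# the two angle rasters to GeoTIFF at va_path and vz_path.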
def s2_sensor_angs(XML_File, imgref, va_path, vz_path, gsd=[60, 10, 10, 10, 20, 20, 20, 10, 20, 60, 60, 20, 20], subsamp=10):
# # Sudipta spatial subset setting
sul_lat = sul_lon = slr_lat = slr_lon = None
# Load the angle observations from the metadata
(Tile_ID, AngleObs) = get_angleobs(XML_File)
Tile_Base = Tile_ID.split('.')
logging.info('Loaded view angles from metadata for tile: ', Tile_ID)
# Reconstruct the Orbit from the Angles
(Orbit, TimeParms) = Fit_Orbit(AngleObs)
Omega0 = asin(sin(Orbit[0]) / sin(Orbit[3]))
Orbit.append(Omega0)
Lon0 = Orbit[1] - asin(tan(Orbit[0]) / -tan(Orbit[3]))
Orbit.append(Lon0)
logging.info('Orbit processing complete')
# Load the detector footprints
BandFoot = get_detfootprint(XML_File)
logging.info('Loaded detector footprints from QI files')
# Loop through the bands using TimeParms which are in band order
for tparms in TimeParms:
band = tparms['band']
        if band == 3:
coeffs = tparms['tmodel']
# Set up the output array
out_rows = int(AngleObs['nrows'] * 60 / gsd[band] / subsamp)
out_cols = int(AngleObs['ncols'] * 60 / gsd[band] / subsamp)
if subsamp > 1:
out_rows += 1
out_cols += 1
#######################################################
## Sudipta addition to support spatial subset
######################################################
if (sul_lat is not None):
# Convert the spatial subset bbox lat, lon to UTM coords.
ul_sx,ul_sy,_,_ = from_latlon(sul_lat, sul_lon)
lr_sx,lr_sy,_,_ = from_latlon(slr_lat, slr_lon)
# now calculate the bbox row, col pairs
ulx = AngleObs['ul_x']
uly = AngleObs['ul_y']
ul_s_c = max(0,int((ul_sx - ulx)/gsd[band]/subsamp))
ul_s_r = max(0,int((uly - ul_sy)/gsd[band]/subsamp))
lr_s_c = min(out_cols,int((lr_sx - ulx)/gsd[band]/subsamp))
lr_s_r = min(out_rows,int((uly - lr_sy)/gsd[band]/subsamp))
else:
ul_s_r = 0
ul_s_c = 0
lr_s_r = out_rows
lr_s_c = out_cols
logging.info("ul_s_r = {}, ul_s_c = {}, lr_s_r = {}, lr_s_c = {}".format(ul_s_r, ul_s_c, lr_s_r, lr_s_c))
#sys.exit(0)
#######################################################
## Sudipta addition to support spatial subset
######################################################
#GVecs = CalcGroundVectors(AngleObs, gsd[band], subsamp, out_rows, out_cols)
# sudipta changed above to support spatial subset
GVecs = CalcGroundVectors(AngleObs, gsd[band], subsamp, ul_s_r, lr_s_r, ul_s_c, lr_s_c, out_rows, out_cols)
zenith = numpy.zeros((out_rows, out_cols))
azimuth = numpy.zeros((out_rows, out_cols))
detcount = numpy.matrix(numpy.zeros((out_rows, out_cols)))
# Find the detector footprints for this band
for foot in BandFoot:
if foot['bandId'] == band:
detId = foot['detId']
bandName = foot['bandName']
logging.info('Scanning band ', band, ' detector ', detId)
minloc = [foot['coords'][0][0], foot['coords'][0][1]]
maxloc = [foot['coords'][0][0], foot['coords'][0][1]]
for pointloc in foot['coords']:
if pointloc[0] < minloc[0]:
minloc[0] = pointloc[0]
if pointloc[0] > maxloc[0]:
maxloc[0] = pointloc[0]
if pointloc[1] < minloc[1]:
minloc[1] = pointloc[1]
if pointloc[1] > maxloc[1]:
maxloc[1] = pointloc[1]
segs = []
for index in range(len(foot['coords'])-1):
point0 = foot['coords'][index]
point1 = foot['coords'][index+1]
if point1[1] == point0[1]:
slope = 0.0
intercept = point0[0]
else:
slope = (point1[0] - point0[0]) / (point1[1] - point0[1])
intercept = point0[0] - slope * point0[1]
if point1[1] < point0[1]:
ymin = point1[1]
ymax = point0[1]
else:
ymin = point0[1]
ymax = point1[1]
segs.append({ 'y0' : point0[1], 'ymin' : ymin, 'ymax' : ymax, 'slope' : slope, 'intercept' : intercept })
# Scan the array
#for row in range(out_rows):
# sudipta changed above to support spatial subset
for row in range(ul_s_r, lr_s_r):
dy = float(row*gsd[band]*subsamp)
y = AngleObs['ul_y'] - dy - gsd[band]/2.0
if y < minloc[1] or y > maxloc[1]:
continue
xlist = []
for seg in segs:
if y == seg['y0'] or (y > seg['ymin'] and y < seg['ymax']):
x = seg['intercept'] + y * seg['slope']
xlist.append(x)
xlist.sort()
if len(xlist)%2 > 0:
logging.info('Invalid footprint intersection')
break
#for col in range(out_cols):
# sudipta changed above to support spatial subset
for col in range(ul_s_c, lr_s_c):
dx = float(col*gsd[band]*subsamp)
x = AngleObs['ul_x'] + dx + gsd[band]/2.0
if x < minloc[0] or x > maxloc[0]:
continue
# See if this point is inside the footprint
index = 0
while index < len(xlist):
if x >= xlist[index] and x < xlist[index+1]:
# It is
calctime = coeffs[detId][0] + coeffs[detId][1]*dx + coeffs[detId][2]*dy + coeffs[detId][3]*dx*dy
detcount[row,col] += 1
Px = CalcOrbit(calctime, Orbit)
Gx = [GVecs[row,col,0], GVecs[row,col,1], GVecs[row,col,2]]
Vx = [Px[0]-Gx[0], Px[1]-Gx[1], Px[2]-Gx[2]]
Vlen = Magnitude(Vx)
Vx = [Vx[0]/Vlen, Vx[1]/Vlen, Vx[2]/Vlen]
LSRz = [Gx[0]/a, Gx[1]/a, Gx[2]/b]
Vlen = sqrt(LSRz[0]*LSRz[0] + LSRz[1]*LSRz[1])
LSRx = [-LSRz[1]/Vlen, LSRz[0]/Vlen, 0.0]
LSRy = [LSRz[1]*LSRx[2]-LSRz[2]*LSRx[1], LSRz[2]*LSRx[0]-LSRz[0]*LSRx[2], LSRz[0]*LSRx[1]-LSRz[1]*LSRx[0]]
LSRVec = [Dot(Vx, LSRx), Dot(Vx, LSRy), Dot(Vx, LSRz)]
zenith[row,col] += round(acos(LSRVec[2]) * todeg * 100.0)
azimuth[row,col] += round(atan2(LSRVec[0], LSRVec[1]) * todeg * 100.0)
if detcount[row,col] > 1:
zenith[row,col] /= detcount[row,col]
azimuth[row,col] /= detcount[row,col]
index = len(xlist)
else:
index += 2
src_dataset = rasterio.open(imgref)
profile = src_dataset.profile
profile.update(nodata=-9999)
#Azimuth
azimuth = resize(azimuth,(profile['width'], profile['height']))
azimuth = (azimuth).astype(numpy.intc)
new_dataset = rasterio.open(
va_path,
'w',
driver='GTiff',
height=profile['height'],
width=profile['width'],
count=profile['count'],
dtype=numpy.intc,
crs=profile['crs'],
transform=profile['transform'],
nodata=profile['nodata'],
compress='deflate'
)
new_dataset.write(azimuth, 1)
new_dataset.close()
#Zenith
zenith = resize(zenith,(profile['width'], profile['height']))
zenith = (zenith).astype(numpy.intc)
new_dataset = rasterio.open(
vz_path,
'w',
driver='GTiff',
height=profile['height'],
width=profile['width'],
count=profile['count'],
dtype=numpy.intc,
crs=profile['crs'],
transform=profile['transform'],
nodata=profile['nodata'],
compress='deflate'
)
new_dataset.write(zenith, 1)
new_dataset.close()
return va_path, vz_path
|
[
"xml.etree.ElementTree.parse",
"numpy.matrix",
"rasterio.open",
"math.sqrt",
"math.atan2",
"math.radians",
"math.tan",
"os.path.dirname",
"numpy.zeros",
"numpy.transpose",
"math.sin",
"math.acos",
"logging.info",
"skimage.transform.resize",
"math.cos",
"numpy.array"
] |
[((901, 917), 'math.sqrt', 'math.sqrt', (['(1 - E)'], {}), '(1 - E)\n', (910, 917), False, 'import math\n'), ((1720, 1742), 'math.radians', 'math.radians', (['latitude'], {}), '(latitude)\n', (1732, 1742), False, 'import math\n'), ((1757, 1774), 'math.sin', 'math.sin', (['lat_rad'], {}), '(lat_rad)\n', (1765, 1774), False, 'import math\n'), ((1789, 1806), 'math.cos', 'math.cos', (['lat_rad'], {}), '(lat_rad)\n', (1797, 1806), False, 'import math\n'), ((2126, 2149), 'math.radians', 'math.radians', (['longitude'], {}), '(longitude)\n', (2138, 2149), False, 'import math\n'), ((2236, 2261), 'math.radians', 'math.radians', (['central_lon'], {}), '(central_lon)\n', (2248, 2261), False, 'import math\n'), ((6452, 6461), 'math.sin', 'sin', (['Phi1'], {}), '(Phi1)\n', (6455, 6461), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((6477, 6486), 'math.cos', 'cos', (['Phi1'], {}), '(Phi1)\n', (6480, 6486), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((7149, 7167), 'xml.etree.ElementTree.parse', 'ET.parse', (['XML_File'], {}), '(XML_File)\n', (7157, 7167), True, 'import xml.etree.ElementTree as ET\n'), ((10390, 10415), 'os.path.dirname', 'os.path.dirname', (['XML_File'], {}), '(XML_File)\n', (10405, 10415), False, 'import os\n'), ((10481, 10499), 'xml.etree.ElementTree.parse', 'ET.parse', (['XML_File'], {}), '(XML_File)\n', (10489, 10499), True, 'import xml.etree.ElementTree as ET\n'), ((14029, 14074), 'math.sqrt', 'sqrt', (['(A[0] * A[0] + A[1] * A[1] + A[2] * A[2])'], {}), '(A[0] * A[0] + A[1] * A[1] + A[2] * A[2])\n', (14033, 14074), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((14742, 14761), 'numpy.zeros', 'numpy.zeros', (['(3, 4)'], {}), '((3, 4))\n', (14753, 14761), False, 'import numpy\n'), ((20207, 20260), 'logging.info', 'logging.info', (['"""Reconstructing Orbit from View Angles"""'], {}), "('Reconstructing Orbit from View Angles')\n", (20219, 20260), False, 'import logging\n'), ((22371, 22414), 'logging.info', 'logging.info', (['"""Lat = """', '(Orbit[0] * todeg)'], {}), "('Lat = ', Orbit[0] * todeg)\n", (22383, 22414), False, 'import logging\n'), ((22417, 22460), 'logging.info', 'logging.info', (['"""Lon = """', '(Orbit[1] * todeg)'], {}), "('Lon = ', Orbit[1] * todeg)\n", (22429, 22460), False, 'import logging\n'), ((22463, 22498), 'logging.info', 'logging.info', (['"""Radius = """', 'Orbit[2]'], {}), "('Radius = ', Orbit[2])\n", (22475, 22498), False, 'import logging\n'), ((22503, 22546), 'logging.info', 'logging.info', (['"""Incl = """', '(Orbit[3] * todeg)'], {}), "('Incl = ', Orbit[3] * todeg)\n", (22515, 22546), False, 'import logging\n'), ((22549, 22597), 'logging.info', 'logging.info', (['"""RMS Orbit Fit (meters): """', 'orbrss'], {}), "('RMS Orbit Fit (meters): ', orbrss)\n", (22561, 22597), False, 'import logging\n'), ((22602, 22651), 'logging.info', 'logging.info', (['"""RMS Time Fit (seconds): """', 'rmstime'], {}), "('RMS Time Fit (seconds): ', rmstime)\n", (22614, 22651), False, 'import logging\n'), ((22656, 22700), 'logging.info', 'logging.info', (['"""RMS LOS Residual: """', 'AngResid'], {}), "('RMS LOS Residual: ', AngResid)\n", (22668, 22700), False, 'import logging\n'), ((22706, 22752), 'logging.info', 'logging.info', (['"""Fitting Tile Observation Times"""'], {}), "('Fitting Tile Observation Times')\n", (22718, 22752), False, 'import logging\n'), ((25432, 25500), 'logging.info', 'logging.info', (['"""Loaded view angles from metadata for tile: """', 'Tile_ID'], {}), 
"('Loaded view angles from metadata for tile: ', Tile_ID)\n", (25444, 25500), False, 'import logging\n'), ((25752, 25793), 'logging.info', 'logging.info', (['"""Orbit processing complete"""'], {}), "('Orbit processing complete')\n", (25764, 25793), False, 'import logging\n'), ((25876, 25932), 'logging.info', 'logging.info', (['"""Loaded detector footprints from QI files"""'], {}), "('Loaded detector footprints from QI files')\n", (25888, 25932), False, 'import logging\n'), ((2275, 2306), 'math.sqrt', 'math.sqrt', (['(1 - E * lat_sin ** 2)'], {}), '(1 - E * lat_sin ** 2)\n', (2284, 2306), False, 'import math\n'), ((4374, 4382), 'math.cos', 'cos', (['Lon'], {}), '(Lon)\n', (4377, 4382), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4441, 4449), 'math.cos', 'cos', (['Lat'], {}), '(Lat)\n', (4444, 4449), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4501, 4509), 'math.sin', 'sin', (['Lat'], {}), '(Lat)\n', (4504, 4509), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4558, 4566), 'math.cos', 'cos', (['Zen'], {}), '(Zen)\n', (4561, 4566), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((6503, 6532), 'math.sqrt', 'sqrt', (['(1.0 - ecc * slat * slat)'], {}), '(1.0 - ecc * slat * slat)\n', (6507, 6532), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((11201, 11218), 'xml.etree.ElementTree.parse', 'ET.parse', (['foot[1]'], {}), '(foot[1])\n', (11209, 11218), True, 'import xml.etree.ElementTree as ET\n'), ((19672, 19725), 'math.sqrt', 'sqrt', (['(Orbit[2] * Orbit[2] + Vdot * Vdot - Gmag * Gmag)'], {}), '(Orbit[2] * Orbit[2] + Vdot * Vdot - Gmag * Gmag)\n', (19676, 19725), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((19814, 19833), 'math.atan2', 'atan2', (['Px[1]', 'Px[0]'], {}), '(Px[1], Px[0])\n', (19819, 19833), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((22063, 22086), 'math.sqrt', 'sqrt', (['(AngResid / numobs)'], {}), '(AngResid / numobs)\n', (22067, 22086), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((22136, 22158), 'math.sqrt', 'sqrt', (['(rmstime / numobs)'], {}), '(rmstime / numobs)\n', (22140, 22158), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((22290, 22385), 'math.sqrt', 'sqrt', (['(X0[0, 0] * X0[0, 0] + X0[1, 0] * X0[1, 0] + X0[2, 0] * X0[2, 0] + X0[3, 0] *\n X0[3, 0])'], {}), '(X0[0, 0] * X0[0, 0] + X0[1, 0] * X0[1, 0] + X0[2, 0] * X0[2, 0] + X0[3,\n 0] * X0[3, 0])\n', (22294, 22385), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4364, 4372), 'math.sin', 'sin', (['Lon'], {}), '(Lon)\n', (4367, 4372), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4411, 4419), 'math.cos', 'cos', (['Lon'], {}), '(Lon)\n', (4414, 4419), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4431, 4439), 'math.sin', 'sin', (['Lon'], {}), '(Lon)\n', (4434, 4439), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4463, 4471), 'math.cos', 'cos', (['Lat'], {}), '(Lat)\n', (4466, 4471), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4472, 4480), 'math.cos', 'cos', (['Lon'], {}), '(Lon)\n', (4475, 4480), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4482, 4490), 'math.cos', 'cos', (['Lat'], {}), '(Lat)\n', (4485, 
4490), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4491, 4499), 'math.sin', 'sin', (['Lon'], {}), '(Lon)\n', (4494, 4499), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4522, 4530), 'math.sin', 'sin', (['Zen'], {}), '(Zen)\n', (4525, 4530), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4531, 4538), 'math.sin', 'sin', (['Az'], {}), '(Az)\n', (4534, 4538), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4540, 4548), 'math.sin', 'sin', (['Zen'], {}), '(Zen)\n', (4543, 4548), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4549, 4556), 'math.cos', 'cos', (['Az'], {}), '(Az)\n', (4552, 4556), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4823, 4831), 'math.cos', 'cos', (['Lon'], {}), '(Lon)\n', (4826, 4831), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4857, 4865), 'math.sin', 'sin', (['Lon'], {}), '(Lon)\n', (4860, 4865), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4890, 4898), 'math.sin', 'sin', (['Lat'], {}), '(Lat)\n', (4893, 4898), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((5015, 5023), 'math.cos', 'cos', (['Lon'], {}), '(Lon)\n', (5018, 5023), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((5049, 5057), 'math.sin', 'sin', (['Lon'], {}), '(Lon)\n', (5052, 5057), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((5082, 5090), 'math.sin', 'sin', (['Lat'], {}), '(Lat)\n', (5085, 5090), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((6172, 6185), 'math.sqrt', 'sqrt', (['(1 - ecc)'], {}), '(1 - ecc)\n', (6176, 6185), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((6190, 6205), 'math.sqrt', 'sqrt', (['(1.0 - ecc)'], {}), '(1.0 - ecc)\n', (6194, 6205), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((14309, 14317), 'math.sin', 'sin', (['cta'], {}), '(cta)\n', (14312, 14317), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((14320, 14333), 'math.sin', 'sin', (['Orbit[3]'], {}), '(Orbit[3])\n', (14323, 14333), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((14443, 14453), 'math.cos', 'cos', (['gclon'], {}), '(gclon)\n', (14446, 14453), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((14498, 14508), 'math.sin', 'sin', (['gclon'], {}), '(gclon)\n', (14501, 14508), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((14540, 14550), 'math.sin', 'sin', (['gclat'], {}), '(gclat)\n', (14543, 14550), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((14779, 14792), 'math.sin', 'sin', (['Orbit[0]'], {}), '(Orbit[0])\n', (14782, 14792), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((14795, 14808), 'math.sin', 'sin', (['Orbit[3]'], {}), '(Orbit[3])\n', (14798, 14808), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((15531, 15544), 'math.sin', 'sin', (['Orbit[0]'], {}), '(Orbit[0])\n', (15534, 15544), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((15547, 15560), 'math.sin', 'sin', (['Orbit[3]'], {}), '(Orbit[3])\n', (15550, 15560), False, 'from math import sqrt, cos, sin, tan, pi, asin, 
acos, atan, atan2\n'), ((18701, 18722), 'math.sqrt', 'sqrt', (['(rmsfit / numobs)'], {}), '(rmsfit / numobs)\n', (18705, 18722), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((18735, 18806), 'logging.info', 'logging.info', (['"""Time fit for band """', 'band', '""" RMS = """', 'rmsfit', '""" seconds"""'], {}), "('Time fit for band ', band, ' RMS = ', rmsfit, ' seconds')\n", (18747, 18806), False, 'import logging\n'), ((20474, 20493), 'numpy.zeros', 'numpy.zeros', (['(4, 4)'], {}), '((4, 4))\n', (20485, 20493), False, 'import numpy\n'), ((20521, 20540), 'numpy.zeros', 'numpy.zeros', (['(4, 1)'], {}), '((4, 1))\n', (20532, 20540), False, 'import numpy\n'), ((20568, 20587), 'numpy.zeros', 'numpy.zeros', (['(4, 1)'], {}), '((4, 1))\n', (20579, 20587), False, 'import numpy\n'), ((20615, 20634), 'numpy.zeros', 'numpy.zeros', (['(4, 1)'], {}), '((4, 1))\n', (20626, 20634), False, 'import numpy\n'), ((21012, 21028), 'numpy.matrix', 'numpy.matrix', (['P0'], {}), '(P0)\n', (21024, 21028), False, 'import numpy\n'), ((22918, 22926), 'math.sin', 'sin', (['cta'], {}), '(cta)\n', (22921, 22926), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((22929, 22942), 'math.sin', 'sin', (['Orbit[3]'], {}), '(Orbit[3])\n', (22932, 22942), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((23050, 23060), 'math.cos', 'cos', (['gclon'], {}), '(gclon)\n', (23053, 23060), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((23082, 23092), 'math.sin', 'sin', (['gclon'], {}), '(gclon)\n', (23085, 23092), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((23103, 23113), 'math.sin', 'sin', (['gclat'], {}), '(gclat)\n', (23106, 23113), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((25609, 25622), 'math.sin', 'sin', (['Orbit[0]'], {}), '(Orbit[0])\n', (25612, 25622), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((25625, 25638), 'math.sin', 'sin', (['Orbit[3]'], {}), '(Orbit[3])\n', (25628, 25638), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((28022, 28055), 'numpy.zeros', 'numpy.zeros', (['(out_rows, out_cols)'], {}), '((out_rows, out_cols))\n', (28033, 28055), False, 'import numpy\n'), ((28078, 28111), 'numpy.zeros', 'numpy.zeros', (['(out_rows, out_cols)'], {}), '((out_rows, out_cols))\n', (28089, 28111), False, 'import numpy\n'), ((33259, 33280), 'rasterio.open', 'rasterio.open', (['imgref'], {}), '(imgref)\n', (33272, 33280), False, 'import rasterio\n'), ((33409, 33463), 'skimage.transform.resize', 'resize', (['azimuth', "(profile['width'], profile['height'])"], {}), "(azimuth, (profile['width'], profile['height']))\n", (33415, 33463), False, 'from skimage.transform import resize\n'), ((33541, 33789), 'rasterio.open', 'rasterio.open', (['va_path', '"""w"""'], {'driver': '"""GTiff"""', 'height': "profile['height']", 'width': "profile['width']", 'count': "profile['count']", 'dtype': 'numpy.intc', 'crs': "profile['crs']", 'transform': "profile['transform']", 'nodata': "profile['nodata']", 'compress': '"""deflate"""'}), "(va_path, 'w', driver='GTiff', height=profile['height'], width\n =profile['width'], count=profile['count'], dtype=numpy.intc, crs=\n profile['crs'], transform=profile['transform'], nodata=profile['nodata'\n ], compress='deflate')\n", (33554, 33789), False, 'import rasterio\n'), ((34081, 34134), 'skimage.transform.resize', 'resize', (['zenith', 
"(profile['width'], profile['height'])"], {}), "(zenith, (profile['width'], profile['height']))\n", (34087, 34134), False, 'from skimage.transform import resize\n'), ((34210, 34458), 'rasterio.open', 'rasterio.open', (['vz_path', '"""w"""'], {'driver': '"""GTiff"""', 'height': "profile['height']", 'width': "profile['width']", 'count': "profile['count']", 'dtype': 'numpy.intc', 'crs': "profile['crs']", 'transform': "profile['transform']", 'nodata': "profile['nodata']", 'compress': '"""deflate"""'}), "(vz_path, 'w', driver='GTiff', height=profile['height'], width\n =profile['width'], count=profile['count'], dtype=numpy.intc, crs=\n profile['crs'], transform=profile['transform'], nodata=profile['nodata'\n ], compress='deflate')\n", (34223, 34458), False, 'import rasterio\n'), ((2588, 2609), 'math.sin', 'math.sin', (['(6 * lat_rad)'], {}), '(6 * lat_rad)\n', (2596, 2609), False, 'import math\n'), ((4402, 4410), 'math.sin', 'sin', (['Lat'], {}), '(Lat)\n', (4405, 4410), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4422, 4430), 'math.sin', 'sin', (['Lat'], {}), '(Lat)\n', (4425, 4430), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4814, 4822), 'math.cos', 'cos', (['Lat'], {}), '(Lat)\n', (4817, 4822), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4848, 4856), 'math.cos', 'cos', (['Lat'], {}), '(Lat)\n', (4851, 4856), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((5006, 5014), 'math.cos', 'cos', (['Lat'], {}), '(Lat)\n', (5009, 5014), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((5040, 5048), 'math.cos', 'cos', (['Lat'], {}), '(Lat)\n', (5043, 5048), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((6037, 6057), 'math.sin', 'sin', (['(6.0 * LatOrigin)'], {}), '(6.0 * LatOrigin)\n', (6040, 6057), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((6424, 6437), 'math.sin', 'sin', (['(8.0 * Mu)'], {}), '(8.0 * Mu)\n', (6427, 6437), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((14430, 14440), 'math.cos', 'cos', (['gclat'], {}), '(gclat)\n', (14433, 14440), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((14485, 14495), 'math.cos', 'cos', (['gclat'], {}), '(gclat)\n', (14488, 14495), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((14837, 14850), 'math.tan', 'tan', (['Orbit[0]'], {}), '(Orbit[0])\n', (14840, 14850), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((15121, 15133), 'math.sin', 'sin', (['POrb[0]'], {}), '(POrb[0])\n', (15124, 15133), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((15136, 15148), 'math.sin', 'sin', (['POrb[3]'], {}), '(POrb[3])\n', (15139, 15148), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((15589, 15602), 'math.tan', 'tan', (['Orbit[0]'], {}), '(Orbit[0])\n', (15592, 15602), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((16083, 16102), 'numpy.zeros', 'numpy.zeros', (['(4, 4)'], {}), '((4, 4))\n', (16094, 16102), False, 'import numpy\n'), ((16134, 16153), 'numpy.zeros', 'numpy.zeros', (['(4, 1)'], {}), '((4, 1))\n', (16145, 16153), False, 'import numpy\n'), ((16185, 16204), 'numpy.zeros', 'numpy.zeros', (['(4, 1)'], {}), '((4, 1))\n', (16196, 16204), False, 'import numpy\n'), ((19867, 19902), 'math.sqrt', 'sqrt', 
(['(Px[0] * Px[0] + Px[1] * Px[1])'], {}), '(Px[0] * Px[0] + Px[1] * Px[1])\n', (19871, 19902), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((20354, 20367), 'math.sin', 'sin', (['Orbit[0]'], {}), '(Orbit[0])\n', (20357, 20367), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((20370, 20383), 'math.sin', 'sin', (['Orbit[3]'], {}), '(Orbit[3])\n', (20373, 20383), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((20974, 20993), 'numpy.transpose', 'numpy.transpose', (['P0'], {}), '(P0)\n', (20989, 20993), False, 'import numpy\n'), ((21515, 21534), 'numpy.zeros', 'numpy.zeros', (['(4, 1)'], {}), '((4, 1))\n', (21526, 21534), False, 'import numpy\n'), ((23039, 23049), 'math.cos', 'cos', (['gclat'], {}), '(gclat)\n', (23042, 23049), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((23071, 23081), 'math.cos', 'cos', (['gclat'], {}), '(gclat)\n', (23074, 23081), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((25692, 25705), 'math.tan', 'tan', (['Orbit[0]'], {}), '(Orbit[0])\n', (25695, 25705), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((28148, 28181), 'numpy.zeros', 'numpy.zeros', (['(out_rows, out_cols)'], {}), '((out_rows, out_cols))\n', (28159, 28181), False, 'import numpy\n'), ((2546, 2567), 'math.sin', 'math.sin', (['(4 * lat_rad)'], {}), '(4 * lat_rad)\n', (2554, 2567), False, 'import math\n'), ((4791, 4799), 'math.sin', 'sin', (['Lat'], {}), '(Lat)\n', (4794, 4799), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4983, 4991), 'math.sin', 'sin', (['Lat'], {}), '(Lat)\n', (4986, 4991), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((5978, 5998), 'math.sin', 'sin', (['(4.0 * LatOrigin)'], {}), '(4.0 * LatOrigin)\n', (5981, 5998), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((6367, 6380), 'math.sin', 'sin', (['(6.0 * Mu)'], {}), '(6.0 * Mu)\n', (6370, 6380), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((14359, 14369), 'math.tan', 'tan', (['gclat'], {}), '(gclat)\n', (14362, 14369), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((14854, 14867), 'math.tan', 'tan', (['Orbit[3]'], {}), '(Orbit[3])\n', (14857, 14867), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((15180, 15192), 'math.tan', 'tan', (['POrb[0]'], {}), '(POrb[0])\n', (15183, 15192), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((15606, 15619), 'math.tan', 'tan', (['Orbit[3]'], {}), '(Orbit[3])\n', (15609, 15619), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((20416, 20429), 'math.tan', 'tan', (['Orbit[0]'], {}), '(Orbit[0])\n', (20419, 20429), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((22972, 22982), 'math.tan', 'tan', (['gclat'], {}), '(gclat)\n', (22975, 22982), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((25709, 25722), 'math.tan', 'tan', (['Orbit[3]'], {}), '(Orbit[3])\n', (25712, 25722), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((28427, 28484), 'logging.info', 'logging.info', (['"""Scanning band """', 'band', '""" detector """', 'detId'], {}), "('Scanning band ', band, ' detector ', detId)\n", (28439, 28484), False, 'import logging\n'), ((2504, 2525), 
'math.sin', 'math.sin', (['(2 * lat_rad)'], {}), '(2 * lat_rad)\n', (2512, 2525), False, 'import math\n'), ((4782, 4790), 'math.sin', 'sin', (['Lat'], {}), '(Lat)\n', (4785, 4790), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((4974, 4982), 'math.sin', 'sin', (['Lat'], {}), '(Lat)\n', (4977, 4982), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((5906, 5926), 'math.sin', 'sin', (['(2.0 * LatOrigin)'], {}), '(2.0 * LatOrigin)\n', (5909, 5926), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((6249, 6262), 'math.sin', 'sin', (['(2.0 * Mu)'], {}), '(2.0 * Mu)\n', (6252, 6262), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((6315, 6328), 'math.sin', 'sin', (['(4.0 * Mu)'], {}), '(4.0 * Mu)\n', (6318, 6328), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((14373, 14386), 'math.tan', 'tan', (['Orbit[3]'], {}), '(Orbit[3])\n', (14376, 14386), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((15196, 15208), 'math.tan', 'tan', (['POrb[3]'], {}), '(POrb[3])\n', (15199, 15208), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((20433, 20446), 'math.tan', 'tan', (['Orbit[3]'], {}), '(Orbit[3])\n', (20436, 20446), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((20798, 20813), 'numpy.array', 'numpy.array', (['Vx'], {}), '(Vx)\n', (20809, 20813), False, 'import numpy\n'), ((22986, 22999), 'math.tan', 'tan', (['Orbit[3]'], {}), '(Orbit[3])\n', (22989, 22999), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((21166, 21181), 'numpy.array', 'numpy.array', (['P1'], {}), '(P1)\n', (21177, 21181), False, 'import numpy\n'), ((30848, 30894), 'logging.info', 'logging.info', (['"""Invalid footprint intersection"""'], {}), "('Invalid footprint intersection')\n", (30860, 30894), False, 'import logging\n'), ((32321, 32364), 'math.sqrt', 'sqrt', (['(LSRz[0] * LSRz[0] + LSRz[1] * LSRz[1])'], {}), '(LSRz[0] * LSRz[0] + LSRz[1] * LSRz[1])\n', (32325, 32364), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((32734, 32749), 'math.acos', 'acos', (['LSRVec[2]'], {}), '(LSRVec[2])\n', (32738, 32749), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n'), ((32830, 32857), 'math.atan2', 'atan2', (['LSRVec[0]', 'LSRVec[1]'], {}), '(LSRVec[0], LSRVec[1])\n', (32835, 32857), False, 'from math import sqrt, cos, sin, tan, pi, asin, acos, atan, atan2\n')]
|
import torch
from typing import Optional, List
from PIL import Image
from torch import Tensor
import torchvision as tv
import cv2
import json
import os
import numpy as np
MAX_DIM = 299
def read_json(file_name):
with open(file_name) as handle:
out = json.load(handle)
return out
def nested_tensor_from_tensor_list(tensor_list: List[Tensor], img_width, img_height):
# TODO make this more general
if tensor_list[0].ndim == 3:
# TODO make it support different-sized images
max_size = [1, img_height, img_width]
# min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
batch_shape = [len(tensor_list)] + max_size
b, c, h, w = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
for img, pad_img, m in zip(tensor_list, tensor, mask):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
m[: img.shape[1], :img.shape[2]] = False
else:
raise ValueError('not supported')
return NestedTensor(tensor, mask)
class NestedTensor(object):
def __init__(self, tensors, mask: Optional[Tensor]):
self.tensors = tensors
self.mask = mask
def to(self, device):
# type: (Device) -> NestedTensor # noqa
cast_tensor = self.tensors.to(device)
mask = self.mask
if mask is not None:
assert mask is not None
cast_mask = mask.to(device)
else:
cast_mask = None
return NestedTensor(cast_tensor, cast_mask)
def decompose(self):
return self.tensors, self.mask
def __repr__(self):
return str(self.tensors)
val_transform = tv.transforms.Compose([
tv.transforms.Resize(299),
tv.transforms.ToTensor(),
tv.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
def padding_image(image_path, config):
with Image.open(os.path.join(image_path)) as img:
img = img.convert('RGB')
width, height = img.size
# Resize image
n_width, n_height = int(width / 2), int(height / 2)
resized_img = img.resize((n_width, n_height), Image.ANTIALIAS)
padding_img = Image.new('RGB', (config.max_img_w, config.max_img_h), (0, 0, 0))
padding_img.paste(resized_img)
return padding_img
def padding_image_v2(img, expected_size):
img = img.convert('L')
original_w, original_h = img.size
expected_w, expected_h = expected_size
ratio_w, ratio_h = expected_w / original_w, expected_h / original_h
if ratio_w < ratio_h:
new_w, new_h = expected_w, original_h * ratio_w
else:
new_w, new_h = original_w * ratio_h, expected_h
img = img.resize((int(new_w), int(new_h)), Image.ANTIALIAS)
padding_img = Image.new('RGB', expected_size, (0, 0, 0))
padding_img.paste(img)
return padding_img
def resize_filling(image, new_size, color=None):
n_width, n_height = new_size
height, width = image.shape[:2]
ratio_w = n_width / width
ratio_h = n_height / height
ratio = min(ratio_h, ratio_w)
image = cv2.resize(image, (0, 0), fx=ratio, fy=ratio, interpolation=cv2.INTER_AREA)
height, width = image.shape[:2]
blank_image = np.zeros((n_height, n_width, 3), np.uint8)
if color is None:
color = bincount_app(image)
lower = np.array([color[0] - 20, color[1] - 20, color[2] - 20])
upper = np.array([color[0] + 20, color[1] + 20, color[2] + 20])
mask = cv2.inRange(image, lower, upper)
masked_image = np.copy(image)
masked_image[mask != 0] = color
# img_bw = 255 * (cv2.cvtColor(masked_image, cv2.COLOR_BGR2GRAY) > 10).astype('uint8')
#
# se1 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
# se2 = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
# mask = cv2.morphologyEx(img_bw, cv2.MORPH_CLOSE, se1)
# mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, se2)
#
# mask = np.dstack([mask, mask, mask]) / 255
# out = masked_image * mask
#
blank_image[:] = color
x_offset, y_offset = int((n_width - width) / 2), int((n_height - height) / 2)
# Here, y_offset+height <= blank_image.shape[0] and x_offset+width <= blank_image.shape[1]
blank_image[y_offset:y_offset + height, x_offset:x_offset + width] = masked_image.copy()
# plt.figure()
# plt.imshow(blank_image)
#
# plt.axis('off')
# plt.ioff()
# # plt.pause(0.05)
# # plt.clf()
# plt.show()
return blank_image
def bincount_app(a):
image_to_array = np.array(a)
a2D = image_to_array.reshape(-1, image_to_array.shape[-1])
col_range = (256, 256, 256) # generically : a2D.max(0)+1
a1D = np.ravel_multi_index(a2D.T, col_range)
return np.unravel_index(np.bincount(a1D).argmax(), col_range)
class AddGaussianNoise(object):
def __init__(self, mean=0., std=1.):
self.std = std
self.mean = mean
def __call__(self, tensor):
return tensor + torch.randn(tensor.size()) * self.std + self.mean
def __repr__(self):
return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
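# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original source): a minimal, hedged
# check of nested_tensor_from_tensor_list and resize_filling. The tensor sizes
# and the dummy image below are arbitrary assumptions chosen to exercise the
# helpers, not values taken from the original project.
if __name__ == '__main__':
    # nested_tensor_from_tensor_list pads into a (batch, 1, img_height, img_width)
    # buffer, so it expects single-channel (1, H, W) tensors.
    feats = [torch.rand(1, 240, 320), torch.rand(1, 200, 300)]
    nested = nested_tensor_from_tensor_list(feats, img_width=320, img_height=240)
    print(nested.tensors.shape, nested.mask.shape)   # (2, 1, 240, 320), (2, 240, 320)
    # resize_filling letterboxes a numpy image into the requested (width, height),
    # filling the border with the dominant colour found by bincount_app.
    dummy_img = (np.random.rand(120, 160, 3) * 255).astype(np.uint8)
    padded = resize_filling(dummy_img, (320, 240))
    print(padded.shape)                              # (240, 320, 3)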
|
[
"torch.ones",
"PIL.Image.new",
"json.load",
"os.path.join",
"numpy.copy",
"numpy.zeros",
"numpy.bincount",
"torchvision.transforms.ToTensor",
"numpy.array",
"numpy.ravel_multi_index",
"torch.zeros",
"torchvision.transforms.Normalize",
"cv2.inRange",
"cv2.resize",
"torchvision.transforms.Resize"
] |
[((2927, 2969), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'expected_size', '(0, 0, 0)'], {}), "('RGB', expected_size, (0, 0, 0))\n", (2936, 2969), False, 'from PIL import Image\n'), ((3248, 3323), 'cv2.resize', 'cv2.resize', (['image', '(0, 0)'], {'fx': 'ratio', 'fy': 'ratio', 'interpolation': 'cv2.INTER_AREA'}), '(image, (0, 0), fx=ratio, fy=ratio, interpolation=cv2.INTER_AREA)\n', (3258, 3323), False, 'import cv2\n'), ((3378, 3420), 'numpy.zeros', 'np.zeros', (['(n_height, n_width, 3)', 'np.uint8'], {}), '((n_height, n_width, 3), np.uint8)\n', (3386, 3420), True, 'import numpy as np\n'), ((3491, 3546), 'numpy.array', 'np.array', (['[color[0] - 20, color[1] - 20, color[2] - 20]'], {}), '([color[0] - 20, color[1] - 20, color[2] - 20])\n', (3499, 3546), True, 'import numpy as np\n'), ((3559, 3614), 'numpy.array', 'np.array', (['[color[0] + 20, color[1] + 20, color[2] + 20]'], {}), '([color[0] + 20, color[1] + 20, color[2] + 20])\n', (3567, 3614), True, 'import numpy as np\n'), ((3626, 3658), 'cv2.inRange', 'cv2.inRange', (['image', 'lower', 'upper'], {}), '(image, lower, upper)\n', (3637, 3658), False, 'import cv2\n'), ((3678, 3692), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (3685, 3692), True, 'import numpy as np\n'), ((4679, 4690), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (4687, 4690), True, 'import numpy as np\n'), ((4825, 4863), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['a2D.T', 'col_range'], {}), '(a2D.T, col_range)\n', (4845, 4863), True, 'import numpy as np\n'), ((264, 281), 'json.load', 'json.load', (['handle'], {}), '(handle)\n', (273, 281), False, 'import json\n'), ((815, 867), 'torch.zeros', 'torch.zeros', (['batch_shape'], {'dtype': 'dtype', 'device': 'device'}), '(batch_shape, dtype=dtype, device=device)\n', (826, 867), False, 'import torch\n'), ((883, 937), 'torch.ones', 'torch.ones', (['(b, h, w)'], {'dtype': 'torch.bool', 'device': 'device'}), '((b, h, w), dtype=torch.bool, device=device)\n', (893, 937), False, 'import torch\n'), ((1881, 1906), 'torchvision.transforms.Resize', 'tv.transforms.Resize', (['(299)'], {}), '(299)\n', (1901, 1906), True, 'import torchvision as tv\n'), ((1912, 1936), 'torchvision.transforms.ToTensor', 'tv.transforms.ToTensor', ([], {}), '()\n', (1934, 1936), True, 'import torchvision as tv\n'), ((1942, 1999), 'torchvision.transforms.Normalize', 'tv.transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (1965, 1999), True, 'import torchvision as tv\n'), ((2341, 2406), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(config.max_img_w, config.max_img_h)', '(0, 0, 0)'], {}), "('RGB', (config.max_img_w, config.max_img_h), (0, 0, 0))\n", (2350, 2406), False, 'from PIL import Image\n'), ((2065, 2089), 'os.path.join', 'os.path.join', (['image_path'], {}), '(image_path)\n', (2077, 2089), False, 'import os\n'), ((4892, 4908), 'numpy.bincount', 'np.bincount', (['a1D'], {}), '(a1D)\n', (4903, 4908), True, 'import numpy as np\n')]
|
"""
Created on Wed Jun 17 14:01:23 2020
Calculate graph properties
@author: Jyotika.bahuguna
"""
import os
import glob
import numpy as np
import pylab as pl
import scipy.io as sio
from copy import copy, deepcopy
import pickle
import matplotlib.cm as cm
import pdb
import h5py
import pandas as pd
import bct
from collections import Counter
import seaborn as sns
import scipy.spatial.distance as sp_sp_dist
from itertools import product
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
import scipy.cluster.hierarchy as shc
import sklearn as skl
from sklearn.cluster import KMeans
gammas = np.round(np.arange(0.0,1.5,0.17),2)
def calc_modularity(mat,weighted=True,undirected=True):
gammas = np.arange(0.0,1.5,0.17)
num_mods_list = []
modularity_index_list = []
ci_list = [] # community vector list
for g in gammas:
if undirected == True:
mod = bct.modularity_louvain_und_sign(mat,gamma=g)
else:
mod = bct.modularity.modularity_louvain_dir(mat,gamma=g)
num_mods = [Counter(mod[0])[x] for x in Counter(mod[0]).keys() if Counter(mod[0])[x] > 1 ]
ind_mods = [np.where(mod[0]==x)[0] for x in Counter(mod[0]).keys() if Counter(mod[0])[x] > 1 ]
modularity_index = mod[1]
num_mods_list.append(num_mods)
modularity_index_list.append(modularity_index)
ci_list.append(mod[0])
return gammas, num_mods_list, modularity_index_list,ci_list
def calc_local_assortativity_sign(mat):
loc_pos, loc_neg = bct.local_assortativity_wu_sign(mat)
return loc_pos, loc_neg
def calc_module_degree_zscore(mat,ci,undirected=True,median=True):
if undirected == True:
zscore = bct.centrality.module_degree_zscore(mat,ci,0) # undirected
else:
zscore = bct.centrality.module_degree_zscore(mat,ci,3) # directed graph in and out degree
if median == True:
return np.median(zscore)
else:
return zscore
# Participation coefficient is a measure of diversity of intermodular connections of individual nodes
# Ppos (Nx1 numpy.ndarray) – participation coefficient from positive weights
# Pneg (Nx1 numpy.ndarray) – participation coefficient from negative weights
def calc_participation_coef_sign(mat,ci_list,median=True,undirected=True):
if undirected == True:
med_participation_pos = []
med_participation_neg = []
for ci in ci_list:
part = bct.participation_coef_sign(mat,ci)
if median == True:
med_participation_pos.append(np.median(part[0]))
med_participation_neg.append(np.median(part[1]))
else:
med_participation_pos.append(part[0])
med_participation_neg.append(part[1])
return med_participation_pos, med_participation_neg
else:
part = bct.centrality.participation_coef(mat,ci_list,degree='out')
return part
def get_re_arranged_matrix(label_comms,orig_mat):
re_arranged_mat = np.copy(orig_mat)
idx = np.argsort(label_comms)
re_arranged_mat = re_arranged_mat[idx,:]
re_arranged_mat = re_arranged_mat[:,idx]
return re_arranged_mat
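# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original source): drives the helpers
# above on a small synthetic signed, symmetric connectivity matrix. The matrix
# size, random seed and parameter choices are arbitrary assumptions made only
# for illustration.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    w = rng.uniform(-1.0, 1.0, size=(20, 20))
    w = (w + w.T) / 2.0            # undirected graph: symmetric weights
    np.fill_diagonal(w, 0.0)       # no self-connections
    gamma_vals, num_mods_list, q_list, ci_list = calc_modularity(w)
    print("modularity index per gamma:", np.round(q_list, 3))
    # participation coefficients for the community partitions found above
    p_pos, p_neg = calc_participation_coef_sign(w, ci_list)
    print("median positive participation per gamma:", np.round(p_pos, 3))
    # re-arrange the matrix according to the communities found at the first gamma
    w_sorted = get_re_arranged_matrix(ci_list[0], w)
    print(w_sorted.shape)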
|
[
"bct.centrality.module_degree_zscore",
"numpy.copy",
"numpy.median",
"bct.modularity_louvain_und_sign",
"bct.participation_coef_sign",
"bct.centrality.participation_coef",
"numpy.argsort",
"numpy.where",
"numpy.arange",
"bct.local_assortativity_wu_sign",
"collections.Counter",
"bct.modularity.modularity_louvain_dir"
] |
[((686, 711), 'numpy.arange', 'np.arange', (['(0.0)', '(1.5)', '(0.17)'], {}), '(0.0, 1.5, 0.17)\n', (695, 711), True, 'import numpy as np\n'), ((784, 809), 'numpy.arange', 'np.arange', (['(0.0)', '(1.5)', '(0.17)'], {}), '(0.0, 1.5, 0.17)\n', (793, 809), True, 'import numpy as np\n'), ((1595, 1631), 'bct.local_assortativity_wu_sign', 'bct.local_assortativity_wu_sign', (['mat'], {}), '(mat)\n', (1626, 1631), False, 'import bct\n'), ((3079, 3096), 'numpy.copy', 'np.copy', (['orig_mat'], {}), '(orig_mat)\n', (3086, 3096), True, 'import numpy as np\n'), ((3107, 3130), 'numpy.argsort', 'np.argsort', (['label_comms'], {}), '(label_comms)\n', (3117, 3130), True, 'import numpy as np\n'), ((1774, 1821), 'bct.centrality.module_degree_zscore', 'bct.centrality.module_degree_zscore', (['mat', 'ci', '(0)'], {}), '(mat, ci, 0)\n', (1809, 1821), False, 'import bct\n'), ((1860, 1907), 'bct.centrality.module_degree_zscore', 'bct.centrality.module_degree_zscore', (['mat', 'ci', '(3)'], {}), '(mat, ci, 3)\n', (1895, 1907), False, 'import bct\n'), ((1980, 1997), 'numpy.median', 'np.median', (['zscore'], {}), '(zscore)\n', (1989, 1997), True, 'import numpy as np\n'), ((2920, 2981), 'bct.centrality.participation_coef', 'bct.centrality.participation_coef', (['mat', 'ci_list'], {'degree': '"""out"""'}), "(mat, ci_list, degree='out')\n", (2953, 2981), False, 'import bct\n'), ((975, 1020), 'bct.modularity_louvain_und_sign', 'bct.modularity_louvain_und_sign', (['mat'], {'gamma': 'g'}), '(mat, gamma=g)\n', (1006, 1020), False, 'import bct\n'), ((1052, 1103), 'bct.modularity.modularity_louvain_dir', 'bct.modularity.modularity_louvain_dir', (['mat'], {'gamma': 'g'}), '(mat, gamma=g)\n', (1089, 1103), False, 'import bct\n'), ((2511, 2547), 'bct.participation_coef_sign', 'bct.participation_coef_sign', (['mat', 'ci'], {}), '(mat, ci)\n', (2538, 2547), False, 'import bct\n'), ((1123, 1138), 'collections.Counter', 'Counter', (['mod[0]'], {}), '(mod[0])\n', (1130, 1138), False, 'from collections import Counter\n'), ((1223, 1244), 'numpy.where', 'np.where', (['(mod[0] == x)'], {}), '(mod[0] == x)\n', (1231, 1244), True, 'import numpy as np\n'), ((2623, 2641), 'numpy.median', 'np.median', (['part[0]'], {}), '(part[0])\n', (2632, 2641), True, 'import numpy as np\n'), ((2688, 2706), 'numpy.median', 'np.median', (['part[1]'], {}), '(part[1])\n', (2697, 2706), True, 'import numpy as np\n'), ((1152, 1167), 'collections.Counter', 'Counter', (['mod[0]'], {}), '(mod[0])\n', (1159, 1167), False, 'from collections import Counter\n'), ((1178, 1193), 'collections.Counter', 'Counter', (['mod[0]'], {}), '(mod[0])\n', (1185, 1193), False, 'from collections import Counter\n'), ((1256, 1271), 'collections.Counter', 'Counter', (['mod[0]'], {}), '(mod[0])\n', (1263, 1271), False, 'from collections import Counter\n'), ((1282, 1297), 'collections.Counter', 'Counter', (['mod[0]'], {}), '(mod[0])\n', (1289, 1297), False, 'from collections import Counter\n')]
|
from abc import ABCMeta, abstractmethod
import numpy as np
from core.net_errors import NetIsNotInitialized, NetIsNotCalculated
class Corrector:
__metaclass__ = ABCMeta
def __init__(self, nu):
self.nu = nu
@abstractmethod
def initialize(self, net_object):
if net_object.net[-1].get('s') is None:
for l in range(1, len(net_object.net)):
net_object.net[l]['s'] = np.zeros((net_object.config[l]))
@abstractmethod
def correct(self, net_object, output_vector):
if net_object.net is None:
raise NetIsNotInitialized()
if not net_object.is_calculated:
raise NetIsNotCalculated()
self.initialize(net_object)
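# ---------------------------------------------------------------------------
# Editorial sketch (not part of the original source): a minimal concrete
# corrector illustrating how subclasses are expected to extend initialize()
# and correct(). The net_object layout (net, config, is_calculated) is
# inferred from the abstract methods above; note that "__metaclass__ = ABCMeta"
# is the Python 2 idiom, so under Python 3 abstractness is not enforced.
class IdentityCorrector(Corrector):
    def initialize(self, net_object):
        # reuse the base-class bookkeeping that allocates the 's' vectors
        super(IdentityCorrector, self).initialize(net_object)

    def correct(self, net_object, output_vector):
        # the base class validates that the net is initialized and calculated
        super(IdentityCorrector, self).correct(net_object, output_vector)
        # a real corrector would update the network here, scaled by self.nu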
|
[
"core.net_errors.NetIsNotInitialized",
"numpy.zeros",
"core.net_errors.NetIsNotCalculated"
] |
[((583, 604), 'core.net_errors.NetIsNotInitialized', 'NetIsNotInitialized', ([], {}), '()\n', (602, 604), False, 'from core.net_errors import NetIsNotInitialized, NetIsNotCalculated\n'), ((664, 684), 'core.net_errors.NetIsNotCalculated', 'NetIsNotCalculated', ([], {}), '()\n', (682, 684), False, 'from core.net_errors import NetIsNotInitialized, NetIsNotCalculated\n'), ((426, 456), 'numpy.zeros', 'np.zeros', (['net_object.config[l]'], {}), '(net_object.config[l])\n', (434, 456), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import *
from keras.models import load_model
import matplotlib.pyplot as plt
#################################################################
### Generate Data ###############################################
#################################################################
# generate training data
x = np.linspace(0.0,2*np.pi,20)
y = np.sin(x)
# save training data to file
data = np.vstack((x,y)).T
np.savetxt('train_data.csv',data,header='x,y',comments='',delimiter=',')
# generate test data
x = np.linspace(0.0,2*np.pi,100)
y = np.sin(x)
# save test data to file
data = np.vstack((x,y)).T
np.savetxt('test_data.csv',data,header='x,y',comments='',delimiter=',')
#################################################################
### Scale data ##################################################
#################################################################
# load training and test data with pandas
train_df = pd.read_csv('train_data.csv')
test_df = pd.read_csv('test_data.csv')
# scale values to 0 to 1 for the ANN to work well
s = MinMaxScaler(feature_range=(0,1))
# scale training and test data
sc_train = s.fit_transform(train_df)
sc_test = s.transform(test_df)
# print scaling adjustments
print('Scalar multipliers')
print(s.scale_)
print('Scalar minimum')
print(s.min_)
# convert scaled values back to dataframe
sc_train_df = pd.DataFrame(sc_train, columns=train_df.columns.values)
sc_test_df = pd.DataFrame(sc_test, columns=test_df.columns.values)
# save scaled values to CSV files
sc_train_df.to_csv('train_scaled.csv', index=False)
sc_test_df.to_csv('test_scaled.csv', index=False)
#################################################################
### Train model #################################################
#################################################################
# create neural network model
model = Sequential()
model.add(Dense(1, input_dim=1, activation='linear'))
model.add(Dense(2, activation='linear'))
model.add(Dense(2, activation='tanh'))
model.add(Dense(2, activation='linear'))
model.add(Dense(1, activation='linear'))
model.compile(loss="mean_squared_error", optimizer="adam")
# load training data
train_df = pd.read_csv("train_scaled.csv")
X1 = train_df.drop('y', axis=1).values
Y1 = train_df[['y']].values
# train the model
model.fit(X1,Y1,epochs=5000,verbose=0,shuffle=True)
# Save the model to hard drive
#model.save('model.h5')
#################################################################
### Test model ##################################################
#################################################################
# Load the model from hard drive
#model = load_model('model.h5')
# load test data
test_df = pd.read_csv("test_scaled.csv")
X2 = test_df.drop('y', axis=1).values
Y2 = test_df[['y']].values
# test the model
mse = model.evaluate(X2,Y2, verbose=1)
print('Mean Squared Error: ', mse)
#################################################################
### Predictions Outside Training Region #########################
#################################################################
# generate prediction data
x = np.linspace(-2*np.pi,4*np.pi,100)
y = np.sin(x)
# scale input
X3 = x*s.scale_[0]+s.min_[0]
# predict
Y3P = model.predict(X3)
# unscale output
yp = (Y3P-s.min_[1])/s.scale_[1]
plt.figure()
plt.plot((X1-s.min_[0])/s.scale_[0], \
(Y1-s.min_[1])/s.scale_[1], \
'bo',label='train')
plt.plot(x,y,'r-',label='actual')
plt.plot(x,yp,'k--',label='predict')
plt.legend(loc='best')
plt.savefig('results.png')
plt.show()
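# ---------------------------------------------------------------------------
# Editorial note (not part of the original source): the manual unscaling above,
# yp = (Y3P - s.min_[1]) / s.scale_[1], inverts MinMaxScaler's forward mapping
# X_scaled = X * scale_ + min_ for the 'y' column only. A hedged equivalent is
# to rebuild the two-column array and let the scaler invert both columns:
#
#   xy_scaled = np.hstack([X3.reshape(-1, 1), Y3P.reshape(-1, 1)])
#   x_unscaled, yp_alt = s.inverse_transform(xy_scaled).T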
|
[
"pandas.DataFrame",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pandas.read_csv",
"matplotlib.pyplot.legend",
"numpy.savetxt",
"sklearn.preprocessing.MinMaxScaler",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.vstack",
"numpy.linspace",
"keras.models.Sequential",
"matplotlib.pyplot.savefig"
] |
[((448, 479), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2 * np.pi)', '(20)'], {}), '(0.0, 2 * np.pi, 20)\n', (459, 479), True, 'import numpy as np\n'), ((480, 489), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (486, 489), True, 'import numpy as np\n'), ((545, 621), 'numpy.savetxt', 'np.savetxt', (['"""train_data.csv"""', 'data'], {'header': '"""x,y"""', 'comments': '""""""', 'delimiter': '""","""'}), "('train_data.csv', data, header='x,y', comments='', delimiter=',')\n", (555, 621), True, 'import numpy as np\n'), ((644, 676), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2 * np.pi)', '(100)'], {}), '(0.0, 2 * np.pi, 100)\n', (655, 676), True, 'import numpy as np\n'), ((677, 686), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (683, 686), True, 'import numpy as np\n'), ((738, 813), 'numpy.savetxt', 'np.savetxt', (['"""test_data.csv"""', 'data'], {'header': '"""x,y"""', 'comments': '""""""', 'delimiter': '""","""'}), "('test_data.csv', data, header='x,y', comments='', delimiter=',')\n", (748, 813), True, 'import numpy as np\n'), ((1063, 1092), 'pandas.read_csv', 'pd.read_csv', (['"""train_data.csv"""'], {}), "('train_data.csv')\n", (1074, 1092), True, 'import pandas as pd\n'), ((1103, 1131), 'pandas.read_csv', 'pd.read_csv', (['"""test_data.csv"""'], {}), "('test_data.csv')\n", (1114, 1131), True, 'import pandas as pd\n'), ((1187, 1221), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (1199, 1221), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1489, 1544), 'pandas.DataFrame', 'pd.DataFrame', (['sc_train'], {'columns': 'train_df.columns.values'}), '(sc_train, columns=train_df.columns.values)\n', (1501, 1544), True, 'import pandas as pd\n'), ((1558, 1611), 'pandas.DataFrame', 'pd.DataFrame', (['sc_test'], {'columns': 'test_df.columns.values'}), '(sc_test, columns=test_df.columns.values)\n', (1570, 1611), True, 'import pandas as pd\n'), ((1987, 1999), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1997, 1999), False, 'from keras.models import Sequential\n'), ((2308, 2339), 'pandas.read_csv', 'pd.read_csv', (['"""train_scaled.csv"""'], {}), "('train_scaled.csv')\n", (2319, 2339), True, 'import pandas as pd\n'), ((2827, 2857), 'pandas.read_csv', 'pd.read_csv', (['"""test_scaled.csv"""'], {}), "('test_scaled.csv')\n", (2838, 2857), True, 'import pandas as pd\n'), ((3247, 3286), 'numpy.linspace', 'np.linspace', (['(-2 * np.pi)', '(4 * np.pi)', '(100)'], {}), '(-2 * np.pi, 4 * np.pi, 100)\n', (3258, 3286), True, 'import numpy as np\n'), ((3285, 3294), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (3291, 3294), True, 'import numpy as np\n'), ((3423, 3435), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3433, 3435), True, 'import matplotlib.pyplot as plt\n'), ((3436, 3533), 'matplotlib.pyplot.plot', 'plt.plot', (['((X1 - s.min_[0]) / s.scale_[0])', '((Y1 - s.min_[1]) / s.scale_[1])', '"""bo"""'], {'label': '"""train"""'}), "((X1 - s.min_[0]) / s.scale_[0], (Y1 - s.min_[1]) / s.scale_[1],\n 'bo', label='train')\n", (3444, 3533), True, 'import matplotlib.pyplot as plt\n'), ((3531, 3567), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""r-"""'], {'label': '"""actual"""'}), "(x, y, 'r-', label='actual')\n", (3539, 3567), True, 'import matplotlib.pyplot as plt\n'), ((3565, 3604), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'yp', '"""k--"""'], {'label': '"""predict"""'}), "(x, yp, 'k--', label='predict')\n", (3573, 3604), True, 'import matplotlib.pyplot as plt\n'), ((3602, 3624), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (3612, 3624), True, 'import matplotlib.pyplot as plt\n'), ((3625, 3651), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""results.png"""'], {}), "('results.png')\n", (3636, 3651), True, 'import matplotlib.pyplot as plt\n'), ((3652, 3662), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3660, 3662), True, 'import matplotlib.pyplot as plt\n'), ((526, 543), 'numpy.vstack', 'np.vstack', (['(x, y)'], {}), '((x, y))\n', (535, 543), True, 'import numpy as np\n'), ((719, 736), 'numpy.vstack', 'np.vstack', (['(x, y)'], {}), '((x, y))\n', (728, 736), True, 'import numpy as np\n')]
|
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import settings.hparam as hp
from torch.autograd import Variable
from collections import OrderedDict
class SeqLinear(nn.Module):
"""
Linear layer for sequences
"""
def __init__(self, input_size, output_size, time_dim=1):
"""
:param input_size: dimension of input
:param output_size: dimension of output
:param time_dim: index of time dimension
"""
super(SeqLinear, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.time_dim = time_dim
self.linear = nn.Linear(input_size, output_size)
def forward(self, input_):
"""
:param input_: sequences
:return: outputs
"""
batch_size = input_.size()[0]
if self.time_dim == 2:
input_ = input_.transpose(1, 2)
input_ = input_.contiguous()
input_ = input_.view(-1, self.input_size)
out = self.linear(input_).view(batch_size, -1, self.output_size)
if self.time_dim == 2:
out = out.contiguous().transpose(1, 2)
return out
class Prenet(nn.Module):
"""
Prenet before passing through the network
"""
def __init__(self, input_size, hidden_size, output_size, dropout_rate=0.5, time_dim=2):
"""
        :param input_size: dimension of input
        :param hidden_size: dimension of hidden unit
        :param output_size: dimension of output
        :param dropout_rate: dropout probability applied after each ReLU
        :param time_dim: index of the time dimension of the input
"""
super(Prenet, self).__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.layer = nn.Sequential(OrderedDict([
('fc1', SeqLinear(self.input_size, self.hidden_size, time_dim=time_dim)),
('relu1', nn.ReLU()),
('dropout1', nn.Dropout(dropout_rate)),
('fc2', SeqLinear(self.hidden_size, self.output_size, time_dim=time_dim)),
('relu2', nn.ReLU()),
('dropout2', nn.Dropout(dropout_rate)),
]))
def forward(self, input_):
out = self.layer(input_)
return out
class CBHG(nn.Module):
"""
CBHG Module
"""
def __init__(self, hidden_size, K=16, projection_size=256, num_highway_blocks=4, num_gru_layers=1, max_pool_kernel_size=2):
"""
:param hidden_size: dimension of hidden unit
:param K: # of convolution banks
:param projection_size: dimension of projection unit
:param num_gru_layers: # of layers of GRUcell
:param max_pool_kernel_size: max pooling kernel size
        :param num_highway_blocks: # of highway network blocks
"""
super(CBHG, self).__init__()
self.hidden_size = hidden_size
self.num_gru_layers = num_gru_layers
self.projection_size = projection_size
self.convbank_list = nn.ModuleList()
self.convbank_list.append(nn.Conv1d(in_channels=hidden_size,
out_channels=hidden_size,
kernel_size=1,
padding=int(np.floor(1/2))))
for i in range(2, K+1):
self.convbank_list.append(nn.Conv1d(in_channels=hidden_size,
out_channels=hidden_size,
kernel_size=i,
padding=int(np.floor(i/2))))
self.batchnorm_list = nn.ModuleList()
for i in range(1, K+1):
self.batchnorm_list.append(nn.BatchNorm1d(hidden_size))
convbank_outdim = hidden_size * K
self.conv_projection_1 = nn.Conv1d(in_channels=convbank_outdim,
out_channels=hidden_size * 2,
kernel_size=3,
padding=int(np.floor(3/2)))
self.conv_projection_2 = nn.Conv1d(in_channels=hidden_size * 2,
out_channels=hidden_size,
kernel_size=3,
padding=int(np.floor(3/2)))
self.batchnorm_proj_1 = nn.BatchNorm1d(hidden_size * 2)
self.batchnorm_proj_2 = nn.BatchNorm1d(hidden_size)
self.max_pool = nn.MaxPool1d(max_pool_kernel_size, stride=1, padding=1)
self.highway = Highwaynet(self.hidden_size, num_layers=num_highway_blocks)
self.gru = nn.GRU(self.hidden_size, self.hidden_size, num_layers=num_gru_layers,
batch_first=True,
bidirectional=True)
def _conv_fit_dim(self, x, kernel_size=3):
if kernel_size % 2 == 0:
return x[:, :, :-1]
else:
return x
def forward(self, input_):
input_ = input_.contiguous()
batch_size = input_.size()[0]
convbank_list = list()
convbank_input = input_
# Convolution bank filters
for k, (conv, batchnorm) in enumerate(zip(self.convbank_list, self.batchnorm_list)):
convbank_input = batchnorm(F.relu(self._conv_fit_dim(conv(convbank_input), k+1).contiguous()))
convbank_list.append(convbank_input)
# Concatenate all features
conv_cat = torch.cat(convbank_list, dim=1)
# Max pooling
conv_cat = self.max_pool(conv_cat)[:, :, :-1]
# Projection
style_feature = self.batchnorm_proj_1(F.relu(self._conv_fit_dim(self.conv_projection_1(conv_cat))))
conv_proj = self.batchnorm_proj_2(self._conv_fit_dim(self.conv_projection_2(style_feature))) + input_
# Highway networks
highway = self.highway.forward(conv_proj)
highway = torch.transpose(highway, 1, 2)
# Bidirectional GRU
if torch.cuda.is_available():
init_gru = Variable(torch.zeros(2 * self.num_gru_layers, batch_size, self.hidden_size)).cuda()
else:
init_gru = Variable(torch.zeros(2 * self.num_gru_layers, batch_size, self.hidden_size))
self.gru.flatten_parameters()
content_feature, _ = self.gru(highway, init_gru)
return content_feature, style_feature
class Highwaynet(nn.Module):
"""
Highway network
"""
def __init__(self, num_units, num_layers=4):
"""
:param num_units: dimension of hidden unit
:param num_layers: # of highway layers
"""
super(Highwaynet, self).__init__()
self.num_units = num_units
self.num_layers = num_layers
self.gates = nn.ModuleList()
self.linears = nn.ModuleList()
for _ in range(self.num_layers):
self.linears.append(SeqLinear(num_units, num_units, time_dim=2))
self.gates.append(SeqLinear(num_units, num_units, time_dim=2))
def forward(self, input_):
out = input_
# highway gated function
for fc1, fc2 in zip(self.linears, self.gates):
h = F.relu(fc1.forward(out))
t = F.sigmoid(fc2.forward(out))
c = 1. - t
out = h * t + out * c
return out
class Conv1d(nn.Conv1d):
def __init__(self, in_channels, out_channels, kernel_size, padding=0, norm_fn=None, dropout=False, activation_fn=F.relu):
super(Conv1d, self).__init__(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
padding=padding
)
self.norm = norm_fn
self.dropout = dropout
self.activation_fn = activation_fn
if norm_fn is not None:
self.norm_fn = norm_fn(hp.hidden_size)
if dropout:
self.drop = nn.Dropout(p=0.25)
def forward(self, input_):
conv = self.activation_fn(F.conv1d(input_, self.weight, self.bias, self.stride,
self.padding, self.dilation, self.groups))
if self.norm is not None:
conv = self.norm_fn(conv)
if self.dropout:
conv = self.drop(conv)
return conv
class MinimalGRU(nn.Module):
"""
    Implementation of the Revising GRU
Reference : https://arxiv.org/abs/1710.00641
Reference Source : https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/rnn.py
Differences with original GRU
1. No reset gate
"""
# TODO: Recurrent Dropout
# TODO: Batch Normalization on linear computation
def __init__(self, input_size, hidden_size, max_len, num_layers=1, is_bidirection=False,
bias=True, dropout=0, nonlinearity='relu', is_norm=True):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.dropout = dropout
self.num_layers = num_layers
self.num_directions = 2 if is_bidirection else 1
self.bias = bias
self.is_norm = is_norm
# handle dropout boundary exception
if self.dropout < 0 or self.dropout > 1:
            raise ValueError('Dropout %.6f is not a valid value!' % self.dropout)
elif self.dropout:
self.drop_modules = nn.ModuleList()
if self.is_norm:
self.i_norm_list = nn.ModuleList()
self.h_norm_list = nn.ModuleList()
# setup nonlinearity
if nonlinearity == 'relu':
self.act = nn.ReLU()
elif nonlinearity == 'tanh':
self.act = nn.Tanh()
else:
raise NotImplementedError('%s nonlinearity is not implemented !!' % nonlinearity)
gate_size = 2 * hidden_size
self._all_weights = []
for layer in range(num_layers):
for direction in range(self.num_directions):
layer_input_size = input_size if layer == 0 else hidden_size * self.num_directions
w_ih = nn.Parameter(torch.Tensor(gate_size, layer_input_size))
w_hh = nn.Parameter(torch.Tensor(gate_size, hidden_size))
b_ih = nn.Parameter(torch.Tensor(gate_size))
b_hh = nn.Parameter(torch.Tensor(gate_size))
drop_mask = nn.Parameter(torch.ones(gate_size), requires_grad=False)
layer_params = (w_ih, w_hh, b_ih, b_hh, drop_mask)
suffix = '_reverse' if direction == 1 else ''
param_names = ['weight_ih_l{}{}', 'weight_hh_l{}{}']
if bias:
param_names += ['bias_ih_l{}{}', 'bias_hh_l{}{}']
if self.dropout:
param_names += ['drop_mask_l{}{}']
self.drop_modules.append(nn.Dropout(self.dropout))
if self.is_norm:
self.i_norm_list.append(SeparatedBatchNorm1d(gate_size, max_length=max_len))
self.h_norm_list.append(SeparatedBatchNorm1d(gate_size, max_length=max_len))
param_names = [x.format(layer, suffix) for x in param_names]
for name, param in zip(param_names, layer_params):
setattr(self, name, param)
self._all_weights.append(param_names)
self.reset_parameters()
def reset_parameters(self):
"""
https://github.com/pytorch/pytorch/blob/7b6b7d4575832a9af4257ba341f3da9e7a2a2b57/torch/nn/modules/rnn.py#L115
"""
stdv = 1.0 / math.sqrt(self.hidden_size)
for weight in self.parameters():
if not weight.requires_grad:
continue
weight.data.uniform_(-stdv, stdv)
def forward(self, x, hx, time_dim=1):
"""
        Custom GRU layer that runs the GRU cell computation inline over the sequence
:param x: sequence data
:param hx: initial hidden status
:param time_dim: the time axis on input data (default is 1)
:return:
"""
assert time_dim == 1
t_len = x.size()[time_dim]
for layer in range(self.num_layers):
layer_outputs = []
for direction in range(self.num_directions):
# get attrs
weight_attr_names = self._all_weights[layer * self.num_directions + direction]
attrs = [getattr(self, name) for name in weight_attr_names]
if self.bias:
if self.dropout:
w_ih, w_hh, b_ih, b_hh, mask = attrs
else:
w_ih, w_hh, b_ih, b_hh = attrs
else:
if self.dropout:
w_ih, w_hh, mask, b_ih, b_hh = attrs + [None, None]
else:
w_ih, w_hh, b_ih, b_hh = attrs + [None, None]
if self.dropout:
mask = self.drop_modules[layer * self.num_directions + direction](mask)
else:
mask = 1.
hx_outputs = []
hx_ = hx[layer * self.num_directions + direction]
# access on sequence
for t in range(t_len):
input = x[:, t, :]
# GRU Cell Part
# make gates
in_part = F.linear(input, w_ih, b_ih)
h_part = F.linear(hx_, w_hh, b_hh)
if self.is_norm:
in_part = self.i_norm_list[layer * self.num_directions + direction](in_part, t)
h_part = self.h_norm_list[layer * self.num_directions + direction](h_part, t)
gates = in_part + h_part
# recurrent dropout
gates *= mask
ug, og = gates.chunk(2, 1)
# calc
ug = F.sigmoid(ug)
og = self.act(og)
hx_ = ug * hx_ + (1 - ug) * og
hx_outputs.append(hx_)
layer_outputs.append(hx_outputs)
assert len(layer_outputs) in [1, 2]
# make output or next input
# bi-direction
if len(layer_outputs) == 2:
                # TODO: why does this computation flow cause a gradient computation error?
# f_cat, b_cat = [], []
# for idx in range(len(layer_outputs[0])):
# f_cat.append(layer_outputs[0][idx].unsqueeze_(1))
# b_cat.append(layer_outputs[1][-(idx+1)].unsqueeze_(1))
# f_cat = torch.cat(f_cat, dim=1)
# b_cat = torch.cat(b_cat, dim=1)
# x = torch.cat([f_cat, b_cat], dim=2)
x = []
for f, b in zip(layer_outputs[0], layer_outputs[1][::-1]):
x.append(torch.cat([f, b], dim=1).unsqueeze_(1))
x = torch.cat(x, dim=1)
# single direction
else:
x = torch.cat([item.unsqueeze_(1) for item in layer_outputs[0]], 1)
return x
class SeparatedBatchNorm1d(nn.Module):
"""
A batch normalization module which keeps its running mean
and variance separately per timestep.
"""
def __init__(self, num_features, max_length, eps=1e-5, momentum=0.1,
affine=True):
"""
Most parts are copied from
torch.nn.modules.batchnorm._BatchNorm.
"""
super().__init__()
self.num_features = num_features
self.max_length = max_length
self.affine = affine
self.eps = eps
self.momentum = momentum
if self.affine:
self.weight = nn.Parameter(torch.FloatTensor(num_features))
self.bias = nn.Parameter(torch.FloatTensor(num_features))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
for i in range(max_length):
self.register_buffer(
'running_mean_{}'.format(i), torch.zeros(num_features))
self.register_buffer(
'running_var_{}'.format(i), torch.ones(num_features))
self.reset_parameters()
def reset_parameters(self):
for i in range(self.max_length):
running_mean_i = getattr(self, 'running_mean_{}'.format(i))
running_var_i = getattr(self, 'running_var_{}'.format(i))
running_mean_i.zero_()
running_var_i.fill_(1)
if self.affine:
self.weight.data.uniform_()
self.bias.data.zero_()
def _check_input_dim(self, input_):
if input_.size(1) != self.running_mean_0.nelement():
raise ValueError('got {}-feature tensor, expected {}'
.format(input_.size(1), self.num_features))
def forward(self, input_, time):
self._check_input_dim(input_)
if time >= self.max_length:
time = self.max_length - 1
running_mean = getattr(self, 'running_mean_{}'.format(time))
running_var = getattr(self, 'running_var_{}'.format(time))
return F.batch_norm(
input=input_, running_mean=running_mean, running_var=running_var,
weight=self.weight, bias=self.bias, training=self.training,
momentum=self.momentum, eps=self.eps)
def __repr__(self):
return ('{name}({num_features}, eps={eps}, momentum={momentum},'
' max_length={max_length}, affine={affine})'
.format(name=self.__class__.__name__, **self.__dict__))
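# ---------------------------------------------------------------------------
# Editorial usage sketch (not part of the original source): pushes a dummy
# batch through Prenet and CBHG to illustrate the expected (batch, channels,
# time) layout. All sizes below are arbitrary assumptions for illustration.
if __name__ == '__main__':
    dummy = torch.rand(2, 80, 30)                    # (batch, features, time)
    prenet = Prenet(input_size=80, hidden_size=128, output_size=64)
    pre_out = prenet(dummy)                          # -> (2, 64, 30)
    cbhg = CBHG(hidden_size=64)
    content, style = cbhg(pre_out)
    # content: bidirectional GRU output (2, 30, 128); style: conv projection (2, 128, 30)
    print(pre_out.shape, content.shape, style.shape)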
|
[
"torch.nn.Dropout",
"numpy.floor",
"torch.nn.MaxPool1d",
"torch.cat",
"torch.nn.functional.sigmoid",
"torch.ones",
"torch.FloatTensor",
"torch.Tensor",
"torch.nn.Linear",
"torch.zeros",
"torch.nn.GRU",
"math.sqrt",
"torch.nn.ModuleList",
"torch.nn.Tanh",
"torch.nn.BatchNorm1d",
"torch.cuda.is_available",
"torch.nn.functional.batch_norm",
"torch.nn.ReLU",
"torch.nn.functional.linear",
"torch.nn.functional.conv1d",
"torch.transpose"
] |
[((677, 711), 'torch.nn.Linear', 'nn.Linear', (['input_size', 'output_size'], {}), '(input_size, output_size)\n', (686, 711), True, 'import torch.nn as nn\n'), ((2938, 2953), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (2951, 2953), True, 'import torch.nn as nn\n'), ((3588, 3603), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3601, 3603), True, 'import torch.nn as nn\n'), ((4320, 4351), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(hidden_size * 2)'], {}), '(hidden_size * 2)\n', (4334, 4351), True, 'import torch.nn as nn\n'), ((4385, 4412), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hidden_size'], {}), '(hidden_size)\n', (4399, 4412), True, 'import torch.nn as nn\n'), ((4438, 4493), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (['max_pool_kernel_size'], {'stride': '(1)', 'padding': '(1)'}), '(max_pool_kernel_size, stride=1, padding=1)\n', (4450, 4493), True, 'import torch.nn as nn\n'), ((4598, 4709), 'torch.nn.GRU', 'nn.GRU', (['self.hidden_size', 'self.hidden_size'], {'num_layers': 'num_gru_layers', 'batch_first': '(True)', 'bidirectional': '(True)'}), '(self.hidden_size, self.hidden_size, num_layers=num_gru_layers,\n batch_first=True, bidirectional=True)\n', (4604, 4709), True, 'import torch.nn as nn\n'), ((5418, 5449), 'torch.cat', 'torch.cat', (['convbank_list'], {'dim': '(1)'}), '(convbank_list, dim=1)\n', (5427, 5449), False, 'import torch\n'), ((5863, 5893), 'torch.transpose', 'torch.transpose', (['highway', '(1)', '(2)'], {}), '(highway, 1, 2)\n', (5878, 5893), False, 'import torch\n'), ((5934, 5959), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5957, 5959), False, 'import torch\n'), ((6700, 6715), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (6713, 6715), True, 'import torch.nn as nn\n'), ((6739, 6754), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (6752, 6754), True, 'import torch.nn as nn\n'), ((17096, 17281), 'torch.nn.functional.batch_norm', 'F.batch_norm', ([], {'input': 'input_', 'running_mean': 'running_mean', 'running_var': 'running_var', 'weight': 'self.weight', 'bias': 'self.bias', 'training': 'self.training', 'momentum': 'self.momentum', 'eps': 'self.eps'}), '(input=input_, running_mean=running_mean, running_var=\n running_var, weight=self.weight, bias=self.bias, training=self.training,\n momentum=self.momentum, eps=self.eps)\n', (17108, 17281), True, 'import torch.nn.functional as F\n'), ((7918, 7936), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.25)'}), '(p=0.25)\n', (7928, 7936), True, 'import torch.nn as nn\n'), ((8003, 8103), 'torch.nn.functional.conv1d', 'F.conv1d', (['input_', 'self.weight', 'self.bias', 'self.stride', 'self.padding', 'self.dilation', 'self.groups'], {}), '(input_, self.weight, self.bias, self.stride, self.padding, self.\n dilation, self.groups)\n', (8011, 8103), True, 'import torch.nn.functional as F\n'), ((9390, 9405), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (9403, 9405), True, 'import torch.nn as nn\n'), ((9437, 9452), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (9450, 9452), True, 'import torch.nn as nn\n'), ((9540, 9549), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (9547, 9549), True, 'import torch.nn as nn\n'), ((11509, 11536), 'math.sqrt', 'math.sqrt', (['self.hidden_size'], {}), '(self.hidden_size)\n', (11518, 11536), False, 'import math\n'), ((3675, 3702), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hidden_size'], {}), '(hidden_size)\n', (3689, 3702), True, 'import torch.nn as nn\n'), ((6114, 6180), 'torch.zeros', 
'torch.zeros', (['(2 * self.num_gru_layers)', 'batch_size', 'self.hidden_size'], {}), '(2 * self.num_gru_layers, batch_size, self.hidden_size)\n', (6125, 6180), False, 'import torch\n'), ((9318, 9333), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (9331, 9333), True, 'import torch.nn as nn\n'), ((9610, 9619), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (9617, 9619), True, 'import torch.nn as nn\n'), ((14872, 14891), 'torch.cat', 'torch.cat', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (14881, 14891), False, 'import torch\n'), ((15671, 15702), 'torch.FloatTensor', 'torch.FloatTensor', (['num_features'], {}), '(num_features)\n', (15688, 15702), False, 'import torch\n'), ((15741, 15772), 'torch.FloatTensor', 'torch.FloatTensor', (['num_features'], {}), '(num_features)\n', (15758, 15772), False, 'import torch\n'), ((16005, 16030), 'torch.zeros', 'torch.zeros', (['num_features'], {}), '(num_features)\n', (16016, 16030), False, 'import torch\n'), ((16110, 16134), 'torch.ones', 'torch.ones', (['num_features'], {}), '(num_features)\n', (16120, 16134), False, 'import torch\n'), ((4000, 4015), 'numpy.floor', 'np.floor', (['(3 / 2)'], {}), '(3 / 2)\n', (4008, 4015), True, 'import numpy as np\n'), ((4271, 4286), 'numpy.floor', 'np.floor', (['(3 / 2)'], {}), '(3 / 2)\n', (4279, 4286), True, 'import numpy as np\n'), ((10030, 10071), 'torch.Tensor', 'torch.Tensor', (['gate_size', 'layer_input_size'], {}), '(gate_size, layer_input_size)\n', (10042, 10071), False, 'import torch\n'), ((10109, 10145), 'torch.Tensor', 'torch.Tensor', (['gate_size', 'hidden_size'], {}), '(gate_size, hidden_size)\n', (10121, 10145), False, 'import torch\n'), ((10183, 10206), 'torch.Tensor', 'torch.Tensor', (['gate_size'], {}), '(gate_size)\n', (10195, 10206), False, 'import torch\n'), ((10244, 10267), 'torch.Tensor', 'torch.Tensor', (['gate_size'], {}), '(gate_size)\n', (10256, 10267), False, 'import torch\n'), ((10310, 10331), 'torch.ones', 'torch.ones', (['gate_size'], {}), '(gate_size)\n', (10320, 10331), False, 'import torch\n'), ((13300, 13327), 'torch.nn.functional.linear', 'F.linear', (['input', 'w_ih', 'b_ih'], {}), '(input, w_ih, b_ih)\n', (13308, 13327), True, 'import torch.nn.functional as F\n'), ((13357, 13382), 'torch.nn.functional.linear', 'F.linear', (['hx_', 'w_hh', 'b_hh'], {}), '(hx_, w_hh, b_hh)\n', (13365, 13382), True, 'import torch.nn.functional as F\n'), ((13845, 13858), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['ug'], {}), '(ug)\n', (13854, 13858), True, 'import torch.nn.functional as F\n'), ((1868, 1877), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1875, 1877), True, 'import torch.nn as nn\n'), ((1906, 1930), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (1916, 1930), True, 'import torch.nn as nn\n'), ((2044, 2053), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2051, 2053), True, 'import torch.nn as nn\n'), ((2082, 2106), 'torch.nn.Dropout', 'nn.Dropout', (['dropout_rate'], {}), '(dropout_rate)\n', (2092, 2106), True, 'import torch.nn as nn\n'), ((3220, 3235), 'numpy.floor', 'np.floor', (['(1 / 2)'], {}), '(1 / 2)\n', (3228, 3235), True, 'import numpy as np\n'), ((5993, 6059), 'torch.zeros', 'torch.zeros', (['(2 * self.num_gru_layers)', 'batch_size', 'self.hidden_size'], {}), '(2 * self.num_gru_layers, batch_size, self.hidden_size)\n', (6004, 6059), False, 'import torch\n'), ((10782, 10806), 'torch.nn.Dropout', 'nn.Dropout', (['self.dropout'], {}), '(self.dropout)\n', (10792, 10806), True, 'import torch.nn as nn\n'), ((3540, 3555), 'numpy.floor', 
'np.floor', (['(i / 2)'], {}), '(i / 2)\n', (3548, 3555), True, 'import numpy as np\n'), ((14812, 14836), 'torch.cat', 'torch.cat', (['[f, b]'], {'dim': '(1)'}), '([f, b], dim=1)\n', (14821, 14836), False, 'import torch\n')]
|
import numpy as np
import scipy.stats as sps
def preprocess(X):
return X
def prob_model_data1_range():
return [-5,5]
def prob_model_data2_range():
return [-5,5]
def prob_model_poi_range(mode = 'eval'):
if mode == 'eval':
return [-3,3]
elif mode == 'train':
return [-5,5]
def prob_model_nuis_range(mode = 'eval'):
if mode == 'eval':
return [-3,3]
elif mode == 'train':
return [-5,5]
CORR = 0.8
COV = np.array([[1.0,CORR],[CORR,1.0]])
COVINV = np.linalg.inv(COV)
def __prob_model(pars):
return sps.multivariate_normal(mean = pars,cov =COV)
def getnuhathat(at, data):
covinv = np.linalg.inv(COV)
nuhathat = covinv[0,1]/covinv[1,1]*(data[:,0]-at)+data[:,1]
return nuhathat
def get_non_centrality(a,b):
return (a - b)**2/COV[0,0]
def sample_prob_model(pars, N):
return __prob_model(pars).rvs(N)
def logpdf_prob_model(pars,data):
return __prob_model(pars).logpdf(data)
def prob_model_mhat(data):
n1,n2 = data[...,0],data[...,1]
return n1
def prob_model_nuhat(data):
n1,n2 = data[...,0],data[...,1]
return n2
def expected_data(pars):
return pars
# Note: this redefinition shadows the scalar version of get_non_centrality above;
# here a and b are parameter vectors (poi, nuis) and only their poi components are used.
def get_non_centrality(a,b):
    return (a[0] - b[0])**2/COV[0,0]
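# Illustrative usage sketch (not part of the original module): draw a few toy events at an
# arbitrary example parameter point and evaluate the model log-density and the conditional
# nuisance estimate. The parameter values below are made up for demonstration only.
if __name__ == '__main__':
    pars = np.array([1.0, 0.5])           # (poi, nuis)
    data = sample_prob_model(pars, 5)     # 5 draws from the correlated 2D Gaussian
    print(logpdf_prob_model(pars, data))  # per-event log-likelihood
    print(getnuhathat(pars[0], data))     # conditional nuisance estimate at fixed poi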
|
[
"numpy.linalg.inv",
"numpy.array",
"scipy.stats.multivariate_normal"
] |
[((467, 503), 'numpy.array', 'np.array', (['[[1.0, CORR], [CORR, 1.0]]'], {}), '([[1.0, CORR], [CORR, 1.0]])\n', (475, 503), True, 'import numpy as np\n'), ((510, 528), 'numpy.linalg.inv', 'np.linalg.inv', (['COV'], {}), '(COV)\n', (523, 528), True, 'import numpy as np\n'), ((565, 608), 'scipy.stats.multivariate_normal', 'sps.multivariate_normal', ([], {'mean': 'pars', 'cov': 'COV'}), '(mean=pars, cov=COV)\n', (588, 608), True, 'import scipy.stats as sps\n'), ((652, 670), 'numpy.linalg.inv', 'np.linalg.inv', (['COV'], {}), '(COV)\n', (665, 670), True, 'import numpy as np\n')]
|
import os
import sys
import typing
import numpy as np
import open3d as o3d
import data.io as dio
import skimage.io
from settings import process_arguments, Parameters
import image_processing
from warp_field.graph import DeformationGraphNumpy
from nnrt import compute_mesh_from_depth_and_flow as compute_mesh_from_depth_and_flow_c
from nnrt import compute_mesh_from_depth as compute_mesh_from_depth_c
from nnrt import get_vertex_erosion_mask as erode_mesh_c
from nnrt import sample_nodes as sample_nodes_c
from nnrt import compute_edges_shortest_path as compute_edges_shortest_path_c
from nnrt import node_and_edge_clean_up as node_and_edge_clean_up_c
from nnrt import compute_pixel_anchors_shortest_path as compute_pixel_anchors_shortest_path_c
from nnrt import compute_clusters as compute_clusters_c
from nnrt import update_pixel_anchors as update_pixel_anchors_c
def build_deformation_graph_from_depth_image(depth_image: np.ndarray, mask_image: np.ndarray, intrinsic_matrix: np.ndarray,
max_triangle_distance: float = 0.05, depth_scale_reciprocal: float = 1000.0,
erosion_num_iterations: int = 10, erosion_min_neighbors: int = 4,
remove_nodes_with_too_few_neighbors: bool = True, use_only_valid_vertices: bool = True,
sample_random_shuffle: bool = False, neighbor_count: int = 8,
enforce_neighbor_count: bool = True, scene_flow_path: typing.Union[str, None] = None,
enable_visual_debugging: bool = False) -> \
typing.Tuple[DeformationGraphNumpy, typing.Union[None, np.ndarray], np.ndarray, np.ndarray]:
# options
node_coverage = Parameters.graph.node_coverage.value
graph_debug = Parameters.graph.graph_debug.value
# extract intrinsic coefficients
fx = intrinsic_matrix[0, 0]
fy = intrinsic_matrix[1, 1]
cx = intrinsic_matrix[0, 2]
cy = intrinsic_matrix[1, 2]
#########################################################################
# Convert depth to mesh.
#########################################################################
width = depth_image.shape[1]
height = depth_image.shape[0]
    # Invalidate depth values outside the object mask.
# We only define graph over dynamic object (inside the object mask).
mask_image[mask_image > 0] = 1
depth_image = depth_image * mask_image
# Backproject depth images into 3D.
point_image = image_processing.backproject_depth(depth_image, fx, fy, cx, cy, depth_scale=depth_scale_reciprocal)
point_image = point_image.astype(np.float32)
# Convert depth image into mesh, using pixel-wise connectivity.
    # We also compute flow values, and invalidate any vertex with non-finite
    # flow values.
if scene_flow_path is None:
vertices, vertex_pixels, faces = \
compute_mesh_from_depth_c(
point_image, max_triangle_distance
)
else:
# Load scene flow image.
scene_flow_image = dio.load_flow(scene_flow_path)
scene_flow_image = np.moveaxis(scene_flow_image, 0, 2)
vertices, vertex_flows, vertex_pixels, faces = \
compute_mesh_from_depth_and_flow_c(
point_image, scene_flow_image,
max_triangle_distance
)
num_vertices = vertices.shape[0]
num_faces = faces.shape[0]
assert num_vertices > 0 and num_faces > 0
# Erode mesh, to not sample unstable nodes on the mesh boundary.
non_eroded_vertices = erode_mesh_c(
vertices, faces, erosion_num_iterations, erosion_min_neighbors
)
# Just for debugging.
if enable_visual_debugging:
mesh = o3d.geometry.TriangleMesh(o3d.utility.Vector3dVector(vertices), o3d.utility.Vector3iVector(faces))
mesh.compute_vertex_normals()
pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(vertices[non_eroded_vertices.reshape(-1), :]))
o3d.visualization.draw_geometries([mesh, pcd], mesh_show_back_face=True)
if scene_flow_path is None:
o3d.visualization.draw_geometries([mesh], mesh_show_back_face=True)
else:
mesh_transformed = o3d.geometry.TriangleMesh(o3d.utility.Vector3dVector(vertices + vertex_flows), o3d.utility.Vector3iVector(faces))
mesh_transformed.compute_vertex_normals()
mesh_transformed.paint_uniform_color([0.0, 1.0, 0.0])
o3d.visualization.draw_geometries([mesh, mesh_transformed], mesh_show_back_face=True)
#########################################################################
# Sample graph nodes.
#########################################################################
valid_vertices = non_eroded_vertices
# Sample graph nodes.
node_coords, node_indices = sample_nodes_c(
vertices, valid_vertices,
node_coverage, use_only_valid_vertices,
sample_random_shuffle
)
num_nodes = node_coords.shape[0]
node_coords = node_coords[:num_nodes, :]
node_indices = node_indices[:num_nodes, :]
if scene_flow_path is not None:
# Get node deformation.
node_deformations = vertex_flows[node_indices.squeeze()]
node_deformations = node_deformations.reshape(-1, 3)
assert np.isfinite(node_deformations).all(), "All deformations should be valid."
assert node_deformations.shape[0] == node_coords.shape[0] == node_indices.shape[0]
else:
node_deformations = None
if enable_visual_debugging:
pcd_nodes = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(node_coords))
o3d.visualization.draw_geometries([pcd_nodes], mesh_show_back_face=True)
#########################################################################
# Compute graph edges.
#########################################################################
# Compute edges between nodes.
visible_vertices = np.ones_like(valid_vertices)
graph_edges, graph_edges_weights, graph_edges_distances, node_to_vertex_distances = \
compute_edges_shortest_path_c(
vertices, visible_vertices, faces, node_indices,
neighbor_count, Parameters.graph.node_coverage.value, enforce_neighbor_count
)
# Remove nodes
valid_nodes_mask = np.ones((num_nodes, 1), dtype=bool)
if remove_nodes_with_too_few_neighbors:
# Mark nodes with not enough neighbors
node_and_edge_clean_up_c(graph_edges, valid_nodes_mask)
# Get the list of invalid nodes
node_id_black_list = np.where(valid_nodes_mask == False)[0].tolist()
else:
node_id_black_list = []
if graph_debug:
print("You're allowing nodes with not enough neighbors!")
if graph_debug:
print("Node filtering: initial num nodes", num_nodes, "| invalid nodes", len(node_id_black_list), "({})".format(node_id_black_list))
#########################################################################
# Compute pixel anchors.
#########################################################################
pixel_anchors, pixel_weights = compute_pixel_anchors_shortest_path_c(
node_to_vertex_distances, valid_nodes_mask,
vertices, vertex_pixels,
width, height, node_coverage
)
if graph_debug:
print("Valid pixels:", np.sum(np.all(pixel_anchors != -1, axis=2)))
if enable_visual_debugging:
pixel_anchors_image = np.sum(pixel_anchors, axis=2)
pixel_anchors_mask_ed = np.copy(pixel_anchors_image).astype(np.uint8)
pixel_anchors_mask_ed[...] = 1
pixel_anchors_mask_ed[pixel_anchors_image == -4] = 0
dio.save_grayscale_image("pixel_anchors_mask_ed.jpeg", pixel_anchors_mask_ed)
# Get only valid nodes and their corresponding info
node_coords = node_coords[valid_nodes_mask.squeeze()]
node_indices = node_indices[valid_nodes_mask.squeeze()]
# Apply node mask to the computed node deformations
if node_deformations is not None:
node_deformations = node_deformations[valid_nodes_mask.squeeze()]
graph_edges = graph_edges[valid_nodes_mask.squeeze()]
graph_edges_weights = graph_edges_weights[valid_nodes_mask.squeeze()]
graph_edges_distances = graph_edges_distances[valid_nodes_mask.squeeze()]
#########################################################################
# Graph checks.
#########################################################################
num_nodes = node_coords.shape[0]
# Check that we have enough nodes
if num_nodes == 0:
print("No nodes! Exiting ...")
exit()
# Update node ids only if we actually removed nodes
if len(node_id_black_list) > 0:
# 1. Mapping old indices to new indices
count = 0
node_id_mapping = {}
for i, is_node_valid in enumerate(valid_nodes_mask):
if not is_node_valid:
node_id_mapping[i] = -1
else:
node_id_mapping[i] = count
count += 1
# 2. Update graph_edges using the id mapping
for node_id, graph_edge in enumerate(graph_edges):
# compute mask of valid neighbors
valid_neighboring_nodes = np.invert(np.isin(graph_edge, node_id_black_list))
# make a copy of the current neighbors' ids
graph_edge_copy = np.copy(graph_edge)
graph_edge_weights_copy = np.copy(graph_edges_weights[node_id])
graph_edge_distances_copy = np.copy(graph_edges_distances[node_id])
# set the neighbors' ids to -1
graph_edges[node_id] = -np.ones_like(graph_edge_copy)
graph_edges_weights[node_id] = np.zeros_like(graph_edge_weights_copy)
graph_edges_distances[node_id] = np.zeros_like(graph_edge_distances_copy)
count_valid_neighbors = 0
for neighbor_idx, is_valid_neighbor in enumerate(valid_neighboring_nodes):
if is_valid_neighbor:
# current neighbor id
current_neighbor_id = graph_edge_copy[neighbor_idx]
# get mapped neighbor id
if current_neighbor_id == -1:
mapped_neighbor_id = -1
else:
mapped_neighbor_id = node_id_mapping[current_neighbor_id]
graph_edges[node_id, count_valid_neighbors] = mapped_neighbor_id
graph_edges_weights[node_id, count_valid_neighbors] = graph_edge_weights_copy[neighbor_idx]
graph_edges_distances[node_id, count_valid_neighbors] = graph_edge_distances_copy[neighbor_idx]
count_valid_neighbors += 1
# normalize edges' weights
sum_weights = np.sum(graph_edges_weights[node_id])
if sum_weights > 0:
graph_edges_weights[node_id] /= sum_weights
else:
raise ValueError(f"Weight sum for node anchors is {sum_weights}. Weights: {str(graph_edges_weights[node_id])}.")
# 3. Update pixel anchors using the id mapping (note that, at this point, pixel_anchors is already free of "bad" nodes, since
# 'compute_pixel_anchors_shortest_path_c' was given 'valid_nodes_mask')
update_pixel_anchors_c(node_id_mapping, pixel_anchors)
#########################################################################
# Compute clusters.
#########################################################################
cluster_sizes, graph_clusters = compute_clusters_c(graph_edges)
for i, cluster_size in enumerate(cluster_sizes):
if cluster_size <= 2:
raise ValueError(f"Cluster is too small: {cluster_size}, it only has nodes: {str(np.where(graph_clusters == i)[0])}")
return DeformationGraphNumpy(node_coords, graph_edges, graph_edges_weights, graph_clusters), node_deformations, pixel_anchors, pixel_weights
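# Minimal usage sketch (illustrative only, not part of the original script): the file names
# below are placeholders, and the compiled `nnrt` extension plus the project's `Parameters`
# settings must be available. `main()` at the bottom of this file shows the full pipeline.
#
#   depth_image = skimage.io.imread("depth.png")      # 16-bit depth map in millimeters (default depth_scale_reciprocal = 1000)
#   mask_image = skimage.io.imread("mask.png")        # non-zero inside the dynamic object
#   intrinsic_matrix = np.loadtxt("intrinsics.txt")   # pinhole intrinsics (fx, fy, cx, cy)
#   graph, node_deformations, pixel_anchors, pixel_weights = \
#       build_deformation_graph_from_depth_image(depth_image, mask_image, intrinsic_matrix)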
def generate_paths(seq_dir: str):
dst_graph_nodes_dir = os.path.join(seq_dir, "graph_nodes")
if not os.path.exists(dst_graph_nodes_dir): os.makedirs(dst_graph_nodes_dir)
dst_graph_edges_dir = os.path.join(seq_dir, "graph_edges")
if not os.path.exists(dst_graph_edges_dir):
os.makedirs(dst_graph_edges_dir)
dst_graph_edges_weights_dir = os.path.join(seq_dir, "graph_edges_weights")
if not os.path.exists(dst_graph_edges_weights_dir):
os.makedirs(dst_graph_edges_weights_dir)
dst_graph_clusters_dir = os.path.join(seq_dir, "graph_clusters")
if not os.path.exists(dst_graph_clusters_dir):
os.makedirs(dst_graph_clusters_dir)
dst_node_deformations_dir = os.path.join(seq_dir, "graph_node_deformations")
if not os.path.exists(dst_node_deformations_dir):
os.makedirs(dst_node_deformations_dir)
dst_pixel_anchors_dir = os.path.join(seq_dir, "pixel_anchors")
if not os.path.exists(dst_pixel_anchors_dir):
os.makedirs(dst_pixel_anchors_dir)
dst_pixel_weights_dir = os.path.join(seq_dir, "pixel_weights")
if not os.path.exists(dst_pixel_weights_dir):
os.makedirs(dst_pixel_weights_dir)
return dst_graph_nodes_dir, dst_graph_edges_dir, dst_graph_edges_weights_dir, dst_pixel_weights_dir, \
dst_graph_clusters_dir, dst_node_deformations_dir, dst_pixel_anchors_dir, dst_pixel_weights_dir
def save_graph_data(seq_dir: str, pair_name: str, node_coords: np.ndarray, graph_edges: np.ndarray,
graph_edges_weights: np.ndarray, graph_clusters: np.ndarray,
node_deformations: typing.Union[None, np.ndarray] = None,
pixel_anchors: typing.Union[None, np.ndarray] = None,
pixel_weights: typing.Union[None, np.ndarray] = None):
node_coverage = Parameters.graph.node_coverage.value
    # Unpack in the same order generate_paths() returns the directories
    # (the pixel_weights directory is returned twice).
    dst_graph_nodes_dir, dst_graph_edges_dir, dst_graph_edges_weights_dir, dst_pixel_weights_dir, dst_graph_clusters_dir, \
    dst_node_deformations_dir, dst_pixel_anchors_dir, dst_pixel_weights_dir = generate_paths(seq_dir)
output_graph_nodes_path = os.path.join(dst_graph_nodes_dir, pair_name + "_{}_{:.2f}.bin".format("geodesic", node_coverage))
output_graph_edges_path = os.path.join(dst_graph_edges_dir, pair_name + "_{}_{:.2f}.bin".format("geodesic", node_coverage))
output_graph_edges_weights_path = os.path.join(dst_graph_edges_weights_dir,
pair_name + "_{}_{:.2f}.bin".format("geodesic", node_coverage))
output_node_deformations_path = os.path.join(dst_node_deformations_dir, pair_name + "_{}_{:.2f}.bin".format("geodesic", node_coverage))
output_graph_clusters_path = os.path.join(dst_graph_clusters_dir, pair_name + "_{}_{:.2f}.bin".format("geodesic", node_coverage))
output_pixel_anchors_path = os.path.join(dst_pixel_anchors_dir, pair_name + "_{}_{:.2f}.bin".format("geodesic", node_coverage))
output_pixel_weights_path = os.path.join(dst_pixel_weights_dir, pair_name + "_{}_{:.2f}.bin".format("geodesic", node_coverage))
dio.save_graph_nodes(output_graph_nodes_path, node_coords)
dio.save_graph_edges(output_graph_edges_path, graph_edges)
dio.save_graph_edges_weights(output_graph_edges_weights_path, graph_edges_weights)
if node_deformations is not None:
dio.save_graph_node_deformations(output_node_deformations_path, node_deformations)
dio.save_graph_clusters(output_graph_clusters_path, graph_clusters)
if pixel_anchors is not None:
dio.save_int_image(output_pixel_anchors_path, pixel_anchors)
if pixel_weights is not None:
dio.save_float_image(output_pixel_weights_path, pixel_weights)
def check_graph_data_against_ground_truth(seq_dir: str, ground_truth_pair_name: str,
node_coords: np.ndarray, graph_edges: np.ndarray,
graph_edges_weights: np.ndarray, graph_clusters: np.ndarray,
node_deformations: typing.Union[None, np.ndarray] = None,
pixel_anchors: typing.Union[None, np.ndarray] = None,
pixel_weights: typing.Union[None, np.ndarray] = None):
node_coverage = Parameters.graph.node_coverage.value
    # Unpack in the same order generate_paths() returns the directories
    # (the pixel_weights directory is returned twice).
    dst_graph_nodes_dir, dst_graph_edges_dir, dst_graph_edges_weights_dir, dst_pixel_weights_dir, dst_graph_clusters_dir, \
    dst_node_deformations_dir, dst_pixel_anchors_dir, dst_pixel_weights_dir = generate_paths(seq_dir)
gt_output_graph_nodes_path = os.path.join(dst_graph_nodes_dir,
ground_truth_pair_name + "_{}_{:.2f}.bin".format("geodesic", node_coverage))
gt_output_graph_edges_path = os.path.join(dst_graph_edges_dir,
ground_truth_pair_name + "_{}_{:.2f}.bin".format("geodesic", node_coverage))
gt_output_graph_edges_weights_path = os.path.join(dst_graph_edges_weights_dir,
ground_truth_pair_name + "_{}_{:.2f}.bin".format("geodesic", node_coverage))
gt_output_node_deformations_path = os.path.join(dst_node_deformations_dir,
ground_truth_pair_name + "_{}_{:.2f}.bin".format("geodesic", node_coverage))
gt_output_graph_clusters_path = os.path.join(dst_graph_clusters_dir,
ground_truth_pair_name + "_{}_{:.2f}.bin".format("geodesic", node_coverage))
gt_output_pixel_anchors_path = os.path.join(dst_pixel_anchors_dir,
ground_truth_pair_name + "_{}_{:.2f}.bin".format("geodesic", node_coverage))
gt_output_pixel_weights_path = os.path.join(dst_pixel_weights_dir,
ground_truth_pair_name + "_{}_{:.2f}.bin".format("geodesic", node_coverage))
assert np.array_equal(node_coords, dio.load_graph_nodes(gt_output_graph_nodes_path))
assert np.array_equal(graph_edges, dio.load_graph_edges(gt_output_graph_edges_path))
assert np.array_equal(graph_edges_weights, dio.load_graph_edges_weights(gt_output_graph_edges_weights_path))
assert np.array_equal(graph_clusters, dio.load_graph_clusters(gt_output_graph_clusters_path))
if node_deformations is not None:
assert np.allclose(node_deformations, dio.load_graph_node_deformations(gt_output_node_deformations_path))
if pixel_anchors is not None:
assert np.array_equal(pixel_anchors, dio.load_int_image(gt_output_pixel_anchors_path))
if pixel_weights is not None:
assert np.array_equal(pixel_weights, dio.load_float_image(gt_output_pixel_weights_path))
PROGRAM_EXIT_SUCCESS = 0
def main():
#########################################################################
# Options
#########################################################################
VISUAL_DEBUGGING = False
# Scene flow data is assumed to be only known at training time. To compute graphs for test time,
# this should be set to false.
USE_SCENE_FLOW_DATA = False
# Depth-to-mesh conversion
DEPTH_SCALE_RECIPROCAL = 1000.0
MAX_TRIANGLE_DISTANCE = 0.05
# Erosion of vertices in the boundaries
EROSION_NUM_ITERATIONS = 4 # original authors' value: 10. 4 works better for the berlin sequence
EROSION_MIN_NEIGHBORS = 4
# Node sampling and edges computation
USE_ONLY_VALID_VERTICES = True
NEIGHBOR_COUNT = 8
ENFORCE_NEIGHBOR_COUNT = False
SAMPLE_RANDOM_SHUFFLE = False
# Pixel anchors
NEIGHBORHOOD_DEPTH = 2 # unused in code. Is this set as default parameter C++-side?
MIN_CLUSTER_SIZE = 3 # unused in code. Is this set as default parameter C++-side?
MIN_NUM_NEIGHBORS = 2 # unused in code. Is this set as default parameter C++-side?
# Node clean-up
REMOVE_NODES_WITH_TOO_FEW_NEIGHBORS = True
#########################################################################
# Paths.
#########################################################################
slice_name = "train"
sequence_number = 70
seq_dir = os.path.join(Parameters.path.dataset_base_directory.value, slice_name, f"seq{sequence_number:03d}")
start_frame_number = 0
end_frame_number = 100
segment_name = "adult0"
depth_image_path = os.path.join(seq_dir, "depth", f"{start_frame_number:06d}.png")
mask_image_path = os.path.join(seq_dir, "mask", f"{start_frame_number:06d}_{segment_name:s}.png")
scene_flow_path = \
os.path.join(seq_dir, "scene_flow",
f"{segment_name:s}_{start_frame_number:06d}_{end_frame_number:06d}.sflow") if USE_SCENE_FLOW_DATA else None
intrinsics_path = os.path.join(seq_dir, "intrinsics.txt")
prefix = "generated"
pair_name = f"{prefix:s}_{segment_name:s}_{start_frame_number:06d}_{end_frame_number:06d}"
SAVE_GRAPH_DATA = True
# enables/disables optional checks at end of script
CHECK_AGAINST_GROUND_TRUTH = False
# both prefixes can be set to the same value to simply check functions for the loading / saving of the graph
ground_truth_prefix = "5c8446e47ef76a0addc6d0d1"
ground_truth_pair_name = f"{ground_truth_prefix:s}_{segment_name:s}_{start_frame_number:06d}_{end_frame_number:06d}"
#########################################################################
# Load data.
#########################################################################
# Load intrinsics.
intrinsic_matrix = np.loadtxt(intrinsics_path)
# Load depth image.
depth_image = skimage.io.imread(depth_image_path)
# Load mask image.
mask_image = skimage.io.imread(mask_image_path)
graph, node_deformations, pixel_anchors, pixel_weights = \
build_deformation_graph_from_depth_image(depth_image, mask_image, intrinsic_matrix, MAX_TRIANGLE_DISTANCE, DEPTH_SCALE_RECIPROCAL,
EROSION_NUM_ITERATIONS, EROSION_MIN_NEIGHBORS, REMOVE_NODES_WITH_TOO_FEW_NEIGHBORS,
USE_ONLY_VALID_VERTICES, SAMPLE_RANDOM_SHUFFLE, NEIGHBOR_COUNT, ENFORCE_NEIGHBOR_COUNT,
scene_flow_path, VISUAL_DEBUGGING)
if SAVE_GRAPH_DATA:
save_graph_data(seq_dir, pair_name, graph.nodes, graph.edges, graph.edge_weights, graph.clusters,
node_deformations, pixel_anchors, pixel_weights)
if CHECK_AGAINST_GROUND_TRUTH:
check_graph_data_against_ground_truth(seq_dir, ground_truth_pair_name,
graph.nodes, graph.edges, graph.edge_weights, graph.clusters,
node_deformations, pixel_anchors, pixel_weights)
return PROGRAM_EXIT_SUCCESS
if __name__ == "__main__":
sys.exit(main())
|
[
"numpy.isin",
"numpy.moveaxis",
"data.io.save_float_image",
"nnrt.compute_mesh_from_depth",
"numpy.sum",
"data.io.save_int_image",
"numpy.ones",
"nnrt.compute_clusters",
"open3d.visualization.draw_geometries",
"nnrt.sample_nodes",
"nnrt.get_vertex_erosion_mask",
"os.path.join",
"nnrt.compute_pixel_anchors_shortest_path",
"numpy.zeros_like",
"nnrt.compute_edges_shortest_path",
"numpy.copy",
"os.path.exists",
"numpy.isfinite",
"data.io.save_graph_clusters",
"data.io.load_float_image",
"data.io.load_graph_edges",
"numpy.loadtxt",
"data.io.load_flow",
"data.io.load_graph_clusters",
"numpy.ones_like",
"data.io.save_grayscale_image",
"data.io.load_int_image",
"data.io.load_graph_nodes",
"nnrt.update_pixel_anchors",
"warp_field.graph.DeformationGraphNumpy",
"open3d.utility.Vector3dVector",
"data.io.load_graph_node_deformations",
"numpy.all",
"nnrt.compute_mesh_from_depth_and_flow",
"data.io.save_graph_nodes",
"data.io.load_graph_edges_weights",
"os.makedirs",
"image_processing.backproject_depth",
"open3d.utility.Vector3iVector",
"data.io.save_graph_node_deformations",
"numpy.where",
"data.io.save_graph_edges",
"nnrt.node_and_edge_clean_up",
"data.io.save_graph_edges_weights"
] |
[((2599, 2703), 'image_processing.backproject_depth', 'image_processing.backproject_depth', (['depth_image', 'fx', 'fy', 'cx', 'cy'], {'depth_scale': 'depth_scale_reciprocal'}), '(depth_image, fx, fy, cx, cy, depth_scale\n =depth_scale_reciprocal)\n', (2633, 2703), False, 'import image_processing\n'), ((3680, 3756), 'nnrt.get_vertex_erosion_mask', 'erode_mesh_c', (['vertices', 'faces', 'erosion_num_iterations', 'erosion_min_neighbors'], {}), '(vertices, faces, erosion_num_iterations, erosion_min_neighbors)\n', (3692, 3756), True, 'from nnrt import get_vertex_erosion_mask as erode_mesh_c\n'), ((4955, 5062), 'nnrt.sample_nodes', 'sample_nodes_c', (['vertices', 'valid_vertices', 'node_coverage', 'use_only_valid_vertices', 'sample_random_shuffle'], {}), '(vertices, valid_vertices, node_coverage,\n use_only_valid_vertices, sample_random_shuffle)\n', (4969, 5062), True, 'from nnrt import sample_nodes as sample_nodes_c\n'), ((6081, 6109), 'numpy.ones_like', 'np.ones_like', (['valid_vertices'], {}), '(valid_vertices)\n', (6093, 6109), True, 'import numpy as np\n'), ((6209, 6373), 'nnrt.compute_edges_shortest_path', 'compute_edges_shortest_path_c', (['vertices', 'visible_vertices', 'faces', 'node_indices', 'neighbor_count', 'Parameters.graph.node_coverage.value', 'enforce_neighbor_count'], {}), '(vertices, visible_vertices, faces,\n node_indices, neighbor_count, Parameters.graph.node_coverage.value,\n enforce_neighbor_count)\n', (6238, 6373), True, 'from nnrt import compute_edges_shortest_path as compute_edges_shortest_path_c\n'), ((6444, 6479), 'numpy.ones', 'np.ones', (['(num_nodes, 1)'], {'dtype': 'bool'}), '((num_nodes, 1), dtype=bool)\n', (6451, 6479), True, 'import numpy as np\n'), ((7273, 7413), 'nnrt.compute_pixel_anchors_shortest_path', 'compute_pixel_anchors_shortest_path_c', (['node_to_vertex_distances', 'valid_nodes_mask', 'vertices', 'vertex_pixels', 'width', 'height', 'node_coverage'], {}), '(node_to_vertex_distances,\n valid_nodes_mask, vertices, vertex_pixels, width, height, node_coverage)\n', (7310, 7413), True, 'from nnrt import compute_pixel_anchors_shortest_path as compute_pixel_anchors_shortest_path_c\n'), ((11714, 11745), 'nnrt.compute_clusters', 'compute_clusters_c', (['graph_edges'], {}), '(graph_edges)\n', (11732, 11745), True, 'from nnrt import compute_clusters as compute_clusters_c\n'), ((12168, 12204), 'os.path.join', 'os.path.join', (['seq_dir', '"""graph_nodes"""'], {}), "(seq_dir, 'graph_nodes')\n", (12180, 12204), False, 'import os\n'), ((12313, 12349), 'os.path.join', 'os.path.join', (['seq_dir', '"""graph_edges"""'], {}), "(seq_dir, 'graph_edges')\n", (12325, 12349), False, 'import os\n'), ((12474, 12518), 'os.path.join', 'os.path.join', (['seq_dir', '"""graph_edges_weights"""'], {}), "(seq_dir, 'graph_edges_weights')\n", (12486, 12518), False, 'import os\n'), ((12654, 12693), 'os.path.join', 'os.path.join', (['seq_dir', '"""graph_clusters"""'], {}), "(seq_dir, 'graph_clusters')\n", (12666, 12693), False, 'import os\n'), ((12822, 12870), 'os.path.join', 'os.path.join', (['seq_dir', '"""graph_node_deformations"""'], {}), "(seq_dir, 'graph_node_deformations')\n", (12834, 12870), False, 'import os\n'), ((13001, 13039), 'os.path.join', 'os.path.join', (['seq_dir', '"""pixel_anchors"""'], {}), "(seq_dir, 'pixel_anchors')\n", (13013, 13039), False, 'import os\n'), ((13162, 13200), 'os.path.join', 'os.path.join', (['seq_dir', '"""pixel_weights"""'], {}), "(seq_dir, 'pixel_weights')\n", (13174, 13200), False, 'import os\n'), ((15200, 15258), 'data.io.save_graph_nodes', 
'dio.save_graph_nodes', (['output_graph_nodes_path', 'node_coords'], {}), '(output_graph_nodes_path, node_coords)\n', (15220, 15258), True, 'import data.io as dio\n'), ((15263, 15321), 'data.io.save_graph_edges', 'dio.save_graph_edges', (['output_graph_edges_path', 'graph_edges'], {}), '(output_graph_edges_path, graph_edges)\n', (15283, 15321), True, 'import data.io as dio\n'), ((15326, 15412), 'data.io.save_graph_edges_weights', 'dio.save_graph_edges_weights', (['output_graph_edges_weights_path', 'graph_edges_weights'], {}), '(output_graph_edges_weights_path,\n graph_edges_weights)\n', (15354, 15412), True, 'import data.io as dio\n'), ((15542, 15609), 'data.io.save_graph_clusters', 'dio.save_graph_clusters', (['output_graph_clusters_path', 'graph_clusters'], {}), '(output_graph_clusters_path, graph_clusters)\n', (15565, 15609), True, 'import data.io as dio\n'), ((20320, 20423), 'os.path.join', 'os.path.join', (['Parameters.path.dataset_base_directory.value', 'slice_name', 'f"""seq{sequence_number:03d}"""'], {}), "(Parameters.path.dataset_base_directory.value, slice_name,\n f'seq{sequence_number:03d}')\n", (20332, 20423), False, 'import os\n'), ((20527, 20590), 'os.path.join', 'os.path.join', (['seq_dir', '"""depth"""', 'f"""{start_frame_number:06d}.png"""'], {}), "(seq_dir, 'depth', f'{start_frame_number:06d}.png')\n", (20539, 20590), False, 'import os\n'), ((20613, 20692), 'os.path.join', 'os.path.join', (['seq_dir', '"""mask"""', 'f"""{start_frame_number:06d}_{segment_name:s}.png"""'], {}), "(seq_dir, 'mask', f'{start_frame_number:06d}_{segment_name:s}.png')\n", (20625, 20692), False, 'import os\n'), ((20912, 20951), 'os.path.join', 'os.path.join', (['seq_dir', '"""intrinsics.txt"""'], {}), "(seq_dir, 'intrinsics.txt')\n", (20924, 20951), False, 'import os\n'), ((21704, 21731), 'numpy.loadtxt', 'np.loadtxt', (['intrinsics_path'], {}), '(intrinsics_path)\n', (21714, 21731), True, 'import numpy as np\n'), ((3007, 3068), 'nnrt.compute_mesh_from_depth', 'compute_mesh_from_depth_c', (['point_image', 'max_triangle_distance'], {}), '(point_image, max_triangle_distance)\n', (3032, 3068), True, 'from nnrt import compute_mesh_from_depth as compute_mesh_from_depth_c\n'), ((3169, 3199), 'data.io.load_flow', 'dio.load_flow', (['scene_flow_path'], {}), '(scene_flow_path)\n', (3182, 3199), True, 'import data.io as dio\n'), ((3227, 3262), 'numpy.moveaxis', 'np.moveaxis', (['scene_flow_image', '(0)', '(2)'], {}), '(scene_flow_image, 0, 2)\n', (3238, 3262), True, 'import numpy as np\n'), ((3333, 3425), 'nnrt.compute_mesh_from_depth_and_flow', 'compute_mesh_from_depth_and_flow_c', (['point_image', 'scene_flow_image', 'max_triangle_distance'], {}), '(point_image, scene_flow_image,\n max_triangle_distance)\n', (3367, 3425), True, 'from nnrt import compute_mesh_from_depth_and_flow as compute_mesh_from_depth_and_flow_c\n'), ((4104, 4176), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[mesh, pcd]'], {'mesh_show_back_face': '(True)'}), '([mesh, pcd], mesh_show_back_face=True)\n', (4137, 4176), True, 'import open3d as o3d\n'), ((5765, 5837), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[pcd_nodes]'], {'mesh_show_back_face': '(True)'}), '([pcd_nodes], mesh_show_back_face=True)\n', (5798, 5837), True, 'import open3d as o3d\n'), ((6580, 6635), 'nnrt.node_and_edge_clean_up', 'node_and_edge_clean_up_c', (['graph_edges', 'valid_nodes_mask'], {}), '(graph_edges, valid_nodes_mask)\n', (6604, 6635), True, 'from nnrt import node_and_edge_clean_up as 
node_and_edge_clean_up_c\n'), ((7600, 7629), 'numpy.sum', 'np.sum', (['pixel_anchors'], {'axis': '(2)'}), '(pixel_anchors, axis=2)\n', (7606, 7629), True, 'import numpy as np\n'), ((7816, 7893), 'data.io.save_grayscale_image', 'dio.save_grayscale_image', (['"""pixel_anchors_mask_ed.jpeg"""', 'pixel_anchors_mask_ed'], {}), "('pixel_anchors_mask_ed.jpeg', pixel_anchors_mask_ed)\n", (7840, 7893), True, 'import data.io as dio\n'), ((11443, 11497), 'nnrt.update_pixel_anchors', 'update_pixel_anchors_c', (['node_id_mapping', 'pixel_anchors'], {}), '(node_id_mapping, pixel_anchors)\n', (11465, 11497), True, 'from nnrt import update_pixel_anchors as update_pixel_anchors_c\n'), ((11972, 12060), 'warp_field.graph.DeformationGraphNumpy', 'DeformationGraphNumpy', (['node_coords', 'graph_edges', 'graph_edges_weights', 'graph_clusters'], {}), '(node_coords, graph_edges, graph_edges_weights,\n graph_clusters)\n', (11993, 12060), False, 'from warp_field.graph import DeformationGraphNumpy\n'), ((12216, 12251), 'os.path.exists', 'os.path.exists', (['dst_graph_nodes_dir'], {}), '(dst_graph_nodes_dir)\n', (12230, 12251), False, 'import os\n'), ((12253, 12285), 'os.makedirs', 'os.makedirs', (['dst_graph_nodes_dir'], {}), '(dst_graph_nodes_dir)\n', (12264, 12285), False, 'import os\n'), ((12361, 12396), 'os.path.exists', 'os.path.exists', (['dst_graph_edges_dir'], {}), '(dst_graph_edges_dir)\n', (12375, 12396), False, 'import os\n'), ((12406, 12438), 'os.makedirs', 'os.makedirs', (['dst_graph_edges_dir'], {}), '(dst_graph_edges_dir)\n', (12417, 12438), False, 'import os\n'), ((12530, 12573), 'os.path.exists', 'os.path.exists', (['dst_graph_edges_weights_dir'], {}), '(dst_graph_edges_weights_dir)\n', (12544, 12573), False, 'import os\n'), ((12583, 12623), 'os.makedirs', 'os.makedirs', (['dst_graph_edges_weights_dir'], {}), '(dst_graph_edges_weights_dir)\n', (12594, 12623), False, 'import os\n'), ((12705, 12743), 'os.path.exists', 'os.path.exists', (['dst_graph_clusters_dir'], {}), '(dst_graph_clusters_dir)\n', (12719, 12743), False, 'import os\n'), ((12753, 12788), 'os.makedirs', 'os.makedirs', (['dst_graph_clusters_dir'], {}), '(dst_graph_clusters_dir)\n', (12764, 12788), False, 'import os\n'), ((12882, 12923), 'os.path.exists', 'os.path.exists', (['dst_node_deformations_dir'], {}), '(dst_node_deformations_dir)\n', (12896, 12923), False, 'import os\n'), ((12933, 12971), 'os.makedirs', 'os.makedirs', (['dst_node_deformations_dir'], {}), '(dst_node_deformations_dir)\n', (12944, 12971), False, 'import os\n'), ((13051, 13088), 'os.path.exists', 'os.path.exists', (['dst_pixel_anchors_dir'], {}), '(dst_pixel_anchors_dir)\n', (13065, 13088), False, 'import os\n'), ((13098, 13132), 'os.makedirs', 'os.makedirs', (['dst_pixel_anchors_dir'], {}), '(dst_pixel_anchors_dir)\n', (13109, 13132), False, 'import os\n'), ((13212, 13249), 'os.path.exists', 'os.path.exists', (['dst_pixel_weights_dir'], {}), '(dst_pixel_weights_dir)\n', (13226, 13249), False, 'import os\n'), ((13259, 13293), 'os.makedirs', 'os.makedirs', (['dst_pixel_weights_dir'], {}), '(dst_pixel_weights_dir)\n', (13270, 13293), False, 'import os\n'), ((15455, 15541), 'data.io.save_graph_node_deformations', 'dio.save_graph_node_deformations', (['output_node_deformations_path', 'node_deformations'], {}), '(output_node_deformations_path,\n node_deformations)\n', (15487, 15541), True, 'import data.io as dio\n'), ((15653, 15713), 'data.io.save_int_image', 'dio.save_int_image', (['output_pixel_anchors_path', 'pixel_anchors'], {}), '(output_pixel_anchors_path, 
pixel_anchors)\n', (15671, 15713), True, 'import data.io as dio\n'), ((15756, 15818), 'data.io.save_float_image', 'dio.save_float_image', (['output_pixel_weights_path', 'pixel_weights'], {}), '(output_pixel_weights_path, pixel_weights)\n', (15776, 15818), True, 'import data.io as dio\n'), ((18112, 18160), 'data.io.load_graph_nodes', 'dio.load_graph_nodes', (['gt_output_graph_nodes_path'], {}), '(gt_output_graph_nodes_path)\n', (18132, 18160), True, 'import data.io as dio\n'), ((18201, 18249), 'data.io.load_graph_edges', 'dio.load_graph_edges', (['gt_output_graph_edges_path'], {}), '(gt_output_graph_edges_path)\n', (18221, 18249), True, 'import data.io as dio\n'), ((18298, 18362), 'data.io.load_graph_edges_weights', 'dio.load_graph_edges_weights', (['gt_output_graph_edges_weights_path'], {}), '(gt_output_graph_edges_weights_path)\n', (18326, 18362), True, 'import data.io as dio\n'), ((18406, 18460), 'data.io.load_graph_clusters', 'dio.load_graph_clusters', (['gt_output_graph_clusters_path'], {}), '(gt_output_graph_clusters_path)\n', (18429, 18460), True, 'import data.io as dio\n'), ((20725, 20839), 'os.path.join', 'os.path.join', (['seq_dir', '"""scene_flow"""', 'f"""{segment_name:s}_{start_frame_number:06d}_{end_frame_number:06d}.sflow"""'], {}), "(seq_dir, 'scene_flow',\n f'{segment_name:s}_{start_frame_number:06d}_{end_frame_number:06d}.sflow')\n", (20737, 20839), False, 'import os\n'), ((3871, 3907), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['vertices'], {}), '(vertices)\n', (3897, 3907), True, 'import open3d as o3d\n'), ((3909, 3942), 'open3d.utility.Vector3iVector', 'o3d.utility.Vector3iVector', (['faces'], {}), '(faces)\n', (3935, 3942), True, 'import open3d as o3d\n'), ((4226, 4293), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[mesh]'], {'mesh_show_back_face': '(True)'}), '([mesh], mesh_show_back_face=True)\n', (4259, 4293), True, 'import open3d as o3d\n'), ((4586, 4675), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[mesh, mesh_transformed]'], {'mesh_show_back_face': '(True)'}), '([mesh, mesh_transformed],\n mesh_show_back_face=True)\n', (4619, 4675), True, 'import open3d as o3d\n'), ((5716, 5755), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['node_coords'], {}), '(node_coords)\n', (5742, 5755), True, 'import open3d as o3d\n'), ((9525, 9544), 'numpy.copy', 'np.copy', (['graph_edge'], {}), '(graph_edge)\n', (9532, 9544), True, 'import numpy as np\n'), ((9583, 9620), 'numpy.copy', 'np.copy', (['graph_edges_weights[node_id]'], {}), '(graph_edges_weights[node_id])\n', (9590, 9620), True, 'import numpy as np\n'), ((9661, 9700), 'numpy.copy', 'np.copy', (['graph_edges_distances[node_id]'], {}), '(graph_edges_distances[node_id])\n', (9668, 9700), True, 'import numpy as np\n'), ((9854, 9892), 'numpy.zeros_like', 'np.zeros_like', (['graph_edge_weights_copy'], {}), '(graph_edge_weights_copy)\n', (9867, 9892), True, 'import numpy as np\n'), ((9938, 9978), 'numpy.zeros_like', 'np.zeros_like', (['graph_edge_distances_copy'], {}), '(graph_edge_distances_copy)\n', (9951, 9978), True, 'import numpy as np\n'), ((10944, 10980), 'numpy.sum', 'np.sum', (['graph_edges_weights[node_id]'], {}), '(graph_edges_weights[node_id])\n', (10950, 10980), True, 'import numpy as np\n'), ((18547, 18613), 'data.io.load_graph_node_deformations', 'dio.load_graph_node_deformations', (['gt_output_node_deformations_path'], {}), '(gt_output_node_deformations_path)\n', (18579, 18613), True, 'import data.io as 
dio\n'), ((18694, 18742), 'data.io.load_int_image', 'dio.load_int_image', (['gt_output_pixel_anchors_path'], {}), '(gt_output_pixel_anchors_path)\n', (18712, 18742), True, 'import data.io as dio\n'), ((18823, 18873), 'data.io.load_float_image', 'dio.load_float_image', (['gt_output_pixel_weights_path'], {}), '(gt_output_pixel_weights_path)\n', (18843, 18873), True, 'import data.io as dio\n'), ((4365, 4416), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['(vertices + vertex_flows)'], {}), '(vertices + vertex_flows)\n', (4391, 4416), True, 'import open3d as o3d\n'), ((4418, 4451), 'open3d.utility.Vector3iVector', 'o3d.utility.Vector3iVector', (['faces'], {}), '(faces)\n', (4444, 4451), True, 'import open3d as o3d\n'), ((5431, 5461), 'numpy.isfinite', 'np.isfinite', (['node_deformations'], {}), '(node_deformations)\n', (5442, 5461), True, 'import numpy as np\n'), ((7499, 7534), 'numpy.all', 'np.all', (['(pixel_anchors != -1)'], {'axis': '(2)'}), '(pixel_anchors != -1, axis=2)\n', (7505, 7534), True, 'import numpy as np\n'), ((7662, 7690), 'numpy.copy', 'np.copy', (['pixel_anchors_image'], {}), '(pixel_anchors_image)\n', (7669, 7690), True, 'import numpy as np\n'), ((9397, 9436), 'numpy.isin', 'np.isin', (['graph_edge', 'node_id_black_list'], {}), '(graph_edge, node_id_black_list)\n', (9404, 9436), True, 'import numpy as np\n'), ((9781, 9810), 'numpy.ones_like', 'np.ones_like', (['graph_edge_copy'], {}), '(graph_edge_copy)\n', (9793, 9810), True, 'import numpy as np\n'), ((6706, 6741), 'numpy.where', 'np.where', (['(valid_nodes_mask == False)'], {}), '(valid_nodes_mask == False)\n', (6714, 6741), True, 'import numpy as np\n'), ((11923, 11952), 'numpy.where', 'np.where', (['(graph_clusters == i)'], {}), '(graph_clusters == i)\n', (11931, 11952), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from typing import Tuple
from domain import Domain3D
from cloudforms import CylinderCloud
import numpy as np
import time
from scipy.special import gamma
class Plank(Domain3D):
def __init__(self, kilometers: Tuple[float, float, float] = (50., 50., 10.),
nodes: Tuple[int, int, int] = (300, 300, 500),
clouds_bottom: float = 1.5):
"""
        Plank model of broken cumulus cloudiness in 3D
        :param kilometers: domain size along the Ox, Oy and Oz axes, in kilometers
        :param nodes: number of grid nodes along the corresponding axes
        :param clouds_bottom: height of the cloud base, km
"""
super().__init__(kilometers, nodes)
self.clouds_bottom = clouds_bottom
@classmethod
def from_domain3D(cls, domain: 'Domain3D', clouds_bottom: float = 1.5):
return cls((domain.PX, domain.PY, domain.PZ),
(domain.Nx, domain.Ny, domain.Nz), clouds_bottom=clouds_bottom)
def cloudiness(self, Dm: float = 3., K: float = 100,
alpha: float = 1., beta: float = 0.5, eta: float = 1., seed: int = 42,
timeout: float = 30.,
verbose=True) -> list:
"""
        :param Dm: maximum cloud diameter, km
        :param K: normalization coefficient, dimensionless
        :param alpha: dimensionless coefficient
        :param beta: dimensionless coefficient
        :param eta: dimensionless coefficient
        :param seed: random generator state (determines the cloud positions in 3D)
        :param timeout: maximum waiting time for placing a cloud, s
        :param verbose: print additional information
        :return: list of generated CylinderCloud objects
"""
np.random.seed(seed)
cloudiness = []
r = np.sqrt(self.i(Dm) * self.i(Dm) + self.j(Dm) * self.j(Dm))
steps = np.arange(Dm, 0, -Dm / r)
N = len(steps)
for i, D in enumerate(steps):
if verbose:
print('\r{:.2f}%'.format((i + 1) / N * 100), end='', flush=True)
n = int(np.round(K * np.exp(-alpha * D)))
if n < 1:
n = 1
for k in range(n):
start_time = time.time()
while True:
x, y = np.random.uniform(0., self.PX), np.random.uniform(0., self.PY)
z = self.clouds_bottom
rx = ry = D / 2
H = eta * D * np.power(D / Dm, beta)
cloud = CylinderCloud((x, y, z), rx, ry, H)
if not cloud.belongsQ((self.PX, self.PY, self.PZ)):
continue
intersections = False
for c in cloudiness:
if not cloud.disjointQ(c):
intersections = True
break
if not intersections:
cloudiness.append(cloud)
break
if time.time() - start_time > timeout:
raise TimeoutError('превышено допустимое время ожидания')
if verbose:
print()
return cloudiness
def h_map(self, Dm: float = 3., K: float = 100,
alpha: float = 1., beta: float = 0.5, eta: float = 1., seed: int = 42,
timeout: float = 30.,
verbose=True) -> np.ndarray:
"""
        :param Dm: maximum cloud diameter, km
        :param K: normalization coefficient, dimensionless
        :param alpha: dimensionless coefficient
        :param beta: dimensionless coefficient
        :param eta: dimensionless coefficient
        :param seed: random generator state (determines the cloud positions in 3D)
        :param timeout: maximum waiting time for placing a cloud, s
        :param verbose: print additional information
        :return: 2D distribution of cloud depth (vertical extent) projected onto the Oxy plane
"""
cloudiness = self.cloudiness(Dm, K, alpha, beta, eta, seed, timeout, verbose)
hmap = np.zeros((self.Nx, self.Ny), dtype=float)
for cloud in cloudiness:
for x in np.arange(cloud.x - cloud.rx, cloud.x + cloud.rx, self.dx):
for y in np.arange(cloud.y - cloud.ry, cloud.y + cloud.ry, self.dy):
if cloud.includesQ((x, y, self.clouds_bottom)):
hmap[self.i(x), self.j(y)] = cloud.height
return hmap # 2D array
def lw_dist(self, height_map: np.ndarray, const_w=False,
mu0: float = 3.27, psi0: float = 0.67) -> np.ndarray:
"""
        Compute the 3D liquid water content field from a given 2D distribution of cloud depth
        :param height_map: 2D distribution of cloud depth projected onto the Oxy plane
        :param const_w: if True, the water content inside a cloud does not change with height; if False, the Mazin model is used
        :param mu0: dimensionless parameter
        :param psi0: dimensionless parameter
        :return: 3D liquid water content field
"""
min_level = self.k(self.clouds_bottom)
max_level = self.k(self.clouds_bottom + np.max(height_map))
W = 0.132574 * np.power(height_map, 2.30215)
w = np.zeros((self.Nx, self.Ny, self.Nz), dtype=float)
cond = np.logical_not(np.isclose(height_map, 0.))
if const_w:
for k in range(min_level, max_level):
xi = (self.z(k) - self.clouds_bottom) / height_map[cond]
                # inside the cloud (0 <= xi <= 1) the profile weight is 1, outside it is 0
                inside = (0 <= xi) & (xi <= 1)
                xi[~inside] = 0.
                xi[inside] = 1.
w[cond, k] = xi * W[cond] / height_map[cond]
else:
for k in range(min_level, max_level):
xi = (self.z(k) - self.clouds_bottom) / height_map[cond]
xi[(xi < 0) | (xi > 1)] = 0.
w[cond, k] = \
np.power(xi, mu0) * np.power(1 - xi, psi0) * W[cond] / height_map[cond] * \
gamma(2 + mu0 + psi0) / (gamma(1 + mu0) * gamma(1 + psi0))
return w # 3D array
def get_lw_dist(self, Dm: float = 3., K: float = 100,
alpha: float = 1., beta: float = 0.5, eta: float = 1., seed: int = 42,
verbose=True, const_w=False,
mu0: float = 3.27, psi0: float = 0.67) -> np.ndarray:
"""
        Convenience wrapper: performs lw_dist(h_map(...), ...)
"""
        return self.lw_dist(self.h_map(Dm, K, alpha, beta, eta, seed, verbose=verbose), const_w, mu0, psi0)  # 3D array
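# Illustrative usage sketch (not part of the original module): build a small domain, generate
# a 2D cloud-depth map and the corresponding 3D liquid water field. The grid sizes and model
# parameters below are arbitrary examples, not recommended values.
if __name__ == '__main__':
    plank = Plank(kilometers=(50., 50., 10.), nodes=(100, 100, 100), clouds_bottom=1.5)
    hmap = plank.h_map(Dm=3., K=100, seed=42, verbose=False)  # 2D cloud depth map, km
    w = plank.lw_dist(hmap, const_w=False)                     # 3D liquid water content field
    print(hmap.max(), w.max())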
|
[
"numpy.random.uniform",
"numpy.random.seed",
"numpy.power",
"numpy.zeros",
"time.time",
"numpy.isclose",
"numpy.max",
"numpy.arange",
"cloudforms.CylinderCloud",
"numpy.exp",
"scipy.special.gamma"
] |
[((1729, 1749), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1743, 1749), True, 'import numpy as np\n'), ((1861, 1886), 'numpy.arange', 'np.arange', (['Dm', '(0)', '(-Dm / r)'], {}), '(Dm, 0, -Dm / r)\n', (1870, 1886), True, 'import numpy as np\n'), ((4020, 4061), 'numpy.zeros', 'np.zeros', (['(self.Nx, self.Ny)'], {'dtype': 'float'}), '((self.Nx, self.Ny), dtype=float)\n', (4028, 4061), True, 'import numpy as np\n'), ((5168, 5218), 'numpy.zeros', 'np.zeros', (['(self.Nx, self.Ny, self.Nz)'], {'dtype': 'float'}), '((self.Nx, self.Ny, self.Nz), dtype=float)\n', (5176, 5218), True, 'import numpy as np\n'), ((4116, 4174), 'numpy.arange', 'np.arange', (['(cloud.x - cloud.rx)', '(cloud.x + cloud.rx)', 'self.dx'], {}), '(cloud.x - cloud.rx, cloud.x + cloud.rx, self.dx)\n', (4125, 4174), True, 'import numpy as np\n'), ((5126, 5155), 'numpy.power', 'np.power', (['height_map', '(2.30215)'], {}), '(height_map, 2.30215)\n', (5134, 5155), True, 'import numpy as np\n'), ((5249, 5276), 'numpy.isclose', 'np.isclose', (['height_map', '(0.0)'], {}), '(height_map, 0.0)\n', (5259, 5276), True, 'import numpy as np\n'), ((2211, 2222), 'time.time', 'time.time', ([], {}), '()\n', (2220, 2222), False, 'import time\n'), ((4201, 4259), 'numpy.arange', 'np.arange', (['(cloud.y - cloud.ry)', '(cloud.y + cloud.ry)', 'self.dy'], {}), '(cloud.y - cloud.ry, cloud.y + cloud.ry, self.dy)\n', (4210, 4259), True, 'import numpy as np\n'), ((5083, 5101), 'numpy.max', 'np.max', (['height_map'], {}), '(height_map)\n', (5089, 5101), True, 'import numpy as np\n'), ((2505, 2540), 'cloudforms.CylinderCloud', 'CylinderCloud', (['(x, y, z)', 'rx', 'ry', 'H'], {}), '((x, y, z), rx, ry, H)\n', (2518, 2540), False, 'from cloudforms import CylinderCloud\n'), ((2086, 2104), 'numpy.exp', 'np.exp', (['(-alpha * D)'], {}), '(-alpha * D)\n', (2092, 2104), True, 'import numpy as np\n'), ((2278, 2309), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', 'self.PX'], {}), '(0.0, self.PX)\n', (2295, 2309), True, 'import numpy as np\n'), ((2310, 2341), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', 'self.PY'], {}), '(0.0, self.PY)\n', (2327, 2341), True, 'import numpy as np\n'), ((2454, 2476), 'numpy.power', 'np.power', (['(D / Dm)', 'beta'], {}), '(D / Dm, beta)\n', (2462, 2476), True, 'import numpy as np\n'), ((5903, 5924), 'scipy.special.gamma', 'gamma', (['(2 + mu0 + psi0)'], {}), '(2 + mu0 + psi0)\n', (5908, 5924), False, 'from scipy.special import gamma\n'), ((5928, 5942), 'scipy.special.gamma', 'gamma', (['(1 + mu0)'], {}), '(1 + mu0)\n', (5933, 5942), False, 'from scipy.special import gamma\n'), ((5945, 5960), 'scipy.special.gamma', 'gamma', (['(1 + psi0)'], {}), '(1 + psi0)\n', (5950, 5960), False, 'from scipy.special import gamma\n'), ((3007, 3018), 'time.time', 'time.time', ([], {}), '()\n', (3016, 3018), False, 'import time\n'), ((5807, 5824), 'numpy.power', 'np.power', (['xi', 'mu0'], {}), '(xi, mu0)\n', (5815, 5824), True, 'import numpy as np\n'), ((5827, 5849), 'numpy.power', 'np.power', (['(1 - xi)', 'psi0'], {}), '(1 - xi, psi0)\n', (5835, 5849), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import numpy as np
def multiclass_get_points_and_labels(input, class_labels):
size = len(class_labels)
arrays = []
for i in range(size):
arrays.append(input[class_labels[i]])
total_array = np.concatenate(arrays)
labels = []
for i in range(size):
labels.append(np.ones(len(arrays[i]))*i)
test_label = np.concatenate(labels)
label_to_class = {i: class_labels[i] for i in range(size)}
return total_array, test_label, label_to_class
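# Illustrative usage sketch (not part of the original module): `input` is expected to map each
# class label to an array of sample points; integer labels 0..N-1 are assigned in the order the
# class labels are listed.
if __name__ == '__main__':
    datapoints = {'A': np.ones((3, 2)), 'B': np.zeros((2, 2))}
    points, labels, label_to_class = multiclass_get_points_and_labels(datapoints, ['A', 'B'])
    print(points.shape, labels, label_to_class)  # (5, 2) [0. 0. 0. 1. 1.] {0: 'A', 1: 'B'}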
|
[
"numpy.concatenate"
] |
[((889, 911), 'numpy.concatenate', 'np.concatenate', (['arrays'], {}), '(arrays)\n', (903, 911), True, 'import numpy as np\n'), ((1021, 1043), 'numpy.concatenate', 'np.concatenate', (['labels'], {}), '(labels)\n', (1035, 1043), True, 'import numpy as np\n')]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Python implementation of the Oslo Ricepile model.
"""
import numpy as np
import pickle
import os
import binascii
class Oslo:
""" Docstring """
def __init__(self, L,mode = 'n'):
if type(L) != int:
raise ValueError("Grid size, L, must be integer type.")
self.__L = L
self.__t = 0
self.__z = np.zeros(L,dtype='int')
self.__z_c = np.random.randint(1,3,L)
self.s = []
self.d = []
self.cor = []
self.r = []
self.point = [[],[]]
if mode == 'r':
self.__z += 2
Oslo.run(self,1)
self.__t = 0
self.s = []
self.d = []
self.cor = []
self.r = []
self.point = [[],[]]
self.d_offset = Oslo.height(self)
else:
self.d_offset = 0
def height(self):
j = 0
h = 0
for i in self.__z[::-1]:
j += i
h += j
return h
def save(self, foldername = None):
files = (self.__L,self.__t,self.point)
if foldername == None:
            # b2a_hex returns bytes in Python 3, so decode before concatenating
            folder = str('filedump_L' + str(self.__L) + '_t' + str(self.__t) +
                        '_' + binascii.b2a_hex(os.urandom(6)).decode())
else:
folder = foldername
os.makedirs(folder)
with open(folder + '/meta.pickle', 'wb') as f:
pickle.dump(files, f)
np.save(folder + '/z',self.__z)
np.save(folder + '/z_c',self.__z_c)
np.save(folder + '/s',self.s)
np.save(folder + '/d',self.d)
def custom_z(self,X):
"""Input custom pile configuration.
Parameters:
X: numpy.ndarray, shape(L)
Numpy array with entries in range {0,1,2,3}.
"""
X = X.astype('int')
if np.shape(X) != (self.__L,):
raise ValueError('Input array is not of shape (L,)')
if np.all(np.in1d(X,[0,1,2,3])):
self.__z = X
else:
raise ValueError('Custom array contains values other\
than [0,1,2,3]')
def newslope(self):
if np.random.random() > 0.5:
return 1
else:
return 2
def micro_run(self):
"""Docstring"""
tm = 0
sm = 0
dm = 0
cor = False
r = 0
# print('break')
while len(self.point[tm%2]) != 0:
# print(self.point[tm%2],self.point,tm)
for i in self.point[tm%2]:
if i == self.__L - 1:
cor = True
if i > r:
r = i
if self.__z[i] > self.__z_c[i]:
self.__z[i] -= 2
self.__z_c[i] = self.newslope()
sm += 1
if i == 0:
self.point[(tm+1)%2].append(i+1)
self.__z[i+1] += 1
elif i == self.__L - 1:
self.point[(tm+1)%2].append(i-1)
self.point[(tm+1)%2].append(i)
self.__z[i-1] += 1
self.__z[i] += 1
dm += 1
else:
self.point[(tm+1)%2].append(i+1)
self.__z[i+1] += 1
self.point[(tm+1)%2].append(i-1)
self.__z[i-1] += 1
self.point[tm%2] = []
tm += 1
self.cor.append(cor)
self.r.append(r)
self.s.append(sm)
self.d.append(dm)
def run(self,N):
"""Docstring"""
index = np.arange(self.__L)
self.__z[0] += 1
z_t = ((self.__z - self.__z_c) > 0)
self.__z[0] -= 1
self.point[0] = list(index[z_t])
checks = 0
for j in range(N):
if 0 not in self.point[0]:
self.point[0].append(0)
self.__z[0] += 1
self.__t += 1
self.micro_run()
# print(self.__z)
def info(self,single = False):
"""Returns key information about current state of the ricepile.
Returns tuple (L,t,z,z_c) if single == False.
If single == i for any i in {0,1,2,3}, returns (L,t,z,z_c)[i].
L: int
System size of ricepile. Equal to number of grid spaces.
t: int
Macroscopic time of system. Increases by 1 each time a grain
is added to the pile.
z: numpy.ndarray, shape(L)
Current slope at each grid location. Grains propagate from
right to left. Grains are dissipated at right boundary.
z_c: numpy.ndarray, shape(L)
Current critical slope at each grid location. Possible
values at each site {1,2}.
"""
        if single not in [False, 0, 1, 2, 3]:
            raise ValueError("single must take a value in [False, 0, 1, 2, 3]")
        data = (self.__L, self.__t, self.__z, self.__z_c)
        if single is False:
return data
else:
return data[single]
# a = Oslo(32)
# a.run(100000)
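# Illustrative usage sketch (not part of the original module): drive a small pile and report
# basic avalanche statistics. The sizes below are arbitrary examples.
if __name__ == '__main__':
    pile = Oslo(16, mode='r')  # 'r' fills and relaxes the pile before measuring
    pile.run(1000)             # add 1000 grains, relaxing after each one
    print('mean avalanche size:', sum(pile.s) / len(pile.s))
    print('mean drop size:', sum(pile.d) / len(pile.d))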
|
[
"pickle.dump",
"numpy.save",
"os.makedirs",
"numpy.zeros",
"numpy.shape",
"numpy.random.randint",
"numpy.arange",
"numpy.random.random",
"os.urandom",
"numpy.in1d"
] |
[((395, 419), 'numpy.zeros', 'np.zeros', (['L'], {'dtype': '"""int"""'}), "(L, dtype='int')\n", (403, 419), True, 'import numpy as np\n'), ((440, 466), 'numpy.random.randint', 'np.random.randint', (['(1)', '(3)', 'L'], {}), '(1, 3, L)\n', (457, 466), True, 'import numpy as np\n'), ((1371, 1390), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (1382, 1390), False, 'import os\n'), ((1488, 1520), 'numpy.save', 'np.save', (["(folder + '/z')", 'self.__z'], {}), "(folder + '/z', self.__z)\n", (1495, 1520), True, 'import numpy as np\n'), ((1528, 1564), 'numpy.save', 'np.save', (["(folder + '/z_c')", 'self.__z_c'], {}), "(folder + '/z_c', self.__z_c)\n", (1535, 1564), True, 'import numpy as np\n'), ((1572, 1602), 'numpy.save', 'np.save', (["(folder + '/s')", 'self.s'], {}), "(folder + '/s', self.s)\n", (1579, 1602), True, 'import numpy as np\n'), ((1610, 1640), 'numpy.save', 'np.save', (["(folder + '/d')", 'self.d'], {}), "(folder + '/d', self.d)\n", (1617, 1640), True, 'import numpy as np\n'), ((3737, 3756), 'numpy.arange', 'np.arange', (['self.__L'], {}), '(self.__L)\n', (3746, 3756), True, 'import numpy as np\n'), ((1458, 1479), 'pickle.dump', 'pickle.dump', (['files', 'f'], {}), '(files, f)\n', (1469, 1479), False, 'import pickle\n'), ((1905, 1916), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (1913, 1916), True, 'import numpy as np\n'), ((2016, 2040), 'numpy.in1d', 'np.in1d', (['X', '[0, 1, 2, 3]'], {}), '(X, [0, 1, 2, 3])\n', (2023, 2040), True, 'import numpy as np\n'), ((2231, 2249), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2247, 2249), True, 'import numpy as np\n'), ((1301, 1314), 'os.urandom', 'os.urandom', (['(6)'], {}), '(6)\n', (1311, 1314), False, 'import os\n')]
|
"""
Codes for gas, oil, and water PVT correlations
@author: <NAME>
@email: <EMAIL>
"""
"""
GAS
"""
def gas_pseudoprops(temp, pressure, sg, x_h2s, x_co2):
"""
Calculate Gas Pseudo-critical and Pseudo-reduced Pressure and Temperature
* Pseudo-critical properties
For range: 0.57 < sg < 1.68
(Sutton, 1985)
* Pseudo-reduced properties
For range: x_h2s (mol%) < 0.738; x_co2 (mol%) < 0.544; 154 < p (psia) < 7026; 40 < temp (°F) < 300 (error 0.97%)
(Wichert and Aziz, 1972)
"""
import numpy as np
temp = temp + 459.67 # convert to Rankine
# calculate pseudocritical properties (Sutton, valid for 0.57<sg<1.68)
P_pc = 756.8 - (131.07 * sg) - (3.6 * sg**2)
T_pc = 169.2 + (349.50 * sg) - (74 * sg**2) # in Rankine
# calculate adjustment to pseudocritical properties for sour gas (Wiechert-Aziz, valid for x_co2<0.544 and x_h2s<0.738)
e = (120 * (((x_h2s + x_co2)**0.9) - ((x_h2s + x_co2)**1.6))) + (15 * (x_h2s**0.5 - x_h2s**4))
T_pc = T_pc - e # corrected T_pc
P_pc = (P_pc * T_pc) / (T_pc - x_h2s * e * (1-x_h2s))
# calculate pseudoreduced properties
P_pr = pressure / P_pc
T_pr = temp / T_pc
return(P_pc, T_pc, P_pr, T_pr)
def gas_zfactor(T_pr, P_pr):
"""
Calculate Gas Compressibility Factor
For range: 0.2 < P_pr < 30; 1 < T_pr < 3 (error 0.486%)
(Dranchuk and Aboukassem, 1975)
"""
# T_pr : calculated pseudoreduced temperature
# P_pr : calculated pseudoreduced pressure
from scipy.optimize import fsolve # non-linear solver
import numpy as np
a1 = 0.3265; a2 = -1.0700; a3 = -0.5339; a4 = 0.01569; a5 = -0.05165; a6 = 0.5475
a7 = -0.7361; a8 = 0.1844; a9 = 0.1056; a10 = 0.6134; a11 = 0.7210
def f(y):
rho_pr, z = y
c1 = a1 + (a2/T_pr) + (a3/(T_pr**3))+ (a4/(T_pr**4))+ (a5/(T_pr**5))
c2 = a6 + (a7/T_pr) + (a8/(T_pr**2))
c3 = a9*((a7/T_pr) + (a8/(T_pr**2)))
c4 = (a10)*(1+(a11*(rho_pr**2)))*((rho_pr**2)/(T_pr**3))*(np.exp(-a11*(rho_pr**2)))
f1 = z + (c3*(rho_pr**5)) - (c2*(rho_pr**2)) - (c1*(rho_pr**1)) - c4 - 1
f2 = rho_pr - ((0.27 * P_pr) / (z * T_pr))
return[f1, f2]
solve = fsolve(f, [1, 1]) # initial guess
return(solve[0], solve[1]) # result is density, z-factor
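# Illustrative worked example (not part of the original module): pseudo-reduced properties and
# z-factor of a sweet gas at arbitrary example conditions (2,000 psia, 186 deg F, sg = 0.7).
if __name__ == '__main__':
  P_pc, T_pc, P_pr, T_pr = gas_pseudoprops(temp=186, pressure=2000, sg=0.7, x_h2s=0, x_co2=0)
  rho_pr, z = gas_zfactor(T_pr, P_pr)
  print('P_pr = %.3f, T_pr = %.3f, z = %.3f' % (P_pr, T_pr, z))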
def gas_density(temp, pressure, sg, z):
temp = temp + 459.67
R = 10.732 # gas constant in (ft3*psi)/(lb-mol*R)
rhogas = (28.97 * sg * pressure) / (z * R * temp)
return rhogas
def gas_fvf(z, temp, pressure):
"""
Calculate Gas FVF
  For range: this is a definition rather than a correlation, so it is valid over any range
"""
temp = temp + 459.67
Bg = 0.0282793 * z * temp / pressure
return(Bg)
def gas_fvf2(unit='unit1', z=0.8, temp=186, pressure=2000):
"""
Gas FVF calculated in other units
unit: choice of units (unit1: RB/scf, unit2: res m3/std m3)
for unit1, inputs temp in Rankine (Fahrenheit + 460), pressure in psia or psig
for unit2, inputs temp in Kelvin, pressure in psia or psig
"""
if unit == 'unit1':
return(0.00503676 * z * temp / pressure)
if unit == 'unit2':
return(0.350958 * z * temp / pressure)
def gas_mu(temp, rhogas, sg):
"""
Calculate Gas Viscosity
For gas with CO2 and N2 composition
For range: 100 < temp (°F) < 340; 0.9 < x_CO2 (mol%) < 3.2; x_N2 (mol%) < 4.8 (std 2.7-9.0%)
  (Lee et al, 1966)
"""
import numpy as np
temp = temp + 459.67
Mg = 28.97 * sg
rhogas_lee = rhogas * 0.0160185 # lbm/ft3 converted to gas density unit of Lee et al (g/cm3)
  K = ((0.00094 + 2E-06 * Mg) * (temp**1.5)) / (209 + 19*Mg + temp)  # Lee et al: K = 1E-04 * (9.4 + 0.02*Mg) * T**1.5 / (209 + 19*Mg + T)
x = 3.5 + (986 / temp) + (0.01 * Mg)
y = 2.4 - 0.2*x
viscogas = K * np.exp(x * (rhogas_lee**y))
return viscogas
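# Hedged usage sketch (editor's addition): chain z-factor, density, FVF, and viscosity
# for the same assumed gas (sg = 0.7, 3000 psia, 180 deg F).
def _example_gas_properties():
  _, _, P_pr, T_pr = gas_pseudoprops(temp=180, pressure=3000, sg=0.7, x_h2s=0, x_co2=0)
  _, z = gas_zfactor(T_pr, P_pr)
  rhogas = gas_density(temp=180, pressure=3000, sg=0.7, z=z)
  Bg = gas_fvf(z, temp=180, pressure=3000)
  viscogas = gas_mu(temp=180, rhogas=rhogas, sg=0.7)
  print('rho = %.2f lbm/ft3, Bg = %.5f ft3/scf, mu = %.5f cp' % (rhogas, Bg, viscogas))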
def gas_compressibility(T_pr, P_pr, rho_pr, z, P_pc):
"""
Calculate Gas Isothermal Compressibility
For range: unspecified
(Trube, 1957; Mattar, 1975)
"""
import numpy as np
a1 = 0.3265; a2 = -1.0700; a3 = -0.5339; a4 = 0.01569; a5 = -0.05165; a6 = 0.5475
a7 = -0.7361; a8 = 0.1844; a9 = 0.1056; a10 = 0.6134; a11 = 0.7210
do = ((a1 + (a2/T_pr) + (a3/T_pr**3) +(a4/T_pr**4) + (a5/T_pr**5)) * rho_pr) + \
(2 * ((a6 + (a7/T_pr) + (a8/T_pr**2))) * rho_pr**2) - \
(5 * a9 * (((a7/T_pr) + (a8/T_pr**2))) * rho_pr**4) + (1 + (a11 * rho_pr**2) - (a11 * rho_pr**2)**2) \
* ((2 * a10 * rho_pr / T_pr**3)*np.exp(-a11 * rho_pr**2))
c_pr_analytical = (1 / P_pr) - ((0.27 / (z**2 * T_pr)) * (do / (1 + ((rho_pr / z) * do))))
cgas_analytical = c_pr_analytical / P_pc
return(cgas_analytical)
"""
OIL
"""
def oil_pbubble(Rsb, sg2, api, temp2):
"""
Calculate Oil Bubble-Point Pressure
For range: 20 < Rsb (scf/STB) < 2,070; 0.56 < sg < 1.18; 16 < api < 58; 70 < temp (°F) < 295 (err=0.7%)
(Vazquez and Beggs, 1980)
"""
import numpy as np
# c1, c2, c3 coefficient from Vazquez-Beggs
if api <=30:
c1 = 0.0362
c2 = 1.0937
c3 = 25.7240
if api > 30:
c1 = 0.0178
c2 = 1.187
c3 = 23.9310
P_bubble_vaz = (Rsb / (c1 * sg2 * np.exp((c3 * api)/(temp2 + 459.67))))**(1 / c2) # convert temp to Rankine
return(P_bubble_vaz)
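# Hedged usage sketch (editor's addition): an assumed 30-API oil with Rsb = 500 scf/STB,
# solution-gas sg = 0.8, and 180 deg F reservoir temperature (illustrative values only).
def _example_oil_pbubble():
  Pb = oil_pbubble(Rsb=500, sg2=0.8, api=30, temp2=180)
  print('bubble-point pressure = %.0f psia' % Pb)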
def oil_fvf(P_bubble, api, Rsb, sg2, temp2, pressure2):
"""
Calculate Oil FVF
* Above bubble-point pressure
For range: unspecified
(Vazquez and Beggs, 1980)
* At and bubble-point pressure
For range: unspecified
(Levitan and Murtha, 1999)
"""
import numpy as np
# FVF of oil at bubblepoint pressure using Levitan-Murtha
so = 141.5 / (api + 131.5)
  Bo_bubble = 1 + ((0.0005 * Rsb) * ((sg2 / so)**0.25)) + ((0.0004*(temp2- 60)) / (so * sg2)) # temp in deg F
if pressure2 < P_bubble: # use Vazquez-Beggs
if api <= 30:
# use Vazquez-Beggs
c1 = 0.0362
c2 = 1.0937
c3 = 25.7240
c4 = 4.677E-4
c5 = 1.751E-5
c6 = -1.811E-8
if api > 30:
c1 = 0.0178
c2 = 1.187
c3 = 23.9310
c4 = 4.670E-4
c5 = 1.100E-5
c6 = 1.337E-9
Rsc = (pressure2**c2) * c1 * sg2 * np.exp((c3 * api) / (temp2 + 459.67))
Bo = 1 + (c4 * Rsc) + (c5 * (temp2 - 60) * (api / sg2)) + (c6 * Rsc *(temp2 - 60) * (api / sg2)) # temp in deg F
if pressure2 == P_bubble:
# use Levitan-Murtha
Bo = Bo_bubble
if pressure2 > P_bubble:
# Calculate oil compressibility first using Levitan-Murtha
coil = ((5 * Rsb) + (17.2 * temp2) - (1180 * sg2) + (12.61 * api) - 1433) / (1E+05 * pressure2)
# Calculate Bo using Levitan-Murtha
Bo = Bo_bubble * np.exp(coil * (P_bubble - pressure2))
return Bo
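# Hedged usage sketch (editor's addition): Bo below and above the bubble point for the
# same assumed oil as in the oil_pbubble sketch.
def _example_oil_fvf():
  Pb = oil_pbubble(Rsb=500, sg2=0.8, api=30, temp2=180)
  Bo_below = oil_fvf(Pb, api=30, Rsb=500, sg2=0.8, temp2=180, pressure2=0.5 * Pb)
  Bo_above = oil_fvf(Pb, api=30, Rsb=500, sg2=0.8, temp2=180, pressure2=2.0 * Pb)
  print('Bo(0.5*Pb) = %.3f RB/STB, Bo(2*Pb) = %.3f RB/STB' % (Bo_below, Bo_above))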
def oil_mu(pressure2, P_bubble, sg2, api, temp2, Rs):
"""
Calculate Oil Viscosity
* Below and at bubble-point pressure
For range: 0 < p (psia) < 5,250; range sg unspecified; 16 < api < 58; 70 < temp (°F) < 295; 20 < Rs (scf/STB) < 2,070 (err=1.83%)
(Beggs and Robinson, 1975; Chew and Connally, 1959)
* Above bubble-point pressure
For range: 126 < p (psia) < 9,500; 0.511 < sg < 1.351; 15.3 < api < 59.5; range temp unspecified; 9.3 < Rs (scf/STB) < 2199 (err=7.54%)
(Vazquez and Beggs, 1980)
"""
# Calculate viscosity of oil
import numpy as np
if pressure2 <= P_bubble:
if api <=30:
c1 = 0.0362
c2 = 1.0937
c3 = 25.7240
if api > 30:
c1 = 0.0178
c2 = 1.187
c3 = 23.9310
# use Beggs and Robinson
# valid for: 0 < pressure < 5250 psig, 70 < temp < 295 F, 20 < Rs < 2070 scf/STB, 16 < api < 58 API
x = (temp2**(-1.163)) * np.exp(6.9824 - (0.04658 * api))
mu_dead_oil = 10**x - 1
a = 10.715 * ((Rs + 100)**(-0.515))
b = 5.44 * ((Rs + 150)**(-0.338))
mu_live_oil = a * (mu_dead_oil**b)
if pressure2 > P_bubble:
if api <=30:
c1 = 0.0362
c2 = 1.0937
c3 = 25.7240
if api > 30:
c1 = 0.0178
c2 = 1.187
c3 = 23.9310
# use Vazquez and Beggs
# valid for: 126 < pressure < 9500 psig, 9.3 < Rs < 2199 scf/STB, 15.3 < api < 59.5 API, 0.511 < sg < 1.351
# compute oil viscosity at bubblepoint first
x_bubble = (temp2**(-1.163)) * np.exp(6.9824 - (0.04658 * api))
mu_dead_oil_bubble = 10**x_bubble - 1
a_bubble = 10.715 * ((Rs + 100)**(-0.515))
b_bubble = 5.44 * ((Rs + 150)**(-0.338))
mu_live_oil_bubble = a_bubble * (mu_dead_oil_bubble**b_bubble)
m = 2.6 * (pressure2**1.187) * np.exp(-11.513 - (8.98E-05 * pressure2))
mu_live_oil = mu_live_oil_bubble * ((pressure2 / P_bubble)**m)
return mu_live_oil
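# Hedged usage sketch (editor's addition): live-oil viscosity evaluated at the bubble
# point, where Rs = Rsb, for the same assumed oil (30 API, Rsb = 500 scf/STB, 180 deg F).
def _example_oil_mu():
  Pb = oil_pbubble(Rsb=500, sg2=0.8, api=30, temp2=180)
  mu_ob = oil_mu(pressure2=Pb, P_bubble=Pb, sg2=0.8, api=30, temp2=180, Rs=500)
  print('oil viscosity at Pb = %.3f cp' % mu_ob)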
def oil_compressibility(pressure2, P_bubble, temp2, api, Rsb, sg2):
"""
Calculate Oil Isothermal Compressibility
* Below bubble-point pressure
For range: unspecified
(McCain, 1988)
* Above and at bubble-point pressure
For range: unspecified
(Vazquez and Beggs, 1980)
"""
import numpy as np
from math import e
# oil isothermal compressibility
if pressure2 < P_bubble:
# use McCain
ln_coil = -7.573 - (1.45 * np.log(pressure2)) - (0.383 * np.log(P_bubble)) + (1.402 * np.log(temp2)) + (0.256 * np.log(api)) + (0.449 * np.log(Rsb))
coil = np.exp(ln_coil)
if pressure2 >= P_bubble:
# use Vazquez-Beggs
coil = ((5 * Rsb) + (17.2 * temp2) - (1180 * sg2) + (12.61 * api) - 1433) / (1E+05 * pressure2)
return coil
def gasoilratio(pressure2, P_bubble, sg2, api, temp2, Rsb):
"""
Calculate Solution Gas-Oil Ratio in Oil Phase
* Below Bubble-Point
For range: unspecified
(Vazquez and Beggs, 1980)
* At and Above Bubble-Point
Rs equals to Rs @ bubble-point pressure
"""
import numpy as np
if pressure2 < P_bubble:
# Using Vazquez and Beggs
if api <=30:
c1 = 0.0362
c2 = 1.0937
c3 = 25.7240
if api > 30:
c1 = 0.0178
c2 = 1.187
c3 = 23.9310
Rs = (pressure2**c2) * c1 * sg2 * np.exp((c3 * api) / (temp2 + 459.67))
if pressure2 >= P_bubble:
# Because Rs will be constant above BB
Rs = Rsb
return Rs
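# Hedged usage sketch (editor's addition): Rs declines with pressure below the bubble
# point and is held at Rsb above it (same assumed oil as above).
def _example_gasoilratio():
  Pb = oil_pbubble(Rsb=500, sg2=0.8, api=30, temp2=180)
  Rs_low = gasoilratio(pressure2=0.5 * Pb, P_bubble=Pb, sg2=0.8, api=30, temp2=180, Rsb=500)
  Rs_high = gasoilratio(pressure2=2.0 * Pb, P_bubble=Pb, sg2=0.8, api=30, temp2=180, Rsb=500)
  print('Rs(0.5*Pb) = %.0f scf/STB, Rs(2*Pb) = %.0f scf/STB' % (Rs_low, Rs_high))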
"""
WATER
"""
def waterfvf(temp, p):
"Water FVF (Bw)"
# temp in Fahrenheit
# p pressure in psia
Vwp = (-1.95301E-9 * p * temp) - (1.72834E-13 * (p**2) * temp) - (3.588922E-7 * p) - (2.25341E-10 * p**2)
Vwt = (-1.001E-2) + (1.33391E-4 * temp) + (5.50654E-7 * temp**2)
Bw = (1 + Vwt) * (1 + Vwp)
return(Bw)
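# Hedged usage sketch (editor's addition): water FVF at assumed conditions
# (180 deg F, 3000 psia); the result should be slightly above 1 RB/STB.
def _example_waterfvf():
  Bw = waterfvf(temp=180, p=3000)
  print('Bw = %.4f RB/STB' % Bw)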
|
[
"numpy.log",
"scipy.optimize.fsolve",
"numpy.exp"
] |
[((2107, 2124), 'scipy.optimize.fsolve', 'fsolve', (['f', '[1, 1]'], {}), '(f, [1, 1])\n', (2113, 2124), False, 'from scipy.optimize import fsolve\n'), ((3563, 3590), 'numpy.exp', 'np.exp', (['(x * rhogas_lee ** y)'], {}), '(x * rhogas_lee ** y)\n', (3569, 3590), True, 'import numpy as np\n'), ((8948, 8963), 'numpy.exp', 'np.exp', (['ln_coil'], {}), '(ln_coil)\n', (8954, 8963), True, 'import numpy as np\n'), ((1926, 1952), 'numpy.exp', 'np.exp', (['(-a11 * rho_pr ** 2)'], {}), '(-a11 * rho_pr ** 2)\n', (1932, 1952), True, 'import numpy as np\n'), ((5893, 5928), 'numpy.exp', 'np.exp', (['(c3 * api / (temp2 + 459.67))'], {}), '(c3 * api / (temp2 + 459.67))\n', (5899, 5928), True, 'import numpy as np\n'), ((6371, 6408), 'numpy.exp', 'np.exp', (['(coil * (P_bubble - pressure2))'], {}), '(coil * (P_bubble - pressure2))\n', (6377, 6408), True, 'import numpy as np\n'), ((7356, 7386), 'numpy.exp', 'np.exp', (['(6.9824 - 0.04658 * api)'], {}), '(6.9824 - 0.04658 * api)\n', (7362, 7386), True, 'import numpy as np\n'), ((7932, 7962), 'numpy.exp', 'np.exp', (['(6.9824 - 0.04658 * api)'], {}), '(6.9824 - 0.04658 * api)\n', (7938, 7962), True, 'import numpy as np\n'), ((8212, 8250), 'numpy.exp', 'np.exp', (['(-11.513 - 8.98e-05 * pressure2)'], {}), '(-11.513 - 8.98e-05 * pressure2)\n', (8218, 8250), True, 'import numpy as np\n'), ((9684, 9719), 'numpy.exp', 'np.exp', (['(c3 * api / (temp2 + 459.67))'], {}), '(c3 * api / (temp2 + 459.67))\n', (9690, 9719), True, 'import numpy as np\n'), ((4242, 4268), 'numpy.exp', 'np.exp', (['(-a11 * rho_pr ** 2)'], {}), '(-a11 * rho_pr ** 2)\n', (4248, 4268), True, 'import numpy as np\n'), ((4910, 4945), 'numpy.exp', 'np.exp', (['(c3 * api / (temp2 + 459.67))'], {}), '(c3 * api / (temp2 + 459.67))\n', (4916, 4945), True, 'import numpy as np\n'), ((8922, 8933), 'numpy.log', 'np.log', (['Rsb'], {}), '(Rsb)\n', (8928, 8933), True, 'import numpy as np\n'), ((8898, 8909), 'numpy.log', 'np.log', (['api'], {}), '(api)\n', (8904, 8909), True, 'import numpy as np\n'), ((8872, 8885), 'numpy.log', 'np.log', (['temp2'], {}), '(temp2)\n', (8878, 8885), True, 'import numpy as np\n'), ((8843, 8859), 'numpy.log', 'np.log', (['P_bubble'], {}), '(P_bubble)\n', (8849, 8859), True, 'import numpy as np\n'), ((8813, 8830), 'numpy.log', 'np.log', (['pressure2'], {}), '(pressure2)\n', (8819, 8830), True, 'import numpy as np\n')]
|
import tensorflow as tf
import os
import numpy as np
from scipy.ndimage import imread
def sample_Z(m,n):
return np.random.uniform(-1., 1., size=[m,n])
def get_y(x):
return 10 + x*x;
def sample_data(n=10000, scale=100):
data = []
x = scale*(np.random.random_sample((n,))-0.5)
for i in range(n):
yi = get_y(x[i])
data.append([x[i], yi])
return np.array(data)
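# Hedged sketch (editor's addition): sample_data returns an (n, 2) array whose columns
# are x and y = 10 + x**2; a quick shape check with a small, arbitrary n.
def _example_sample_data():
    pts = sample_data(n=4)
    print(pts.shape)  # (4, 2)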
def generator(Z, hsize=[16,16]):
with tf.variable_scope("GAN/Generator", reuse=False):
h1 = tf.layers.dense(Z,hsize[0], activation=tf.nn.leaky_relu)
h2 = tf.layers.dense(h1,hsize[1], activation=tf.nn.leaky_relu)
out = tf.layers.dense(h2,2)
return out
def discriminator(X, hsize=[16,16], reuse=False):
with tf.variable_scope("GAN/Discriminator", reuse=reuse):
h1 = tf.layers.dense(X, hsize[0],activation=tf.nn.leaky_relu)
h2 = tf.layers.dense(h1,hsize[1],activation=tf.nn.leaky_relu)
h3 = tf.layers.dense(h2,2)
out = tf.layers.dense(h3,1)
return out, h3
def load_data(directory_path):
print(len(os.listdir(directory_path)))
train_data = []
for file_ in sorted(os.listdir(directory_path)):
if file_.endswith('.png'):
if directory_path.endswith('/'):
image_path = directory_path + file_
else: image_path = directory_path + '/' + file_
image = imread(image_path)/255.0 # Normalize values
image = np.expand_dims(image, axis=-1) # Add channel dim
train_data.append(image)
train_data = np.array(train_data)
return train_data
def main():
print("Hello World from RaGAN")
X = tf.placeholder(tf.float32, [None,2])
Z = tf.placeholder(tf.float32, [None,2])
G_sample = generator(Z )
r_logits, r_rep = discriminator(X)
f_logits, g_rep = discriminator(G_sample, reuse=True)
# Loss functions
disc_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=r_logits, labels=tf.ones_like(r_logits)) +
tf.nn.sigmoid_cross_entropy_with_logits(
logits=f_logits, labels=tf.zeros_like(f_logits)))
gen_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=f_logits, labels=tf.ones_like(f_logits)))
# Optimizers
gen_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
scope="GAN/Generator")
disc_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
scope="GAN/Discriminator")
gen_step = tf.train.RMSPropOptimizer(learning_rate=0.001).minimize(gen_loss,
var_list=gen_vars) # G train step
disc_step = tf.train.RMSPropOptimizer(learning_rate=0.001).minimize(disc_loss,
var_list=disc_vars) # G train step
#Session
sess = tf.Session()
tf.global_variables_initializer().run(session=sess)
# Training the network
batch_size=256
nd_steps = 10
ng_steps = 10
for i in range(10001):
        # Reading the data: the [None, 2] placeholders expect 2-D point samples, so draw
        # synthetic data here. load_data() keeps the image loader available, but it would
        # need matching placeholder shapes before it can replace the synthetic data.
        X_batch = sample_data(n=batch_size)
        Z_batch = sample_Z(batch_size, 2)
for _ in range(nd_steps):
_ ,dloss = sess.run([disc_step, disc_loss],
feed_dict={X:X_batch, Z:Z_batch})
rrep_gstep, grep_step = sess.run([r_rep, g_rep],
feed_dict={X:X_batch, Z:Z_batch})
for _ in range(ng_steps):
_ ,gloss = sess.run([gen_step, gen_loss],
feed_dict={Z:Z_batch})
rrep_gstep, grep_step = sess.run([r_rep, g_rep],
feed_dict={X:X_batch, Z:Z_batch})
print("Iteration: %d\t Discriminator loss: %.4f\t Generator loss: %.4f"%(i, dloss, gloss))
main();
|
[
"numpy.random.uniform",
"numpy.size",
"numpy.random.random_sample",
"tensorflow.get_collection",
"tensorflow.global_variables_initializer",
"tensorflow.layers.dense",
"tensorflow.Session",
"tensorflow.variable_scope",
"numpy.expand_dims",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.ones_like",
"tensorflow.placeholder",
"tensorflow.zeros_like",
"numpy.array",
"scipy.ndimage.imread",
"os.listdir"
] |
[((118, 159), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)'], {'size': '[m, n]'}), '(-1.0, 1.0, size=[m, n])\n', (135, 159), True, 'import numpy as np\n'), ((386, 400), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (394, 400), True, 'import numpy as np\n'), ((1557, 1577), 'numpy.array', 'np.array', (['train_data'], {}), '(train_data)\n', (1565, 1577), True, 'import numpy as np\n'), ((1658, 1695), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 2]'], {}), '(tf.float32, [None, 2])\n', (1672, 1695), True, 'import tensorflow as tf\n'), ((1703, 1740), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 2]'], {}), '(tf.float32, [None, 2])\n', (1717, 1740), True, 'import tensorflow as tf\n'), ((2320, 2391), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""GAN/Generator"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='GAN/Generator')\n", (2337, 2391), True, 'import tensorflow as tf\n'), ((2416, 2491), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': '"""GAN/Discriminator"""'}), "(tf.GraphKeys.GLOBAL_VARIABLES, scope='GAN/Discriminator')\n", (2433, 2491), True, 'import tensorflow as tf\n'), ((2777, 2789), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2787, 2789), True, 'import tensorflow as tf\n'), ((444, 491), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""GAN/Generator"""'], {'reuse': '(False)'}), "('GAN/Generator', reuse=False)\n", (461, 491), True, 'import tensorflow as tf\n'), ((506, 563), 'tensorflow.layers.dense', 'tf.layers.dense', (['Z', 'hsize[0]'], {'activation': 'tf.nn.leaky_relu'}), '(Z, hsize[0], activation=tf.nn.leaky_relu)\n', (521, 563), True, 'import tensorflow as tf\n'), ((576, 634), 'tensorflow.layers.dense', 'tf.layers.dense', (['h1', 'hsize[1]'], {'activation': 'tf.nn.leaky_relu'}), '(h1, hsize[1], activation=tf.nn.leaky_relu)\n', (591, 634), True, 'import tensorflow as tf\n'), ((648, 670), 'tensorflow.layers.dense', 'tf.layers.dense', (['h2', '(2)'], {}), '(h2, 2)\n', (663, 670), True, 'import tensorflow as tf\n'), ((745, 796), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""GAN/Discriminator"""'], {'reuse': 'reuse'}), "('GAN/Discriminator', reuse=reuse)\n", (762, 796), True, 'import tensorflow as tf\n'), ((811, 868), 'tensorflow.layers.dense', 'tf.layers.dense', (['X', 'hsize[0]'], {'activation': 'tf.nn.leaky_relu'}), '(X, hsize[0], activation=tf.nn.leaky_relu)\n', (826, 868), True, 'import tensorflow as tf\n'), ((881, 939), 'tensorflow.layers.dense', 'tf.layers.dense', (['h1', 'hsize[1]'], {'activation': 'tf.nn.leaky_relu'}), '(h1, hsize[1], activation=tf.nn.leaky_relu)\n', (896, 939), True, 'import tensorflow as tf\n'), ((951, 973), 'tensorflow.layers.dense', 'tf.layers.dense', (['h2', '(2)'], {}), '(h2, 2)\n', (966, 973), True, 'import tensorflow as tf\n'), ((987, 1009), 'tensorflow.layers.dense', 'tf.layers.dense', (['h3', '(1)'], {}), '(h3, 1)\n', (1002, 1009), True, 'import tensorflow as tf\n'), ((1147, 1173), 'os.listdir', 'os.listdir', (['directory_path'], {}), '(directory_path)\n', (1157, 1173), False, 'import os\n'), ((260, 289), 'numpy.random.random_sample', 'np.random.random_sample', (['(n,)'], {}), '((n,))\n', (283, 289), True, 'import numpy as np\n'), ((1074, 1100), 'os.listdir', 'os.listdir', (['directory_path'], {}), '(directory_path)\n', (1084, 1100), False, 'import os\n'), ((1454, 1484), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(-1)'}), '(image, 
axis=-1)\n', (1468, 1484), True, 'import numpy as np\n'), ((2516, 2562), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (2541, 2562), True, 'import tensorflow as tf\n'), ((2641, 2687), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (2666, 2687), True, 'import tensorflow as tf\n'), ((2794, 2827), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2825, 2827), True, 'import tensorflow as tf\n'), ((3194, 3210), 'numpy.size', 'np.size', (['X_batch'], {}), '(X_batch)\n', (3201, 3210), True, 'import numpy as np\n'), ((1390, 1408), 'scipy.ndimage.imread', 'imread', (['image_path'], {}), '(image_path)\n', (1396, 1408), False, 'from scipy.ndimage import imread\n'), ((2262, 2284), 'tensorflow.ones_like', 'tf.ones_like', (['f_logits'], {}), '(f_logits)\n', (2274, 2284), True, 'import tensorflow as tf\n'), ((2006, 2028), 'tensorflow.ones_like', 'tf.ones_like', (['r_logits'], {}), '(r_logits)\n', (2018, 2028), True, 'import tensorflow as tf\n'), ((2118, 2141), 'tensorflow.zeros_like', 'tf.zeros_like', (['f_logits'], {}), '(f_logits)\n', (2131, 2141), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python
import scipy.spatial
import numpy as np
import sys
import glob
def get_sssa_components(coordinates):
hull = scipy.spatial.ConvexHull(coordinates, qhull_options='QJ')
return hull.volume, hull.area
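# Hedged sanity check (editor's addition): the convex hull of a unit cube should report
# a volume near 1 and a surface area near 6 (the 'QJ' joggle makes the values approximate).
def _example_unit_cube():
    corners = np.array([[x, y, z] for x in (0, 1) for y in (0, 1) for z in (0, 1)], dtype=float)
    volume, area = get_sssa_components(corners)
    print(volume, area)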
def coordinate_array(fn):
lines = open(fn).readlines()
numatoms = int(lines[0])
coords = []
for atom in range(numatoms):
coords.append(list(map(float, lines[2+atom].strip().split()[1:4])))
return np.array(coords), lines[1].strip()
fn = '/mnt/c/Users/guido/data/qm9/coord/46/3e/dsgdb9nsd_086034.xyz'
def do_fn(fn):
c, label = coordinate_array(fn)
print (label, *get_sssa_components(c))
for xyzfile in glob.glob('/mnt/c/Users/guido/data/qm9/coord/*/*/*.xyz'):
    try:
        do_fn(xyzfile)
    except Exception:
        # some xyz files fail this simple parser (e.g. malformed coordinate lines); skip them
        continue
|
[
"numpy.array",
"glob.glob"
] |
[((639, 695), 'glob.glob', 'glob.glob', (['"""/mnt/c/Users/guido/data/qm9/coord/*/*/*.xyz"""'], {}), "('/mnt/c/Users/guido/data/qm9/coord/*/*/*.xyz')\n", (648, 695), False, 'import glob\n'), ((431, 447), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (439, 447), True, 'import numpy as np\n')]
|
import numpy as np
from io import SEEK_CUR
__all__ = ['imread', 'imwrite']
def imwrite(filename, image, write_order=None):
"""Write an image as a BMP.
    Depending on the dtype and shape of the image, the image will be
    encoded with 1 bit per pixel (boolean 2D images), 8 bits per pixel
    (uint8 2D images), 24 bits per pixel (uint8 RGB, shape (N, M, 3)),
    or 32 bits per pixel (uint8 RGBA, shape (N, M, 4)).
Other file formats may be supported in the future, but cannot be selected
automatically.
This function opens the provided filename with ``'bw+'``.
Parameters
==========
filename: str or anything that ``open`` can handle
Where the file should be stored.
image: [N, M [, P]] np.uint8 or np.bool
Image to save.
write_order: 'RGBA' or 'BGRA'
The order in which the bytes are stored in the BMP file (little endian).
The default for most BMP images is ``'BGRA'``. Unfortunately, many
numpy images are in ``RGBA``. As such saving in 'BGRA' mode is a little
slower (25% slower) than ``'RGBA'`` mode. However, PILLOW doesn't
support ``'RGBA'`` mode and therefore you need to be careful about
interoperability with others. The default will stay as ``'BGRA'`` until
Pillow supports ``'RGBA'``. At that point, saving in ``'BGRA'``
mode might be considered out of scope for this project.
"""
if image.dtype == np.uint8 and image.ndim == 2:
_encode_8bpp(filename, image)
    elif image.dtype == np.bool_ and image.ndim == 2:
_encode_1bpp(filename, image)
elif image.dtype == np.uint8 and image.ndim == 3 and image.shape[-1] == 3:
_encode_24bpp(filename, image)
elif image.dtype == np.uint8 and image.ndim == 3 and image.shape[-1] == 4:
_encode_32bpp(filename, image, write_order=write_order)
else:
raise NotImplementedError('Only uint8 and bool images are supported.')
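# Hedged usage sketch (editor's addition): a minimal grayscale round trip through the
# public API; the file name is an arbitrary example, not part of the original module.
def _example_roundtrip_gray():
    img = np.arange(256, dtype=np.uint8).reshape(16, 16)
    imwrite('example_gray.bmp', img)    # dispatched to _encode_8bpp
    assert np.array_equal(imread('example_gray.bmp'), img)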
def _encode_1bpp(filename, image):
color_table = gray_color_table_bool
packed_image = np.packbits(image, axis=1)
bits_per_pixel = 1
# BMP wants images to be padded to a multiple of 4
row_size = (bits_per_pixel * image.shape[1] + 31) // 32 * 4
image_size = row_size * image.shape[0]
header = np.zeros(1, dtype=header_t)
info_header = np.zeros(1, dtype=bitmap_info_header_t)
header['signature'] = 'BM'.encode()
header['file_offset_to_pixelarray'] = (header.nbytes +
info_header.nbytes +
color_table.nbytes)
header['filesize'] = (header['file_offset_to_pixelarray'] + image_size)
info_header['header_size'] = info_header.nbytes
info_header['image_width'] = image.shape[1]
    # A positive height states that the array is saved "bottom to top"
# A negative height states that the array is saved "top to bottom"
# Top to bottom has a larger chance of being contiguous in C memory
info_header['image_height'] = -image.shape[0]
info_header['image_planes'] = 1
info_header['bits_per_pixel'] = bits_per_pixel
info_header['compression'] = compression_types.index('BI_RGB')
info_header['image_size'] = image_size
info_header['colors_in_color_table'] = color_table.shape[0]
_write_file(filename, header, info_header, packed_image, color_table, row_size)
def _encode_8bpp(filename, image):
color_table = gray_color_table_uint8
bits_per_pixel = 8
header = np.zeros(1, dtype=header_t)
info_header = np.zeros(1, dtype=bitmap_info_header_t)
header['signature'] = 'BM'.encode()
# Not correct for color images
# BMP wants images to be padded to a multiple of 4
row_size = (bits_per_pixel * image.shape[1] + 31) // 32 * 4
image_size = row_size * image.shape[0]
header['file_offset_to_pixelarray'] = (header.nbytes +
info_header.nbytes +
color_table.nbytes)
header['filesize'] = (header['file_offset_to_pixelarray'] + image_size)
info_header['header_size'] = info_header.nbytes
info_header['image_width'] = image.shape[1]
    # A positive height states that the array is saved "bottom to top"
# A negative height states that the array is saved "top to bottom"
# Top to bottom has a larger chance of being contiguous in C memory
info_header['image_height'] = -image.shape[0]
info_header['image_planes'] = 1
info_header['bits_per_pixel'] = bits_per_pixel
info_header['compression'] = compression_types.index('BI_RGB')
info_header['image_size'] = image_size
info_header['colors_in_color_table'] = color_table.shape[0]
_write_file(filename, header, info_header, image, color_table, row_size)
def _encode_24bpp(filename, image):
color_table = np.empty((0, 4), dtype=np.uint8)
bits_per_pixel = 24
# Images are typically stored in BGR format
# In the future, I'll store them as bitfields format
# specifying the order of the pixels to match the memory
packed_image = image[:, :, ::-1].reshape(image.shape[0], -1)
header = np.zeros(1, dtype=header_t)
info_header = np.zeros(1, dtype=bitmap_info_header_t)
header['signature'] = 'BM'.encode()
# Not correct for color images
# BMP wants images to be padded to a multiple of 4
row_size = (bits_per_pixel * image.shape[1] + 31) // 32 * 4
image_size = row_size * image.shape[0]
header['file_offset_to_pixelarray'] = (header.nbytes +
info_header.nbytes +
color_table.nbytes)
header['filesize'] = (header['file_offset_to_pixelarray'] + image_size)
info_header['header_size'] = info_header.nbytes
info_header['image_width'] = image.shape[1]
    # A positive height states that the array is saved "bottom to top"
# A negative height states that the array is saved "top to bottom"
# Top to bottom has a larger chance of being contiguous in C memory
info_header['image_height'] = -image.shape[0]
info_header['image_planes'] = 1
info_header['bits_per_pixel'] = bits_per_pixel
info_header['compression'] = compression_types.index('BI_RGB')
info_header['image_size'] = image_size
info_header['colors_in_color_table'] = color_table.shape[0]
_write_file(filename, header, info_header, packed_image, color_table, row_size)
def _encode_32bpp(filename, image, write_order=None):
color_table = np.empty((0, 4), dtype=np.uint8)
bits_per_pixel = 32
header = np.zeros(1, dtype=header_t)
info_header = np.zeros(1, dtype=bitmap_v4_header_t)
if write_order not in [None, 'BGRA', 'RGBA']:
raise ValueError(
'``write_order`` must be either ``RGBA`` or ``BGRA`` if sepecified.')
if write_order == 'RGBA':
packed_image = image.reshape(image.shape[0], -1).copy()
info_header['blue_mask'] = 0x00FF0000
info_header['green_mask'] = 0x0000FF00
info_header['red_mask'] = 0x000000FF
info_header['alpha_mask'] = 0xFF000000
else:
# Images are typically stored in BGR format
# specifying the order of the pixels to match the memory
image = image.copy()
image[..., [0, 2]] = image[..., [2, 0]]
packed_image = image.reshape(image.shape[0], -1).copy()
info_header['red_mask'] = 0x00FF0000
info_header['green_mask'] = 0x0000FF00
info_header['blue_mask'] = 0x000000FF
info_header['alpha_mask'] = 0xFF000000
header['signature'] = 'BM'.encode()
# Not correct for color images
# BMP wants images to be padded to a multiple of 4
row_size = (bits_per_pixel * image.shape[1] + 31) // 32 * 4
image_size = row_size * image.shape[0]
header['file_offset_to_pixelarray'] = (header.nbytes +
info_header.nbytes +
color_table.nbytes)
header['filesize'] = (header['file_offset_to_pixelarray'] + image_size)
info_header['header_size'] = info_header.nbytes
info_header['image_width'] = image.shape[1]
    # A positive height states that the array is saved "bottom to top"
# A negative height states that the array is saved "top to bottom"
# Top to bottom has a larger chance of being contiguous in C memory
info_header['image_height'] = -image.shape[0]
info_header['image_planes'] = 1
info_header['bits_per_pixel'] = bits_per_pixel
info_header['compression'] = compression_types.index('BI_BITFIELDS')
info_header['image_size'] = image_size
info_header['colors_in_color_table'] = color_table.shape[0]
_write_file(filename, header, info_header, packed_image, color_table, row_size)
def _write_file(filename, header, info_header, packed_image, color_table, row_size):
with open(filename, 'bw+') as f:
f.write(header)
f.write(info_header)
f.write(color_table)
if row_size == packed_image.shape[1]:
# Small optimization when the image is a multiple of 4 bytes
# it actually avoids a full memory copy, so it is quite useful
# Unfortunately, for RGB images, the bytes may be swapped
# This causes a serious slowdown when saving images.
# Maybe ascontiguousarray is useful?
packed_image.tofile(f)
else:
# Now slice just the part of the image that we actually write to.
data = np.empty((packed_image.shape[0], row_size), dtype=np.uint8)
data[:packed_image.shape[0],
:packed_image.shape[1]] = packed_image
data.tofile(f)
def imread(filename):
"""Read a BMP image.
The returned image is almost always a np.uint8 dtype.
If a grayscale image is detected, then the returned image is only 2D.
If the use of color channels is detected, then an RGB (or RGBA) image is
returned accordingly.
    The image may or may not be C contiguous, due to intricacies in how
    bitmap images are stored. This can make images seem to load faster,
    but many algorithms call ``np.ascontiguousarray``, so benchmark your
    code to see what works best for you.
This function opens the provided filename with ``'br'``.
Parameters
==========
filename: str or anything that ``open`` can handle
What image should be opened.
"""
with open(filename, 'br') as f:
header = np.fromfile(f, dtype=header_t, count=1)
if header['signature'] != 'BM'.encode():
raise ValueError('Provided file is not a bmp file.')
header_size = int(np.fromfile(f, dtype='<u4', count=1))
if header_size not in header_sizes.values():
raise ValueError(
'Unsupported image format. Header size has a value of {}.'
''.format(header_size))
f.seek(-bitmap_info_header_t['header_size'].itemsize, SEEK_CUR)
info = np.fromfile(f, dtype=info_header_t_dict[header_size], count=1)
info_header = np.zeros(1, dtype=bitmap_v5_header_t)
for name in info.dtype.names:
info_header[name] = info[name]
shape = (int(abs(info_header['image_height'])), int(info_header['image_width']))
if info_header['image_planes'] != 1:
raise NotImplementedError(
"We don't know how to handle more than 1 image plane. "
"Got {} image planes.".format(info_header['image_planes']))
compression = compression_types[info_header['compression'][0]]
implemented_compressions = ['BI_RGB', 'BI_BITFIELDS']
if compression not in implemented_compressions:
raise NotImplementedError(
"We only handle images with compression format {}. "
"Got compression format {}.".format(
implemented_compressions, compression))
bits_per_pixel = info_header['bits_per_pixel'][0]
if bits_per_pixel not in _decoders.keys():
raise NotImplementedError(
"We only support images with one of {} bits per "
"pixel. Got an image with {} bits per pixel.".format(
list(_decoders.keys()),
bits_per_pixel)
)
color_table_max_shape = int(header['file_offset_to_pixelarray'][0] -
header.nbytes - info.nbytes)
if info_header['colors_in_color_table'] != 0:
color_table_max_shape = min(color_table_max_shape,
int(info_header['colors_in_color_table']) * 4)
color_table_count = min(color_table_max_shape, 2 ** bits_per_pixel * 4)
# Bitfields doesn't use a color table
if compression == 'BI_BITFIELDS':
color_table_count = 0
color_table = np.fromfile(f, dtype='<u1', count=color_table_count)
if header_size == header_sizes['BITMAPCOREHEADER']:
# bitmap core header color tables only contain 3 values, not 4
color_table = color_table.reshape(-1, 3)
else:
color_table = color_table.reshape(-1, 4)
# When color tables are used, alpha is ignored.
color_table = color_table[..., :3]
row_size = (bits_per_pixel * shape[1] + 31) // 32 * 4
decoder = _decoders[bits_per_pixel]
return decoder(f, header, info_header, color_table, shape, row_size)
def _compress_color_table(color_table):
if np.all(color_table[:, 0:1] == color_table[:, 1:3]):
return color_table[:, 0]
else:
# Color table is provided in BGR, not RGB
return color_table[:, ::-1]
def _decode_32bpp(f, header, info_header, color_table, shape, row_size):
compression = compression_types[info_header['compression'][0]]
if compression == 'BI_BITFIELDS':
#
if info_header['header_size'] <= header_sizes['BITMAPINFOHEADER']:
# with info header, you can have valid bitfields, but only RGB
# not RGBA
bitfields = np.fromfile(f, dtype='<u4', count=3).tolist()
else:
bitfields = [info_header['red_mask'], info_header['green_mask'],
info_header['blue_mask'], info_header['alpha_mask']]
right_shift = []
precision = []
for bitfield in bitfields:
for i in range(32):
if np.bitwise_and(bitfield, 0x1) == 1:
right_shift.append(i)
for j in range(i, 32):
bitfield = np.right_shift(bitfield, 1)
if np.bitwise_and(bitfield, 0x1) == 0:
precision.append(j - i + 1)
break
break
bitfield = np.right_shift(bitfield, 1)
bitfields_use_uint8 = (
precision in [[8, 8, 8], [8, 8, 8, 8], [8, 8, 8, 0]] and
all(shift % 8 == 0 for shift in right_shift))
        is_rgba = len(precision) == 4 and precision[-1] != 0  # four masks with a non-zero alpha width
else:
bitfields = [0x0000FF00, 0x00FF0000, 0xFF000000]
f.seek(int(header['file_offset_to_pixelarray']))
image_size = row_size * shape[0]
if (compression == 'BI_BITFIELDS' and
not bitfields_use_uint8):
raw = np.fromfile(f, dtype='<u4',
count=image_size // 4).reshape(-1, row_size // 4)
else:
raw = np.fromfile(f, dtype='<u1',
count=image_size).reshape(-1, row_size)
# BMPs are saved typically as the last row first.
# Except if the image height is negative
if info_header['image_height'] > 0:
raw = raw[::-1, :]
# image format is returned as BGRA, not RGBA
# this is actually quite costly
if compression == 'BI_RGB':
image = raw.reshape(shape[0], -1, 4)
# Alpha only exists in BITMAPV3INFOHEADER and later
if info_header['header_size'] <= header_sizes['BITMAPINFOHEADER']:
image = image[..., :3]
return image[:, :, ::-1]
        # Newer headers carry an alpha channel: fall through, swap BGRA -> RGBA, and return.
image[:, :, [2, 0]] = image[:, :, [0, 2]]
return image
elif compression == 'BI_BITFIELDS':
if bitfields_use_uint8:
image = raw.reshape(shape[0], -1, 4)
if right_shift == [16, 8, 0]:
image = image[:, :, :3]
return image[:, :, ::-1]
elif right_shift == [16, 8, 0, 24]:
# advanced indexing is not the right tool, it copies the arrays
image[:, :, [0, 2]] = image[:, :, [2, 0]]
return image
elif right_shift == [0, 8, 16, 24]:
return image
else:
raw = raw.reshape(shape[0], -1)
if precision == [8, 8, 8]:
image = np.empty(raw.shape + (3,), dtype=np.uint8)
for i, r in zip(range(3), right_shift):
np.right_shift(raw, r, out=image[:, :, i],
casting='unsafe')
return image
raise NotImplementedError(
"We don't support your particular format yet.")
def _decode_1bpp(f, header, info_header, color_table,
shape, row_size):
f.seek(int(header['file_offset_to_pixelarray']))
packed_image = np.fromfile(f, dtype='<u1',
count=row_size * shape[0]).reshape(-1, row_size)
if info_header['image_height'] > 0:
packed_image = packed_image[::-1, :]
color_index = np.unpackbits(packed_image, axis=1)
color_index = color_index[:shape[0], :shape[1]]
color_table = _compress_color_table(color_table)
return color_table[color_index]
def _decode_24bpp(f, header, info_header, color_table,
shape, row_size):
f.seek(int(header['file_offset_to_pixelarray']))
image = np.fromfile(f, dtype='<u1',
count=row_size * shape[0]).reshape(-1, row_size)
if info_header['image_height'] > 0:
image = image[::-1, :]
image = image.reshape(image.shape[0], -1, 3)
# image format is returned as BGR, not RGB
return image[:, :shape[1], ::-1]
def _decode_8bpp(f, header, info_header, color_table,
shape, row_size):
f.seek(int(header['file_offset_to_pixelarray']))
image = np.fromfile(f, dtype='<u1',
count=row_size * shape[0]).reshape(-1, row_size)
if info_header['image_height'] > 0:
image = image[::-1, :]
# These images are padded, make sure you slice them
image = image[:shape[0], :shape[1]]
color_table = _compress_color_table(color_table)
if np.array_equal(color_table, gray_color_table_compressed_uint8):
return image
else:
return color_table[image]
def _decode_4bpp(f, header, info_header, color_table,
shape, row_size):
f.seek(int(header['file_offset_to_pixelarray']))
packed_image = np.fromfile(f, dtype='<u1',
count=row_size * shape[0]).reshape(-1, row_size)
if info_header['image_height'] > 0:
packed_image = packed_image[::-1, :]
# Unpack the image
color_index = np.repeat(packed_image, 2, axis=1)
np.right_shift(color_index[:, ::2], 4, out=color_index[:, ::2])
color_index = color_index[:shape[0], :shape[1]]
# repeat the color table to index quickly
table = np.zeros((256 // 2**4, 2**4, color_table.shape[1]), dtype=np.uint8)
table[:, :color_table.shape[0], :] = color_table
color_table = table.reshape(-1, color_table.shape[1])
color_table = _compress_color_table(color_table)
return color_table[color_index]
def _decode_16bpp(f, header, info_header, color_table,
shape, row_size):
if color_table.size != 0:
raise NotImplementedError(
"We don't support colormaps for 16 bit images.")
compression = compression_types[info_header['compression'][0]]
if compression == 'BI_BITFIELDS':
bitfields = np.fromfile(f, dtype='<u4', count=3).tolist()
else:
bitfields = BITFIELDS_555
f.seek(int(header['file_offset_to_pixelarray']))
image_size = shape[0] * row_size
image = np.fromfile(f, dtype='<u2',
count=image_size // 2).reshape(-1, row_size // 2)
# BMPs are saved typically as the last row first.
# Except if the image height is negative
if info_header['image_height'] > 0:
image = image[::-1, :]
packed_image = image[:shape[0], :shape[1]]
image = np.empty(shape + (3,), dtype=np.uint8)
if bitfields == BITFIELDS_555:
np.right_shift(packed_image, 5 + 5, out=image[:, :, 0],
casting='unsafe')
np.right_shift(packed_image, 5, out=image[:, :, 1],
casting='unsafe')
np.copyto(image[:, :, 2], packed_image, casting='unsafe')
np.take(gray_table_uint5, image, out=image)
elif bitfields == BITFIELDS_565:
np.right_shift(packed_image, 5 + 6, out=image[:, :, 0],
casting='unsafe')
np.right_shift(packed_image, 5, out=image[:, :, 1],
casting='unsafe')
np.copyto(image[:, :, 2], packed_image, casting='unsafe')
np.take(gray_table_uint5, image[:, :, 0::2], out=image[:, :, 0::2])
np.take(gray_table_uint6, image[:, :, 1], out=image[:, :, 1])
else:
raise NotImplementedError(
"We still haven't implemented your particular bitfield pattern.")
return image
# Convenient decoder dictionary
_decoders = dict(zip([1, 4, 8, 16, 24, 32],
[_decode_1bpp, _decode_4bpp, _decode_8bpp,
_decode_16bpp, _decode_24bpp, _decode_32bpp]))
header_t = np.dtype([
('signature', '|S2'),
('filesize', '<u4'),
('reserved1', '<u2'),
('reserved2', '<u2'),
('file_offset_to_pixelarray', '<u4')
])
bitmap_info_header_t = np.dtype([
('header_size', '<u4'),
('image_width', '<i4'),
('image_height', '<i4'),
('image_planes', '<u2'),
('bits_per_pixel', '<u2'),
('compression', '<u4'),
('image_size', '<u4'),
('x_pixels_per_meter', '<i4'),
('y_pixels_per_meter', '<i4'),
('colors_in_color_table', '<u4'),
('important_color_count', '<u4'),
])
bitmap_v4_header_t = np.dtype([
('header_size', '<u4'), # 4
('image_width', '<i4'), # 4
('image_height', '<i4'), # 4
('image_planes', '<u2'), # 2
('bits_per_pixel', '<u2'), # 2
('compression', '<u4'), # 4
('image_size', '<u4'), # 4
('x_pixels_per_meter', '<i4'), # 4
('y_pixels_per_meter', '<i4'), # 4
('colors_in_color_table', '<u4'), # 4
('important_color_count', '<u4'), # 4
('red_mask', '<u4'), # 4
('green_mask', '<u4'), # 4
('blue_mask', '<u4'), # 4
('alpha_mask', '<u4'), # 4
('color_space', '|S4'), # 4
('cie_xyz_tripple', '<u4', (3, 3)), # 4 * 3 * 3
('gamma_red', '<u4'), # 4
('gamma_green', '<u4'), # 4
('gamma_blue', '<u4'), # 4
])
bitmap_v5_header_t = np.dtype([
('header_size', '<u4'), # 4
('image_width', '<i4'), # 4
('image_height', '<i4'), # 4
('image_planes', '<u2'), # 2
('bits_per_pixel', '<u2'), # 2
('compression', '<u4'), # 4
('image_size', '<u4'), # 4
('x_pixels_per_meter', '<i4'), # 4
('y_pixels_per_meter', '<i4'), # 4
('colors_in_color_table', '<u4'), # 4
('important_color_count', '<u4'), # 4
('red_mask', '<u4'), # 4
('green_mask', '<u4'), # 4
('blue_mask', '<u4'), # 4
('alpha_mask', '<u4'), # 4
('color_space', '|S4'), # 4
('cie_xyz_tripple', '<u4', (3, 3)), # 4 * 3 * 3
('gamma_red', '<u4'), # 4
('gamma_green', '<u4'), # 4
('gamma_blue', '<u4'), # 4
('intent', '<u4'), # 4
('profile_data', '<u4'), # 4
('profile_size', '<u4'), # 4
('reserved', '<u4'), # 4
])
bitmap_core_header_t = np.dtype([
('header_size', '<u4'),
('image_width', '<u2'),
('image_height', '<u2'),
('image_planes', '<u2'),
('bits_per_pixel', '<u2'),
])
header_sizes = {'BITMAPCOREHEADER':12,
'BITMAPINFOHEADER': 40,
'BITMAPV4HEADER': 108,
'BITMAPV5HEADER': 124}
# bitfields is so poorly documented
# See this post about it
# http://www.virtualdub.org/blog/pivot/entry.php?id=177
BITFIELDS_555 = [0x7C00, 0x03E0, 0x001F]
BITFIELDS_565 = [0xF800, 0x07E0, 0x001F]
info_header_t_dict = {12 : bitmap_core_header_t,
40 : bitmap_info_header_t,
108: bitmap_v4_header_t,
124: bitmap_v5_header_t}
compression_types = ['BI_RGB', 'BI_RLE8', 'BI_RLE4', 'BI_BITFIELDS', 'BI_JPEG',
                     'BI_PNG', 'BI_ALPHABITFIELDS', 'BI_CMYK', 'BI_CMYKRLE8',
                     'BI_CMYKRLE4']
# These are mostly for writing.
gray_color_table_compressed_uint8 = np.arange(256, dtype='<u1')
gray_color_table_uint8 = np.stack([gray_color_table_compressed_uint8,
gray_color_table_compressed_uint8,
gray_color_table_compressed_uint8,
np.full_like(gray_color_table_compressed_uint8,
fill_value=0, dtype='<u1')],
axis=1)
gray_color_table_bool = np.asarray([0, 255], dtype='<u1')
gray_color_table_bool = np.stack([gray_color_table_bool,
gray_color_table_bool,
gray_color_table_bool,
np.full_like(gray_color_table_bool,
fill_value=0, dtype='<u1')],
axis=1)
# Need to convert 16 bit packed numbers to RGB
# These are pretty hacky optimizations
# basically, the size of a 256 bit array is insiginifcant in terms of
# memory consumption. Therefore, we create an array that can be indexed
# while effectively ignoring the most significant bits in the a uint8
gray_table_uint1 = np.asarray([0, 255], dtype='<u1')
gray_table_uint1 = np.concatenate([gray_table_uint1] * (256 // gray_table_uint1.size))
gray_table_uint2 = np.asarray([0, 85, 170, 255], dtype='<u1')
gray_table_uint2 = np.concatenate([gray_table_uint2] * (256 // gray_table_uint2.size))
gray_table_uint3 = np.linspace(0, 255, num=2**3, dtype=np.uint8)
gray_table_uint3 = np.concatenate([gray_table_uint3] * (256 // gray_table_uint3.size))
gray_table_uint4 = np.linspace(0, 255, num=2**4, dtype=np.uint8)
gray_table_uint4 = np.concatenate([gray_table_uint4] * (256 // gray_table_uint4.size))
gray_table_uint5 = np.linspace(0, 255, num=2**5, dtype=np.uint8)
gray_table_uint5 = np.concatenate([gray_table_uint5] * (256 // gray_table_uint5.size))
gray_table_uint6 = np.linspace(0, 255, num=2**6, dtype=np.uint8)
gray_table_uint6 = np.concatenate([gray_table_uint6] * (256 // gray_table_uint6.size))
gray_table_uint7 = np.linspace(0, 255, num=2**7, dtype=np.uint8)
gray_table_uint7 = np.concatenate([gray_table_uint7] * (256 // gray_table_uint7.size))
gray_tables = dict(zip(range(1, 8),
[gray_table_uint1,
gray_table_uint2,
gray_table_uint3,
gray_table_uint4,
gray_table_uint5,
gray_table_uint6,
gray_table_uint7]))
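# Hedged usage sketch (editor's addition): write a small RGBA image in the default
# 'BGRA' byte order and read it back; the pixel values and file name are arbitrary.
def _example_roundtrip_rgba():
    rgba = np.zeros((4, 4, 4), dtype=np.uint8)
    rgba[..., 0] = 255   # red channel
    rgba[..., 3] = 128   # alpha channel
    imwrite('example_rgba.bmp', rgba)
    assert np.array_equal(imread('example_rgba.bmp'), rgba)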
|
[
"numpy.full_like",
"numpy.right_shift",
"numpy.fromfile",
"numpy.empty",
"numpy.asarray",
"numpy.dtype",
"numpy.packbits",
"numpy.zeros",
"numpy.all",
"numpy.arange",
"numpy.take",
"numpy.linspace",
"numpy.unpackbits",
"numpy.array_equal",
"numpy.bitwise_and",
"numpy.copyto",
"numpy.concatenate",
"numpy.repeat"
] |
[((21809, 21948), 'numpy.dtype', 'np.dtype', (["[('signature', '|S2'), ('filesize', '<u4'), ('reserved1', '<u2'), (\n 'reserved2', '<u2'), ('file_offset_to_pixelarray', '<u4')]"], {}), "([('signature', '|S2'), ('filesize', '<u4'), ('reserved1', '<u2'),\n ('reserved2', '<u2'), ('file_offset_to_pixelarray', '<u4')])\n", (21817, 21948), True, 'import numpy as np\n'), ((21991, 22320), 'numpy.dtype', 'np.dtype', (["[('header_size', '<u4'), ('image_width', '<i4'), ('image_height', '<i4'), (\n 'image_planes', '<u2'), ('bits_per_pixel', '<u2'), ('compression',\n '<u4'), ('image_size', '<u4'), ('x_pixels_per_meter', '<i4'), (\n 'y_pixels_per_meter', '<i4'), ('colors_in_color_table', '<u4'), (\n 'important_color_count', '<u4')]"], {}), "([('header_size', '<u4'), ('image_width', '<i4'), ('image_height',\n '<i4'), ('image_planes', '<u2'), ('bits_per_pixel', '<u2'), (\n 'compression', '<u4'), ('image_size', '<u4'), ('x_pixels_per_meter',\n '<i4'), ('y_pixels_per_meter', '<i4'), ('colors_in_color_table', '<u4'),\n ('important_color_count', '<u4')])\n", (21999, 22320), True, 'import numpy as np\n'), ((22373, 22933), 'numpy.dtype', 'np.dtype', (["[('header_size', '<u4'), ('image_width', '<i4'), ('image_height', '<i4'), (\n 'image_planes', '<u2'), ('bits_per_pixel', '<u2'), ('compression',\n '<u4'), ('image_size', '<u4'), ('x_pixels_per_meter', '<i4'), (\n 'y_pixels_per_meter', '<i4'), ('colors_in_color_table', '<u4'), (\n 'important_color_count', '<u4'), ('red_mask', '<u4'), ('green_mask',\n '<u4'), ('blue_mask', '<u4'), ('alpha_mask', '<u4'), ('color_space',\n '|S4'), ('cie_xyz_tripple', '<u4', (3, 3)), ('gamma_red', '<u4'), (\n 'gamma_green', '<u4'), ('gamma_blue', '<u4')]"], {}), "([('header_size', '<u4'), ('image_width', '<i4'), ('image_height',\n '<i4'), ('image_planes', '<u2'), ('bits_per_pixel', '<u2'), (\n 'compression', '<u4'), ('image_size', '<u4'), ('x_pixels_per_meter',\n '<i4'), ('y_pixels_per_meter', '<i4'), ('colors_in_color_table', '<u4'),\n ('important_color_count', '<u4'), ('red_mask', '<u4'), ('green_mask',\n '<u4'), ('blue_mask', '<u4'), ('alpha_mask', '<u4'), ('color_space',\n '|S4'), ('cie_xyz_tripple', '<u4', (3, 3)), ('gamma_red', '<u4'), (\n 'gamma_green', '<u4'), ('gamma_blue', '<u4')])\n", (22381, 22933), True, 'import numpy as np\n'), ((23297, 23952), 'numpy.dtype', 'np.dtype', (["[('header_size', '<u4'), ('image_width', '<i4'), ('image_height', '<i4'), (\n 'image_planes', '<u2'), ('bits_per_pixel', '<u2'), ('compression',\n '<u4'), ('image_size', '<u4'), ('x_pixels_per_meter', '<i4'), (\n 'y_pixels_per_meter', '<i4'), ('colors_in_color_table', '<u4'), (\n 'important_color_count', '<u4'), ('red_mask', '<u4'), ('green_mask',\n '<u4'), ('blue_mask', '<u4'), ('alpha_mask', '<u4'), ('color_space',\n '|S4'), ('cie_xyz_tripple', '<u4', (3, 3)), ('gamma_red', '<u4'), (\n 'gamma_green', '<u4'), ('gamma_blue', '<u4'), ('intent', '<u4'), (\n 'profile_data', '<u4'), ('profile_size', '<u4'), ('reserved', '<u4')]"], {}), "([('header_size', '<u4'), ('image_width', '<i4'), ('image_height',\n '<i4'), ('image_planes', '<u2'), ('bits_per_pixel', '<u2'), (\n 'compression', '<u4'), ('image_size', '<u4'), ('x_pixels_per_meter',\n '<i4'), ('y_pixels_per_meter', '<i4'), ('colors_in_color_table', '<u4'),\n ('important_color_count', '<u4'), ('red_mask', '<u4'), ('green_mask',\n '<u4'), ('blue_mask', '<u4'), ('alpha_mask', '<u4'), ('color_space',\n '|S4'), ('cie_xyz_tripple', '<u4', (3, 3)), ('gamma_red', '<u4'), (\n 'gamma_green', '<u4'), ('gamma_blue', '<u4'), ('intent', '<u4'), (\n 'profile_data', 
'<u4'), ('profile_size', '<u4'), ('reserved', '<u4')])\n", (23305, 23952), True, 'import numpy as np\n'), ((24399, 24538), 'numpy.dtype', 'np.dtype', (["[('header_size', '<u4'), ('image_width', '<u2'), ('image_height', '<u2'), (\n 'image_planes', '<u2'), ('bits_per_pixel', '<u2')]"], {}), "([('header_size', '<u4'), ('image_width', '<u2'), ('image_height',\n '<u2'), ('image_planes', '<u2'), ('bits_per_pixel', '<u2')])\n", (24407, 24538), True, 'import numpy as np\n'), ((25372, 25399), 'numpy.arange', 'np.arange', (['(256)'], {'dtype': '"""<u1"""'}), "(256, dtype='<u1')\n", (25381, 25399), True, 'import numpy as np\n'), ((25837, 25870), 'numpy.asarray', 'np.asarray', (['[0, 255]'], {'dtype': '"""<u1"""'}), "([0, 255], dtype='<u1')\n", (25847, 25870), True, 'import numpy as np\n'), ((26547, 26580), 'numpy.asarray', 'np.asarray', (['[0, 255]'], {'dtype': '"""<u1"""'}), "([0, 255], dtype='<u1')\n", (26557, 26580), True, 'import numpy as np\n'), ((26600, 26667), 'numpy.concatenate', 'np.concatenate', (['([gray_table_uint1] * (256 // gray_table_uint1.size))'], {}), '([gray_table_uint1] * (256 // gray_table_uint1.size))\n', (26614, 26667), True, 'import numpy as np\n'), ((26688, 26730), 'numpy.asarray', 'np.asarray', (['[0, 85, 170, 255]'], {'dtype': '"""<u1"""'}), "([0, 85, 170, 255], dtype='<u1')\n", (26698, 26730), True, 'import numpy as np\n'), ((26750, 26817), 'numpy.concatenate', 'np.concatenate', (['([gray_table_uint2] * (256 // gray_table_uint2.size))'], {}), '([gray_table_uint2] * (256 // gray_table_uint2.size))\n', (26764, 26817), True, 'import numpy as np\n'), ((26838, 26885), 'numpy.linspace', 'np.linspace', (['(0)', '(255)'], {'num': '(2 ** 3)', 'dtype': 'np.uint8'}), '(0, 255, num=2 ** 3, dtype=np.uint8)\n', (26849, 26885), True, 'import numpy as np\n'), ((26903, 26970), 'numpy.concatenate', 'np.concatenate', (['([gray_table_uint3] * (256 // gray_table_uint3.size))'], {}), '([gray_table_uint3] * (256 // gray_table_uint3.size))\n', (26917, 26970), True, 'import numpy as np\n'), ((26991, 27038), 'numpy.linspace', 'np.linspace', (['(0)', '(255)'], {'num': '(2 ** 4)', 'dtype': 'np.uint8'}), '(0, 255, num=2 ** 4, dtype=np.uint8)\n', (27002, 27038), True, 'import numpy as np\n'), ((27056, 27123), 'numpy.concatenate', 'np.concatenate', (['([gray_table_uint4] * (256 // gray_table_uint4.size))'], {}), '([gray_table_uint4] * (256 // gray_table_uint4.size))\n', (27070, 27123), True, 'import numpy as np\n'), ((27144, 27191), 'numpy.linspace', 'np.linspace', (['(0)', '(255)'], {'num': '(2 ** 5)', 'dtype': 'np.uint8'}), '(0, 255, num=2 ** 5, dtype=np.uint8)\n', (27155, 27191), True, 'import numpy as np\n'), ((27209, 27276), 'numpy.concatenate', 'np.concatenate', (['([gray_table_uint5] * (256 // gray_table_uint5.size))'], {}), '([gray_table_uint5] * (256 // gray_table_uint5.size))\n', (27223, 27276), True, 'import numpy as np\n'), ((27297, 27344), 'numpy.linspace', 'np.linspace', (['(0)', '(255)'], {'num': '(2 ** 6)', 'dtype': 'np.uint8'}), '(0, 255, num=2 ** 6, dtype=np.uint8)\n', (27308, 27344), True, 'import numpy as np\n'), ((27362, 27429), 'numpy.concatenate', 'np.concatenate', (['([gray_table_uint6] * (256 // gray_table_uint6.size))'], {}), '([gray_table_uint6] * (256 // gray_table_uint6.size))\n', (27376, 27429), True, 'import numpy as np\n'), ((27450, 27497), 'numpy.linspace', 'np.linspace', (['(0)', '(255)'], {'num': '(2 ** 7)', 'dtype': 'np.uint8'}), '(0, 255, num=2 ** 7, dtype=np.uint8)\n', (27461, 27497), True, 'import numpy as np\n'), ((27515, 27582), 'numpy.concatenate', 
'np.concatenate', (['([gray_table_uint7] * (256 // gray_table_uint7.size))'], {}), '([gray_table_uint7] * (256 // gray_table_uint7.size))\n', (27529, 27582), True, 'import numpy as np\n'), ((2022, 2048), 'numpy.packbits', 'np.packbits', (['image'], {'axis': '(1)'}), '(image, axis=1)\n', (2033, 2048), True, 'import numpy as np\n'), ((2250, 2277), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'header_t'}), '(1, dtype=header_t)\n', (2258, 2277), True, 'import numpy as np\n'), ((2296, 2335), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'bitmap_info_header_t'}), '(1, dtype=bitmap_info_header_t)\n', (2304, 2335), True, 'import numpy as np\n'), ((3465, 3492), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'header_t'}), '(1, dtype=header_t)\n', (3473, 3492), True, 'import numpy as np\n'), ((3511, 3550), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'bitmap_info_header_t'}), '(1, dtype=bitmap_info_header_t)\n', (3519, 3550), True, 'import numpy as np\n'), ((4812, 4844), 'numpy.empty', 'np.empty', (['(0, 4)'], {'dtype': 'np.uint8'}), '((0, 4), dtype=np.uint8)\n', (4820, 4844), True, 'import numpy as np\n'), ((5115, 5142), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'header_t'}), '(1, dtype=header_t)\n', (5123, 5142), True, 'import numpy as np\n'), ((5161, 5200), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'bitmap_info_header_t'}), '(1, dtype=bitmap_info_header_t)\n', (5169, 5200), True, 'import numpy as np\n'), ((6487, 6519), 'numpy.empty', 'np.empty', (['(0, 4)'], {'dtype': 'np.uint8'}), '((0, 4), dtype=np.uint8)\n', (6495, 6519), True, 'import numpy as np\n'), ((6558, 6585), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'header_t'}), '(1, dtype=header_t)\n', (6566, 6585), True, 'import numpy as np\n'), ((6604, 6641), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'bitmap_v4_header_t'}), '(1, dtype=bitmap_v4_header_t)\n', (6612, 6641), True, 'import numpy as np\n'), ((13522, 13572), 'numpy.all', 'np.all', (['(color_table[:, 0:1] == color_table[:, 1:3])'], {}), '(color_table[:, 0:1] == color_table[:, 1:3])\n', (13528, 13572), True, 'import numpy as np\n'), ((17578, 17613), 'numpy.unpackbits', 'np.unpackbits', (['packed_image'], {'axis': '(1)'}), '(packed_image, axis=1)\n', (17591, 17613), True, 'import numpy as np\n'), ((18707, 18769), 'numpy.array_equal', 'np.array_equal', (['color_table', 'gray_color_table_compressed_uint8'], {}), '(color_table, gray_color_table_compressed_uint8)\n', (18721, 18769), True, 'import numpy as np\n'), ((19234, 19268), 'numpy.repeat', 'np.repeat', (['packed_image', '(2)'], {'axis': '(1)'}), '(packed_image, 2, axis=1)\n', (19243, 19268), True, 'import numpy as np\n'), ((19273, 19336), 'numpy.right_shift', 'np.right_shift', (['color_index[:, ::2]', '(4)'], {'out': 'color_index[:, ::2]'}), '(color_index[:, ::2], 4, out=color_index[:, ::2])\n', (19287, 19336), True, 'import numpy as np\n'), ((19448, 19519), 'numpy.zeros', 'np.zeros', (['(256 // 2 ** 4, 2 ** 4, color_table.shape[1])'], {'dtype': 'np.uint8'}), '((256 // 2 ** 4, 2 ** 4, color_table.shape[1]), dtype=np.uint8)\n', (19456, 19519), True, 'import numpy as np\n'), ((20590, 20628), 'numpy.empty', 'np.empty', (['(shape + (3,))'], {'dtype': 'np.uint8'}), '(shape + (3,), dtype=np.uint8)\n', (20598, 20628), True, 'import numpy as np\n'), ((10487, 10526), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'header_t', 'count': '(1)'}), '(f, dtype=header_t, count=1)\n', (10498, 10526), True, 'import numpy as np\n'), ((10991, 11053), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 
'info_header_t_dict[header_size]', 'count': '(1)'}), '(f, dtype=info_header_t_dict[header_size], count=1)\n', (11002, 11053), True, 'import numpy as np\n'), ((11076, 11113), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'bitmap_v5_header_t'}), '(1, dtype=bitmap_v5_header_t)\n', (11084, 11113), True, 'import numpy as np\n'), ((12879, 12931), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""<u1"""', 'count': 'color_table_count'}), "(f, dtype='<u1', count=color_table_count)\n", (12890, 12931), True, 'import numpy as np\n'), ((20672, 20745), 'numpy.right_shift', 'np.right_shift', (['packed_image', '(5 + 5)'], {'out': 'image[:, :, 0]', 'casting': '"""unsafe"""'}), "(packed_image, 5 + 5, out=image[:, :, 0], casting='unsafe')\n", (20686, 20745), True, 'import numpy as np\n'), ((20777, 20846), 'numpy.right_shift', 'np.right_shift', (['packed_image', '(5)'], {'out': 'image[:, :, 1]', 'casting': '"""unsafe"""'}), "(packed_image, 5, out=image[:, :, 1], casting='unsafe')\n", (20791, 20846), True, 'import numpy as np\n'), ((20878, 20935), 'numpy.copyto', 'np.copyto', (['image[:, :, 2]', 'packed_image'], {'casting': '"""unsafe"""'}), "(image[:, :, 2], packed_image, casting='unsafe')\n", (20887, 20935), True, 'import numpy as np\n'), ((20944, 20987), 'numpy.take', 'np.take', (['gray_table_uint5', 'image'], {'out': 'image'}), '(gray_table_uint5, image, out=image)\n', (20951, 20987), True, 'import numpy as np\n'), ((25645, 25719), 'numpy.full_like', 'np.full_like', (['gray_color_table_compressed_uint8'], {'fill_value': '(0)', 'dtype': '"""<u1"""'}), "(gray_color_table_compressed_uint8, fill_value=0, dtype='<u1')\n", (25657, 25719), True, 'import numpy as np\n'), ((26076, 26138), 'numpy.full_like', 'np.full_like', (['gray_color_table_bool'], {'fill_value': '(0)', 'dtype': '"""<u1"""'}), "(gray_color_table_bool, fill_value=0, dtype='<u1')\n", (26088, 26138), True, 'import numpy as np\n'), ((9490, 9549), 'numpy.empty', 'np.empty', (['(packed_image.shape[0], row_size)'], {'dtype': 'np.uint8'}), '((packed_image.shape[0], row_size), dtype=np.uint8)\n', (9498, 9549), True, 'import numpy as np\n'), ((10667, 10703), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""<u4"""', 'count': '(1)'}), "(f, dtype='<u4', count=1)\n", (10678, 10703), True, 'import numpy as np\n'), ((17366, 17420), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""<u1"""', 'count': '(row_size * shape[0])'}), "(f, dtype='<u1', count=row_size * shape[0])\n", (17377, 17420), True, 'import numpy as np\n'), ((17915, 17969), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""<u1"""', 'count': '(row_size * shape[0])'}), "(f, dtype='<u1', count=row_size * shape[0])\n", (17926, 17969), True, 'import numpy as np\n'), ((18377, 18431), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""<u1"""', 'count': '(row_size * shape[0])'}), "(f, dtype='<u1', count=row_size * shape[0])\n", (18388, 18431), True, 'import numpy as np\n'), ((18999, 19053), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""<u1"""', 'count': '(row_size * shape[0])'}), "(f, dtype='<u1', count=row_size * shape[0])\n", (19010, 19053), True, 'import numpy as np\n'), ((20257, 20307), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""<u2"""', 'count': '(image_size // 2)'}), "(f, dtype='<u2', count=image_size // 2)\n", (20268, 20307), True, 'import numpy as np\n'), ((21034, 21107), 'numpy.right_shift', 'np.right_shift', (['packed_image', '(5 + 6)'], {'out': 'image[:, :, 0]', 'casting': '"""unsafe"""'}), "(packed_image, 5 + 6, out=image[:, :, 0], 
casting='unsafe')\n", (21048, 21107), True, 'import numpy as np\n'), ((21139, 21208), 'numpy.right_shift', 'np.right_shift', (['packed_image', '(5)'], {'out': 'image[:, :, 1]', 'casting': '"""unsafe"""'}), "(packed_image, 5, out=image[:, :, 1], casting='unsafe')\n", (21153, 21208), True, 'import numpy as np\n'), ((21240, 21297), 'numpy.copyto', 'np.copyto', (['image[:, :, 2]', 'packed_image'], {'casting': '"""unsafe"""'}), "(image[:, :, 2], packed_image, casting='unsafe')\n", (21249, 21297), True, 'import numpy as np\n'), ((21306, 21373), 'numpy.take', 'np.take', (['gray_table_uint5', 'image[:, :, 0::2]'], {'out': 'image[:, :, 0::2]'}), '(gray_table_uint5, image[:, :, 0::2], out=image[:, :, 0::2])\n', (21313, 21373), True, 'import numpy as np\n'), ((21382, 21443), 'numpy.take', 'np.take', (['gray_table_uint6', 'image[:, :, 1]'], {'out': 'image[:, :, 1]'}), '(gray_table_uint6, image[:, :, 1], out=image[:, :, 1])\n', (21389, 21443), True, 'import numpy as np\n'), ((14830, 14857), 'numpy.right_shift', 'np.right_shift', (['bitfield', '(1)'], {}), '(bitfield, 1)\n', (14844, 14857), True, 'import numpy as np\n'), ((15332, 15382), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""<u4"""', 'count': '(image_size // 4)'}), "(f, dtype='<u4', count=image_size // 4)\n", (15343, 15382), True, 'import numpy as np\n'), ((15460, 15505), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""<u1"""', 'count': 'image_size'}), "(f, dtype='<u1', count=image_size)\n", (15471, 15505), True, 'import numpy as np\n'), ((20063, 20099), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""<u4"""', 'count': '(3)'}), "(f, dtype='<u4', count=3)\n", (20074, 20099), True, 'import numpy as np\n'), ((14091, 14127), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""<u4"""', 'count': '(3)'}), "(f, dtype='<u4', count=3)\n", (14102, 14127), True, 'import numpy as np\n'), ((14440, 14467), 'numpy.bitwise_and', 'np.bitwise_and', (['bitfield', '(1)'], {}), '(bitfield, 1)\n', (14454, 14467), True, 'import numpy as np\n'), ((16871, 16913), 'numpy.empty', 'np.empty', (['(raw.shape + (3,))'], {'dtype': 'np.uint8'}), '(raw.shape + (3,), dtype=np.uint8)\n', (16879, 16913), True, 'import numpy as np\n'), ((14596, 14623), 'numpy.right_shift', 'np.right_shift', (['bitfield', '(1)'], {}), '(bitfield, 1)\n', (14610, 14623), True, 'import numpy as np\n'), ((16990, 17050), 'numpy.right_shift', 'np.right_shift', (['raw', 'r'], {'out': 'image[:, :, i]', 'casting': '"""unsafe"""'}), "(raw, r, out=image[:, :, i], casting='unsafe')\n", (17004, 17050), True, 'import numpy as np\n'), ((14651, 14678), 'numpy.bitwise_and', 'np.bitwise_and', (['bitfield', '(1)'], {}), '(bitfield, 1)\n', (14665, 14678), True, 'import numpy as np\n')]
|
import os
import torch
from torch import nn
from torch.autograd import Variable
import torchvision
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import utils
from arch import define_Gen, define_Dis
import numpy as np
from sklearn.metrics import mean_absolute_error
from skimage.metrics import peak_signal_noise_ratio
from calculate_fid import calculate_fid
import tensorflow as tf #to print shape of tensor with tf.shape()
def test(args):
transform = transforms.Compose([transforms.Resize((args.crop_height,args.crop_width)), transforms.ToTensor(), transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])
dataset_dirs = utils.get_testdata_link(args.dataset_dir)
a_test_data = dsets.ImageFolder(dataset_dirs['testA'], transform=transform)
b_test_data = dsets.ImageFolder(dataset_dirs['testB'], transform=transform)
a_test_loader = torch.utils.data.DataLoader(a_test_data, batch_size=args.batch_size, shuffle=False, num_workers=4)
b_test_loader = torch.utils.data.DataLoader(b_test_data, batch_size=args.batch_size, shuffle=False, num_workers=4)
Gab = define_Gen(input_nc=3, output_nc=3, ngf=args.ngf, netG='resnet_9blocks', norm=args.norm,
use_dropout= not args.no_dropout, gpu_ids=args.gpu_ids)
Gba = define_Gen(input_nc=3, output_nc=3, ngf=args.ngf, netG='resnet_9blocks', norm=args.norm,
use_dropout= not args.no_dropout, gpu_ids=args.gpu_ids)
utils.print_networks([Gab,Gba], ['Gab','Gba'])
try:
ckpt = utils.load_checkpoint('%s/latest.ckpt' % (args.checkpoint_dir))
Gab.load_state_dict(ckpt['Gab'])
Gba.load_state_dict(ckpt['Gba'])
    except Exception:
print(' [*] No checkpoint!')
#run test and calculate evaluation metrics
a_real_test = iter(a_test_loader)
b_real_test=iter(b_test_loader)
device = torch.device("cuda")
batch_size= args.batch_size
#real examples for plotting (save pic)
    a_real_example = (next(a_real_test)[0]).to(device)
    b_real_example = (next(b_real_test)[0]).to(device)
Gab.eval()
Gba.eval()
list_T1_mae=[]
list_T1_psnr=[]
list_T1_fid=[]
# for b test dataset - corresponds to T1 images
for imagesT1, imagesT2 in zip(b_real_test, a_real_test):
with torch.no_grad():
            imagesT1=imagesT1[0].to(device) # only the first index - but then what do the other indices contain?
imagesT2=imagesT2[0].to(device)
a_fake_test = Gab(imagesT1) #T2 translated
b_recon_test = Gba(a_fake_test) #T1 reconstructed
b_fake_test = Gba(imagesT2) #T1 translated
a_recon_test = Gab(b_fake_test) #T2 reconstructed
imagesT1=imagesT1.cpu()
#brec_to_cpu=b_recon_test.cpu() #T1 reconstructed
bfake_to_cpu=b_fake_test.cpu() #T1 translated
#brec_to_cpu = brec_to_cpu.view(batch_size, 3, 256, 256).numpy()
bfake_to_cpu = bfake_to_cpu.view(batch_size, 3, 256, 256).numpy()
imagesT1=np.squeeze(imagesT1) # squeezed to be [3, 256, 256] before was [1, 3, 256, 256]
#brec_to_cpu=np.squeeze(brec_to_cpu) # squeezed to be [3, 256, 256] before was [1, 3, 256, 256]
bfake_to_cpu=np.squeeze(bfake_to_cpu) # squeezed to be [3, 256, 256] before was [1, 3, 256, 256]
imagesT1=imagesT1[0,:,:].numpy() #choose 1 channel of the RGB - output is [1,256,256]
#brec_to_cpu=brec_to_cpu[1,:,:] #choose 1 channel of the RGB
bfake_to_cpu=bfake_to_cpu[0,:,:] #choose 1 channel of the RGB
#images_fid=imagesT1.reshape((1, 256, 256)) # check if it is this or reshape(1,256,256) - see AE_T1T2 the shape and size of the tensors before going in the MAE
#brec_fid= brec_to_cpu.reshape((1, 256, 256))
#bfake_fid= bfake_to_cpu.reshape((1, 256, 256))
#squeeze all to be (256,256)
imagesT1=np.squeeze(imagesT1)
bfake_to_cpu=np.squeeze(bfake_to_cpu)
#change this to calculate the MAE, PSNR and FID between b_real (from the dataset of T1 images real) and b_fake (the translated T1 images from the T2 slices)
list_T1_mae.append(mean_absolute_error(imagesT1,bfake_to_cpu))
list_T1_psnr.append(peak_signal_noise_ratio(imagesT1,bfake_to_cpu))
list_T1_fid.append(calculate_fid(imagesT1,bfake_to_cpu))
            # could add a print here to check the shape/size of the list - it should be flattened:
    #print mean of MAE, PSNR, FID # compute the mean of the flattened array
print("Mean of MAE = " + str(np.mean(list_T1_mae)))
print("Mean of PSNR = " + str(np.mean(list_T1_psnr)))
print("Mean of FID = " + str(np.mean(list_T1_fid)))
    #print variance of MAE, PSNR, FID # compute the variance of the flattened array
print("Variance of MAE = " + str(np.var(list_T1_mae)))
print("Variance of PSNR = " + str(np.var(list_T1_psnr)))
print("Variance of FID = " + str(np.var(list_T1_fid)))
#Example for saving pic - just using the first image example of the datasets to plot the image
with torch.no_grad():
#input is T2 images
b_fake_example = Gba(a_real_example) # output is the translated T1 image from the inputed T2 slice
a_recon_example = Gab(b_fake_example) # output is the reconstructed T2 slice
#input is T1 images
a_fake_example = Gab(b_real_example) # output is the translated T2 image from the inputed T1 slice
b_recon_example = Gba(a_fake_example) # output is the reconstructed T1 slice
# a_real_example= T2 real ; b_fake_example= T1 translated ; a_recon_example = T2 reconstructed | b_real_example= T1 real ; a_fake_example = T2 translated ; b_recon_example= T1 reconstructed
pic = (torch.cat([a_real_example, b_fake_example, a_recon_example, b_real_example, a_fake_example, b_recon_example], dim=0).data + 1) / 2.0
if not os.path.isdir(args.results_dir):
os.makedirs(args.results_dir)
torchvision.utils.save_image(pic, args.results_dir+'/sample.jpg', nrow=3)
b_real_example=np.squeeze(b_real_example.cpu())
b_fake_example=np.squeeze(b_fake_example.cpu())
b_real_example=b_real_example[0,:,:].numpy()
b_fake_example=b_fake_example[0,:,:].numpy()
print(mean_absolute_error(np.squeeze(b_real_example),np.squeeze(b_fake_example)))
print(peak_signal_noise_ratio(np.squeeze(b_real_example),np.squeeze(b_fake_example)))
print(calculate_fid(np.squeeze(b_real_example),np.squeeze(b_fake_example)))
|
[
"arch.define_Gen",
"torch.cat",
"sklearn.metrics.mean_absolute_error",
"utils.print_networks",
"numpy.mean",
"torch.device",
"torchvision.transforms.Normalize",
"torch.no_grad",
"torch.utils.data.DataLoader",
"utils.load_checkpoint",
"utils.get_testdata_link",
"numpy.var",
"torchvision.datasets.ImageFolder",
"torchvision.utils.save_image",
"numpy.squeeze",
"torchvision.transforms.Resize",
"os.makedirs",
"calculate_fid.calculate_fid",
"os.path.isdir",
"skimage.metrics.peak_signal_noise_ratio",
"torchvision.transforms.ToTensor"
] |
[((682, 723), 'utils.get_testdata_link', 'utils.get_testdata_link', (['args.dataset_dir'], {}), '(args.dataset_dir)\n', (705, 723), False, 'import utils\n'), ((743, 804), 'torchvision.datasets.ImageFolder', 'dsets.ImageFolder', (["dataset_dirs['testA']"], {'transform': 'transform'}), "(dataset_dirs['testA'], transform=transform)\n", (760, 804), True, 'import torchvision.datasets as dsets\n'), ((823, 884), 'torchvision.datasets.ImageFolder', 'dsets.ImageFolder', (["dataset_dirs['testB']"], {'transform': 'transform'}), "(dataset_dirs['testB'], transform=transform)\n", (840, 884), True, 'import torchvision.datasets as dsets\n'), ((907, 1009), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['a_test_data'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(4)'}), '(a_test_data, batch_size=args.batch_size,\n shuffle=False, num_workers=4)\n', (934, 1009), False, 'import torch\n'), ((1026, 1128), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['b_test_data'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(4)'}), '(b_test_data, batch_size=args.batch_size,\n shuffle=False, num_workers=4)\n', (1053, 1128), False, 'import torch\n'), ((1136, 1283), 'arch.define_Gen', 'define_Gen', ([], {'input_nc': '(3)', 'output_nc': '(3)', 'ngf': 'args.ngf', 'netG': '"""resnet_9blocks"""', 'norm': 'args.norm', 'use_dropout': '(not args.no_dropout)', 'gpu_ids': 'args.gpu_ids'}), "(input_nc=3, output_nc=3, ngf=args.ngf, netG='resnet_9blocks',\n norm=args.norm, use_dropout=not args.no_dropout, gpu_ids=args.gpu_ids)\n", (1146, 1283), False, 'from arch import define_Gen, define_Dis\n'), ((1344, 1491), 'arch.define_Gen', 'define_Gen', ([], {'input_nc': '(3)', 'output_nc': '(3)', 'ngf': 'args.ngf', 'netG': '"""resnet_9blocks"""', 'norm': 'args.norm', 'use_dropout': '(not args.no_dropout)', 'gpu_ids': 'args.gpu_ids'}), "(input_nc=3, output_nc=3, ngf=args.ngf, netG='resnet_9blocks',\n norm=args.norm, use_dropout=not args.no_dropout, gpu_ids=args.gpu_ids)\n", (1354, 1491), False, 'from arch import define_Gen, define_Dis\n'), ((1547, 1595), 'utils.print_networks', 'utils.print_networks', (['[Gab, Gba]', "['Gab', 'Gba']"], {}), "([Gab, Gba], ['Gab', 'Gba'])\n", (1567, 1595), False, 'import utils\n'), ((1955, 1975), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1967, 1975), False, 'import torch\n'), ((6315, 6390), 'torchvision.utils.save_image', 'torchvision.utils.save_image', (['pic', "(args.results_dir + '/sample.jpg')"], {'nrow': '(3)'}), "(pic, args.results_dir + '/sample.jpg', nrow=3)\n", (6343, 6390), False, 'import torchvision\n'), ((1619, 1680), 'utils.load_checkpoint', 'utils.load_checkpoint', (["('%s/latest.ckpt' % args.checkpoint_dir)"], {}), "('%s/latest.ckpt' % args.checkpoint_dir)\n", (1640, 1680), False, 'import utils\n'), ((5431, 5446), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5444, 5446), False, 'import torch\n'), ((6235, 6266), 'os.path.isdir', 'os.path.isdir', (['args.results_dir'], {}), '(args.results_dir)\n', (6248, 6266), False, 'import os\n'), ((6276, 6305), 'os.makedirs', 'os.makedirs', (['args.results_dir'], {}), '(args.results_dir)\n', (6287, 6305), False, 'import os\n'), ((513, 567), 'torchvision.transforms.Resize', 'transforms.Resize', (['(args.crop_height, args.crop_width)'], {}), '((args.crop_height, args.crop_width))\n', (530, 567), True, 'import torchvision.transforms as transforms\n'), ((568, 589), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (587, 
589), True, 'import torchvision.transforms as transforms\n'), ((591, 654), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.5, 0.5, 0.5]', 'std': '[0.5, 0.5, 0.5]'}), '(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n', (611, 654), True, 'import torchvision.transforms as transforms\n'), ((2406, 2421), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2419, 2421), False, 'import torch\n'), ((3227, 3247), 'numpy.squeeze', 'np.squeeze', (['imagesT1'], {}), '(imagesT1)\n', (3237, 3247), True, 'import numpy as np\n'), ((3440, 3464), 'numpy.squeeze', 'np.squeeze', (['bfake_to_cpu'], {}), '(bfake_to_cpu)\n', (3450, 3464), True, 'import numpy as np\n'), ((4188, 4208), 'numpy.squeeze', 'np.squeeze', (['imagesT1'], {}), '(imagesT1)\n', (4198, 4208), True, 'import numpy as np\n'), ((4234, 4258), 'numpy.squeeze', 'np.squeeze', (['bfake_to_cpu'], {}), '(bfake_to_cpu)\n', (4244, 4258), True, 'import numpy as np\n'), ((6628, 6654), 'numpy.squeeze', 'np.squeeze', (['b_real_example'], {}), '(b_real_example)\n', (6638, 6654), True, 'import numpy as np\n'), ((6655, 6681), 'numpy.squeeze', 'np.squeeze', (['b_fake_example'], {}), '(b_fake_example)\n', (6665, 6681), True, 'import numpy as np\n'), ((6718, 6744), 'numpy.squeeze', 'np.squeeze', (['b_real_example'], {}), '(b_real_example)\n', (6728, 6744), True, 'import numpy as np\n'), ((6745, 6771), 'numpy.squeeze', 'np.squeeze', (['b_fake_example'], {}), '(b_fake_example)\n', (6755, 6771), True, 'import numpy as np\n'), ((6798, 6824), 'numpy.squeeze', 'np.squeeze', (['b_real_example'], {}), '(b_real_example)\n', (6808, 6824), True, 'import numpy as np\n'), ((6825, 6851), 'numpy.squeeze', 'np.squeeze', (['b_fake_example'], {}), '(b_fake_example)\n', (6835, 6851), True, 'import numpy as np\n'), ((4472, 4515), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['imagesT1', 'bfake_to_cpu'], {}), '(imagesT1, bfake_to_cpu)\n', (4491, 4515), False, 'from sklearn.metrics import mean_absolute_error\n'), ((4548, 4595), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', (['imagesT1', 'bfake_to_cpu'], {}), '(imagesT1, bfake_to_cpu)\n', (4571, 4595), False, 'from skimage.metrics import peak_signal_noise_ratio\n'), ((4627, 4664), 'calculate_fid.calculate_fid', 'calculate_fid', (['imagesT1', 'bfake_to_cpu'], {}), '(imagesT1, bfake_to_cpu)\n', (4640, 4664), False, 'from calculate_fid import calculate_fid\n'), ((4894, 4914), 'numpy.mean', 'np.mean', (['list_T1_mae'], {}), '(list_T1_mae)\n', (4901, 4914), True, 'import numpy as np\n'), ((4951, 4972), 'numpy.mean', 'np.mean', (['list_T1_psnr'], {}), '(list_T1_psnr)\n', (4958, 4972), True, 'import numpy as np\n'), ((5008, 5028), 'numpy.mean', 'np.mean', (['list_T1_fid'], {}), '(list_T1_fid)\n', (5015, 5028), True, 'import numpy as np\n'), ((5157, 5176), 'numpy.var', 'np.var', (['list_T1_mae'], {}), '(list_T1_mae)\n', (5163, 5176), True, 'import numpy as np\n'), ((5217, 5237), 'numpy.var', 'np.var', (['list_T1_psnr'], {}), '(list_T1_psnr)\n', (5223, 5237), True, 'import numpy as np\n'), ((5277, 5296), 'numpy.var', 'np.var', (['list_T1_fid'], {}), '(list_T1_fid)\n', (5283, 5296), True, 'import numpy as np\n'), ((6086, 6206), 'torch.cat', 'torch.cat', (['[a_real_example, b_fake_example, a_recon_example, b_real_example,\n a_fake_example, b_recon_example]'], {'dim': '(0)'}), '([a_real_example, b_fake_example, a_recon_example, b_real_example,\n a_fake_example, b_recon_example], dim=0)\n', (6095, 6206), False, 'import torch\n')]
|
"""
RAMP backend API
Methods for interacting with the database
"""
from __future__ import print_function, absolute_import
import os
import numpy as np
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.engine.url import URL
from ..model import Model
from .query import select_submissions_by_state
from .query import select_submissions_by_id
from .query import select_submission_by_name
from .query import select_event_by_name
from ..config import STATES, UnknownStateError
__all__ = [
'get_submissions',
'get_submission_by_id',
'get_submission_by_name',
'set_submission_state',
'get_submission_state',
'set_submission_max_ram',
'set_submission_error_msg',
'set_predictions',
'score_submission',
'get_event_nb_folds',
]
def get_submissions(config, event_name, state='new'):
"""
Retrieve a list of submissions and their associated files
depending on their current status
Parameters
----------
config : dict
configuration
event_name : str
name of the RAMP event
state : str, optional
state of the requested submissions (default is 'new')
Returns
-------
List of tuples (int, List[str]) :
(submission_id, [path to submission files on the db])
Raises
------
ValueError :
when mandatory connexion parameters are missing from config
UnknownStateError :
when the requested state does not exist in the database
"""
if state not in STATES:
raise UnknownStateError("Unrecognized state : '{}'".format(state))
# Create database url
db_url = URL(**config)
db = create_engine(db_url)
# Create a configured "Session" class
Session = sessionmaker(db)
# Link the relational model to the database
Model.metadata.create_all(db)
    # Connect to the database and perform action
with db.connect() as conn:
session = Session(bind=conn)
submissions = select_submissions_by_state(session, event_name, state)
if not submissions:
return []
subids = [submission.id for submission in submissions]
subfiles = [submission.files for submission in submissions]
filenames = [[f.path for f in files] for files in subfiles]
return list(zip(subids, filenames))
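# Usage sketch for get_submissions (illustrative only - the connexion values and
# event name below are placeholders, not values taken from this project):
#   config = {'drivername': 'postgresql', 'username': 'ramp', 'password': '...',
#             'host': 'localhost', 'port': 5432, 'database': 'ramp'}
#   for submission_id, file_paths in get_submissions(config, 'my_event', state='new'):
#       print(submission_id, file_paths)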
def get_submission_by_id(config, submission_id):
"""
Get a `Submission` instance given a submission id
Parameters
----------
config : dict
configuration
submission_id : int
submission id
Returns
-------
`Submission` instance
"""
# Create database url
db_url = URL(**config)
db = create_engine(db_url)
# Create a configured "Session" class
Session = sessionmaker(db)
# Link the relational model to the database
Model.metadata.create_all(db)
    # Connect to the database and perform action
with db.connect() as conn:
session = Session(bind=conn)
submission = select_submissions_by_id(session, submission_id)
# force event name and team name to be cached
submission.event.name
submission.team.name
return submission
def get_submission_by_name(config, event_name, team_name, name):
"""
Get a submission by name
Parameters
----------
config : dict
configuration
event_name : str
name of the RAMP event
team_name : str
name of the RAMP team
name : str
name of the submission
Returns
-------
`Submission` instance
"""
# Create database url
db_url = URL(**config)
db = create_engine(db_url)
# Create a configured "Session" class
Session = sessionmaker(db)
# Link the relational model to the database
Model.metadata.create_all(db)
    # Connect to the database and perform action
with db.connect() as conn:
session = Session(bind=conn)
submission = select_submission_by_name(
session,
event_name,
team_name,
name)
# force event name and team name to be cached
submission.event.name
submission.team.name
return submission
def set_submission_state(config, submission_id, state):
"""
Modify the state of a submission in the RAMP database
Parameters
----------
config : dict
configuration
submission_id : int
id of the requested submission
state : str
new state of the submission
Raises
------
ValueError :
when mandatory connexion parameters are missing from config
UnknownStateError :
when the requested state does not exist in the database
"""
if state not in STATES:
raise UnknownStateError("Unrecognized state : '{}'".format(state))
# Create database url
db_url = URL(**config)
db = create_engine(db_url)
# Create a configured "Session" class
Session = sessionmaker(db)
# Link the relational model to the database
Model.metadata.create_all(db)
    # Connect to the database and perform action
with db.connect() as conn:
session = Session(bind=conn)
submission = select_submissions_by_id(session, submission_id)
submission.set_state(state)
session.commit()
def get_submission_state(config, submission_id):
"""
    Get the current state of a submission in the RAMP database
Parameters
----------
config : dict
configuration
submission_id : int
id of the requested submission
Raises
------
ValueError :
when mandatory connexion parameters are missing from config
UnknownStateError :
when the requested state does not exist in the database
"""
# Create database url
db_url = URL(**config)
db = create_engine(db_url)
# Create a configured "Session" class
Session = sessionmaker(db)
# Link the relational model to the database
Model.metadata.create_all(db)
    # Connect to the database and perform action
with db.connect() as conn:
session = Session(bind=conn)
submission = select_submissions_by_id(session, submission_id)
return submission.state
def set_predictions(config, submission_id, prediction_path, ext='npy'):
"""
Insert predictions in the database after training/testing
Parameters
----------
config : dict
configuration
submission_id : int
id of the related submission
prediction_path : str
local path where predictions are saved.
Should end with 'training_output'.
ext : {'npy', 'npz', 'csv'}, optional
        extension of the saved prediction file (default is 'npy')
Raises
------
NotImplementedError :
when the extension cannot be read properly
"""
# Create database url
db_url = URL(**config)
db = create_engine(db_url)
# Create a configured "Session" class
Session = sessionmaker(db)
# Link the relational model to the database
Model.metadata.create_all(db)
    # Connect to the database and perform action
with db.connect() as conn:
session = Session(bind=conn)
submission = select_submissions_by_id(session, submission_id)
for fold_id, cv_fold in enumerate(submission.on_cv_folds):
cv_fold.full_train_y_pred = _load_submission(
prediction_path, fold_id, 'train', ext)
cv_fold.test_y_pred = _load_submission(
prediction_path, fold_id, 'test', ext)
cv_fold.train_time = _get_time(prediction_path, fold_id, 'train')
cv_fold.valid_time = _get_time(prediction_path, fold_id, 'valid')
cv_fold.test_time = _get_time(prediction_path, fold_id, 'test')
cv_fold.state = 'tested'
session.commit()
submission.state = 'tested'
session.commit()
def _load_submission(path, fold_id, typ, ext):
"""
Prediction loader method
Parameters
----------
path : str
local path where predictions are saved
fold_id : int
id of the current CV fold
    typ : {'train', 'test'}
type of prediction
ext : {'npy', 'npz', 'csv'}
        extension of the saved prediction file
Raises
------
ValueError :
when typ is neither 'train' nor 'test'
NotImplementedError :
when the extension cannot be read properly
"""
pred_file = os.path.join(path,
'fold_{}'.format(fold_id),
'y_pred_{}.{}'.format(typ, ext))
if typ not in ['train', 'test']:
raise ValueError("Only 'train' or 'test' are expected for arg 'typ'")
if ext.lower() in ['npy', 'npz']:
return np.load(pred_file)['y_pred']
elif ext.lower() == 'csv':
        return np.loadtxt(pred_file)
else:
        raise NotImplementedError("No reader implemented for extension {}"
                                  .format(ext))
def _get_time(path, fold_id, typ):
"""
get time duration in seconds of train or valid
or test for a given fold.
Parameters
----------
path : str
local path where predictions are saved
fold_id : int
id of the current CV fold
    typ : {'train', 'valid', 'test'}
Raises
------
ValueError :
        when typ is not 'train', 'valid' or 'test'
"""
if typ not in ['train', 'valid', 'test']:
raise ValueError(
"Only 'train' or 'valid' or 'test' are expected for arg 'typ'")
time_file = os.path.join(path, 'fold_{}'.format(fold_id), typ + '_time')
return float(open(time_file).read())
def score_submission(config, submission_id):
"""
Score a submission and change its state to 'scored'
Parameters
----------
config : dict
configuration
submission_id : int
submission id
Raises
------
ValueError :
when the state of the submission is not 'tested'
(only a submission with state 'tested' can be scored)
"""
# Create database url
db_url = URL(**config)
db = create_engine(db_url)
# Create a configured "Session" class
Session = sessionmaker(db)
# Link the relational model to the database
Model.metadata.create_all(db)
    # Connect to the database and perform action
with db.connect() as conn:
session = Session(bind=conn)
submission = select_submissions_by_id(session, submission_id)
if submission.state != 'tested':
raise ValueError('Submission state must be "tested"'
' to score, not "{}"'.format(submission.state))
# We are conservative:
# only score if all stages (train, test, validation)
# were completed. submission_on_cv_fold compute scores can be called
# manually if needed for submission in various error states.
for submission_on_cv_fold in submission.on_cv_folds:
submission_on_cv_fold.session = session
submission_on_cv_fold.compute_train_scores(session)
submission_on_cv_fold.compute_valid_scores(session)
submission_on_cv_fold.compute_test_scores(session)
submission_on_cv_fold.state = 'scored'
session.commit()
submission.compute_test_score_cv_bag(session)
submission.compute_valid_score_cv_bag(session)
# Means and stds were constructed on demand by fetching fold times.
        # It was slow because submission_on_folds also contain possibly large
        # predictions. If postgres solves this issue (which can be tested on
        # the mean and std scores on the private leaderboard), the
# corresponding columns (which are now redundant) can be deleted in
# Submission and this computation can also be deleted.
submission.train_time_cv_mean = np.mean(
[ts.train_time for ts in submission.on_cv_folds])
submission.valid_time_cv_mean = np.mean(
[ts.valid_time for ts in submission.on_cv_folds])
submission.test_time_cv_mean = np.mean(
[ts.test_time for ts in submission.on_cv_folds])
submission.train_time_cv_std = np.std(
[ts.train_time for ts in submission.on_cv_folds])
submission.valid_time_cv_std = np.std(
[ts.valid_time for ts in submission.on_cv_folds])
submission.test_time_cv_std = np.std(
[ts.test_time for ts in submission.on_cv_folds])
submission.state = 'scored'
session.commit()
def set_submission_max_ram(config, submission_id, max_ram_mb):
"""
Modify the max RAM mb usage of a submission
Parameters
----------
config : dict
configuration
submission_id : int
id of the requested submission
max_ram_mb : float
max ram usage in MB
"""
# Create database url
db_url = URL(**config)
db = create_engine(db_url)
# Create a configured "Session" class
Session = sessionmaker(db)
# Link the relational model to the database
Model.metadata.create_all(db)
    # Connect to the database and perform action
with db.connect() as conn:
session = Session(bind=conn)
submission = select_submissions_by_id(session, submission_id)
submission.max_ram = max_ram_mb
session.commit()
def set_submission_error_msg(config, submission_id, error_msg):
"""
Set submission message error
Parameters
----------
config : dict
configuration
submission_id : int
id of the requested submission
error_msg : str
message error
"""
# Create database url
db_url = URL(**config)
db = create_engine(db_url)
# Create a configured "Session" class
Session = sessionmaker(db)
# Link the relational model to the database
Model.metadata.create_all(db)
    # Connect to the database and perform action
with db.connect() as conn:
session = Session(bind=conn)
submission = select_submissions_by_id(session, submission_id)
submission.error_msg = error_msg
session.commit()
def get_event_nb_folds(config, event_name):
# Create database url
db_url = URL(**config)
db = create_engine(db_url)
# Create a configured "Session" class
Session = sessionmaker(db)
# Link the relational model to the database
Model.metadata.create_all(db)
    # Connect to the database and perform action
with db.connect() as conn:
session = Session(bind=conn)
event = select_event_by_name(session, event_name)
return len(event.cv_folds)
|
[
"numpy.load",
"numpy.std",
"sqlalchemy.orm.sessionmaker",
"numpy.mean",
"numpy.loadfromtxt",
"sqlalchemy.create_engine",
"sqlalchemy.engine.url.URL"
] |
[((1656, 1669), 'sqlalchemy.engine.url.URL', 'URL', ([], {}), '(**config)\n', (1659, 1669), False, 'from sqlalchemy.engine.url import URL\n'), ((1679, 1700), 'sqlalchemy.create_engine', 'create_engine', (['db_url'], {}), '(db_url)\n', (1692, 1700), False, 'from sqlalchemy import create_engine\n'), ((1758, 1774), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', (['db'], {}), '(db)\n', (1770, 1774), False, 'from sqlalchemy.orm import sessionmaker\n'), ((2676, 2689), 'sqlalchemy.engine.url.URL', 'URL', ([], {}), '(**config)\n', (2679, 2689), False, 'from sqlalchemy.engine.url import URL\n'), ((2699, 2720), 'sqlalchemy.create_engine', 'create_engine', (['db_url'], {}), '(db_url)\n', (2712, 2720), False, 'from sqlalchemy import create_engine\n'), ((2778, 2794), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', (['db'], {}), '(db)\n', (2790, 2794), False, 'from sqlalchemy.orm import sessionmaker\n'), ((3671, 3684), 'sqlalchemy.engine.url.URL', 'URL', ([], {}), '(**config)\n', (3674, 3684), False, 'from sqlalchemy.engine.url import URL\n'), ((3694, 3715), 'sqlalchemy.create_engine', 'create_engine', (['db_url'], {}), '(db_url)\n', (3707, 3715), False, 'from sqlalchemy import create_engine\n'), ((3773, 3789), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', (['db'], {}), '(db)\n', (3785, 3789), False, 'from sqlalchemy.orm import sessionmaker\n'), ((4916, 4929), 'sqlalchemy.engine.url.URL', 'URL', ([], {}), '(**config)\n', (4919, 4929), False, 'from sqlalchemy.engine.url import URL\n'), ((4939, 4960), 'sqlalchemy.create_engine', 'create_engine', (['db_url'], {}), '(db_url)\n', (4952, 4960), False, 'from sqlalchemy import create_engine\n'), ((5018, 5034), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', (['db'], {}), '(db)\n', (5030, 5034), False, 'from sqlalchemy.orm import sessionmaker\n'), ((5862, 5875), 'sqlalchemy.engine.url.URL', 'URL', ([], {}), '(**config)\n', (5865, 5875), False, 'from sqlalchemy.engine.url import URL\n'), ((5885, 5906), 'sqlalchemy.create_engine', 'create_engine', (['db_url'], {}), '(db_url)\n', (5898, 5906), False, 'from sqlalchemy import create_engine\n'), ((5964, 5980), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', (['db'], {}), '(db)\n', (5976, 5980), False, 'from sqlalchemy.orm import sessionmaker\n'), ((6937, 6950), 'sqlalchemy.engine.url.URL', 'URL', ([], {}), '(**config)\n', (6940, 6950), False, 'from sqlalchemy.engine.url import URL\n'), ((6960, 6981), 'sqlalchemy.create_engine', 'create_engine', (['db_url'], {}), '(db_url)\n', (6973, 6981), False, 'from sqlalchemy import create_engine\n'), ((7039, 7055), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', (['db'], {}), '(db)\n', (7051, 7055), False, 'from sqlalchemy.orm import sessionmaker\n'), ((10205, 10218), 'sqlalchemy.engine.url.URL', 'URL', ([], {}), '(**config)\n', (10208, 10218), False, 'from sqlalchemy.engine.url import URL\n'), ((10228, 10249), 'sqlalchemy.create_engine', 'create_engine', (['db_url'], {}), '(db_url)\n', (10241, 10249), False, 'from sqlalchemy import create_engine\n'), ((10307, 10323), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', (['db'], {}), '(db)\n', (10319, 10323), False, 'from sqlalchemy.orm import sessionmaker\n'), ((13011, 13024), 'sqlalchemy.engine.url.URL', 'URL', ([], {}), '(**config)\n', (13014, 13024), False, 'from sqlalchemy.engine.url import URL\n'), ((13034, 13055), 'sqlalchemy.create_engine', 'create_engine', (['db_url'], {}), '(db_url)\n', (13047, 13055), False, 'from sqlalchemy import create_engine\n'), ((13113, 13129), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', (['db'], {}), 
'(db)\n', (13125, 13129), False, 'from sqlalchemy.orm import sessionmaker\n'), ((13796, 13809), 'sqlalchemy.engine.url.URL', 'URL', ([], {}), '(**config)\n', (13799, 13809), False, 'from sqlalchemy.engine.url import URL\n'), ((13819, 13840), 'sqlalchemy.create_engine', 'create_engine', (['db_url'], {}), '(db_url)\n', (13832, 13840), False, 'from sqlalchemy import create_engine\n'), ((13898, 13914), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', (['db'], {}), '(db)\n', (13910, 13914), False, 'from sqlalchemy.orm import sessionmaker\n'), ((14336, 14349), 'sqlalchemy.engine.url.URL', 'URL', ([], {}), '(**config)\n', (14339, 14349), False, 'from sqlalchemy.engine.url import URL\n'), ((14359, 14380), 'sqlalchemy.create_engine', 'create_engine', (['db_url'], {}), '(db_url)\n', (14372, 14380), False, 'from sqlalchemy import create_engine\n'), ((14438, 14454), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', (['db'], {}), '(db)\n', (14450, 14454), False, 'from sqlalchemy.orm import sessionmaker\n'), ((11981, 12038), 'numpy.mean', 'np.mean', (['[ts.train_time for ts in submission.on_cv_folds]'], {}), '([ts.train_time for ts in submission.on_cv_folds])\n', (11988, 12038), True, 'import numpy as np\n'), ((12092, 12149), 'numpy.mean', 'np.mean', (['[ts.valid_time for ts in submission.on_cv_folds]'], {}), '([ts.valid_time for ts in submission.on_cv_folds])\n', (12099, 12149), True, 'import numpy as np\n'), ((12202, 12258), 'numpy.mean', 'np.mean', (['[ts.test_time for ts in submission.on_cv_folds]'], {}), '([ts.test_time for ts in submission.on_cv_folds])\n', (12209, 12258), True, 'import numpy as np\n'), ((12311, 12367), 'numpy.std', 'np.std', (['[ts.train_time for ts in submission.on_cv_folds]'], {}), '([ts.train_time for ts in submission.on_cv_folds])\n', (12317, 12367), True, 'import numpy as np\n'), ((12420, 12476), 'numpy.std', 'np.std', (['[ts.valid_time for ts in submission.on_cv_folds]'], {}), '([ts.valid_time for ts in submission.on_cv_folds])\n', (12426, 12476), True, 'import numpy as np\n'), ((12528, 12583), 'numpy.std', 'np.std', (['[ts.test_time for ts in submission.on_cv_folds]'], {}), '([ts.test_time for ts in submission.on_cv_folds])\n', (12534, 12583), True, 'import numpy as np\n'), ((8847, 8865), 'numpy.load', 'np.load', (['pred_file'], {}), '(pred_file)\n', (8854, 8865), True, 'import numpy as np\n'), ((8922, 8947), 'numpy.loadfromtxt', 'np.loadfromtxt', (['pred_file'], {}), '(pred_file)\n', (8936, 8947), True, 'import numpy as np\n')]
|
from power_planner.utils.utils import get_distance_surface, rescale, normalize
import numpy as np
import matplotlib.pyplot as plt
import rasterio
class CorridorUtils():
def __init__(self):
pass
@staticmethod
def get_middle_line(start_inds, dest_inds, instance_corr, num_points=2):
vec = (dest_inds - start_inds) / 2
middle_point = start_inds + vec
ortho_vec = [-vec[1], vec[0]]
ortho_vec = ortho_vec / np.linalg.norm(ortho_vec)
inds_x, inds_y = np.where(instance_corr)
xs, xe = (inds_x[0], inds_x[-1])
ys, ye = (inds_y[0], inds_y[-1])
x, y = tuple(middle_point)
v1, v2 = tuple(ortho_vec)
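        # spacing between sampled points: the distance from the middle point to the
        # nearest corridor boundary along the orthogonal direction, divided evenly
        # over the requested number of points on each side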
dists_each = min(
np.absolute(
[(x - xs) / v1, (xe - x) / v1, (y - ys) / v2, (ye - y) / v2]
)
) / (num_points + 1)
points = [middle_point.astype(int)] # start_inds, dest_inds,
for i in range(num_points):
points.append(
(middle_point + ortho_vec * dists_each * (i + 1)).astype(int)
)
points.append(
(middle_point - ortho_vec * dists_each * (i + 1)).astype(int)
)
return points
@staticmethod
def generate_corridors_middle_line(
instance_corr, start_inds, dest_inds, num_corrs=5, n_dilate=100
):
num_middle_points = num_corrs // 2
points = CorridorUtils.get_middle_line(
start_inds, dest_inds, instance_corr, num_points=num_middle_points
)
all_corridors = []
for p in points:
path = [[start_inds.tolist(), p.tolist(), dest_inds.tolist()]]
all_corridors.append(
get_distance_surface(
instance_corr.shape, path, n_dilate=n_dilate
)
)
return all_corridors
@staticmethod
def visualize_middle_line(
all_points, instance_corr, buffer=2, out_path=None
):
example = instance_corr.copy()
for p in all_points:
(i, j) = tuple(p)
example[i - buffer:i + buffer, j - buffer:j + buffer] = 2
plt.figure(figsize=(20, 10))
plt.imshow(example)
if out_path is None:
plt.show()
else:
plt.savefig(out_path + "_corr_lines.png")
@staticmethod
def visualize_corrs(corrs, out_path=None):
plt.figure(figsize=(20, 10))
for i, corr in enumerate(corrs):
plt.subplot(1, len(corrs), (i + 1))
plt.imshow(corr)
if out_path is not None:
plt.savefig(out_path + "corridor_quantiles.png")
else:
plt.show()
@staticmethod
def generate_corridors_from_file(corr_path, nr_corrs=4, scale_param=1):
with rasterio.open(corr_path, 'r') as ds:
cost_img = ds.read()[0]
print("read in corridor", cost_img.shape)
actual_vals = cost_img[cost_img != 9999]
corrs = []
cut_val_prev = 0
log_vals = np.logspace(np.log(0.03), np.log(1), 4, base=1.5)
for i in range(4):
cut_val = np.quantile(actual_vals, log_vals[i]) # (i+1)*0.24)
copied = cost_img.copy()
copied[copied < cut_val_prev] = 9999
copied[copied > cut_val] = 9999
corr_bool = (copied != 9999).astype(int)
corrs.append(rescale(corr_bool, scale_param))
cut_val_prev = cut_val
return corrs
@staticmethod
def get_reduced_patches(
instance, start_inds, dest_inds, factor, balance=[1, 1], quantile=0.1
):
summed = np.sum(instance, axis=0)
red = rescale(summed, factor)
x_len, y_len = red.shape
path_start_end = [
[
(start_inds / factor).astype(int).tolist(),
(dest_inds / factor).astype(int).tolist()
]
]
dist_corr = 1 - normalize(
get_distance_surface(
red.shape, path_start_end, n_dilate=min([x_len, y_len])
)
)
surface_comb = balance[0] * dist_corr + balance[1] * red
quantile_surface = np.quantile(surface_comb, quantile)
patches = surface_comb < quantile_surface
inds_x, inds_y = np.where(patches)
return np.array([inds_x, inds_y]) # *factor
@staticmethod
def generate_corridors_sample_path(
instance,
start_inds,
dest_inds,
factor,
balance=[1, 3],
quantile=0.2,
n_sample=4,
n_onpath=5,
n_dilate=100
):
out_inds = CorridorUtils.get_reduced_patches(
instance,
start_inds,
dest_inds,
factor,
balance=[1, 3],
quantile=0.1
)
# compute distances from start point
minus_start = [
np.linalg.norm(out_inds[:, i] - start_inds / factor)
for i in range(out_inds.shape[1])
]
sorted_patches = np.argsort(minus_start)
all_corridors = list()
for _ in range(n_sample):
drawn_points = np.random.choice(
np.arange(out_inds.shape[1]), n_onpath, replace=False
)
            drawn_path = out_inds[:, sorted_patches[np.sort(drawn_points)]] * factor
path = [
[start_inds.tolist()] +
np.swapaxes(drawn_path, 1, 0).tolist() + [dest_inds.tolist()]
]
all_corridors.append(
get_distance_surface(
instance.shape[1:], path, n_dilate=n_dilate
)
)
return all_corridors
|
[
"numpy.absolute",
"rasterio.open",
"numpy.quantile",
"numpy.sum",
"matplotlib.pyplot.show",
"numpy.log",
"matplotlib.pyplot.imshow",
"numpy.argsort",
"numpy.sort",
"matplotlib.pyplot.figure",
"numpy.where",
"power_planner.utils.utils.rescale",
"numpy.array",
"numpy.linalg.norm",
"numpy.arange",
"numpy.swapaxes",
"power_planner.utils.utils.get_distance_surface",
"matplotlib.pyplot.savefig"
] |
[((510, 533), 'numpy.where', 'np.where', (['instance_corr'], {}), '(instance_corr)\n', (518, 533), True, 'import numpy as np\n'), ((2159, 2187), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (2169, 2187), True, 'import matplotlib.pyplot as plt\n'), ((2196, 2215), 'matplotlib.pyplot.imshow', 'plt.imshow', (['example'], {}), '(example)\n', (2206, 2215), True, 'import matplotlib.pyplot as plt\n'), ((2410, 2438), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (2420, 2438), True, 'import matplotlib.pyplot as plt\n'), ((3631, 3655), 'numpy.sum', 'np.sum', (['instance'], {'axis': '(0)'}), '(instance, axis=0)\n', (3637, 3655), True, 'import numpy as np\n'), ((3670, 3693), 'power_planner.utils.utils.rescale', 'rescale', (['summed', 'factor'], {}), '(summed, factor)\n', (3677, 3693), False, 'from power_planner.utils.utils import get_distance_surface, rescale, normalize\n'), ((4168, 4203), 'numpy.quantile', 'np.quantile', (['surface_comb', 'quantile'], {}), '(surface_comb, quantile)\n', (4179, 4203), True, 'import numpy as np\n'), ((4280, 4297), 'numpy.where', 'np.where', (['patches'], {}), '(patches)\n', (4288, 4297), True, 'import numpy as np\n'), ((4313, 4339), 'numpy.array', 'np.array', (['[inds_x, inds_y]'], {}), '([inds_x, inds_y])\n', (4321, 4339), True, 'import numpy as np\n'), ((5018, 5041), 'numpy.argsort', 'np.argsort', (['minus_start'], {}), '(minus_start)\n', (5028, 5041), True, 'import numpy as np\n'), ((458, 483), 'numpy.linalg.norm', 'np.linalg.norm', (['ortho_vec'], {}), '(ortho_vec)\n', (472, 483), True, 'import numpy as np\n'), ((2257, 2267), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2265, 2267), True, 'import matplotlib.pyplot as plt\n'), ((2294, 2335), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(out_path + '_corr_lines.png')"], {}), "(out_path + '_corr_lines.png')\n", (2305, 2335), True, 'import matplotlib.pyplot as plt\n'), ((2540, 2556), 'matplotlib.pyplot.imshow', 'plt.imshow', (['corr'], {}), '(corr)\n', (2550, 2556), True, 'import matplotlib.pyplot as plt\n'), ((2602, 2650), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(out_path + 'corridor_quantiles.png')"], {}), "(out_path + 'corridor_quantiles.png')\n", (2613, 2650), True, 'import matplotlib.pyplot as plt\n'), ((2677, 2687), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2685, 2687), True, 'import matplotlib.pyplot as plt\n'), ((2796, 2825), 'rasterio.open', 'rasterio.open', (['corr_path', '"""r"""'], {}), "(corr_path, 'r')\n", (2809, 2825), False, 'import rasterio\n'), ((3044, 3056), 'numpy.log', 'np.log', (['(0.03)'], {}), '(0.03)\n', (3050, 3056), True, 'import numpy as np\n'), ((3058, 3067), 'numpy.log', 'np.log', (['(1)'], {}), '(1)\n', (3064, 3067), True, 'import numpy as np\n'), ((3131, 3168), 'numpy.quantile', 'np.quantile', (['actual_vals', 'log_vals[i]'], {}), '(actual_vals, log_vals[i])\n', (3142, 3168), True, 'import numpy as np\n'), ((4884, 4936), 'numpy.linalg.norm', 'np.linalg.norm', (['(out_inds[:, i] - start_inds / factor)'], {}), '(out_inds[:, i] - start_inds / factor)\n', (4898, 4936), True, 'import numpy as np\n'), ((724, 797), 'numpy.absolute', 'np.absolute', (['[(x - xs) / v1, (xe - x) / v1, (y - ys) / v2, (ye - y) / v2]'], {}), '([(x - xs) / v1, (xe - x) / v1, (y - ys) / v2, (ye - y) / v2])\n', (735, 797), True, 'import numpy as np\n'), ((1719, 1785), 'power_planner.utils.utils.get_distance_surface', 'get_distance_surface', (['instance_corr.shape', 'path'], {'n_dilate': 
'n_dilate'}), '(instance_corr.shape, path, n_dilate=n_dilate)\n', (1739, 1785), False, 'from power_planner.utils.utils import get_distance_surface, rescale, normalize\n'), ((3392, 3423), 'power_planner.utils.utils.rescale', 'rescale', (['corr_bool', 'scale_param'], {}), '(corr_bool, scale_param)\n', (3399, 3423), False, 'from power_planner.utils.utils import get_distance_surface, rescale, normalize\n'), ((5168, 5196), 'numpy.arange', 'np.arange', (['out_inds.shape[1]'], {}), '(out_inds.shape[1])\n', (5177, 5196), True, 'import numpy as np\n'), ((5608, 5673), 'power_planner.utils.utils.get_distance_surface', 'get_distance_surface', (['instance.shape[1:]', 'path'], {'n_dilate': 'n_dilate'}), '(instance.shape[1:], path, n_dilate=n_dilate)\n', (5628, 5673), False, 'from power_planner.utils.utils import get_distance_surface, rescale, normalize\n'), ((5322, 5343), 'numpy.sort', 'np.sort', (['drawn_points'], {}), '(drawn_points)\n', (5329, 5343), True, 'import numpy as np\n'), ((5482, 5511), 'numpy.swapaxes', 'np.swapaxes', (['drawn_path', '(1)', '(0)'], {}), '(drawn_path, 1, 0)\n', (5493, 5511), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
from vnpy.app.cta_strategy.strategies.ma_trend.constant import DataSignalName, DataMethod
from vnpy.app.cta_strategy.strategies.ma_trend.data_center import DataCreator
from vnpy.trader.utility import ArrayManager
class MaInfoCreator(DataCreator):
parameters = ["ma_level", "max_length"]
ma_level = [10,20,30,60,120]
max_length = 400
trend_info = pd.DataFrame()
def init(self):
self.data_center.connect(DataSignalName.ArrayManager, self.on_am_data)
self.data_center.add_method(DataMethod.MaInfoData, self.data)
def data(self, parameters=None):
return self.trend_info
def tag(self):
return "ma_info"
def on_am_data(self, data):
last_ma_lvl = self.ma_level[-1]
am: ArrayManager = data[DataSignalName.ArrayManager]
if am.count < last_ma_lvl:
return
close = am.close[-1]
dt = am.time_array[-1]
trend_info = {}
ma_data = []
for i in self.ma_level:
ma = am.sma(i)
trend_info[i] = [round(ma, 2)]
ma_data.append(ma)
ma = am.sma(5)
trend_info[5] = [round(ma, 2)]
        # count MA crossings - start
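        # for each longer MA, count how many of the shorter MAs currently sit above
        # it; the counts are stored as "maN_{N+1}_ref" and describe how aligned the
        # MA stack is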
ma_lvl_tag = []
last_lvl = self.ma_level[-1]
for i in self.ma_level[:-1]:
val = 1 if trend_info[i] > trend_info[last_lvl] else 0
ma_lvl_tag.append(val)
bincount_val = np.bincount(np.array(ma_lvl_tag[:-1]))
trend_info["ma3_5_ref"] = int(bincount_val[1]) if bincount_val.size > 1 else 0
start = 1
for lvl_index in range(start, len(self.ma_level)):
ma_lvl_tag = []
lvl = self.ma_level[-lvl_index]
for i in self.ma_level[:-lvl_index]:
val = 1 if trend_info[i] > trend_info[lvl] else 0
ma_lvl_tag.append(val)
bincount_val = np.bincount(np.array(ma_lvl_tag))
count = len(ma_lvl_tag)
tag = "ma{}_{}_ref".format(count, count + 1)
trend_info[tag] = int(bincount_val[1]) if bincount_val.size > 1 else 0
trend_info["close"] = close
data = []
diff = ma_data[-1]
for v in ma_data:
data.append(round(v / diff, 6))
trend_info["ma5"] = [round(np.var(data) * 1000000, 8)]
data = []
diff = ma_data[-3]
for v in ma_data[:-2]:
data.append(round(v / diff, 6))
trend_info["ma3"] = [round(np.var(data) * 1000000, 8)]
if len(self.trend_info) < self.max_length:
self.trend_info = self.trend_info.append(pd.DataFrame(trend_info, index=[pd.to_datetime(dt)]))
else:
index = self.trend_info.index[0]
self.trend_info = self.trend_info.drop([index])
self.trend_info = self.trend_info.append(pd.DataFrame(trend_info, index=[pd.to_datetime(dt)]))
self.push(DataSignalName.MaInfo, self.trend_info)
@property
def info(self):
return self.trend_info
|
[
"pandas.DataFrame",
"pandas.to_datetime",
"numpy.array",
"numpy.var"
] |
[((405, 419), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (417, 419), True, 'import pandas as pd\n'), ((1453, 1478), 'numpy.array', 'np.array', (['ma_lvl_tag[:-1]'], {}), '(ma_lvl_tag[:-1])\n', (1461, 1478), True, 'import numpy as np\n'), ((1910, 1930), 'numpy.array', 'np.array', (['ma_lvl_tag'], {}), '(ma_lvl_tag)\n', (1918, 1930), True, 'import numpy as np\n'), ((2296, 2308), 'numpy.var', 'np.var', (['data'], {}), '(data)\n', (2302, 2308), True, 'import numpy as np\n'), ((2481, 2493), 'numpy.var', 'np.var', (['data'], {}), '(data)\n', (2487, 2493), True, 'import numpy as np\n'), ((2646, 2664), 'pandas.to_datetime', 'pd.to_datetime', (['dt'], {}), '(dt)\n', (2660, 2664), True, 'import pandas as pd\n'), ((2872, 2890), 'pandas.to_datetime', 'pd.to_datetime', (['dt'], {}), '(dt)\n', (2886, 2890), True, 'import pandas as pd\n')]
|
import numpy as np
def relu(x):
return np.maximum(0, x)
def sigmoid(x):
return 1 / (1 + np.exp(-np.clip(x, -10, 10)))
def logexp(x):
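    # numerically stable softplus log(1 + exp(x)): for large x the exact value is
    # approximately x, so return x directly instead of overflowing in exp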
return np.where(x > 100, x, np.log(1 + np.exp(x)))
def binary_cross_entropy(x, y):
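    # binary cross-entropy on logits x against targets y, written via softplus:
    # y * softplus(-x) + (1 - y) * softplus(x)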
loss = y * logexp(-x) + (1 - y) * logexp(x)
return loss
|
[
"numpy.maximum",
"numpy.exp",
"numpy.clip"
] |
[((45, 61), 'numpy.maximum', 'np.maximum', (['(0)', 'x'], {}), '(0, x)\n', (55, 61), True, 'import numpy as np\n'), ((190, 199), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (196, 199), True, 'import numpy as np\n'), ((108, 127), 'numpy.clip', 'np.clip', (['x', '(-10)', '(10)'], {}), '(x, -10, 10)\n', (115, 127), True, 'import numpy as np\n')]
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""310 data_processing"""
import os
import argparse
import numpy as np
from scipy.io.wavfile import write
parser = argparse.ArgumentParser(description='MelGAN')
parser.add_argument('--wav_path', type=str, default='', help='wav data path')
parser.add_argument('--bin_path', type=str, default='', help='bin data path')
parser.add_argument('--sample', type=int, default=22050, help='wav sample')
parser.add_argument('--mode', type=int, choices=[1, 2], default=1,
help='1 for wav to bin, 2 for bin to wav (Default: 1)')
args_opt = parser.parse_args()
if args_opt.mode == 1:
path_all = args_opt.wav_path
if not os.path.exists(args_opt.bin_path):
os.mkdir(args_opt.bin_path)
else:
path_all = args_opt.bin_path
if not os.path.exists(args_opt.wav_path):
os.mkdir(args_opt.wav_path)
filenames = os.listdir(path_all)
for filename in filenames:
if args_opt.mode == 1:
new_name = os.path.join(args_opt.bin_path, filename[:-4]+'.bin')
temp = np.load(path_all+'/'+ filename)
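        # normalise the mel spectrogram to roughly [0, 1] (this assumes the stored
        # values lie in about [-5, 0]) and force the time axis to 240 frames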
temp = (temp + 5) / 5
if temp.shape[1] < 240:
temp_1 = 240 - temp.shape[1]
temp = np.pad(temp, ((0, 0), (0, temp_1)), mode='constant', constant_values=0.0)
temp[:, :240].tofile(new_name)
else:
abc = np.fromfile(os.path.join(path_all, filename), dtype='float32')
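        # scale the float waveform (assumed to be in [-1, 1]) back to the int16 range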
wav_data = 32768.0 * abc
output_path = os.path.join(args_opt.wav_path, filename).replace('.bin', '.wav')
write(output_path, args_opt.sample, wav_data.astype('int16'))
print('get {}, please check it'.format(output_path))
|
[
"numpy.pad",
"os.mkdir",
"numpy.load",
"argparse.ArgumentParser",
"os.path.exists",
"os.path.join",
"os.listdir"
] |
[((783, 828), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MelGAN"""'}), "(description='MelGAN')\n", (806, 828), False, 'import argparse\n'), ((1507, 1527), 'os.listdir', 'os.listdir', (['path_all'], {}), '(path_all)\n', (1517, 1527), False, 'import os\n'), ((1303, 1336), 'os.path.exists', 'os.path.exists', (['args_opt.bin_path'], {}), '(args_opt.bin_path)\n', (1317, 1336), False, 'import os\n'), ((1346, 1373), 'os.mkdir', 'os.mkdir', (['args_opt.bin_path'], {}), '(args_opt.bin_path)\n', (1354, 1373), False, 'import os\n'), ((1424, 1457), 'os.path.exists', 'os.path.exists', (['args_opt.wav_path'], {}), '(args_opt.wav_path)\n', (1438, 1457), False, 'import os\n'), ((1467, 1494), 'os.mkdir', 'os.mkdir', (['args_opt.wav_path'], {}), '(args_opt.wav_path)\n', (1475, 1494), False, 'import os\n'), ((1602, 1657), 'os.path.join', 'os.path.join', (['args_opt.bin_path', "(filename[:-4] + '.bin')"], {}), "(args_opt.bin_path, filename[:-4] + '.bin')\n", (1614, 1657), False, 'import os\n'), ((1671, 1705), 'numpy.load', 'np.load', (["(path_all + '/' + filename)"], {}), "(path_all + '/' + filename)\n", (1678, 1705), True, 'import numpy as np\n'), ((1825, 1898), 'numpy.pad', 'np.pad', (['temp', '((0, 0), (0, temp_1))'], {'mode': '"""constant"""', 'constant_values': '(0.0)'}), "(temp, ((0, 0), (0, temp_1)), mode='constant', constant_values=0.0)\n", (1831, 1898), True, 'import numpy as np\n'), ((1974, 2006), 'os.path.join', 'os.path.join', (['path_all', 'filename'], {}), '(path_all, filename)\n', (1986, 2006), False, 'import os\n'), ((2080, 2121), 'os.path.join', 'os.path.join', (['args_opt.wav_path', 'filename'], {}), '(args_opt.wav_path, filename)\n', (2092, 2121), False, 'import os\n')]
|
from utils.utils_profiling import * # load before other local modules
import argparse
import os
import sys
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import dgl
import numpy as np
import torch
import wandb
import time
import datetime
from torch import optim
import torch.nn as nn
from torch.utils.data import DataLoader
from experiments.nbody.nbody_dataloader import RIDataset
from utils import utils_logging
from experiments.nbody import nbody_models as models
from equivariant_attention.from_se3cnn.SO3 import rot
from experiments.nbody.nbody_flags import get_flags
def to_np(x):
return x.cpu().detach().numpy()
def get_acc(pred, x_T, v_T, y=None, verbose=True):
acc_dict = {}
pred = to_np(pred)
x_T = to_np(x_T)
v_T = to_np(v_T)
assert len(pred) == len(x_T)
if verbose:
y = np.asarray(y.cpu())
_sq = (pred - y) ** 2
acc_dict['mse'] = np.mean(_sq)
_sq = (pred[:, 0, :] - x_T) ** 2
acc_dict['pos_mse'] = np.mean(_sq)
_sq = (pred[:, 1, :] - v_T) ** 2
acc_dict['vel_mse'] = np.mean(_sq)
return acc_dict
def train_epoch(epoch, model, loss_fnc, dataloader, optimizer, schedul, FLAGS):
model.train()
loss_epoch = 0
num_iters = len(dataloader)
wandb.log({"lr": optimizer.param_groups[0]['lr']}, commit=False)
for i, (g, y1, y2) in enumerate(dataloader):
g = g.to(FLAGS.device)
x_T = y1.to(FLAGS.device).view(-1, 3)
v_T = y2.to(FLAGS.device).view(-1, 3)
y = torch.stack([x_T, v_T], dim=1)
optimizer.zero_grad()
# run model forward and compute loss
pred = model(g)
loss = loss_fnc(pred, y)
loss_epoch += to_np(loss)
if torch.isnan(loss):
import pdb
pdb.set_trace()
# backprop
loss.backward()
optimizer.step()
# print to console
if i % FLAGS.print_interval == 0:
print(
f"[{epoch}|{i}] loss: {loss:.5f}")
# log to wandb
if i % FLAGS.log_interval == 0:
# 'commit' is only set to True here, meaning that this is where
# wandb counts the steps
wandb.log({"Train Batch Loss": to_np(loss)}, commit=True)
# exit early if only do profiling
if FLAGS.profile and i == 10:
sys.exit()
schedul.step(epoch + i / num_iters)
# log train accuracy for entire epoch to wandb
loss_epoch /= len(dataloader)
wandb.log({"Train Epoch Loss": loss_epoch}, commit=False)
def test_epoch(epoch, model, loss_fnc, dataloader, FLAGS, dT):
model.eval()
keys = ['pos_mse', 'vel_mse']
acc_epoch = {k: 0.0 for k in keys}
acc_epoch_blc = {k: 0.0 for k in keys} # for constant baseline
acc_epoch_bll = {k: 0.0 for k in keys} # for linear baseline
loss_epoch = 0.0
for i, (g, y1, y2) in enumerate(dataloader):
g = g.to(FLAGS.device)
x_T = y1.view(-1, 3)
v_T = y2.view(-1, 3)
y = torch.stack([x_T, v_T], dim=1).to(FLAGS.device)
# run model forward and compute loss
pred = model(g).detach()
loss_epoch += to_np(loss_fnc(pred, y)/len(dataloader))
acc = get_acc(pred, x_T, v_T, y=y)
for k in keys:
acc_epoch[k] += acc[k]/len(dataloader)
# eval constant baseline
bl_pred = torch.zeros_like(pred)
acc = get_acc(bl_pred, x_T, v_T, verbose=False)
for k in keys:
acc_epoch_blc[k] += acc[k]/len(dataloader)
# eval linear baseline
# Apply linear update to locations.
bl_pred[:, 0, :] = dT * g.ndata['v'][:, 0, :]
acc = get_acc(bl_pred, x_T, v_T, verbose=False)
for k in keys:
acc_epoch_bll[k] += acc[k] / len(dataloader)
print(f"...[{epoch}|test] loss: {loss_epoch:.5f}")
wandb.log({"Test loss": loss_epoch}, commit=False)
for k in keys:
wandb.log({"Test " + k: acc_epoch[k]}, commit=False)
wandb.log({'Const. BL pos_mse': acc_epoch_blc['pos_mse']}, commit=False)
wandb.log({'Linear BL pos_mse': acc_epoch_bll['pos_mse']}, commit=False)
wandb.log({'Linear BL vel_mse': acc_epoch_bll['vel_mse']}, commit=False)
class RandomRotation(object):
def __init__(self):
pass
def __call__(self, x):
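        # sample a random orthogonal matrix via QR of a Gaussian matrix and apply it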
M = np.random.randn(3, 3)
Q, __ = np.linalg.qr(M)
return x @ Q
def collate(samples):
graphs, y1, y2 = map(list, zip(*samples))
batched_graph = dgl.batch(graphs)
return batched_graph, torch.stack(y1), torch.stack(y2)
def main(FLAGS, UNPARSED_ARGV):
# Prepare data
train_dataset = RIDataset(FLAGS, split='train')
train_loader = DataLoader(train_dataset,
batch_size=FLAGS.batch_size,
shuffle=True,
collate_fn=collate,
num_workers=FLAGS.num_workers,
drop_last=True)
test_dataset = RIDataset(FLAGS, split='test')
# drop_last is only here so that we can count accuracy correctly;
test_loader = DataLoader(test_dataset,
batch_size=FLAGS.batch_size,
shuffle=False,
collate_fn=collate,
num_workers=FLAGS.num_workers,
drop_last=True)
# time steps
assert train_dataset.data['delta_T'] == test_dataset.data['delta_T']
assert train_dataset.data['sample_freq'] == test_dataset.data['sample_freq']
print(f'deltaT: {train_dataset.data["delta_T"]}, '
f'freq: {train_dataset.data["sample_freq"]}, '
f'FLAGS.ri_delta_t: {FLAGS.ri_delta_t}')
dT = train_dataset.data['delta_T'] * train_dataset.data[
'sample_freq'] * FLAGS.ri_delta_t
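    # dT is the time elapsed between input and target states; it is only used by
    # the linear-extrapolation baseline in test_epoch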
FLAGS.train_size = len(train_dataset)
FLAGS.test_size = len(test_dataset)
assert len(test_dataset) < len(train_dataset)
model = models.__dict__.get(FLAGS.model)(FLAGS.num_layers, FLAGS.num_channels, num_degrees=FLAGS.num_degrees,
div=FLAGS.div, n_heads=FLAGS.head, si_m=FLAGS.simid, si_e=FLAGS.siend,
x_ij=FLAGS.xij)
utils_logging.write_info_file(model, FLAGS=FLAGS, UNPARSED_ARGV=UNPARSED_ARGV, wandb_log_dir=wandb.run.dir)
if FLAGS.restore is not None:
model.load_state_dict(torch.load(FLAGS.restore))
model.to(FLAGS.device)
# Optimizer settings
optimizer = optim.Adam(model.parameters(), lr=FLAGS.lr)
scheduler = optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, FLAGS.num_epochs, eta_min=1e-4)
criterion = nn.MSELoss()
criterion = criterion.to(FLAGS.device)
task_loss = criterion
# Save path
save_path = os.path.join(FLAGS.save_dir, FLAGS.name + '.pt')
# Run training
print('Begin training')
for epoch in range(FLAGS.num_epochs):
torch.save(model.state_dict(), save_path)
print(f"Saved: {save_path}")
train_epoch(epoch, model, task_loss, train_loader, optimizer, scheduler, FLAGS)
test_epoch(epoch, model, task_loss, test_loader, FLAGS, dT)
if __name__ == '__main__':
FLAGS, UNPARSED_ARGV = get_flags()
os.makedirs(FLAGS.save_dir, exist_ok=True)
# Log all args to wandb
wandb.init(project='equivariant-attention', name=FLAGS.name, config=FLAGS)
wandb.save('*.txt')
# Where the magic is
try:
main(FLAGS, UNPARSED_ARGV)
except Exception:
import pdb, traceback
traceback.print_exc()
pdb.post_mortem()
|
[
"wandb.log",
"pdb.post_mortem",
"utils.utils_logging.write_info_file",
"numpy.linalg.qr",
"numpy.mean",
"experiments.nbody.nbody_models.__dict__.get",
"os.path.join",
"torch.isnan",
"torch.nn.MSELoss",
"traceback.print_exc",
"warnings.simplefilter",
"torch.utils.data.DataLoader",
"experiments.nbody.nbody_flags.get_flags",
"numpy.random.randn",
"torch.load",
"torch.zeros_like",
"wandb.save",
"sys.exit",
"os.makedirs",
"torch.stack",
"dgl.batch",
"torch.optim.lr_scheduler.CosineAnnealingWarmRestarts",
"wandb.init",
"pdb.set_trace",
"experiments.nbody.nbody_dataloader.RIDataset"
] |
[((126, 188), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (147, 188), False, 'import warnings\n'), ((1014, 1026), 'numpy.mean', 'np.mean', (['_sq'], {}), '(_sq)\n', (1021, 1026), True, 'import numpy as np\n'), ((1091, 1103), 'numpy.mean', 'np.mean', (['_sq'], {}), '(_sq)\n', (1098, 1103), True, 'import numpy as np\n'), ((1281, 1345), 'wandb.log', 'wandb.log', (["{'lr': optimizer.param_groups[0]['lr']}"], {'commit': '(False)'}), "({'lr': optimizer.param_groups[0]['lr']}, commit=False)\n", (1290, 1345), False, 'import wandb\n'), ((2506, 2563), 'wandb.log', 'wandb.log', (["{'Train Epoch Loss': loss_epoch}"], {'commit': '(False)'}), "({'Train Epoch Loss': loss_epoch}, commit=False)\n", (2515, 2563), False, 'import wandb\n'), ((3867, 3917), 'wandb.log', 'wandb.log', (["{'Test loss': loss_epoch}"], {'commit': '(False)'}), "({'Test loss': loss_epoch}, commit=False)\n", (3876, 3917), False, 'import wandb\n'), ((4002, 4074), 'wandb.log', 'wandb.log', (["{'Const. BL pos_mse': acc_epoch_blc['pos_mse']}"], {'commit': '(False)'}), "({'Const. BL pos_mse': acc_epoch_blc['pos_mse']}, commit=False)\n", (4011, 4074), False, 'import wandb\n'), ((4079, 4151), 'wandb.log', 'wandb.log', (["{'Linear BL pos_mse': acc_epoch_bll['pos_mse']}"], {'commit': '(False)'}), "({'Linear BL pos_mse': acc_epoch_bll['pos_mse']}, commit=False)\n", (4088, 4151), False, 'import wandb\n'), ((4156, 4228), 'wandb.log', 'wandb.log', (["{'Linear BL vel_mse': acc_epoch_bll['vel_mse']}"], {'commit': '(False)'}), "({'Linear BL vel_mse': acc_epoch_bll['vel_mse']}, commit=False)\n", (4165, 4228), False, 'import wandb\n'), ((4503, 4520), 'dgl.batch', 'dgl.batch', (['graphs'], {}), '(graphs)\n', (4512, 4520), False, 'import dgl\n'), ((4653, 4684), 'experiments.nbody.nbody_dataloader.RIDataset', 'RIDataset', (['FLAGS'], {'split': '"""train"""'}), "(FLAGS, split='train')\n", (4662, 4684), False, 'from experiments.nbody.nbody_dataloader import RIDataset\n'), ((4704, 4843), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'FLAGS.batch_size', 'shuffle': '(True)', 'collate_fn': 'collate', 'num_workers': 'FLAGS.num_workers', 'drop_last': '(True)'}), '(train_dataset, batch_size=FLAGS.batch_size, shuffle=True,\n collate_fn=collate, num_workers=FLAGS.num_workers, drop_last=True)\n', (4714, 4843), False, 'from torch.utils.data import DataLoader\n'), ((5010, 5040), 'experiments.nbody.nbody_dataloader.RIDataset', 'RIDataset', (['FLAGS'], {'split': '"""test"""'}), "(FLAGS, split='test')\n", (5019, 5040), False, 'from experiments.nbody.nbody_dataloader import RIDataset\n'), ((5129, 5268), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'batch_size': 'FLAGS.batch_size', 'shuffle': '(False)', 'collate_fn': 'collate', 'num_workers': 'FLAGS.num_workers', 'drop_last': '(True)'}), '(test_dataset, batch_size=FLAGS.batch_size, shuffle=False,\n collate_fn=collate, num_workers=FLAGS.num_workers, drop_last=True)\n', (5139, 5268), False, 'from torch.utils.data import DataLoader\n'), ((6278, 6390), 'utils.utils_logging.write_info_file', 'utils_logging.write_info_file', (['model'], {'FLAGS': 'FLAGS', 'UNPARSED_ARGV': 'UNPARSED_ARGV', 'wandb_log_dir': 'wandb.run.dir'}), '(model, FLAGS=FLAGS, UNPARSED_ARGV=\n UNPARSED_ARGV, wandb_log_dir=wandb.run.dir)\n', (6307, 6390), False, 'from utils import utils_logging\n'), ((6607, 6702), 'torch.optim.lr_scheduler.CosineAnnealingWarmRestarts', 
'optim.lr_scheduler.CosineAnnealingWarmRestarts', (['optimizer', 'FLAGS.num_epochs'], {'eta_min': '(0.0001)'}), '(optimizer, FLAGS.num_epochs,\n eta_min=0.0001)\n', (6653, 6702), False, 'from torch import optim\n'), ((6713, 6725), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (6723, 6725), True, 'import torch.nn as nn\n'), ((6828, 6876), 'os.path.join', 'os.path.join', (['FLAGS.save_dir', "(FLAGS.name + '.pt')"], {}), "(FLAGS.save_dir, FLAGS.name + '.pt')\n", (6840, 6876), False, 'import os\n'), ((7268, 7279), 'experiments.nbody.nbody_flags.get_flags', 'get_flags', ([], {}), '()\n', (7277, 7279), False, 'from experiments.nbody.nbody_flags import get_flags\n'), ((7284, 7326), 'os.makedirs', 'os.makedirs', (['FLAGS.save_dir'], {'exist_ok': '(True)'}), '(FLAGS.save_dir, exist_ok=True)\n', (7295, 7326), False, 'import os\n'), ((7360, 7434), 'wandb.init', 'wandb.init', ([], {'project': '"""equivariant-attention"""', 'name': 'FLAGS.name', 'config': 'FLAGS'}), "(project='equivariant-attention', name=FLAGS.name, config=FLAGS)\n", (7370, 7434), False, 'import wandb\n'), ((7439, 7458), 'wandb.save', 'wandb.save', (['"""*.txt"""'], {}), "('*.txt')\n", (7449, 7458), False, 'import wandb\n'), ((937, 949), 'numpy.mean', 'np.mean', (['_sq'], {}), '(_sq)\n', (944, 949), True, 'import numpy as np\n'), ((1530, 1560), 'torch.stack', 'torch.stack', (['[x_T, v_T]'], {'dim': '(1)'}), '([x_T, v_T], dim=1)\n', (1541, 1560), False, 'import torch\n'), ((1741, 1758), 'torch.isnan', 'torch.isnan', (['loss'], {}), '(loss)\n', (1752, 1758), False, 'import torch\n'), ((3384, 3406), 'torch.zeros_like', 'torch.zeros_like', (['pred'], {}), '(pred)\n', (3400, 3406), False, 'import torch\n'), ((3945, 3999), 'wandb.log', 'wandb.log', (["{('Test ' + k): acc_epoch[k]}"], {'commit': '(False)'}), "({('Test ' + k): acc_epoch[k]}, commit=False)\n", (3954, 3999), False, 'import wandb\n'), ((4338, 4359), 'numpy.random.randn', 'np.random.randn', (['(3)', '(3)'], {}), '(3, 3)\n', (4353, 4359), True, 'import numpy as np\n'), ((4376, 4391), 'numpy.linalg.qr', 'np.linalg.qr', (['M'], {}), '(M)\n', (4388, 4391), True, 'import numpy as np\n'), ((4547, 4562), 'torch.stack', 'torch.stack', (['y1'], {}), '(y1)\n', (4558, 4562), False, 'import torch\n'), ((4564, 4579), 'torch.stack', 'torch.stack', (['y2'], {}), '(y2)\n', (4575, 4579), False, 'import torch\n'), ((5994, 6026), 'experiments.nbody.nbody_models.__dict__.get', 'models.__dict__.get', (['FLAGS.model'], {}), '(FLAGS.model)\n', (6013, 6026), True, 'from experiments.nbody import nbody_models as models\n'), ((1795, 1810), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (1808, 1810), False, 'import pdb, traceback\n'), ((2360, 2370), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2368, 2370), False, 'import sys\n'), ((6451, 6476), 'torch.load', 'torch.load', (['FLAGS.restore'], {}), '(FLAGS.restore)\n', (6461, 6476), False, 'import torch\n'), ((7589, 7610), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7608, 7610), False, 'import pdb, traceback\n'), ((7619, 7636), 'pdb.post_mortem', 'pdb.post_mortem', ([], {}), '()\n', (7634, 7636), False, 'import pdb, traceback\n'), ((3025, 3055), 'torch.stack', 'torch.stack', (['[x_T, v_T]'], {'dim': '(1)'}), '([x_T, v_T], dim=1)\n', (3036, 3055), False, 'import torch\n')]
|
from keras import applications
import keras
import numpy as np
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
import matplotlib.pyplot as plt
from keras.applications.imagenet_utils import decode_predictions
import os
from keras.models import model_from_json
from keras.models import load_model
import sys
import requests
import io
from PIL import Image
import boto3
import json
os.environ['KMP_DUPLICATE_LIB_OK']='True'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# model = applications.resnet50.ResNet50(
# include_top=True,
# weights='imagenet',
# input_tensor=None,
# input_shape=None,
# pooling=None,
# classes=1000)
# # serialize model to HDF5
# model.save("model.h5")
# print("Saved model to disk")
# filename = 'cat.jpeg'
# print('PIL image size = ', original_image.size)
# print('NumPy image size = ', numpy_image.shape)
# print('Input image size = ', input_image.shape)
# plt.imshow(np.uint8(input_image[0]))
def predict_keras(model_fname, img):
loaded_model, processed_image = load_keras(model_fname, img)
# resnet50
predictions_resnet50 = loaded_model.predict(processed_image)
label_resnet50 = decode_predictions(predictions_resnet50)
preds = {}
preds["label"] = str(label_resnet50[0][0][1])
preds["confidence"] = str(label_resnet50[0][0][2])
preds_json = json.dumps(preds)
print(preds_json)
def load_keras(model_fname, img):
img = img.resize((224, 224))
# load model
loaded_model = load_model(model_fname)
# convert the PIL image (width, height) to a NumPy array (height, width, channel)
numpy_image = img_to_array(img)
# Convert the image into 4D Tensor (samples, height, width, channels) by adding an extra dimension to the axis 0.
input_image = np.expand_dims(numpy_image, axis=0)
#preprocess for resnet50
processed_image_resnet50 = applications.resnet50.preprocess_input(input_image.copy())
return loaded_model, processed_image_resnet50
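# Hypothetical usage sketch (not part of the original script). It assumes a
# local "model.h5" produced by the commented-out save block above and an RGB
# image file such as the "cat.jpeg" mentioned earlier; predict_keras prints a
# JSON string with "label" and "confidence" fields.
#
#   img = Image.open("cat.jpeg").convert("RGB")
#   predict_keras("model.h5", img)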
|
[
"keras.models.load_model",
"keras.applications.imagenet_utils.decode_predictions",
"numpy.expand_dims",
"json.dumps",
"keras.preprocessing.image.img_to_array"
] |
[((1187, 1227), 'keras.applications.imagenet_utils.decode_predictions', 'decode_predictions', (['predictions_resnet50'], {}), '(predictions_resnet50)\n', (1205, 1227), False, 'from keras.applications.imagenet_utils import decode_predictions\n'), ((1353, 1370), 'json.dumps', 'json.dumps', (['preds'], {}), '(preds)\n', (1363, 1370), False, 'import json\n'), ((1486, 1509), 'keras.models.load_model', 'load_model', (['model_fname'], {}), '(model_fname)\n', (1496, 1509), False, 'from keras.models import load_model\n'), ((1608, 1625), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (1620, 1625), False, 'from keras.preprocessing.image import img_to_array\n'), ((1756, 1791), 'numpy.expand_dims', 'np.expand_dims', (['numpy_image'], {'axis': '(0)'}), '(numpy_image, axis=0)\n', (1770, 1791), True, 'import numpy as np\n')]
|
"""
numpy and scipy based backend.
Transparently handles scipy.sparse matrices as input.
"""
from __future__ import division, absolute_import
import numpy as np
import scipy.sparse
import scipy.sparse.linalg
import scipy.linalg
def inv(matrix):
"""
Calculate the inverse of a matrix.
    Uses ``scipy.linalg.inv`` if *matrix* is dense. If it is
sparse (from ``scipy.sparse``) then will use ``scipy.sparse.linalg.inv``.
"""
if scipy.sparse.issparse(matrix):
return scipy.sparse.linalg.inv(matrix)
else:
return scipy.linalg.inv(matrix)
def solve(matrix, vector):
"""
Solve a linear system.
"""
if scipy.sparse.issparse(matrix) or scipy.sparse.issparse(vector):
estimate, status = scipy.sparse.linalg.cg(matrix, vector)
if status >= 0:
return estimate
else:
            raise ValueError('CG exited with an input error or breakdown')
else:
return scipy.linalg.solve(matrix, vector)
def dot(a, b):
"""
Make the dot product using the appropriate method.
"""
return a.dot(b)
def diagonal(matrix):
"""
Get the diagonal of a matrix using the appropriate method.
"""
if scipy.sparse.issparse(matrix):
return np.array(matrix.diagonal())
else:
return np.diagonal(matrix)
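# Minimal usage sketch (illustrative only, not part of the original module):
# the same helpers accept dense numpy arrays and scipy.sparse matrices.
#
#   A = scipy.sparse.identity(3, format='csc') * 2.0
#   b = np.ones(3)
#   x = solve(A, b)      # conjugate-gradient branch for sparse input
#   A_inv = inv(A)       # scipy.sparse.linalg.inv branch
#   d = diagonal(A)      # array([2., 2., 2.])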
|
[
"numpy.diagonal"
] |
[((1303, 1322), 'numpy.diagonal', 'np.diagonal', (['matrix'], {}), '(matrix)\n', (1314, 1322), True, 'import numpy as np\n')]
|
from PIL import Image
import numpy as np
from sklearn.metrics import average_precision_score
from sklearn.metrics import precision_recall_curve
import matplotlib.pyplot as plt
from inspect import signature
from scipy import ndimage
from numpy import random,argsort,sqrt
from sklearn.metrics import jaccard_score, recall_score, precision_score
#from scipy.ndimage import erosion
from skimage.morphology import erosion, disk
from scipy.ndimage.morphology import distance_transform_edt
### im1, im2 should be 2D grayscale ndarray, range is (0, 255) unnormalized.
### You can use "im1 = np.array(Image.open("ILSVRC2012_test_00000025.png").convert("L"))"
### im1 = GT salient map
### im2 = pred. salient map
def getMAE(im1, im2):
h, w = im1.shape
    im_diff = im1.astype(np.float64) - im2.astype(np.float64)  ### cast first so uint8 inputs do not wrap around
im_diff = np.absolute(im_diff)
im_sum = np.sum(im_diff)
mae = im_sum / (h * w)
return mae
### im1 = GT salient map
### im2 = pred. salient map
def getPRCurve(im1, im2):
im1 = im1 / 255.0 ### Normalize
im2 = im2 / 255.0
if im1.shape != im2.shape:
print("im1 and im2 don't have the same shape!")
return
h, w = im1.shape
n1 = im1.flatten()
n2 = im2.flatten()
### Binarized map
n1[n1 > 0] = 1
n2[n2 > 0] = 1
n1 = n1.astype(int)
n2 = n2.astype(int)
average_precision = average_precision_score(n1, n2)
precision, recall, _ = precision_recall_curve(n1, n2)
return precision, recall
# step_kwargs = ({'step': 'post'}
# if 'step' in signature(plt.fill_between).parameters
# else {})
# plt.step(recall, precision, color='b', alpha=0.1, where='post')
# plt.fill_between(recall, precision, alpha=0.1, color='b', **step_kwargs)
# plt.xlabel('Recall')
# plt.ylabel('Precision')
# plt.ylim([0.0, 1.05])
# plt.xlim([0.0, 1.0])
# plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(average_precision))
# plt.show()
### Returns two numbers only!
### Returns the precision and recall of two images
### im1 = GT salient map
### im2 = pred. salient map
# def getImagePrecRecall(im1, im2):
# im1 = im1 / 255.0 ### Normalize
# im2 = im2 / 255.0
# if im1.shape != im2.shape:
# print("im1 and im2 don't have the same shape!")
# return
# h, w = im1.shape
# n1 = im1.flatten()
# n2 = im2.flatten()
# n1[n1 >= 0.5] = 1
# n2[n2 >= 0.5] = 1
# n1 = n1.astype(int)
# n2 = n2.astype(int)
# prec = precision_score(n1, n2, average='macro')
# rec = recall_score(n1, n2, average='macro')
# return prec, rec
### Compute precision, recall from getPRCurve
### precision and recall should be single numbers, not arrays!
### fmeasure - should only be a single number
def getMaxFMeasure(precision, recall):
beta2 = 0.3
denom = (beta2 * precision + recall)
denom[denom <= 0] = 100000
fmeasure = ((1+beta2)* precision * recall) / denom
return fmeasure
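### Illustrative only (not part of the original metrics): a typical evaluation
### computes the PR curve of a GT/prediction pair and keeps the best F-measure.
###   precision, recall = getPRCurve(gt_map, pred_map)  # 2D uint8 maps in (0, 255)
###   fmeasure = getMaxFMeasure(precision, recall)
###   best_f = fmeasure.max()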
def knn_search(x, D, K):
""" find K nearest neighbours of data among D """
ndata = D.shape[1]
    K = K if K < ndata else ndata
# euclidean distances from the other points
sqd = sqrt(((D - x[:,:ndata])**2).sum(axis=0))
idx = argsort(sqd) # sorting
# return the indexes of K nearest neighbours and the euclidean distance of the nearest points.
# sqd[idx[:K][0]] = 1 means that the nearest point picked from D to x has euclidean distance = 1
return idx[:K], sqd[idx[:K][0]]
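### Illustrative example (not part of the original code): D stores points as
### columns, x is a single column vector.
###   D = np.array([[0., 1., 2.], [0., 0., 0.]])      # three 2D points
###   idx, dist = knn_search(np.array([[0.9], [0.]]), D, 1)
###   # idx -> array([1]), dist -> 0.1 (column 1 is the nearest point)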
def boundary_extraction(mask):
(h,w) = mask.shape
mask_pad = np.zeros((h+10,w+10))
mask_pad[5:h+5,5:w+5] = mask
mask_pad_erd = erosion(mask_pad,disk(1))
mask_pad_edge = np.logical_xor(mask_pad, mask_pad_erd)
return mask_pad_edge[5:h+5,5:w+5]
def compute_align_error(gt,pd):
gt_bw = np.zeros(gt.shape)
pd_bw = np.zeros(pd.shape)
#binaries gt
gte = 127 #2*np.mean(gte)
gt_bw[gt>gte]=1
gt_bw[gt<=gte]=0
#binaries pd
pde = 127
pd_bw[pd>pde]=1
pd_bw[pd<=pde]=0
gt_edge = boundary_extraction(gt_bw)
pd_edge = boundary_extraction(pd_bw)
gt_dist = distance_transform_edt(np.logical_not(gt_edge))
pd_dist = distance_transform_edt(np.logical_not(pd_edge))
buffer = 3
#precision
pd_edge_buffer = np.zeros(pd_edge.shape)
pd_edge_buffer = pd_edge.copy()
try:
pd_edge_buffer[gt_dist>buffer] = 0
except:
return 0.0, 0.0, 0.0
#recall
gt_edge_buffer = np.zeros(gt_edge.shape)
gt_edge_buffer = gt_edge.copy()
try:
gt_edge_buffer[pd_dist>buffer] = 0
except:
return 0.0, 0.0, 0.0
precision_edge =np.sum(pd_edge_buffer).astype(np.float)/(np.sum(pd_edge).astype(np.float)+1e-8)
recall_edge =np.sum(gt_edge_buffer).astype(np.float)/(np.sum(gt_edge).astype(np.float)+1e-8)
f1_edge =(1+0.3)*precision_edge*recall_edge/(0.3*precision_edge+recall_edge+1e-8)
return precision_edge, recall_edge, f1_edge #meanAl
def own_RelaxedFMeasure(im1,im2): ##own version of relaxed measure based from basnet forum
rprecission,rrecall,rrfmeasure=compute_align_error(im1,im2)
return rrfmeasure
def getRelaxedFMeasure(im1, im2):
#im1 = im1 / 255.0 ### Normalize
#im2 = im2 / 255.0
#if im1.shape != im2.shape:
# print("im1 and im2 don't have the same shape!")
# return
#h, w = im1.shape
### Binarized map
#im1[im1 >= 0.5] = 1
#im2[im2 >= 0.5] = 1
#im1[im1 < 1] = 0
#im2[im2 < 1] = 0
### Get one-pixel boundary
gt_onepix_mask = np.logical_xor(im1, ndimage.binary_erosion(im1).astype(im1.dtype))
pred_onepix_mask = np.logical_xor(im2, ndimage.binary_erosion(im2).astype(im2.dtype))
# pilBrr = im2 * 255.0
# pilBImg = Image.fromarray(pilBrr)
# pilBImg.show()
### Will return a tuple of (array([0, 1, 1]), array([1, 0, 1])) where array([0, 1, 1]) are the row indices and col indices, repectively.
### For example, the value gt_ones_px_coords[0][0]=0 and gt_ones_px_coords[1][0]=0 gives out the coords (in index form) of the 2D image in x,y (row, col) form.
### In this coord, there is a corresponding white pixel = 1 in the GT saliency map. The tuple's first and second arrays will always have the same length (obviously).
gt_ones_px_coords = np.where(gt_onepix_mask == 1)
pred_onepix_mask = np.where(pred_onepix_mask == 1)
if len(gt_ones_px_coords[0]) == 0:
print("gt_ones_px_coords has no white pixel boundary")
exit()
if len(pred_onepix_mask[0]) == 0:
print("pred_onepix_mask has no white pixel boundary")
exit()
stacked_gt_whitepx_coords = np.vstack((gt_ones_px_coords[0], gt_ones_px_coords[1])) ### Stack everything into a (2, n) ndarray
stacked_pred_whitepx_coords = np.vstack((pred_onepix_mask[0], pred_onepix_mask[1])) ### Stack everything into a (2, n) ndarray
rho_px = 3 ### In BASNet paper. For a true positive to happen, dist between gt and pred pixel should be less than rho_px or less
### Compute relaxed precision = fraction of predicted boundary pixels within a range of ρ=3 pixels from ground truth boundary pixels
relaxed_precTP = 0
for idx_pixcoord in range(0, stacked_pred_whitepx_coords.shape[1]): ### Iterate all 2D pixels
_, nearest_px_dist = knn_search(stacked_pred_whitepx_coords[:,idx_pixcoord].reshape((2,1)), stacked_gt_whitepx_coords, 1)
if nearest_px_dist <= rho_px: relaxed_precTP += 1 ### compare distance of the nearest pixel
    relaxed_prec = relaxed_precTP / stacked_pred_whitepx_coords.shape[1] ### fraction of predicted boundary pixels
### Compute relaxed recall = fraction of ground truth boundary pixels that are within ρ=3 pixels of predicted boundary pixels
relaxed_recTP = 0
for idx_pixcoord in range(0, stacked_gt_whitepx_coords.shape[1]): ### Iterate all 2D pixels
_, nearest_px_dist = knn_search(stacked_gt_whitepx_coords[:,idx_pixcoord].reshape((2,1)), stacked_pred_whitepx_coords, 1)
if nearest_px_dist <= rho_px: relaxed_recTP += 1 ### compare distance of the nearest pixel
    relaxed_rec = relaxed_recTP / stacked_gt_whitepx_coords.shape[1] ### fraction of ground truth boundary pixels
### Calculate final f-measure
beta2 = 0.3
if beta2 * relaxed_prec + relaxed_rec == 0: return 0
fmeasure = ((1+beta2)* relaxed_prec * relaxed_rec) / (beta2 * relaxed_prec + relaxed_rec)
return fmeasure
# imA = np.array(Image.open("./DUTS/DUTS-TE/DUTS-TE-Mask/ILSVRC2012_test_00000003.png").convert("L"))
# imB = np.array(Image.open("./DUTS/DUTS-TE/DUTS-TE-STRUCT/ILSVRC2012_test_00000003.png").convert("L"))
# # precision, recall = getPRCurve(imA, imB)
# #prec, rec = getPRCurve(imA, imB)
# #fmeasure = getMaxFMeasure(prec, rec)
# #mae = getMAE(imA,imB)
# #print("prec: ", prec)
# #print("rec: ", rec)
# #print("fmeasure: ", fmeasure)
# #print("mae: ", mae)
# x=own_RelaxedFMeasure(imA,imB)
# print(x)
|
[
"numpy.absolute",
"numpy.sum",
"scipy.ndimage.binary_erosion",
"numpy.logical_not",
"numpy.zeros",
"skimage.morphology.disk",
"sklearn.metrics.precision_recall_curve",
"numpy.argsort",
"numpy.logical_xor",
"numpy.where",
"sklearn.metrics.average_precision_score",
"numpy.vstack"
] |
[((785, 805), 'numpy.absolute', 'np.absolute', (['im_diff'], {}), '(im_diff)\n', (796, 805), True, 'import numpy as np\n'), ((819, 834), 'numpy.sum', 'np.sum', (['im_diff'], {}), '(im_diff)\n', (825, 834), True, 'import numpy as np\n'), ((1316, 1347), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['n1', 'n2'], {}), '(n1, n2)\n', (1339, 1347), False, 'from sklearn.metrics import average_precision_score\n'), ((1375, 1405), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['n1', 'n2'], {}), '(n1, n2)\n', (1397, 1405), False, 'from sklearn.metrics import precision_recall_curve\n'), ((3196, 3208), 'numpy.argsort', 'argsort', (['sqd'], {}), '(sqd)\n', (3203, 3208), False, 'from numpy import random, argsort, sqrt\n'), ((3544, 3570), 'numpy.zeros', 'np.zeros', (['(h + 10, w + 10)'], {}), '((h + 10, w + 10))\n', (3552, 3570), True, 'import numpy as np\n'), ((3665, 3703), 'numpy.logical_xor', 'np.logical_xor', (['mask_pad', 'mask_pad_erd'], {}), '(mask_pad, mask_pad_erd)\n', (3679, 3703), True, 'import numpy as np\n'), ((3789, 3807), 'numpy.zeros', 'np.zeros', (['gt.shape'], {}), '(gt.shape)\n', (3797, 3807), True, 'import numpy as np\n'), ((3820, 3838), 'numpy.zeros', 'np.zeros', (['pd.shape'], {}), '(pd.shape)\n', (3828, 3838), True, 'import numpy as np\n'), ((4261, 4284), 'numpy.zeros', 'np.zeros', (['pd_edge.shape'], {}), '(pd_edge.shape)\n', (4269, 4284), True, 'import numpy as np\n'), ((4448, 4471), 'numpy.zeros', 'np.zeros', (['gt_edge.shape'], {}), '(gt_edge.shape)\n', (4456, 4471), True, 'import numpy as np\n'), ((6260, 6289), 'numpy.where', 'np.where', (['(gt_onepix_mask == 1)'], {}), '(gt_onepix_mask == 1)\n', (6268, 6289), True, 'import numpy as np\n'), ((6313, 6344), 'numpy.where', 'np.where', (['(pred_onepix_mask == 1)'], {}), '(pred_onepix_mask == 1)\n', (6321, 6344), True, 'import numpy as np\n'), ((6609, 6664), 'numpy.vstack', 'np.vstack', (['(gt_ones_px_coords[0], gt_ones_px_coords[1])'], {}), '((gt_ones_px_coords[0], gt_ones_px_coords[1]))\n', (6618, 6664), True, 'import numpy as np\n'), ((6742, 6795), 'numpy.vstack', 'np.vstack', (['(pred_onepix_mask[0], pred_onepix_mask[1])'], {}), '((pred_onepix_mask[0], pred_onepix_mask[1]))\n', (6751, 6795), True, 'import numpy as np\n'), ((3636, 3643), 'skimage.morphology.disk', 'disk', (['(1)'], {}), '(1)\n', (3640, 3643), False, 'from skimage.morphology import erosion, disk\n'), ((4122, 4145), 'numpy.logical_not', 'np.logical_not', (['gt_edge'], {}), '(gt_edge)\n', (4136, 4145), True, 'import numpy as np\n'), ((4184, 4207), 'numpy.logical_not', 'np.logical_not', (['pd_edge'], {}), '(pd_edge)\n', (4198, 4207), True, 'import numpy as np\n'), ((4624, 4646), 'numpy.sum', 'np.sum', (['pd_edge_buffer'], {}), '(pd_edge_buffer)\n', (4630, 4646), True, 'import numpy as np\n'), ((4722, 4744), 'numpy.sum', 'np.sum', (['gt_edge_buffer'], {}), '(gt_edge_buffer)\n', (4728, 4744), True, 'import numpy as np\n'), ((5536, 5563), 'scipy.ndimage.binary_erosion', 'ndimage.binary_erosion', (['im1'], {}), '(im1)\n', (5558, 5563), False, 'from scipy import ndimage\n'), ((5626, 5653), 'scipy.ndimage.binary_erosion', 'ndimage.binary_erosion', (['im2'], {}), '(im2)\n', (5648, 5653), False, 'from scipy import ndimage\n'), ((4665, 4680), 'numpy.sum', 'np.sum', (['pd_edge'], {}), '(pd_edge)\n', (4671, 4680), True, 'import numpy as np\n'), ((4763, 4778), 'numpy.sum', 'np.sum', (['gt_edge'], {}), '(gt_edge)\n', (4769, 4778), True, 'import numpy as np\n')]
|
import uuid
import json
import pandas as pd
import numpy as np
from gibbon.utility import Convert
class Buildings:
def __init__(self, sensor, path=None):
self.sensor = sensor
self.df = None
self.selected = None
if path:
self.load_dataframe(path)
def load_dataframe(self, path):
buildings = list()
with open(path, 'r', encoding='utf-8') as f:
data = json.load(f)
features = data['features']
for f in features:
try:
floors = f['properties']['Floor']
coords = f['geometry']['coordinates'][0][0][:-1]
building = {'coords': coords, 'floors': floors}
buildings.append(building)
except Exception as e:
pass
uids = [uuid.uuid4() for i in range(len(buildings))]
df = pd.DataFrame(buildings, index=uids)
f = np.vectorize(lambda a, b: np.average(np.array(a), axis=0).tolist()[b])
df['lng'], df['lat'] = f(df['coords'], 0), f(df['coords'], 1)
self.df = df
def create(self):
def f(lnglats):
return [Convert.lnglat_to_mercator(ll, self.sensor.origin) for ll in lnglats]
if self.df is not None:
selected = self.df[
(self.df['lng'] > self.sensor.llbounds[0][0]) &
(self.df['lng'] < self.sensor.llbounds[1][0]) &
(self.df['lat'] > self.sensor.llbounds[0][1]) &
(self.df['lat'] < self.sensor.llbounds[1][1])
        ].copy()  # avoid SettingWithCopyWarning when adding columns below
selected['position'] = selected['coords'].apply(f)
selected['height'] = selected['floors'] * 3000
self.selected = selected
@property
def extrusion(self):
return [
{
'tp': 'extrude',
'l': data['position'],
'h': data['height']
} for i, data in self.selected.iterrows()
]
def dump_extrusion(self, path):
with open(path, 'w', encoding='utf-8') as f:
json.dump(self.extrusion, f)
if __name__ == '__main__':
from gibbon.maps import MapSensor
path = r'F:\02_projects\YangDaShiTouBiao\geojson\Changsha.geojson'
path_out = r'F:\02_projects\YangDaShiTouBiao\geojson\YangDaShiTouBiao.json'
origin = [113.058780, 28.201170]
radius = 2000
sensor = MapSensor(origin, radius)
bmap = Buildings(sensor, path)
print(bmap.df)
bmap.create()
bmap.dump_extrusion(path_out)
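    # Illustrative note (assumption based on the extrusion property above): the
    # dumped JSON is a list of dicts {'tp': 'extrude', 'l': <footprint coords
    # converted via Convert.lnglat_to_mercator>, 'h': floors * 3000}.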
|
[
"pandas.DataFrame",
"json.dump",
"json.load",
"gibbon.maps.MapSensor",
"uuid.uuid4",
"gibbon.utility.Convert.lnglat_to_mercator",
"numpy.array"
] |
[((2421, 2446), 'gibbon.maps.MapSensor', 'MapSensor', (['origin', 'radius'], {}), '(origin, radius)\n', (2430, 2446), False, 'from gibbon.maps import MapSensor\n'), ((918, 953), 'pandas.DataFrame', 'pd.DataFrame', (['buildings'], {'index': 'uids'}), '(buildings, index=uids)\n', (930, 953), True, 'import pandas as pd\n'), ((434, 446), 'json.load', 'json.load', (['f'], {}), '(f)\n', (443, 446), False, 'import json\n'), ((860, 872), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (870, 872), False, 'import uuid\n'), ((2104, 2132), 'json.dump', 'json.dump', (['self.extrusion', 'f'], {}), '(self.extrusion, f)\n', (2113, 2132), False, 'import json\n'), ((1196, 1246), 'gibbon.utility.Convert.lnglat_to_mercator', 'Convert.lnglat_to_mercator', (['ll', 'self.sensor.origin'], {}), '(ll, self.sensor.origin)\n', (1222, 1246), False, 'from gibbon.utility import Convert\n'), ((1004, 1015), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (1012, 1015), True, 'import numpy as np\n')]
|
# coding: utf8
import copy
import os
import pytest
import numpy as np
import numpy.testing as npt
import openturns as ot
import matplotlib.pyplot as plt
from batman.space import (Space, Doe, dists_to_ot)
from batman.functions import Ishigami
from batman.surrogate import SurrogateModel
from batman.space.refiner import Refiner
def test_dists_to_ot():
dists = dists_to_ot(['Uniform(12, 15)', 'Normal(400, 10)'])
out = [ot.Uniform(12, 15), ot.Normal(400, 10)]
assert dists == out
with pytest.raises(AttributeError):
dists_to_ot(['Uniorm(12, 15)'])
def test_space(settings_ishigami, seed):
corners = settings_ishigami['space']['corners']
space = Space(corners)
assert space.max_points_nb == np.inf
space = Space(corners, sample=10)
assert space.max_points_nb == 10
space = Space(corners, sample=10, nrefine=6,
plabels=['x', 'y', 'z'])
assert space.max_points_nb == 16
space += (1, 2, 3)
npt.assert_array_equal(space.values, [(1, 2, 3)])
space.empty()
npt.assert_array_equal(space.values, np.empty((0, 3)))
space += [(1, 2, 3), (1, 1, 3)]
npt.assert_array_equal(space.values, [(1, 2, 3), (1, 1, 3)])
space2 = Space(corners, space.values)
npt.assert_array_equal(space2.values, [(1, 2, 3), (1, 1, 3)])
s1 = space.sampling()
assert len(s1) == 10
space2 = Space(corners,
sample=settings_ishigami['space']['sampling']['init_size'],
nrefine=settings_ishigami['space']['resampling']['resamp_size'])
s2 = space2.sampling(10, kind='lhsc')
assert len(s2) == 10
assert np.any(s1 != s2)
space.empty()
space += (1, 2, 3)
space += (1, 2, 3)
assert len(space) == 1
space = Space(corners, sample=16, duplicate=True)
space += (1, 2, 3)
space += (1, 2, 3)
assert len(space) == 2
with pytest.raises(ValueError):
space += (1, 2)
assert len(space) == 2
space += (1, 7, 3)
assert len(space) == 2
space.sampling(17)
assert len(space) == 16
space.empty()
dists = ['Uniform(0., 1.)', 'Uniform(-1., 2.)', 'Uniform(-2., 3.)']
space.sampling(5, kind='halton', dists=dists)
out = [(0.5, 0.0, -1.0), (0.25, 1.0, 0.0), (0.75, -0.67, 1.0),
(0.125, 0.33, 2.0), (0.625, 1.33, -1.8)]
npt.assert_almost_equal(space, out, decimal=1)
space = Space(corners, sample=np.array([(1, 2, 3), (1, 1, 3)]))
assert space.doe_init == 2
assert space.max_points_nb == 2
test_settings = copy.deepcopy(settings_ishigami)
test_settings['space']['corners'][1] = [np.pi, -np.pi, np.pi]
with pytest.raises(ValueError):
Space(test_settings['space']['corners'])
def test_space_evaluation(settings_ishigami):
f_3d = Ishigami()
space = Space(settings_ishigami['space']['corners'])
space.sampling(2, 'halton')
targets_space = f_3d(space)
f_data_base = np.array([5.25, 4.2344145]).reshape(2, 1)
npt.assert_almost_equal(targets_space, f_data_base)
def test_doe(seed):
bounds = np.array([[0, 2], [10, 5]])
n = 5
doe = Doe(n, bounds, 'uniform', discrete=0)
sample = doe.generate()
out = [[0., 2.], [10., 2.], [0., 5.], [10., 5.]]
npt.assert_almost_equal(sample, out, decimal=1)
doe = Doe(n, bounds, 'halton', discrete=0)
sample = doe.generate()
out = [[5., 3.], [2., 4.], [8., 2.3], [1., 3.3], [6., 4.3]]
npt.assert_almost_equal(sample, out, decimal=1)
doe = Doe(n, bounds, 'halton', discrete=1)
sample = doe.generate()
out = [[5, 3], [2.5, 4], [7.5, 2], [1.25, 3], [6.25, 5]]
npt.assert_almost_equal(sample, out, decimal=1)
doe = Doe(n, np.array([[0, 2, -2], [10, 5, -1]]), 'halton', discrete=1)
sample = doe.generate()
out = [[5, 3, -1.8], [2.5, 4, -1.6], [7.5, 2, -1.4],
[1.25, 3, -1.2], [6.25, 5, -1.96]]
npt.assert_almost_equal(sample, out, decimal=1)
doe = Doe(n, bounds, 'halton')
sample = doe.generate()
out = [[5., 3.], [2.5, 4.], [7.5, 2.3], [1.25, 3.3], [6.25, 4.3]]
npt.assert_almost_equal(sample, out, decimal=1)
doe = Doe(n, bounds, 'sobolscramble', discrete=0)
sample = doe.generate()
doe = Doe(n, bounds, 'olhs')
sample = doe.generate()
out = [[6.149, 2.343], [9.519, 3.497], [1.991, 4.058],
[5.865, 4.995], [2.551, 2.737]]
npt.assert_almost_equal(sample, out, decimal=1)
bounds = [[15.0, 2500.0], [60.0, 6000.0]]
with pytest.raises(AttributeError):
dists = ['Um(15., 60.)', 'Normal(4035., 400.)']
doe = Doe(n, bounds, 'halton', dists)
dists = ['Uniform(15., 60.)', 'Normal(4035., 400.)']
doe = Doe(n, bounds, 'halton', dists)
sample = doe.generate()
out = np.array([[37.5, 3862.709], [26.25, 4207.291], [48.75, 3546.744],
[20.625, 3979.116], [43.125, 4340.884]])
npt.assert_almost_equal(sample, out, decimal=1)
dists = ['Uniform(15., 60.)', 'Normal(4035., 400.)']
doe = Doe(13, bounds, 'saltelli', dists)
sample = doe.generate()
assert (len(sample) == 12) or (len(sample) == 8)
doe = Doe(10, bounds, 'saltelli', dists)
sample = doe.generate()
assert (len(sample) == 6) or (len(sample) == 4)
def plot_hypercube(hypercube):
"""Plot an hypercube.
:param array_like hypercube ([min, n_features], [max, n_features]).
"""
hypercube = hypercube.T
plt.plot([hypercube[0, 0], hypercube[0, 0],
hypercube[0, 0], hypercube[1, 0],
hypercube[1, 0], hypercube[1, 0],
hypercube[0, 0], hypercube[1, 0]],
[hypercube[0, 1], hypercube[1, 1],
hypercube[1, 1], hypercube[1, 1],
hypercube[1, 1], hypercube[0, 1],
hypercube[0, 1], hypercube[0, 1]])
@pytest.mark.xfail(raises=AssertionError, reason='Global optimization')
def test_refiner_basics(tmp, branin_data, settings_ishigami, seed):
f_2d = branin_data.func
space = branin_data.space
space.sampling(11, 'halton')
surrogate = SurrogateModel('kriging', space.corners, space.plabels)
surrogate.fit(space, f_2d(space))
refiner = Refiner(surrogate, space.corners, delta_space=0.08)
distance_min = refiner.distance_min(refiner.space.values[0])
assert distance_min == pytest.approx(0.163461, abs=0.001)
hypercube = refiner.hypercube_distance(refiner.space.values[0], distance_min)
npt.assert_almost_equal(hypercube, [[-0.62, 3.62], [3.04, 6.96]], decimal=2)
hypercube_optim = refiner.hypercube_optim(refiner.space.values[0])
npt.assert_almost_equal(hypercube_optim,
[[-0.61, 5.74], [1.0, 11.66]], decimal=2)
# Plotting
# import os
# import itertools
# import matplotlib.pyplot as plt
# from matplotlib import cm
# num = 25
# x = np.linspace(-7, 10, num=num)
# y = np.linspace(0, 15, num=num)
# points = np.array([(float(i), float(j)) for i, j in itertools.product(x, y)])
# x = points[:, 0].flatten()
# y = points[:, 1].flatten()
# pred, _ = surrogate(points)
# pred = np.array(pred).flatten()
# space = np.array(space[:])
# plt.figure()
# plt.tricontourf(x, y, pred, antialiased=True, cmap=cm.viridis)
# cbar = plt.colorbar()
# cbar.set_label(r'$f(x_1, x_2)$')
# plt.scatter(space[:11, 0], space[:11, 1], label='initial sample')
# # plt.scatter(space[4, 0], space[4, 1], label='Anchor point')
# plot_hypercube(refiner.corners)
# plot_hypercube(hypercube)
# plot_hypercube(hypercube_optim)
# plt.xlabel(r'$x_1$', fontsize=24)
# plt.ylabel(r'$x_2$', fontsize=24)
# plt.tick_params(axis='y')
# for txt, point in enumerate(space):
# plt.annotate(txt, point, xycoords='offset points')
# plt.legend(fontsize=21, bbox_to_anchor=(1.3, 1), borderaxespad=0)
# plt.show()
@pytest.mark.xfail(raises=AssertionError, reason='Global optimization')
def test_resampling(tmp, branin_data, settings_ishigami, seed):
f_2d = branin_data.func
space = branin_data.space
test_settings = copy.deepcopy(settings_ishigami)
test_settings['snapshot']['plabels'] = ['x1', 'x2']
space.empty()
max_points_nb = 5
space.sampling(max_points_nb, 'halton')
space.max_points_nb = 100
surrogate = SurrogateModel('kriging', space.corners, space.plabels)
surrogate.fit(space, f_2d(space))
# Larger dataset to ensure stable results
space.empty()
max_points_nb = 11
space.sampling(max_points_nb, 'halton')
surrogate = SurrogateModel('kriging', space.corners, space.plabels)
surrogate.fit(space, f_2d(space))
for _ in range(2):
space.refine(surrogate, 'sigma')
surrogate.fit(space, f_2d(space))
assert len(space) == 13
refiner = Refiner(surrogate, space.corners, delta_space=0.15)
point_loo = refiner.space.values[5]
loo_si = refiner.leave_one_out_sigma(point_loo)
npt.assert_almost_equal(loo_si, [-2.76, 2.], decimal=2)
loo_so = refiner.leave_one_out_sobol(point_loo, ['Uniform(-5, 0)',
'Uniform(10, 15)'])
npt.assert_almost_equal(loo_so, [-2.86, 2.28], decimal=2)
sigma = refiner.sigma()
npt.assert_almost_equal(sigma, [4.85, 6.561], decimal=1)
optim_EI_min = refiner.optimization(method='EI')
npt.assert_almost_equal(optim_EI_min, [-2.176, 9.208], decimal=1)
optim_EI_max = refiner.optimization(extremum='max')
npt.assert_almost_equal(optim_EI_max, [6.59, 12.999], decimal=1)
optim_PI = refiner.optimization(method='PI')
npt.assert_almost_equal(optim_PI, [-2.328, 9.441], decimal=1)
disc = refiner.discrepancy()
npt.assert_almost_equal(disc, [7, 13.], decimal=1)
extrema = np.array(refiner.extrema([])[0])
# npt.assert_almost_equal(extrema, [[-2.694, 2.331], [2.576, 2.242]], decimal=1)
base_sigma_disc = refiner.sigma_discrepancy()
npt.assert_almost_equal(base_sigma_disc,
refiner.sigma_discrepancy([0.5, 0.5]), decimal=1)
assert (base_sigma_disc != refiner.sigma_discrepancy([-0.1, 1.])).any()
# Refiner without surrogate
refiner = Refiner(surrogate, space.corners, delta_space=0.1)
disc2 = refiner.discrepancy()
npt.assert_almost_equal(disc2, [8., 13.], decimal=1)
# Plotting
# import os
# import itertools
# import matplotlib.pyplot as plt
# from matplotlib import cm
# num = 25
# x = np.linspace(-7, 10, num=num)
# y = np.linspace(0, 15, num=num)
# points = np.array([(float(i), float(j)) for i, j in itertools.product(x, y)])
# x = points[:, 0].flatten()
# y = points[:, 1].flatten()
# pred, si = surrogate(points)
# si = np.array(si).flatten()
# pred = np.array(pred).flatten()
# space = np.array(space[:])
# plt.figure()
# plt.tricontourf(x, y, si, antialiased=True, cmap=cm.viridis)
# cbar = plt.colorbar()
# cbar.set_label(r'$f(x_1, x_2)$')
# plt.show()
# plt.figure()
# plt.tricontourf(x, y, pred, antialiased=True, cmap=cm.viridis)
# cbar = plt.colorbar()
# cbar.set_label(r'$f(x_1, x_2)$')
# plt.scatter(space[:11, 0], space[:11, 1], label='initial sample')
# plt.scatter(space[11:, 0], space[11:, 1], label='firsts sigma')
# plt.scatter(-3.68928528, 13.62998774, label='global extrema')
# hypercube_optim = refiner.hypercube_optim(refiner.space.values[5])
# plot_hypercube(hypercube_optim)
# plot_hypercube(refiner.corners)
# plt.scatter(loo_so[0], loo_so[1], label='LOO-sigma')
# plt.scatter(loo_si[0], loo_si[1], label='LOO-sobol')
# plt.scatter(sigma[0], sigma[1], label='sigma')
# plt.scatter(optim_EI_min[0], optim_EI_min[1], label='optimization EI min')
# plt.scatter(optim_EI_max[0], optim_EI_max[1], label='optimization EI max')
# plt.scatter(optim_PI[0], optim_PI[1], label='optimization PI')
# plt.scatter(disc[0], disc[1], label='discrepancy')
# plt.scatter(disc2[0], disc2[1], label='discrepancy without surrogate')
# # plt.scatter(extrema[:, 0], extrema[:, 1], label='extrema')
# plt.scatter(base_sigma_disc[0], base_sigma_disc[1], label='sigma+discrepancy')
# plt.xlabel(r'$x_1$', fontsize=24)
# plt.ylabel(r'$x_2$', fontsize=24)
# plt.tick_params(axis='y')
# for txt, point in enumerate(space):
# plt.annotate(txt, point, xycoords='offset points')
# plt.legend(fontsize=21, bbox_to_anchor=(1.3, 1), borderaxespad=0)
# plt.show()
def test_discrepancy():
corners = [[0.5, 0.5], [6.5, 6.5]]
space_1 = Space(corners)
space_2 = Space(corners)
space_1 += [[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]]
space_2 += [[1, 5], [2, 4], [3, 3], [4, 2], [5, 1], [6, 6]]
assert Space.discrepancy(space_1, space_1.corners) == pytest.approx(0.0081, abs=1e-4)
assert Space.discrepancy(space_2, space_2.corners) == pytest.approx(0.0105, abs=1e-4)
space_1 = (2.0 * space_1.values - 1.0) / (2.0 * 6.0)
assert Space.discrepancy(space_1) == pytest.approx(0.0081, abs=1e-4)
space = np.array([[2, 1, 1, 2, 2, 2],
[1, 2, 2, 2, 2, 2],
[2, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 2, 2],
[1, 2, 2, 2, 1, 1],
[2, 2, 2, 2, 1, 1],
[2, 2, 2, 1, 2, 2]])
space = (2.0 * space - 1.0) / (2.0 * 2.0)
assert Space.discrepancy(space, method='MD') == pytest.approx(2.5000, abs=1e-4)
assert Space.discrepancy(space, method='WD') == pytest.approx(1.3680, abs=1e-4)
assert Space.discrepancy(space, method='CD') == pytest.approx(0.3172, abs=1e-4)
def test_mst(tmp):
sample = np.array([[0.25, 0.5], [0.6, 0.4], [0.7, 0.2]])
mean, std, edges = Space.mst(sample, fname=os.path.join(tmp, 'mst.pdf'))
assert mean == pytest.approx(0.2938, abs=1e-4)
assert std == pytest.approx(0.0702, abs=1e-4)
npt.assert_equal(edges, [[0, 1], [1, 2]])
|
[
"batman.space.Space",
"batman.space.refiner.Refiner",
"numpy.empty",
"batman.space.dists_to_ot",
"os.path.join",
"numpy.testing.assert_almost_equal",
"pytest.raises",
"batman.functions.Ishigami",
"openturns.Uniform",
"numpy.testing.assert_equal",
"copy.deepcopy",
"numpy.testing.assert_array_equal",
"batman.space.Space.discrepancy",
"batman.space.Doe",
"pytest.approx",
"pytest.mark.xfail",
"matplotlib.pyplot.plot",
"openturns.Normal",
"numpy.any",
"batman.surrogate.SurrogateModel",
"numpy.array"
] |
[((5774, 5844), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'AssertionError', 'reason': '"""Global optimization"""'}), "(raises=AssertionError, reason='Global optimization')\n", (5791, 5844), False, 'import pytest\n'), ((7846, 7916), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'AssertionError', 'reason': '"""Global optimization"""'}), "(raises=AssertionError, reason='Global optimization')\n", (7863, 7916), False, 'import pytest\n'), ((365, 416), 'batman.space.dists_to_ot', 'dists_to_ot', (["['Uniform(12, 15)', 'Normal(400, 10)']"], {}), "(['Uniform(12, 15)', 'Normal(400, 10)'])\n", (376, 416), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((680, 694), 'batman.space.Space', 'Space', (['corners'], {}), '(corners)\n', (685, 694), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((749, 774), 'batman.space.Space', 'Space', (['corners'], {'sample': '(10)'}), '(corners, sample=10)\n', (754, 774), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((825, 886), 'batman.space.Space', 'Space', (['corners'], {'sample': '(10)', 'nrefine': '(6)', 'plabels': "['x', 'y', 'z']"}), "(corners, sample=10, nrefine=6, plabels=['x', 'y', 'z'])\n", (830, 886), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((971, 1020), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['space.values', '[(1, 2, 3)]'], {}), '(space.values, [(1, 2, 3)])\n', (993, 1020), True, 'import numpy.testing as npt\n'), ((1140, 1200), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['space.values', '[(1, 2, 3), (1, 1, 3)]'], {}), '(space.values, [(1, 2, 3), (1, 1, 3)])\n', (1162, 1200), True, 'import numpy.testing as npt\n'), ((1215, 1243), 'batman.space.Space', 'Space', (['corners', 'space.values'], {}), '(corners, space.values)\n', (1220, 1243), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((1248, 1309), 'numpy.testing.assert_array_equal', 'npt.assert_array_equal', (['space2.values', '[(1, 2, 3), (1, 1, 3)]'], {}), '(space2.values, [(1, 2, 3), (1, 1, 3)])\n', (1270, 1309), True, 'import numpy.testing as npt\n'), ((1375, 1518), 'batman.space.Space', 'Space', (['corners'], {'sample': "settings_ishigami['space']['sampling']['init_size']", 'nrefine': "settings_ishigami['space']['resampling']['resamp_size']"}), "(corners, sample=settings_ishigami['space']['sampling']['init_size'],\n nrefine=settings_ishigami['space']['resampling']['resamp_size'])\n", (1380, 1518), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((1632, 1648), 'numpy.any', 'np.any', (['(s1 != s2)'], {}), '(s1 != s2)\n', (1638, 1648), True, 'import numpy as np\n'), ((1754, 1795), 'batman.space.Space', 'Space', (['corners'], {'sample': '(16)', 'duplicate': '(True)'}), '(corners, sample=16, duplicate=True)\n', (1759, 1795), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((2324, 2370), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['space', 'out'], {'decimal': '(1)'}), '(space, out, decimal=1)\n', (2347, 2370), True, 'import numpy.testing as npt\n'), ((2528, 2560), 'copy.deepcopy', 'copy.deepcopy', (['settings_ishigami'], {}), '(settings_ishigami)\n', (2541, 2560), False, 'import copy\n'), ((2771, 2781), 'batman.functions.Ishigami', 'Ishigami', ([], {}), '()\n', (2779, 2781), False, 'from batman.functions import Ishigami\n'), ((2794, 2838), 'batman.space.Space', 'Space', (["settings_ishigami['space']['corners']"], {}), "(settings_ishigami['space']['corners'])\n", (2799, 2838), False, 'from batman.space import 
Space, Doe, dists_to_ot\n'), ((2967, 3018), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['targets_space', 'f_data_base'], {}), '(targets_space, f_data_base)\n', (2990, 3018), True, 'import numpy.testing as npt\n'), ((3054, 3081), 'numpy.array', 'np.array', (['[[0, 2], [10, 5]]'], {}), '([[0, 2], [10, 5]])\n', (3062, 3081), True, 'import numpy as np\n'), ((3103, 3140), 'batman.space.Doe', 'Doe', (['n', 'bounds', '"""uniform"""'], {'discrete': '(0)'}), "(n, bounds, 'uniform', discrete=0)\n", (3106, 3140), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((3226, 3273), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['sample', 'out'], {'decimal': '(1)'}), '(sample, out, decimal=1)\n', (3249, 3273), True, 'import numpy.testing as npt\n'), ((3285, 3321), 'batman.space.Doe', 'Doe', (['n', 'bounds', '"""halton"""'], {'discrete': '(0)'}), "(n, bounds, 'halton', discrete=0)\n", (3288, 3321), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((3418, 3465), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['sample', 'out'], {'decimal': '(1)'}), '(sample, out, decimal=1)\n', (3441, 3465), True, 'import numpy.testing as npt\n'), ((3477, 3513), 'batman.space.Doe', 'Doe', (['n', 'bounds', '"""halton"""'], {'discrete': '(1)'}), "(n, bounds, 'halton', discrete=1)\n", (3480, 3513), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((3607, 3654), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['sample', 'out'], {'decimal': '(1)'}), '(sample, out, decimal=1)\n', (3630, 3654), True, 'import numpy.testing as npt\n'), ((3867, 3914), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['sample', 'out'], {'decimal': '(1)'}), '(sample, out, decimal=1)\n', (3890, 3914), True, 'import numpy.testing as npt\n'), ((3926, 3950), 'batman.space.Doe', 'Doe', (['n', 'bounds', '"""halton"""'], {}), "(n, bounds, 'halton')\n", (3929, 3950), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((4053, 4100), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['sample', 'out'], {'decimal': '(1)'}), '(sample, out, decimal=1)\n', (4076, 4100), True, 'import numpy.testing as npt\n'), ((4112, 4155), 'batman.space.Doe', 'Doe', (['n', 'bounds', '"""sobolscramble"""'], {'discrete': '(0)'}), "(n, bounds, 'sobolscramble', discrete=0)\n", (4115, 4155), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((4195, 4217), 'batman.space.Doe', 'Doe', (['n', 'bounds', '"""olhs"""'], {}), "(n, bounds, 'olhs')\n", (4198, 4217), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((4352, 4399), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['sample', 'out'], {'decimal': '(1)'}), '(sample, out, decimal=1)\n', (4375, 4399), True, 'import numpy.testing as npt\n'), ((4658, 4689), 'batman.space.Doe', 'Doe', (['n', 'bounds', '"""halton"""', 'dists'], {}), "(n, bounds, 'halton', dists)\n", (4661, 4689), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((4728, 4839), 'numpy.array', 'np.array', (['[[37.5, 3862.709], [26.25, 4207.291], [48.75, 3546.744], [20.625, 3979.116],\n [43.125, 4340.884]]'], {}), '([[37.5, 3862.709], [26.25, 4207.291], [48.75, 3546.744], [20.625, \n 3979.116], [43.125, 4340.884]])\n', (4736, 4839), True, 'import numpy as np\n'), ((4859, 4906), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['sample', 'out'], {'decimal': '(1)'}), '(sample, out, decimal=1)\n', (4882, 4906), True, 'import numpy.testing as npt\n'), ((4975, 
5009), 'batman.space.Doe', 'Doe', (['(13)', 'bounds', '"""saltelli"""', 'dists'], {}), "(13, bounds, 'saltelli', dists)\n", (4978, 5009), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((5102, 5136), 'batman.space.Doe', 'Doe', (['(10)', 'bounds', '"""saltelli"""', 'dists'], {}), "(10, bounds, 'saltelli', dists)\n", (5105, 5136), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((5389, 5686), 'matplotlib.pyplot.plot', 'plt.plot', (['[hypercube[0, 0], hypercube[0, 0], hypercube[0, 0], hypercube[1, 0],\n hypercube[1, 0], hypercube[1, 0], hypercube[0, 0], hypercube[1, 0]]', '[hypercube[0, 1], hypercube[1, 1], hypercube[1, 1], hypercube[1, 1],\n hypercube[1, 1], hypercube[0, 1], hypercube[0, 1], hypercube[0, 1]]'], {}), '([hypercube[0, 0], hypercube[0, 0], hypercube[0, 0], hypercube[1, 0\n ], hypercube[1, 0], hypercube[1, 0], hypercube[0, 0], hypercube[1, 0]],\n [hypercube[0, 1], hypercube[1, 1], hypercube[1, 1], hypercube[1, 1],\n hypercube[1, 1], hypercube[0, 1], hypercube[0, 1], hypercube[0, 1]])\n', (5397, 5686), True, 'import matplotlib.pyplot as plt\n'), ((6020, 6075), 'batman.surrogate.SurrogateModel', 'SurrogateModel', (['"""kriging"""', 'space.corners', 'space.plabels'], {}), "('kriging', space.corners, space.plabels)\n", (6034, 6075), False, 'from batman.surrogate import SurrogateModel\n'), ((6129, 6180), 'batman.space.refiner.Refiner', 'Refiner', (['surrogate', 'space.corners'], {'delta_space': '(0.08)'}), '(surrogate, space.corners, delta_space=0.08)\n', (6136, 6180), False, 'from batman.space.refiner import Refiner\n'), ((6396, 6472), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['hypercube', '[[-0.62, 3.62], [3.04, 6.96]]'], {'decimal': '(2)'}), '(hypercube, [[-0.62, 3.62], [3.04, 6.96]], decimal=2)\n', (6419, 6472), True, 'import numpy.testing as npt\n'), ((6549, 6635), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['hypercube_optim', '[[-0.61, 5.74], [1.0, 11.66]]'], {'decimal': '(2)'}), '(hypercube_optim, [[-0.61, 5.74], [1.0, 11.66]],\n decimal=2)\n', (6572, 6635), True, 'import numpy.testing as npt\n'), ((8059, 8091), 'copy.deepcopy', 'copy.deepcopy', (['settings_ishigami'], {}), '(settings_ishigami)\n', (8072, 8091), False, 'import copy\n'), ((8279, 8334), 'batman.surrogate.SurrogateModel', 'SurrogateModel', (['"""kriging"""', 'space.corners', 'space.plabels'], {}), "('kriging', space.corners, space.plabels)\n", (8293, 8334), False, 'from batman.surrogate import SurrogateModel\n'), ((8521, 8576), 'batman.surrogate.SurrogateModel', 'SurrogateModel', (['"""kriging"""', 'space.corners', 'space.plabels'], {}), "('kriging', space.corners, space.plabels)\n", (8535, 8576), False, 'from batman.surrogate import SurrogateModel\n'), ((8764, 8815), 'batman.space.refiner.Refiner', 'Refiner', (['surrogate', 'space.corners'], {'delta_space': '(0.15)'}), '(surrogate, space.corners, delta_space=0.15)\n', (8771, 8815), False, 'from batman.space.refiner import Refiner\n'), ((8913, 8969), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['loo_si', '[-2.76, 2.0]'], {'decimal': '(2)'}), '(loo_si, [-2.76, 2.0], decimal=2)\n', (8936, 8969), True, 'import numpy.testing as npt\n'), ((9119, 9176), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['loo_so', '[-2.86, 2.28]'], {'decimal': '(2)'}), '(loo_so, [-2.86, 2.28], decimal=2)\n', (9142, 9176), True, 'import numpy.testing as npt\n'), ((9211, 9267), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['sigma', '[4.85, 6.561]'], {'decimal': 
'(1)'}), '(sigma, [4.85, 6.561], decimal=1)\n', (9234, 9267), True, 'import numpy.testing as npt\n'), ((9327, 9392), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['optim_EI_min', '[-2.176, 9.208]'], {'decimal': '(1)'}), '(optim_EI_min, [-2.176, 9.208], decimal=1)\n', (9350, 9392), True, 'import numpy.testing as npt\n'), ((9454, 9518), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['optim_EI_max', '[6.59, 12.999]'], {'decimal': '(1)'}), '(optim_EI_max, [6.59, 12.999], decimal=1)\n', (9477, 9518), True, 'import numpy.testing as npt\n'), ((9574, 9635), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['optim_PI', '[-2.328, 9.441]'], {'decimal': '(1)'}), '(optim_PI, [-2.328, 9.441], decimal=1)\n', (9597, 9635), True, 'import numpy.testing as npt\n'), ((9674, 9725), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['disc', '[7, 13.0]'], {'decimal': '(1)'}), '(disc, [7, 13.0], decimal=1)\n', (9697, 9725), True, 'import numpy.testing as npt\n'), ((10155, 10205), 'batman.space.refiner.Refiner', 'Refiner', (['surrogate', 'space.corners'], {'delta_space': '(0.1)'}), '(surrogate, space.corners, delta_space=0.1)\n', (10162, 10205), False, 'from batman.space.refiner import Refiner\n'), ((10244, 10298), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['disc2', '[8.0, 13.0]'], {'decimal': '(1)'}), '(disc2, [8.0, 13.0], decimal=1)\n', (10267, 10298), True, 'import numpy.testing as npt\n'), ((12565, 12579), 'batman.space.Space', 'Space', (['corners'], {}), '(corners)\n', (12570, 12579), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((12594, 12608), 'batman.space.Space', 'Space', (['corners'], {}), '(corners)\n', (12599, 12608), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((13063, 13217), 'numpy.array', 'np.array', (['[[2, 1, 1, 2, 2, 2], [1, 2, 2, 2, 2, 2], [2, 1, 1, 1, 1, 1], [1, 1, 1, 1, 2,\n 2], [1, 2, 2, 2, 1, 1], [2, 2, 2, 2, 1, 1], [2, 2, 2, 1, 2, 2]]'], {}), '([[2, 1, 1, 2, 2, 2], [1, 2, 2, 2, 2, 2], [2, 1, 1, 1, 1, 1], [1, 1,\n 1, 1, 2, 2], [1, 2, 2, 2, 1, 1], [2, 2, 2, 2, 1, 1], [2, 2, 2, 1, 2, 2]])\n', (13071, 13217), True, 'import numpy as np\n'), ((13679, 13726), 'numpy.array', 'np.array', (['[[0.25, 0.5], [0.6, 0.4], [0.7, 0.2]]'], {}), '([[0.25, 0.5], [0.6, 0.4], [0.7, 0.2]])\n', (13687, 13726), True, 'import numpy as np\n'), ((13910, 13951), 'numpy.testing.assert_equal', 'npt.assert_equal', (['edges', '[[0, 1], [1, 2]]'], {}), '(edges, [[0, 1], [1, 2]])\n', (13926, 13951), True, 'import numpy.testing as npt\n'), ((428, 446), 'openturns.Uniform', 'ot.Uniform', (['(12)', '(15)'], {}), '(12, 15)\n', (438, 446), True, 'import openturns as ot\n'), ((448, 466), 'openturns.Normal', 'ot.Normal', (['(400)', '(10)'], {}), '(400, 10)\n', (457, 466), True, 'import openturns as ot\n'), ((502, 531), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (515, 531), False, 'import pytest\n'), ((541, 572), 'batman.space.dists_to_ot', 'dists_to_ot', (["['Uniorm(12, 15)']"], {}), "(['Uniorm(12, 15)'])\n", (552, 572), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((1081, 1097), 'numpy.empty', 'np.empty', (['(0, 3)'], {}), '((0, 3))\n', (1089, 1097), True, 'import numpy as np\n'), ((1879, 1904), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1892, 1904), False, 'import pytest\n'), ((2636, 2661), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2649, 2661), False, 'import pytest\n'), ((2671, 
2711), 'batman.space.Space', 'Space', (["test_settings['space']['corners']"], {}), "(test_settings['space']['corners'])\n", (2676, 2711), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((3673, 3708), 'numpy.array', 'np.array', (['[[0, 2, -2], [10, 5, -1]]'], {}), '([[0, 2, -2], [10, 5, -1]])\n', (3681, 3708), True, 'import numpy as np\n'), ((4457, 4486), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (4470, 4486), False, 'import pytest\n'), ((4558, 4589), 'batman.space.Doe', 'Doe', (['n', 'bounds', '"""halton"""', 'dists'], {}), "(n, bounds, 'halton', dists)\n", (4561, 4589), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((6274, 6308), 'pytest.approx', 'pytest.approx', (['(0.163461)'], {'abs': '(0.001)'}), '(0.163461, abs=0.001)\n', (6287, 6308), False, 'import pytest\n'), ((12750, 12793), 'batman.space.Space.discrepancy', 'Space.discrepancy', (['space_1', 'space_1.corners'], {}), '(space_1, space_1.corners)\n', (12767, 12793), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((12797, 12830), 'pytest.approx', 'pytest.approx', (['(0.0081)'], {'abs': '(0.0001)'}), '(0.0081, abs=0.0001)\n', (12810, 12830), False, 'import pytest\n'), ((12840, 12883), 'batman.space.Space.discrepancy', 'Space.discrepancy', (['space_2', 'space_2.corners'], {}), '(space_2, space_2.corners)\n', (12857, 12883), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((12887, 12920), 'pytest.approx', 'pytest.approx', (['(0.0105)'], {'abs': '(0.0001)'}), '(0.0105, abs=0.0001)\n', (12900, 12920), False, 'import pytest\n'), ((12988, 13014), 'batman.space.Space.discrepancy', 'Space.discrepancy', (['space_1'], {}), '(space_1)\n', (13005, 13014), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((13018, 13051), 'pytest.approx', 'pytest.approx', (['(0.0081)'], {'abs': '(0.0001)'}), '(0.0081, abs=0.0001)\n', (13031, 13051), False, 'import pytest\n'), ((13404, 13441), 'batman.space.Space.discrepancy', 'Space.discrepancy', (['space'], {'method': '"""MD"""'}), "(space, method='MD')\n", (13421, 13441), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((13445, 13475), 'pytest.approx', 'pytest.approx', (['(2.5)'], {'abs': '(0.0001)'}), '(2.5, abs=0.0001)\n', (13458, 13475), False, 'import pytest\n'), ((13488, 13525), 'batman.space.Space.discrepancy', 'Space.discrepancy', (['space'], {'method': '"""WD"""'}), "(space, method='WD')\n", (13505, 13525), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((13529, 13561), 'pytest.approx', 'pytest.approx', (['(1.368)'], {'abs': '(0.0001)'}), '(1.368, abs=0.0001)\n', (13542, 13561), False, 'import pytest\n'), ((13572, 13609), 'batman.space.Space.discrepancy', 'Space.discrepancy', (['space'], {'method': '"""CD"""'}), "(space, method='CD')\n", (13589, 13609), False, 'from batman.space import Space, Doe, dists_to_ot\n'), ((13613, 13646), 'pytest.approx', 'pytest.approx', (['(0.3172)'], {'abs': '(0.0001)'}), '(0.3172, abs=0.0001)\n', (13626, 13646), False, 'import pytest\n'), ((13824, 13857), 'pytest.approx', 'pytest.approx', (['(0.2938)'], {'abs': '(0.0001)'}), '(0.2938, abs=0.0001)\n', (13837, 13857), False, 'import pytest\n'), ((13874, 13907), 'pytest.approx', 'pytest.approx', (['(0.0702)'], {'abs': '(0.0001)'}), '(0.0702, abs=0.0001)\n', (13887, 13907), False, 'import pytest\n'), ((2406, 2438), 'numpy.array', 'np.array', (['[(1, 2, 3), (1, 1, 3)]'], {}), '([(1, 2, 3), (1, 1, 3)])\n', (2414, 2438), True, 'import numpy as np\n'), ((2921, 2948), 'numpy.array', 'np.array', 
(['[5.25, 4.2344145]'], {}), '([5.25, 4.2344145])\n', (2929, 2948), True, 'import numpy as np\n'), ((13774, 13802), 'os.path.join', 'os.path.join', (['tmp', '"""mst.pdf"""'], {}), "(tmp, 'mst.pdf')\n", (13786, 13802), False, 'import os\n')]
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import Cython.Compiler.Options
Cython.Compiler.Options.annotate = True
from Cython.Distutils import build_ext
import os
import sys
import shutil
import numpy
folder = "."
if len(sys.argv) >3:
    folder = sys.argv[3]
elif len(sys.argv) <3:
print("ARGUMENTS ERROR, put the script direction parameter")
exit()
direction = 1
try:
direction = int(sys.argv[2])
except:
pass
os.chdir(folder)
try:
os.makedirs("backfiles", 0x755 );
except:
pass
print("usage")
print("python.exe .\setup.py build_ext 1 #for unbuilding")
print("python.exe .\setup.py build_ext 0 #for building")
if direction == 1:
#change pyx to py....
with os.scandir() as i:
for entry in i:
if entry.name == os.path.basename(__file__):
continue
print(entry.name)
if entry.is_file() and (entry.name.endswith(".pyd") or entry.name.endswith(".c") or entry.name.endswith(".pyx") or entry.name.endswith(".html")):
os.remove(entry.name)
elif entry.is_dir() and entry.name == "build":
shutil.rmtree(entry.name)
elif entry.is_dir() and entry.name == "__pycache__":
shutil.rmtree(entry.name)
os.chdir("backfiles")
with os.scandir() as i:
for entry in i:
print("jake "+ entry.name)
if entry.is_file():
print("moving")
shutil.move(entry.name, "../"+entry.name)
elif entry.is_dir() and entry.name == "build":
shutil.movetree(entry.name, "../"+entry.name)
exit()
else:
#change py to pyx
ext_modules=[]
with os.scandir() as i:
for entry in i:
if entry.name == os.path.basename(__file__):
continue
if entry.is_file() and entry.name.endswith(".py"):
name = os.path.splitext(entry.name)[0]
shutil.copy(entry.name, name+'.pyx')
if entry.name.startswith("__") is True:
continue #don't add files like __init__.pyx to the extensions...
print(name + " --- "+name+'.pyx')
shutil.move(entry.name, "backfiles/"+entry.name)
ext_modules.append(Extension(name, [name+'.pyx'], include_dirs = ['.']))
print(entry.name)
"""
ext_modules=[
Extension("utils", ["utils.pyx"], include_dirs = ['.']),
Extension("embedder", ["embedder.pyx"], include_dirs = ['.']),
Extension("classifier", ["classifier.pyx"], include_dirs = ['.']),
Extension("image_loader", ["image_loader.pyx"], include_dirs = ['.']),
Extension("context", ["context.pyx"], include_dirs = ['.']),
Extension("model_predictor", ["model_predictor.pyx"], include_dirs = ['.']),
]
"""
setup(
name="embedder_classifier",
ext_modules=cythonize(
ext_modules,
compiler_directives={'language_level' : "3"}) , # or "2" or "3str" ,
include_dirs=[numpy.get_include()],
cmdclass = {'build_ext': build_ext},
script_args = ['build_ext'],
options = {'build_ext':{'inplace':True, 'force':True}}
)
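# Summary note (comment only): with direction 0 the script copies every .py
# (except this setup file) to a .pyx, moves the original .py into backfiles/,
# and compiles the .pyx files in place via build_ext; with direction 1 it
# removes the generated .pyd/.c/.pyx/.html files plus the build/ and
# __pycache__/ directories and restores the saved .py files from backfiles/.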
#all files in a folder
#setup(
# name = 'embedder_classifier',
# cmdclass = {'build_ext': build_ext},
# ext_modules = ext_modules, #cythonize(["*.pyx"]),
#)
#setup(
#name = 'embedder_classifier',
# cmdclass = {'build_ext': build_ext},
# ext_modules = cythonize(["*.pyx"]),
#)
|
[
"os.remove",
"Cython.Build.cythonize",
"os.makedirs",
"os.path.basename",
"distutils.extension.Extension",
"numpy.get_include",
"os.path.splitext",
"shutil.move",
"shutil.copy",
"shutil.movetree",
"shutil.rmtree",
"os.chdir",
"os.scandir"
] |
[((516, 532), 'os.chdir', 'os.chdir', (['folder'], {}), '(folder)\n', (524, 532), False, 'import os\n'), ((544, 574), 'os.makedirs', 'os.makedirs', (['"""backfiles"""', '(1877)'], {}), "('backfiles', 1877)\n", (555, 574), False, 'import os\n'), ((1348, 1369), 'os.chdir', 'os.chdir', (['"""backfiles"""'], {}), "('backfiles')\n", (1356, 1369), False, 'import os\n'), ((790, 802), 'os.scandir', 'os.scandir', ([], {}), '()\n', (800, 802), False, 'import os\n'), ((1378, 1390), 'os.scandir', 'os.scandir', ([], {}), '()\n', (1388, 1390), False, 'import os\n'), ((1763, 1775), 'os.scandir', 'os.scandir', ([], {}), '()\n', (1773, 1775), False, 'import os\n'), ((3014, 3081), 'Cython.Build.cythonize', 'cythonize', (['ext_modules'], {'compiler_directives': "{'language_level': '3'}"}), "(ext_modules, compiler_directives={'language_level': '3'})\n", (3023, 3081), False, 'from Cython.Build import cythonize\n'), ((860, 886), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (876, 886), False, 'import os\n'), ((1111, 1132), 'os.remove', 'os.remove', (['entry.name'], {}), '(entry.name)\n', (1120, 1132), False, 'import os\n'), ((1533, 1576), 'shutil.move', 'shutil.move', (['entry.name', "('../' + entry.name)"], {}), "(entry.name, '../' + entry.name)\n", (1544, 1576), False, 'import shutil\n'), ((1833, 1859), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (1849, 1859), False, 'import os\n'), ((2014, 2052), 'shutil.copy', 'shutil.copy', (['entry.name', "(name + '.pyx')"], {}), "(entry.name, name + '.pyx')\n", (2025, 2052), False, 'import shutil\n'), ((2255, 2305), 'shutil.move', 'shutil.move', (['entry.name', "('backfiles/' + entry.name)"], {}), "(entry.name, 'backfiles/' + entry.name)\n", (2266, 2305), False, 'import shutil\n'), ((3138, 3157), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (3155, 3157), False, 'import numpy\n'), ((1208, 1233), 'shutil.rmtree', 'shutil.rmtree', (['entry.name'], {}), '(entry.name)\n', (1221, 1233), False, 'import shutil\n'), ((1650, 1697), 'shutil.movetree', 'shutil.movetree', (['entry.name', "('../' + entry.name)"], {}), "(entry.name, '../' + entry.name)\n", (1665, 1697), False, 'import shutil\n'), ((1967, 1995), 'os.path.splitext', 'os.path.splitext', (['entry.name'], {}), '(entry.name)\n', (1983, 1995), False, 'import os\n'), ((2338, 2390), 'distutils.extension.Extension', 'Extension', (['name', "[name + '.pyx']"], {'include_dirs': "['.']"}), "(name, [name + '.pyx'], include_dirs=['.'])\n", (2347, 2390), False, 'from distutils.extension import Extension\n'), ((1315, 1340), 'shutil.rmtree', 'shutil.rmtree', (['entry.name'], {}), '(entry.name)\n', (1328, 1340), False, 'import shutil\n')]
|
from operator import itemgetter
import Plane
import Polygon
import Receiver
import numpy as np
class Space(object):
def __init__(self):
self.polygons = []
self.__axes = np.zeros((3, 3))
def vertical_plane(self, origin, facing_angle): # compass direction, degrees
angle = (90 - facing_angle) * np.pi / 180
sin_a = np.sin(angle)
cos_a = np.cos(angle)
self.__axes[0,:] = [-sin_a, cos_a, 0]
self.__axes[1,:] = [ 0, 0, 1] # face y-axis is vertical (scene-z)
self.__axes[2,:] = [ cos_a, sin_a, 0] # face z-axis is the normal
return Plane.Plane(origin, self.__axes)
def horizontal_plane(self, origin, facing_up): # boolean: True for facing up
if facing_up:
self.__axes[0,:] = [ 1, 0, 0 ]
self.__axes[1,:] = [ 0, 1, 0 ]
self.__axes[2,:] = [ 0, 0, 1 ]
else:
self.__axes[0,:] = [ 1, 0, 0 ]
self.__axes[1,:] = [ 0, -1, 0 ] # both y- & z-axes get reflected
self.__axes[2,:] = [ 0, 0, -1 ]
return Plane.Plane(origin, self.__axes)
def plane_from_points(self, P1, P2, P3):
Bi = P2 - P1
Bk = np.cross(Bi, P3 - P2)
Bj = np.cross(Bk, Bi)
self.__axes[0,:] = Bi / np.linalg.norm(Bi)
self.__axes[1,:] = Bj / np.linalg.norm(Bj)
self.__axes[2,:] = Bk / np.linalg.norm(Bk)
return Plane.Plane(P1, self.__axes)
def add_poly(self, polygon):
if polygon.material.is_illustrative():
if polygon.ill_only is None:
polygon.ill_only = [0,0,0]
self.polygons.append(polygon)
def __make_poly(self, verts, indices, material):
plane = self.plane_from_points(verts[indices[0],:], verts[indices[1],:], verts[indices[2],:])
polygon = Polygon.Polygon(plane, len(indices), material)
pi = 0
for i in indices:
xy, z = plane.project(verts[i,:])
polygon.set_vertex(pi, xy)
pi += 1
self.add_poly(polygon)
return polygon
def __make_zone(self, polygon, vert1, vert2, zone_material, zone_width):
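        # Build a four-vertex diffraction-zone quad along the edge vert1 -> vert2, offset out of the
        # face plane using the face normal (Bk) and the in-plane edge normal (Bj).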
Bk = polygon.plane.basis_k
Bi = vert2 - vert1
Bj = np.cross(Bk, Bi)
Bi /= np.linalg.norm(Bi)
Bj /= np.linalg.norm(Bj)
offset = zone_width * (Bk * np.sqrt(3) - Bj) / 2
verts = np.zeros((4,3))
verts[0,:] = vert2
verts[1,:] = vert2 + offset
verts[2,:] = vert1 + offset
verts[3,:] = vert1
self.__make_poly(verts, range(0, 4), zone_material)
def __make_zones(self, polygon, verts, indices, zone_material, zone_widths):
for i1 in range(0, polygon.count):
if zone_widths[i1] > 0:
i2 = i1 + 1
if i2 == polygon.count:
i2 = 0
self.__make_zone(polygon, verts[indices[i1],:], verts[indices[i2],:], zone_material, zone_widths[i1])
def add_box(self, base_center, base_wh, height, facing_angle, material, diffraction_zones=None):
# Diffraction zones: two planes 30 degrees apart, at 30 degrees to closest walls
# effective change of origin, halving distance?
# - a list of zero-or-positive zone sizes corresponding to each of the 12 box edges
# - a material for the zones
# e.g., diffraction_zones=(material, [0,0,0,0,3,3,3,3,1,1,1,1])
angle = -facing_angle * np.pi / 180
sin_a = np.sin(angle)
cos_a = np.cos(angle)
w, h = base_wh
verts = np.zeros((8,3))
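        # verts[0:4] are the base-rectangle corners rotated by the facing angle; verts[4:8] are the
        # same corners lifted to roof height.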
verts[0,0:2] = [( w / 2) * cos_a - ( h / 2) * sin_a, ( w / 2) * sin_a + ( h / 2) * cos_a]
verts[1,0:2] = [(-w / 2) * cos_a - ( h / 2) * sin_a, (-w / 2) * sin_a + ( h / 2) * cos_a]
verts[2,0:2] = [(-w / 2) * cos_a - (-h / 2) * sin_a, (-w / 2) * sin_a + (-h / 2) * cos_a]
verts[3,0:2] = [( w / 2) * cos_a - (-h / 2) * sin_a, ( w / 2) * sin_a + (-h / 2) * cos_a]
verts[4:8,0:2] = verts[0:4,0:2]
verts[4:8,2] = height
verts += base_center
p_base = self.__make_poly(verts, [0,3,2,1], material)
p_roof = self.__make_poly(verts, [4,5,6,7], material)
p_front = self.__make_poly(verts, [0,1,5,4], material)
p_right = self.__make_poly(verts, [1,2,6,5], material)
p_back = self.__make_poly(verts, [2,3,7,6], material)
p_left = self.__make_poly(verts, [3,0,4,7], material)
if diffraction_zones is not None:
zone_material, edge_list = diffraction_zones
self.__make_zones(p_base, verts, [0,3,2,1], zone_material, itemgetter(3, 2, 1, 0)(edge_list))
self.__make_zones(p_roof, verts, [4,5,6,7], zone_material, itemgetter(8, 9,10,11)(edge_list))
self.__make_zones(p_front, verts, [0,1,5,4], zone_material, itemgetter(0, 4, 8, 5)(edge_list))
self.__make_zones(p_right, verts, [1,2,6,5], zone_material, itemgetter(1, 5, 9, 6)(edge_list))
self.__make_zones(p_back, verts, [2,3,7,6], zone_material, itemgetter(2, 6,10, 7)(edge_list))
self.__make_zones(p_left, verts, [3,0,4,7], zone_material, itemgetter(3, 7,11, 4)(edge_list))
def cube(self, center, cube_dimension, material, add_to_scene=True):
polygons = []
origin = np.asarray(center)
plane = self.horizontal_plane(origin + [0,0,cube_dimension/2], True)
polygons.append(Polygon.Polygon(plane, 4, material))
plane = self.horizontal_plane(origin - [0,0,cube_dimension/2], False)
polygons.append(Polygon.Polygon(plane, 4, material))
plane = self.vertical_plane(origin + [0,cube_dimension/2,0], 0)
polygons.append(Polygon.Polygon(plane, 4, material))
plane = self.vertical_plane(origin + [cube_dimension/2,0,0], 90)
polygons.append(Polygon.Polygon(plane, 4, material))
plane = self.vertical_plane(origin - [0,cube_dimension/2,0], 180)
polygons.append(Polygon.Polygon(plane, 4, material))
plane = self.vertical_plane(origin - [cube_dimension/2,0,0], 270)
polygons.append(Polygon.Polygon(plane, 4, material))
for p in polygons:
p.square(cube_dimension)
if add_to_scene:
self.add_poly(p)
return polygons
def make_receiver(self, origin, cube_dimension, material):
return Receiver.Receiver(self, origin, cube_dimension, material)
|
[
"Polygon.Polygon",
"Receiver.Receiver",
"numpy.asarray",
"numpy.zeros",
"numpy.cross",
"numpy.sin",
"numpy.linalg.norm",
"numpy.cos",
"Plane.Plane",
"operator.itemgetter",
"numpy.sqrt"
] |
[((193, 209), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (201, 209), True, 'import numpy as np\n'), ((358, 371), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (364, 371), True, 'import numpy as np\n'), ((388, 401), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (394, 401), True, 'import numpy as np\n'), ((621, 653), 'Plane.Plane', 'Plane.Plane', (['origin', 'self.__axes'], {}), '(origin, self.__axes)\n', (632, 653), False, 'import Plane\n'), ((1097, 1129), 'Plane.Plane', 'Plane.Plane', (['origin', 'self.__axes'], {}), '(origin, self.__axes)\n', (1108, 1129), False, 'import Plane\n'), ((1210, 1231), 'numpy.cross', 'np.cross', (['Bi', '(P3 - P2)'], {}), '(Bi, P3 - P2)\n', (1218, 1231), True, 'import numpy as np\n'), ((1245, 1261), 'numpy.cross', 'np.cross', (['Bk', 'Bi'], {}), '(Bk, Bi)\n', (1253, 1261), True, 'import numpy as np\n'), ((1432, 1460), 'Plane.Plane', 'Plane.Plane', (['P1', 'self.__axes'], {}), '(P1, self.__axes)\n', (1443, 1460), False, 'import Plane\n'), ((2238, 2254), 'numpy.cross', 'np.cross', (['Bk', 'Bi'], {}), '(Bk, Bi)\n', (2246, 2254), True, 'import numpy as np\n'), ((2270, 2288), 'numpy.linalg.norm', 'np.linalg.norm', (['Bi'], {}), '(Bi)\n', (2284, 2288), True, 'import numpy as np\n'), ((2303, 2321), 'numpy.linalg.norm', 'np.linalg.norm', (['Bj'], {}), '(Bj)\n', (2317, 2321), True, 'import numpy as np\n'), ((2397, 2413), 'numpy.zeros', 'np.zeros', (['(4, 3)'], {}), '((4, 3))\n', (2405, 2413), True, 'import numpy as np\n'), ((3502, 3515), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (3508, 3515), True, 'import numpy as np\n'), ((3532, 3545), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (3538, 3545), True, 'import numpy as np\n'), ((3587, 3603), 'numpy.zeros', 'np.zeros', (['(8, 3)'], {}), '((8, 3))\n', (3595, 3603), True, 'import numpy as np\n'), ((5338, 5356), 'numpy.asarray', 'np.asarray', (['center'], {}), '(center)\n', (5348, 5356), True, 'import numpy as np\n'), ((6411, 6468), 'Receiver.Receiver', 'Receiver.Receiver', (['self', 'origin', 'cube_dimension', 'material'], {}), '(self, origin, cube_dimension, material)\n', (6428, 6468), False, 'import Receiver\n'), ((1295, 1313), 'numpy.linalg.norm', 'np.linalg.norm', (['Bi'], {}), '(Bi)\n', (1309, 1313), True, 'import numpy as np\n'), ((1346, 1364), 'numpy.linalg.norm', 'np.linalg.norm', (['Bj'], {}), '(Bj)\n', (1360, 1364), True, 'import numpy as np\n'), ((1397, 1415), 'numpy.linalg.norm', 'np.linalg.norm', (['Bk'], {}), '(Bk)\n', (1411, 1415), True, 'import numpy as np\n'), ((5459, 5494), 'Polygon.Polygon', 'Polygon.Polygon', (['plane', '(4)', 'material'], {}), '(plane, 4, material)\n', (5474, 5494), False, 'import Polygon\n'), ((5599, 5634), 'Polygon.Polygon', 'Polygon.Polygon', (['plane', '(4)', 'material'], {}), '(plane, 4, material)\n', (5614, 5634), False, 'import Polygon\n'), ((5735, 5770), 'Polygon.Polygon', 'Polygon.Polygon', (['plane', '(4)', 'material'], {}), '(plane, 4, material)\n', (5750, 5770), False, 'import Polygon\n'), ((5871, 5906), 'Polygon.Polygon', 'Polygon.Polygon', (['plane', '(4)', 'material'], {}), '(plane, 4, material)\n', (5886, 5906), False, 'import Polygon\n'), ((6007, 6042), 'Polygon.Polygon', 'Polygon.Polygon', (['plane', '(4)', 'material'], {}), '(plane, 4, material)\n', (6022, 6042), False, 'import Polygon\n'), ((6143, 6178), 'Polygon.Polygon', 'Polygon.Polygon', (['plane', '(4)', 'material'], {}), '(plane, 4, material)\n', (6158, 6178), False, 'import Polygon\n'), ((4654, 4676), 'operator.itemgetter', 'itemgetter', (['(3)', '(2)', '(1)', '(0)'], {}), '(3, 2, 1, 0)\n', (4664, 4676), False, 'from operator import itemgetter\n'), ((4761, 4785), 'operator.itemgetter', 'itemgetter', (['(8)', '(9)', '(10)', '(11)'], {}), '(8, 9, 10, 11)\n', (4771, 4785), False, 'from operator import itemgetter\n'), ((4868, 4890), 'operator.itemgetter', 'itemgetter', (['(0)', '(4)', '(8)', '(5)'], {}), '(0, 4, 8, 5)\n', (4878, 4890), False, 'from operator import itemgetter\n'), ((4975, 4997), 'operator.itemgetter', 'itemgetter', (['(1)', '(5)', '(9)', '(6)'], {}), '(1, 5, 9, 6)\n', (4985, 4997), False, 'from operator import itemgetter\n'), ((5082, 5105), 'operator.itemgetter', 'itemgetter', (['(2)', '(6)', '(10)', '(7)'], {}), '(2, 6, 10, 7)\n', (5092, 5105), False, 'from operator import itemgetter\n'), ((5189, 5212), 'operator.itemgetter', 'itemgetter', (['(3)', '(7)', '(11)', '(4)'], {}), '(3, 7, 11, 4)\n', (5199, 5212), False, 'from operator import itemgetter\n'), ((2359, 2369), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (2366, 2369), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
def make_info_str(args):
s = ''
for k in vars(args):
s += '# ' + str(k) + ': ' + str(getattr(args,k)) + '\n'
return s
def print_stats(steps,dm, meta=False):
from time import strftime
from time import time
if isinstance(meta, str):
meta = ' | {:s}'.format(meta)
else:
meta = ''
print(
'{:s} | stp: {:d} sec: {:.2f} v: {:d} e: {:d} f: {:d}{:s}'
.format(
strftime('%d/%m/%y %H:%M:%S'),
steps,
time()-dm.get_start_time(),
dm.get_vnum(),
dm.get_henum(),
dm.get_fnum(),
meta
)
)
return
def get_exporter(dm, fn, nmax):
from numpy import zeros
from .geometry import move_scale
from iutils.ioOBJ import export
np_verts = zeros((nmax, 3), 'float')
np_tris = zeros((nmax, 3), 'int')
np_int = zeros(nmax, 'float')
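  # Buffers are allocated once here and refilled by the exporter closure on every call.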
def e():
vnum = dm.np_get_vertices(np_verts)
tnum = dm.np_get_triangles_vertices(np_tris)
dm.np_get_triangles_intensity(np_int)
move_scale(np_verts[:vnum, :], s=1000)
export(
'thing_mesh',
fn.name(),
verts=np_verts[:vnum, :],
tris=np_tris[:tnum, :]
)
return e
def get_surface_vertices(dm):
res = []
for he in range(dm.get_henum()):
e = dm.is_surface_edge(he)
if e > 0:
d = dm.get_edge_dict(he)
res.append(d['first'])
res.append(d['last'])
return list(set(res))
def get_seed_selector(dm, t, sr=None):
from numpy import array
from numpy import arange
from numpy import ones
from numpy.random import random
if sr is not None:
get_mask = lambda n, sr: (random(size=n) < sr).nonzero()[0]
else:
get_mask = lambda n, sr: ones(n, 'bool')
if t == 'surface':
def f():
vertices = array(get_surface_vertices(dm))
rm = get_mask(len(vertices), sr)
if len(rm) < 1:
return array([])
return vertices[rm]
elif t == 'random':
def f():
vn = dm.get_vnum()
vertices = arange(vn)
rm = get_mask(len(vertices), sr)
if len(rm) < 1:
return array([])
return vertices[rm]
else:
raise ValueError('use "surface" or "random".')
return f
|
[
"numpy.zeros",
"numpy.ones",
"time.strftime",
"time.time",
"numpy.random.random",
"numpy.array",
"numpy.arange"
] |
[((780, 805), 'numpy.zeros', 'zeros', (['(nmax, 3)', '"""float"""'], {}), "((nmax, 3), 'float')\n", (785, 805), False, 'from numpy import zeros\n'), ((818, 841), 'numpy.zeros', 'zeros', (['(nmax, 3)', '"""int"""'], {}), "((nmax, 3), 'int')\n", (823, 841), False, 'from numpy import zeros\n'), ((853, 873), 'numpy.zeros', 'zeros', (['nmax', '"""float"""'], {}), "(nmax, 'float')\n", (858, 873), False, 'from numpy import zeros\n'), ((431, 460), 'time.strftime', 'strftime', (['"""%d/%m/%y %H:%M:%S"""'], {}), "('%d/%m/%y %H:%M:%S')\n", (439, 460), False, 'from time import strftime\n'), ((1711, 1726), 'numpy.ones', 'ones', (['n', '"""bool"""'], {}), "(n, 'bool')\n", (1715, 1726), False, 'from numpy import ones\n'), ((489, 495), 'time.time', 'time', ([], {}), '()\n', (493, 495), False, 'from time import time\n'), ((1887, 1896), 'numpy.array', 'array', (['[]'], {}), '([])\n', (1892, 1896), False, 'from numpy import array\n'), ((2001, 2011), 'numpy.arange', 'arange', (['vn'], {}), '(vn)\n', (2007, 2011), False, 'from numpy import arange\n'), ((2088, 2097), 'numpy.array', 'array', (['[]'], {}), '([])\n', (2093, 2097), False, 'from numpy import array\n'), ((1640, 1654), 'numpy.random.random', 'random', ([], {'size': 'n'}), '(size=n)\n', (1646, 1654), False, 'from numpy.random import random\n')]
|
# Read a data file and apply min/max scaling to a
# selected column, writing the min/max values to a proto.
# Ultimately not used because too slow compared to C++, and
# would need to implement unscaling and have the ability to
# read from a pipe.
import csv
from absl import app
from absl import flags
from google.protobuf.json_format import MessageToJson
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from Utilities.General import feature_scaling_pb2
FLAGS = flags.FLAGS
flags.DEFINE_string("input", None, "Input data file to process")
flags.DEFINE_string("output", None, "proto file that will contain the range")
flags.DEFINE_string("sep", ' ', "Input file token delimiter")
flags.DEFINE_integer("header", 1, "Number of header records")
flags.DEFINE_integer("col", 2, "The column to process")
flags.DEFINE_string("proto", None, "Name of FeatureScaling proto to create")
def feature_scaling(unused_argv):
"""Apply min/max scaling to
"""
del unused_argv
col = FLAGS.col - 1
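  # FLAGS.col is 1-based on the command line; pandas/numpy indexing below is 0-based.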
data = pd.read_csv(FLAGS.input, sep=FLAGS.sep, header=FLAGS.header, usecols=[0, col])
mycol = np.array(data.iloc[:,col])
mymin = np.min(mycol)
mymax = np.max(mycol)
data.iloc[:,col] = (mycol - mymin) / (mymax - mymin)
data.to_csv(FLAGS.output, FLAGS.sep, index=False)
if not FLAGS.proto:
return
proto = feature_scaling_pb2.FeatureScaling()
proto.min = mymin
proto.max = mymax
proto.nsamples = mycol.shape[0]
proto.mean = np.mean(mycol)
with open(FLAGS.proto, "wb") as output:
output.write(proto.SerializeToString())
print(MessageToJson(proto))
if __name__ == '__main__':
flags.mark_flag_as_required('input')
flags.mark_flag_as_required('output')
app.run(feature_scaling)
|
[
"pandas.read_csv",
"Utilities.General.feature_scaling_pb2.FeatureScaling",
"absl.flags.mark_flag_as_required",
"absl.flags.DEFINE_string",
"numpy.min",
"numpy.max",
"absl.flags.DEFINE_integer",
"numpy.array",
"numpy.mean",
"absl.app.run",
"google.protobuf.json_format.MessageToJson"
] |
[((518, 582), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""input"""', 'None', '"""Input data file to process"""'], {}), "('input', None, 'Input data file to process')\n", (537, 582), False, 'from absl import flags\n'), ((583, 660), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""output"""', 'None', '"""proto file that will contain the range"""'], {}), "('output', None, 'proto file that will contain the range')\n", (602, 660), False, 'from absl import flags\n'), ((661, 722), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""sep"""', '""" """', '"""Input file token delimiter"""'], {}), "('sep', ' ', 'Input file token delimiter')\n", (680, 722), False, 'from absl import flags\n'), ((723, 784), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""header"""', '(1)', '"""Number of header records"""'], {}), "('header', 1, 'Number of header records')\n", (743, 784), False, 'from absl import flags\n'), ((785, 840), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""col"""', '(2)', '"""The column to process"""'], {}), "('col', 2, 'The column to process')\n", (805, 840), False, 'from absl import flags\n'), ((841, 917), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""proto"""', 'None', '"""Name of FeatureScaling proto to create"""'], {}), "('proto', None, 'Name of FeatureScaling proto to create')\n", (860, 917), False, 'from absl import flags\n'), ((1041, 1119), 'pandas.read_csv', 'pd.read_csv', (['FLAGS.input'], {'sep': 'FLAGS.sep', 'header': 'FLAGS.header', 'usecols': '[0, col]'}), '(FLAGS.input, sep=FLAGS.sep, header=FLAGS.header, usecols=[0, col])\n', (1052, 1119), True, 'import pandas as pd\n'), ((1131, 1158), 'numpy.array', 'np.array', (['data.iloc[:, col]'], {}), '(data.iloc[:, col])\n', (1139, 1158), True, 'import numpy as np\n'), ((1168, 1181), 'numpy.min', 'np.min', (['mycol'], {}), '(mycol)\n', (1174, 1181), True, 'import numpy as np\n'), ((1192, 1205), 'numpy.max', 'np.max', (['mycol'], {}), '(mycol)\n', (1198, 1205), True, 'import numpy as np\n'), ((1358, 1394), 'Utilities.General.feature_scaling_pb2.FeatureScaling', 'feature_scaling_pb2.FeatureScaling', ([], {}), '()\n', (1392, 1394), False, 'from Utilities.General import feature_scaling_pb2\n'), ((1484, 1498), 'numpy.mean', 'np.mean', (['mycol'], {}), '(mycol)\n', (1491, 1498), True, 'import numpy as np\n'), ((1645, 1681), 'absl.flags.mark_flag_as_required', 'flags.mark_flag_as_required', (['"""input"""'], {}), "('input')\n", (1672, 1681), False, 'from absl import flags\n'), ((1684, 1721), 'absl.flags.mark_flag_as_required', 'flags.mark_flag_as_required', (['"""output"""'], {}), "('output')\n", (1711, 1721), False, 'from absl import flags\n'), ((1724, 1748), 'absl.app.run', 'app.run', (['feature_scaling'], {}), '(feature_scaling)\n', (1731, 1748), False, 'from absl import app\n'), ((1593, 1613), 'google.protobuf.json_format.MessageToJson', 'MessageToJson', (['proto'], {}), '(proto)\n', (1606, 1613), False, 'from google.protobuf.json_format import MessageToJson\n')]
|
import cv2
import mxnet as mx
import numpy as np
import scipy as sc
from utils.math import Distances
from dataProcessor.tiffReader import GEOMAP
from validation.osmClasses import OSMClasses
from utils.labelProcessor import LabelProcessor
from validation.clcClasses import CLCClasses
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import mean_squared_log_error, normalized_mutual_info_score
from lib.mapar.mapar import Mapar
from sklearn.neighbors import BallTree
from sklearn.neighbors import DistanceMetric
class MultiClassValidation():
'''
Each tile is represented by the proportion taken by each class in the image.
    A simple regressor is trained (supervised) on the real proportions of the labels; the quality of the
    embeddings is measured by the quality of that regressor.
    MAP@R and gSil determine the score without an additional regressor.
'''
def __init__(self, size, classifiers={'knn': KNeighborsClassifier()}, distance=Distances.L2_Dist, validation_map=GEOMAP.OSM):
self.distance = distance
self.classifiers = classifiers
self.labelProcessor = LabelProcessor(size, validation_map)
if validation_map == GEOMAP.OSM:
self.colorToLabel = OSMClasses.getLabel
self.nb_labels = 13
elif validation_map == GEOMAP.CLC:
self.colorToLabel = CLCClasses.getLabel
self.nb_labels = 45
def train(self, embeddings, class_imgs):
data, labels = self.getTrainData(embeddings, class_imgs)
for key, classifier in self.classifiers.items():
classifier.fit(data, labels)
return self
def predict(self, embeddings, class_imgs):
data, labels = self.getTrainData(embeddings, class_imgs)
for key, classifier in self.classifiers.items():
pred_labels = classifier.predict(data)
for clabel in range(0, len(labels)):
print(labels[clabel], pred_labels[clabel])
def silhouette(X, y):
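        # Soft silhouette: rows of y are fuzzy class proportions rather than hard labels; pairwise
        # distances are weighted by how strongly both points belong to the clusters being compared.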
y = y+1e-8
weights = y.sum(axis=1)
y = y/np.expand_dims(y.sum(axis=1), axis=1)
out = 0
x_tree = BallTree(
X, leaf_size=2, metric=DistanceMetric.get_metric("l2"))
xdist, xind = x_tree.query(X, k=X.shape[0], sort_results=True)
a_dists = []
b_dists = []
for cluster in range(0, y.shape[1]):
intra = np.min([np.repeat(y[xind[:, :1]][:, :, cluster],
xind[:,1:].shape[1], axis=1), y[xind[:, 1:]][:, :, cluster]], axis=0)
# print(intra.shape)
a_dist = 1.0/intra.sum(axis=1) * \
np.sum(xdist[:, 1:] * intra, axis=1)
a_dists.append(a_dist)
for compcluster in range(0, y.shape[1]):
if cluster != compcluster:
inter1 = np.repeat(
y[xind[:, :1]][:, :, cluster], xind[:, 1:].shape[1], axis=1)
inter2 = y[xind[:, 1:]][:, :, compcluster]
inter12 = np.min([inter1, inter2], axis=0)
inter3 = np.repeat(
y[xind[:, :1]][:, :, compcluster], xind[:, 1:].shape[1], axis=1)
inter4 = y[xind[:, 1:]][:, :, cluster]
inter34 = np.min([inter3, inter4], axis=0)
inter = np.max([inter12, inter34], axis=0)
b_dist = 1.0/inter.sum(axis=1) * \
np.sum(xdist[:, 1:] * inter, axis=1)
b_dists.append(b_dist)
a_dist = np.min(a_dists, axis=0)
b_dist = np.min(b_dists, axis=0)
out = ((b_dist-a_dist)/np.max([b_dist, a_dist], axis=(0)))
return (out*weights).sum()/weights.sum()
def scores(self, embeddings, class_imgs):
tsum_error = {}
nmi = {}
silhouette_scores = {}
ccc = {}
mapar1 = {}
mapar5 = {}
mapar10 = {}
data, labels = self.getTrainData(embeddings, class_imgs)
for key, classifier in self.classifiers.items():
pred_labels = classifier.predict(data)
pred_labels = pred_labels / \
np.expand_dims(pred_labels.sum(axis=1), axis=1)
pred_labels = np.clip(pred_labels, 0, 1)
#err = mean_squared_log_error(pred_labels, labels)
sum_error = 0
for batch in range(0, len(labels)):
sum_error += np.sum(np.abs(pred_labels[batch] - labels[batch]))
sum_error = sum_error / len(labels)
tsum_error[key] = sum_error
mean_nmi_score = 0
for batch in range(0, len(pred_labels)):
score = normalized_mutual_info_score(pred_labels[batch].astype(
int), labels[batch].astype(int), average_method='arithmetic')
mean_nmi_score += score
nmi[key] = mean_nmi_score/len(pred_labels)
silhouette_scores[key] = MultiClassValidation.silhouette(
data, labels)
dendro = sc.cluster.hierarchy.linkage(labels, 'single')
dists = sc.spatial.distance.pdist(data)
cophe_dists = sc.cluster.hierarchy.cophenet(dendro)
ccc[key] = np.corrcoef(dists, cophe_dists)[0, 1]
mapar1[key] = Mapar.score(data, labels, k=1)
mapar5[key] = Mapar.score(data, labels, k=5)
mapar10[key] = Mapar.score(data, labels, k=10)
return tsum_error, nmi, silhouette_scores, ccc, mapar1, mapar5, mapar10
def getTrainData(self, embeddings, class_imgs):
data = None
labels = None
np_class_imgs = class_imgs
np_embeddings = embeddings
for np_class_img in range(0, len(np_class_imgs)):
label = self.labelProcessor.getLabels(
np_class_imgs[np_class_img], processed=True)
# print(idx)
if label.sum() > 0:
if data is None:
data = np.expand_dims(np_embeddings[np_class_img], axis=0)
labels = np.expand_dims(label, axis=0)
else:
data = np.concatenate((data, np.expand_dims(
np_embeddings[np_class_img], axis=0)), axis=0)
labels = np.concatenate(
(labels, np.expand_dims(label, axis=0)), axis=0)
labels = labels/np.expand_dims(labels.sum(axis=1), axis=1)
return data, labels
def unitTest():
def mean(img):
return img.mean(axis=(0, 1), exclude=True)
images, coords, px_coords, valid = batchSampler.unitTest(32, 64)
emb_pred, emb_pos, emb_neg = images
class_pred, class_pos, class_neg = valid
embs = nd.concat(mean(emb_pred), mean(emb_pos), dim=0)
class_imgs = nd.concat(class_pred, class_pos, dim=0)
embs = nd.concat(embs, mean(emb_neg), dim=0)
class_imgs = nd.concat(class_imgs, class_neg, dim=0)
validator = Validation(embs[:int(len(embs)/2)],
class_imgs[:int(len(embs)/2)])
validator.train()
acc = validator.accurancy(
embs[int(len(embs)/2):], class_imgs[int(len(embs)/2):])
print(acc)
|
[
"numpy.sum",
"numpy.abs",
"numpy.corrcoef",
"lib.mapar.mapar.Mapar.score",
"scipy.cluster.hierarchy.linkage",
"sklearn.neighbors.DistanceMetric.get_metric",
"numpy.expand_dims",
"numpy.clip",
"utils.labelProcessor.LabelProcessor",
"numpy.min",
"numpy.max",
"sklearn.neighbors.KNeighborsClassifier",
"scipy.spatial.distance.pdist",
"scipy.cluster.hierarchy.cophenet",
"numpy.repeat"
] |
[((1138, 1174), 'utils.labelProcessor.LabelProcessor', 'LabelProcessor', (['size', 'validation_map'], {}), '(size, validation_map)\n', (1152, 1174), False, 'from utils.labelProcessor import LabelProcessor\n'), ((3552, 3575), 'numpy.min', 'np.min', (['a_dists'], {'axis': '(0)'}), '(a_dists, axis=0)\n', (3558, 3575), True, 'import numpy as np\n'), ((3593, 3616), 'numpy.min', 'np.min', (['b_dists'], {'axis': '(0)'}), '(b_dists, axis=0)\n', (3599, 3616), True, 'import numpy as np\n'), ((955, 977), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {}), '()\n', (975, 977), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((3648, 3680), 'numpy.max', 'np.max', (['[b_dist, a_dist]'], {'axis': '(0)'}), '([b_dist, a_dist], axis=0)\n', (3654, 3680), True, 'import numpy as np\n'), ((4238, 4264), 'numpy.clip', 'np.clip', (['pred_labels', '(0)', '(1)'], {}), '(pred_labels, 0, 1)\n', (4245, 4264), True, 'import numpy as np\n'), ((5036, 5082), 'scipy.cluster.hierarchy.linkage', 'sc.cluster.hierarchy.linkage', (['labels', '"""single"""'], {}), "(labels, 'single')\n", (5064, 5082), True, 'import scipy as sc\n'), ((5103, 5134), 'scipy.spatial.distance.pdist', 'sc.spatial.distance.pdist', (['data'], {}), '(data)\n', (5128, 5134), True, 'import scipy as sc\n'), ((5161, 5198), 'scipy.cluster.hierarchy.cophenet', 'sc.cluster.hierarchy.cophenet', (['dendro'], {}), '(dendro)\n', (5190, 5198), True, 'import scipy as sc\n'), ((5288, 5318), 'lib.mapar.mapar.Mapar.score', 'Mapar.score', (['data', 'labels'], {'k': '(1)'}), '(data, labels, k=1)\n', (5299, 5318), False, 'from lib.mapar.mapar import Mapar\n'), ((5345, 5375), 'lib.mapar.mapar.Mapar.score', 'Mapar.score', (['data', 'labels'], {'k': '(5)'}), '(data, labels, k=5)\n', (5356, 5375), False, 'from lib.mapar.mapar import Mapar\n'), ((5403, 5434), 'lib.mapar.mapar.Mapar.score', 'Mapar.score', (['data', 'labels'], {'k': '(10)'}), '(data, labels, k=10)\n', (5414, 5434), False, 'from lib.mapar.mapar import Mapar\n'), ((2193, 2224), 'sklearn.neighbors.DistanceMetric.get_metric', 'DistanceMetric.get_metric', (['"""l2"""'], {}), "('l2')\n", (2218, 2224), False, 'from sklearn.neighbors import DistanceMetric\n'), ((2637, 2673), 'numpy.sum', 'np.sum', (['(xdist[:, 1:] * intra)'], {'axis': '(1)'}), '(xdist[:, 1:] * intra, axis=1)\n', (2643, 2673), True, 'import numpy as np\n'), ((5223, 5254), 'numpy.corrcoef', 'np.corrcoef', (['dists', 'cophe_dists'], {}), '(dists, cophe_dists)\n', (5234, 5254), True, 'import numpy as np\n'), ((2413, 2483), 'numpy.repeat', 'np.repeat', (['y[xind[:, :1]][:, :, cluster]', 'xind[:, 1:].shape[1]'], {'axis': '(1)'}), '(y[xind[:, :1]][:, :, cluster], xind[:, 1:].shape[1], axis=1)\n', (2422, 2483), True, 'import numpy as np\n'), ((2834, 2904), 'numpy.repeat', 'np.repeat', (['y[xind[:, :1]][:, :, cluster]', 'xind[:, 1:].shape[1]'], {'axis': '(1)'}), '(y[xind[:, :1]][:, :, cluster], xind[:, 1:].shape[1], axis=1)\n', (2843, 2904), True, 'import numpy as np\n'), ((3024, 3056), 'numpy.min', 'np.min', (['[inter1, inter2]'], {'axis': '(0)'}), '([inter1, inter2], axis=0)\n', (3030, 3056), True, 'import numpy as np\n'), ((3087, 3161), 'numpy.repeat', 'np.repeat', (['y[xind[:, :1]][:, :, compcluster]', 'xind[:, 1:].shape[1]'], {'axis': '(1)'}), '(y[xind[:, :1]][:, :, compcluster], xind[:, 1:].shape[1], axis=1)\n', (3096, 3161), True, 'import numpy as np\n'), ((3277, 3309), 'numpy.min', 'np.min', (['[inter3, inter4]'], {'axis': '(0)'}), '([inter3, inter4], axis=0)\n', (3283, 3309), True, 'import numpy as np\n'), ((3338, 3372), 'numpy.max', 'np.max', (['[inter12, inter34]'], {'axis': '(0)'}), '([inter12, inter34], axis=0)\n', (3344, 3372), True, 'import numpy as np\n'), ((4439, 4481), 'numpy.abs', 'np.abs', (['(pred_labels[batch] - labels[batch])'], {}), '(pred_labels[batch] - labels[batch])\n', (4445, 4481), True, 'import numpy as np\n'), ((5969, 6020), 'numpy.expand_dims', 'np.expand_dims', (['np_embeddings[np_class_img]'], {'axis': '(0)'}), '(np_embeddings[np_class_img], axis=0)\n', (5983, 6020), True, 'import numpy as np\n'), ((6050, 6079), 'numpy.expand_dims', 'np.expand_dims', (['label'], {'axis': '(0)'}), '(label, axis=0)\n', (6064, 6079), True, 'import numpy as np\n'), ((3453, 3489), 'numpy.sum', 'np.sum', (['(xdist[:, 1:] * inter)'], {'axis': '(1)'}), '(xdist[:, 1:] * inter, axis=1)\n', (3459, 3489), True, 'import numpy as np\n'), ((6151, 6202), 'numpy.expand_dims', 'np.expand_dims', (['np_embeddings[np_class_img]'], {'axis': '(0)'}), '(np_embeddings[np_class_img], axis=0)\n', (6165, 6202), True, 'import numpy as np\n'), ((6316, 6345), 'numpy.expand_dims', 'np.expand_dims', (['label'], {'axis': '(0)'}), '(label, axis=0)\n', (6330, 6345), True, 'import numpy as np\n')]
|