Dataset columns: markdown, code, output, license, path, repo_name.
LSTM with Word2Vec Embedding
w2v = Word2Vec.load("w2v_300features_10minwordcounts_10context") embedding_matrix = w2v.wv.syn0 print("Shape of embedding matrix : ", embedding_matrix.shape) top_words = embedding_matrix.shape[0] #4016 maxlen = 300 batch_size = 62 nb_classes = 4 nb_epoch = 7 # Vectorize X_train and X_test to 2D tensor tokenizer = Tokenizer(nb_words=top_words) #only consider the top_words most frequent words in the corpus tokenizer.fit_on_texts(X_train) # tokenizer.word_index #access word-to-index dictionary of trained tokenizer sequences_train = tokenizer.texts_to_sequences(X_train) sequences_test = tokenizer.texts_to_sequences(X_test) X_train_seq1 = sequence.pad_sequences(sequences_train, maxlen=maxlen) X_test_seq1 = sequence.pad_sequences(sequences_test, maxlen=maxlen) # one-hot encoding of y_train and y_test y_train_seq1 = np_utils.to_categorical(y_train, nb_classes) y_test_seq1 = np_utils.to_categorical(y_test, nb_classes) print('X_train shape:', X_train_seq1.shape) print("========================================") print('X_test shape:', X_test_seq1.shape) print("========================================") print('y_train shape:', y_train_seq1.shape) print("========================================") print('y_test shape:', y_test_seq1.shape) print("========================================") len(X_train_seq1),len(y_train_seq1) embedding_layer = Embedding(embedding_matrix.shape[0], #4016 embedding_matrix.shape[1], #300 weights=[embedding_matrix]) model2 = Sequential() model2.add(embedding_layer) model2.add(LSTM(128, dropout_W=0.2, dropout_U=0.2)) model2.add(Dense(nb_classes)) model2.add(Activation('softmax')) model2.summary() model2.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) model2.fit(X_train_seq1, y_train_seq1, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1) # Model evaluation score = model2.evaluate(X_test_seq1, y_test_seq1, batch_size=batch_size) print('Test loss : {:.4f}'.format(score[0])) print('Test accuracy : {:.4f}'.format(score[1])) print("Size of weight matrix in the embedding layer : ", \ model2.layers[0].get_weights()[0].shape) print("Size of weight matrix in the hidden layer : ", \ model2.layers[1].get_weights()[0].shape) print("Size of weight matrix in the output layer : ", \ model2.layers[2].get_weights()[0].shape)
_____no_output_____
MIT
IMDB Reviews NLP.ipynb
gsingh1629/SentAnalysis
Implementing TF-IDF
------------------------------------
Here we implement TF-IDF (Term Frequency - Inverse Document Frequency) for the spam-ham text data. We will use a hybrid approach: the texts are encoded with scikit-learn's TF-IDF vectorizer, and then we follow the regular TensorFlow logistic regression outline. Creating the TF-IDF vectors requires us to load all the text into memory and count the occurrences of each word before we can start training our model. Because of this, it is not fully implemented in TensorFlow, so we will use scikit-learn to create the TF-IDF embedding, but TensorFlow to fit the logistic model. The TF-IDF weighting is recalled just below; then we start by loading the necessary libraries.
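As a reminder (this formula is not spelled out in the original notebook), the TF-IDF weight of a term $t$ in a document $d$ from a corpus of $N$ documents is, in its most common form,

$$ \mathrm{tfidf}(t, d) = \mathrm{tf}(t, d) \cdot \log\frac{N}{\mathrm{df}(t)}, $$

where $\mathrm{tf}(t, d)$ counts the occurrences of $t$ in $d$ and $\mathrm{df}(t)$ is the number of documents containing $t$. Scikit-learn's `TfidfVectorizer` uses a smoothed variant of the idf term and normalizes each document vector, but the idea is the same: words that are frequent in a document yet rare in the corpus get the largest weights.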
import tensorflow as tf import matplotlib.pyplot as plt import csv import numpy as np import os import string import requests import io import nltk from zipfile import ZipFile from sklearn.feature_extraction.text import TfidfVectorizer from tensorflow.python.framework import ops ops.reset_default_graph()
_____no_output_____
MIT
tests/tf/03_implementing_tf_idf.ipynb
gopala-kr/ds-notebooks
Start a computational graph session.
sess = tf.Session()
_____no_output_____
MIT
tests/tf/03_implementing_tf_idf.ipynb
gopala-kr/ds-notebooks
We set two parameters, `batch_size` and `max_features`. `batch_size` is the size of the batch we will train our logistic model on, and `max_features` is the maximum number of TF-IDF features (words) we will use in our logistic regression.
batch_size = 200 max_features = 1000
_____no_output_____
MIT
tests/tf/03_implementing_tf_idf.ipynb
gopala-kr/ds-notebooks
Check if the data was already downloaded; otherwise, download it and save it for future use.
save_file_name = 'temp_spam_data.csv' if os.path.isfile(save_file_name): text_data = [] with open(save_file_name, 'r') as temp_output_file: reader = csv.reader(temp_output_file) for row in reader: text_data.append(row) else: zip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip' r = requests.get(zip_url) z = ZipFile(io.BytesIO(r.content)) file = z.read('SMSSpamCollection') # Format Data text_data = file.decode() text_data = text_data.encode('ascii',errors='ignore') text_data = text_data.decode().split('\n') text_data = [x.split('\t') for x in text_data if len(x)>=1] # And write to csv with open(save_file_name, 'w') as temp_output_file: writer = csv.writer(temp_output_file) writer.writerows(text_data)
_____no_output_____
MIT
tests/tf/03_implementing_tf_idf.ipynb
gopala-kr/ds-notebooks
We now clean our texts. This will decrease our vocabulary size by converting everything to lower case, removing punctuation and getting rid of numbers.
texts = [x[1] for x in text_data] target = [x[0] for x in text_data] # Relabel 'spam' as 1, 'ham' as 0 target = [1. if x=='spam' else 0. for x in target] # Normalize text # Lower case texts = [x.lower() for x in texts] # Remove punctuation texts = [''.join(c for c in x if c not in string.punctuation) for x in texts] # Remove numbers texts = [''.join(c for c in x if c not in '0123456789') for x in texts] # Trim extra whitespace texts = [' '.join(x.split()) for x in texts]
_____no_output_____
MIT
tests/tf/03_implementing_tf_idf.ipynb
gopala-kr/ds-notebooks
Define the tokenizer function and create the TF-IDF vectors with scikit-learn.
import nltk nltk.download('punkt') def tokenizer(text): words = nltk.word_tokenize(text) return words # Create TF-IDF of texts tfidf = TfidfVectorizer(tokenizer=tokenizer, stop_words='english', max_features=max_features) sparse_tfidf_texts = tfidf.fit_transform(texts)
/srv/venv/lib/python3.6/site-packages/sklearn/feature_extraction/text.py:1089: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`. if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
MIT
tests/tf/03_implementing_tf_idf.ipynb
gopala-kr/ds-notebooks
Split up data set into train/test.
train_indices = np.random.choice(sparse_tfidf_texts.shape[0], round(0.8*sparse_tfidf_texts.shape[0]), replace=False) test_indices = np.array(list(set(range(sparse_tfidf_texts.shape[0])) - set(train_indices))) texts_train = sparse_tfidf_texts[train_indices] texts_test = sparse_tfidf_texts[test_indices] target_train = np.array([x for ix, x in enumerate(target) if ix in train_indices]) target_test = np.array([x for ix, x in enumerate(target) if ix in test_indices])
_____no_output_____
MIT
tests/tf/03_implementing_tf_idf.ipynb
gopala-kr/ds-notebooks
Now we create the variables and placeholders necessary for logistic regression, after which we declare the logistic regression operation. Remember that the sigmoid part of the logistic regression will be applied in the loss function.
# Create variables for logistic regression A = tf.Variable(tf.random_normal(shape=[max_features,1])) b = tf.Variable(tf.random_normal(shape=[1,1])) # Initialize placeholders x_data = tf.placeholder(shape=[None, max_features], dtype=tf.float32) y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32) # Declare logistic model (sigmoid in loss function) model_output = tf.add(tf.matmul(x_data, A), b)
_____no_output_____
MIT
tests/tf/03_implementing_tf_idf.ipynb
gopala-kr/ds-notebooks
Next, we declare the loss function (which applies the sigmoid internally) and the prediction function. The prediction function needs an explicit sigmoid of its own because the model output above is just the raw logit.
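For reference (not in the original text), `tf.nn.sigmoid_cross_entropy_with_logits` computes, for a logit $x$ and a label $z \in \{0, 1\}$,

$$ \ell(x, z) = -\big[ z \log \sigma(x) + (1 - z) \log(1 - \sigma(x)) \big], $$

which is why the raw model output (the logit) is passed straight to the loss, while the prediction op below applies `tf.sigmoid` explicitly before rounding.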
# Declare loss function (Cross Entropy loss) loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=model_output, labels=y_target)) # Prediction prediction = tf.round(tf.sigmoid(model_output)) predictions_correct = tf.cast(tf.equal(prediction, y_target), tf.float32) accuracy = tf.reduce_mean(predictions_correct)
_____no_output_____
MIT
tests/tf/03_implementing_tf_idf.ipynb
gopala-kr/ds-notebooks
Now we create the optimization function and initialize the model variables.
# Declare optimizer my_opt = tf.train.GradientDescentOptimizer(0.0025) train_step = my_opt.minimize(loss) # Initialize Variables init = tf.global_variables_initializer() sess.run(init)
_____no_output_____
MIT
tests/tf/03_implementing_tf_idf.ipynb
gopala-kr/ds-notebooks
Finally, we perform our logistic regression on the 1000 TF-IDF features.
train_loss = [] test_loss = [] train_acc = [] test_acc = [] i_data = [] for i in range(10000): rand_index = np.random.choice(texts_train.shape[0], size=batch_size) rand_x = texts_train[rand_index].todense() rand_y = np.transpose([target_train[rand_index]]) sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y}) # Only record loss and accuracy every 100 generations if (i+1)%100==0: i_data.append(i+1) train_loss_temp = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y}) train_loss.append(train_loss_temp) test_loss_temp = sess.run(loss, feed_dict={x_data: texts_test.todense(), y_target: np.transpose([target_test])}) test_loss.append(test_loss_temp) train_acc_temp = sess.run(accuracy, feed_dict={x_data: rand_x, y_target: rand_y}) train_acc.append(train_acc_temp) test_acc_temp = sess.run(accuracy, feed_dict={x_data: texts_test.todense(), y_target: np.transpose([target_test])}) test_acc.append(test_acc_temp) if (i+1)%500==0: acc_and_loss = [i+1, train_loss_temp, test_loss_temp, train_acc_temp, test_acc_temp] acc_and_loss = [np.round(x,2) for x in acc_and_loss] print('Generation # {}. Train Loss (Test Loss): {:.2f} ({:.2f}). Train Acc (Test Acc): {:.2f} ({:.2f})'.format(*acc_and_loss))
Generation # 500. Train Loss (Test Loss): 0.92 (0.93). Train Acc (Test Acc): 0.39 (0.40) Generation # 1000. Train Loss (Test Loss): 0.71 (0.74). Train Acc (Test Acc): 0.56 (0.56) Generation # 1500. Train Loss (Test Loss): 0.58 (0.62). Train Acc (Test Acc): 0.66 (0.66) Generation # 2000. Train Loss (Test Loss): 0.59 (0.56). Train Acc (Test Acc): 0.67 (0.74) Generation # 2500. Train Loss (Test Loss): 0.58 (0.52). Train Acc (Test Acc): 0.74 (0.77) Generation # 3000. Train Loss (Test Loss): 0.55 (0.49). Train Acc (Test Acc): 0.76 (0.79) Generation # 3500. Train Loss (Test Loss): 0.47 (0.47). Train Acc (Test Acc): 0.80 (0.81) Generation # 4000. Train Loss (Test Loss): 0.47 (0.46). Train Acc (Test Acc): 0.81 (0.83) Generation # 4500. Train Loss (Test Loss): 0.44 (0.45). Train Acc (Test Acc): 0.84 (0.83) Generation # 5000. Train Loss (Test Loss): 0.47 (0.45). Train Acc (Test Acc): 0.82 (0.84) Generation # 5500. Train Loss (Test Loss): 0.46 (0.44). Train Acc (Test Acc): 0.84 (0.84) Generation # 6000. Train Loss (Test Loss): 0.47 (0.44). Train Acc (Test Acc): 0.82 (0.85) Generation # 6500. Train Loss (Test Loss): 0.46 (0.44). Train Acc (Test Acc): 0.84 (0.85) Generation # 7000. Train Loss (Test Loss): 0.45 (0.44). Train Acc (Test Acc): 0.86 (0.85) Generation # 7500. Train Loss (Test Loss): 0.48 (0.44). Train Acc (Test Acc): 0.84 (0.85) Generation # 8000. Train Loss (Test Loss): 0.37 (0.44). Train Acc (Test Acc): 0.88 (0.85) Generation # 8500. Train Loss (Test Loss): 0.42 (0.44). Train Acc (Test Acc): 0.88 (0.85) Generation # 9000. Train Loss (Test Loss): 0.38 (0.44). Train Acc (Test Acc): 0.89 (0.85) Generation # 9500. Train Loss (Test Loss): 0.49 (0.44). Train Acc (Test Acc): 0.81 (0.85) Generation # 10000. Train Loss (Test Loss): 0.50 (0.44). Train Acc (Test Acc): 0.84 (0.85)
MIT
tests/tf/03_implementing_tf_idf.ipynb
gopala-kr/ds-notebooks
Here is matplotlib code to plot the loss and accuracies.
# Plot loss over time plt.plot(i_data, train_loss, 'k-', label='Train Loss') plt.plot(i_data, test_loss, 'r--', label='Test Loss', linewidth=4) plt.title('Cross Entropy Loss per Generation') plt.xlabel('Generation') plt.ylabel('Cross Entropy Loss') plt.legend(loc='upper right') plt.show() # Plot train and test accuracy plt.plot(i_data, train_acc, 'k-', label='Train Set Accuracy') plt.plot(i_data, test_acc, 'r--', label='Test Set Accuracy', linewidth=4) plt.title('Train and Test Accuracy') plt.xlabel('Generation') plt.ylabel('Accuracy') plt.legend(loc='lower right') plt.show()
_____no_output_____
MIT
tests/tf/03_implementing_tf_idf.ipynb
gopala-kr/ds-notebooks
Table of Contents

1. Oral modelling text - Agrégation, Computer Science option
   1. Agrégation preparation - ENS de Rennes, 2016-17
   2. About this document
   3. Implementation: a good data structure for intervals and interval graphs; the interval graph colouring algorithm; an algorithm to compute the maximum stable set of an interval graph
   4. Examples: Who killed the Duke of Densmore?; the fridge problem; the CSA problem; the dining-car problem
   5. Bonus: visualisation of the graphs defined in the examples
   6. Conclusion

# Oral modelling text - Agrégation, Computer Science option

## Agrégation preparation - ENS de Rennes, 2016-17

- *Date*: 3 April 2017
- *Author*: [Lilian Besson](https://GitHub.com/Naereen/notebooks/)
- *Text*: 2006 past exam text, "Crime Parfait" ("Perfect Crime")

## About this document

- This is a *proposed* correction, partial and probably non-optimal, for the implementation part of a [past modelling text of the mathematics agrégation, computer science option](http://Agreg.org/Textes/).
- This document is a [Jupyter notebook](https://www.Jupyter.org/), and is [open-source under the MIT Licence on GitHub](https://github.com/Naereen/notebooks/tree/master/agreg/), like the other modelling-text solutions [I](https://GitHub.com/Naereen) wrote this year.
- The implementation is done in OCaml, version 4+:
Sys.command "ocaml -version";;
The OCaml toplevel, version 4.04.2
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
----

## Implementation

The implementation question was question 2), on page 7.

> "Propose a data structure suited to representing an interval graph of which a representation as a family of intervals is known. Implement the interval graph colouring algorithm efficiently and illustrate this algorithm on a well-chosen application cited in the text."

We will therefore first define a data structure for a family of intervals and for an interval graph, together with a function converting one into the other. This will make it easy to define the various examples from the text, and to solve them.

### A good data structure for intervals and interval graphs

- For real-valued **intervals**, we restrict ourselves, for convenience, to integer values.
type intervalle = (int * int);; type intervalles = intervalle list;;
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
- For **interval graphs**, we use a simple adjacency-list representation, which is easier to set up in OCaml than an adjacency-matrix representation. Here, all our graphs have vertices $0 \dots n - 1$.
type sommet = int;; type voisins = sommet list;; type graphe_intervalle = voisins list;;
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
> *Note:* I preferred to keep a very simple structure for the intervals, the interval graphs and the colourings, but we lose a bit of readability in the colouring function.
>
> Implicitly, as soon as a list of intervals of size $n$ is fixed, they are numbered from $0$ to $n-1$. The graph `g` has vertices $0 \dots n - 1$, and the colouring is a simple array of colours `c` (i.e., of integers), `c[i]` giving the colour of interval number `i`.
>
> A smarter solution would have been to use association tables, cf. OCaml's [Map](http://caml.inria.fr/pub/docs/manual-ocaml/libref/Map.html) module, and the code proposed by Julien during his oral.

- We can quickly write a function that converts a list of intervals (`intervalles`) into an interval graph. We create the graph's vertices via `index_intvls`, which maps an interval to its index, and then add the edges to the graph according to the constraints defining an interval graph: $$ \forall I, I' \in V, (I,I') \in E \Leftrightarrow I \neq I' \;\text{and}\; I \cap I' \neq \emptyset $$ So with intervals $I = [x,y]$ and $I' = [a,b]$, this gives: $$ \forall I = [x,y], I' = [a,b] \in V, (I,I') \in E \Leftrightarrow (x,y) \neq (a,b) \;\text{and}\; \neg (b < x \;\text{or}\; y < a) $$
let graphe_depuis_intervalles (intvls : intervalles) : graphe_intervalle = let n = List.length intvls in (* Nomber de sommet *) let array_intvls = Array.of_list intvls in (* Tableau des intervalles *) let index_intvls = Array.to_list ( Array.init n (fun i -> ( array_intvls.(i), i) (* Associe un intervalle à son indice *) ) ) in let gr = List.map (fun (a, b) -> (* Pour chaque intervalle [a, b] *) List.filter (fun (x, y) -> (* On ajoute [x, y] s'il intersecte [a, b] *) (x, y) <> (a, b) (* Intervalle différent *) && not ( (b < x) || (y < a) ) (* pas x---y a---b ni a---b x---y *) ) intvls ) intvls in (* On transforme la liste de liste d'intervalles en une liste de liste d'entiers *) List.map (fun voisins -> List.map (fun sommet -> (* Grace au tableau index_intvls *) List.assoc sommet index_intvls ) voisins ) gr ;;
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
### Interval graph colouring algorithm

> Given a graph $G = (V, E)$, we look for a minimal integer $n$ and a function $c : V \to \{1, \cdots, n\}$ such that if $(v_1, v_2) \in E$, then $c(v_1) \neq c(v_2)$.

We follow the text's hints to implement this algorithm easily.

> A simple *heuristic* for this problem is the following greedy algorithm:
> - while there remain uncoloured vertices,
>   + pick one,
>   + and colour it with the smallest integer that does not appear among its already-coloured neighbours.
>
> By choosing the next vertex to colour well at each step, this heuristic turns out to be optimal for interval graphs.

We can first define a data type for a colouring, as a list of pairs of an interval and a colour. This way, `List.assoc` can be used to give the colour of each interval.
type couleur = int;; type coloriage = (intervalle * couleur) list;; let coloriage_depuis_couleurs (intvl : intervalles) (c : couleur array) : coloriage = Array.to_list (Array.init (Array.length c) (fun i -> (List.nth intvl i), c.(i)));; let quelle_couleur (intvl : intervalle) (colors : coloriage) = List.assoc intvl colors ;;
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
Next, the partial order $\prec_i$ on intervals is defined as follows: $$ I = (a,b) \prec_i J=(x, y) \Longleftrightarrow a < x $$
let ordre_partiel ((a, _) : intervalle) ((x, _) : intervalle) = a < x ;;
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
We then need a function that computes the infimum of $\mathbb{N} \setminus \{x : x \in \mathrm{valeurs} \}$:
let inf_N_minus valeurs = let res = ref 0 in (* Très important d'utiliser une référence ! *) while List.mem !res valeurs do incr res; done; !res ;;
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
We quickly check it on two examples:
inf_N_minus [0; 1; 3];; (* 2 *) inf_N_minus [0; 1; 2; 3; 4; 5; 6; 10];; (* 7 *)
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
Finally, we need a function that finds the interval $I \in V$, minimal for $\prec_i$, such that $c(I) = +\infty$.
let trouve_min_interval intvl (c : coloriage) (inf : couleur) = let colorie inter = quelle_couleur inter c in (* D'abord on extraie {I : c(I) = +oo} *) let intvl2 = List.filter (fun i -> (colorie i) = inf) intvl in (* Puis on parcourt la liste et on garde le plus petit pour l'ordre *) let i0 = ref 0 in for j = 1 to (List.length intvl2) - 1 do if ordre_partiel (List.nth intvl2 j) (List.nth intvl2 !i0) then i0 := j; done; List.nth intvl2 !i0; ;;
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
And all of this lets us finish the algorithm, as described in the text:
let coloriage_intervalles (intvl : intervalles) : coloriage = let n = List.length intvl in (* Nombre d'intervalles *) let array_intvls = Array.of_list intvl in (* Tableau des intervalles *) let index_intvls = Array.to_list ( Array.init n (fun i -> ( array_intvls.(i), i) (* Associe un intervalle à son indice *) ) ) in let gr = graphe_depuis_intervalles intvl in let inf = n + 10000 in (* Grande valeur, pour +oo *) let c = Array.make n inf in (* Liste des couleurs, c(I) = +oo pour tout I *) let maxarray = Array.fold_left max (-inf - 10000) in (* Initialisé à -oo *) while maxarray c = inf do (* Il reste un I in V tel que c(I) = +oo *) begin (* C'est la partie pas élégante *) (* On récupère le coloriage depuis la liste de couleurs actuelle *) let coloriage = (coloriage_depuis_couleurs intvl c) in (* Puis la fonction [colorie] pour associer une couleur à un intervalle *) let colorie inter = quelle_couleur inter coloriage in (* On choisit un I, minimal pour ordre_partiel, tel que c(I) = +oo *) let inter = trouve_min_interval intvl coloriage inf in (* On trouve son indice *) let i = List.assoc inter index_intvls in (* On trouve les voisins de i dans le graphe *) let adj_de_i = List.nth gr i in (* Puis les voisins de I en tant qu'intervalles *) let adj_de_I = List.map (fun j -> List.nth intvl j) adj_de_i in (* Puis on récupère leurs couleurs *) let valeurs = List.map colorie adj_de_I in (* c(I) = inf(N - {c(J) : J adjacent a I} ) *) c.(i) <- inf_N_minus valeurs; end; done; coloriage_depuis_couleurs intvl c; ;;
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
Once we have a colouring, with values in $0,\dots,k$, we recover the number of colours as $1 + \max c$, i.e., $k+1$.
let max_valeurs = List.fold_left max 0;; let nombre_chromatique (colorg : coloriage) : int = 1 + max_valeurs (List.map snd colorg) ;;
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
### Algorithm to compute a *maximum stable set* of an interval graph

Here we answer question 7.

> "Propose an efficient algorithm to build a maximum stable set (i.e., a set of independent vertices) of an interval graph of which an interval representation is known. One may look for the condition under which the interval whose right endpoint is leftmost belongs to a maximum stable set."

**FIXME, I have not done it yet.** (A possible greedy approach is sketched at the very end of this notebook.)

----

## Examples

We treat here the introductory example, as well as the three other proposed examples.

### Who killed the Duke of Densmore?

> We do not restate the problem, only the data:
> - Ann says she saw Betty, Cynthia, Emily, Felicia and Georgia.
> - Betty says she saw Ann, Cynthia and Helen.
> - Cynthia says she saw Ann, Betty, Diana, Emily and Helen.
> - Diana says she saw Cynthia and Emily.
> - Emily says she saw Ann, Cynthia, Diana and Felicia.
> - Felicia says she saw Ann and Emily.
> - Georgia says she saw Ann and Helen.
> - Helen says she saw Betty, Cynthia and Georgia.

Transcribed as a graph, this gives:
(* On définit des entiers, c'est plus simple *) let ann = 0 and betty = 1 and cynthia = 2 and diana = 3 and emily = 4 and felicia = 5 and georgia = 6 and helen = 7;; let graphe_densmore = [ [betty; cynthia; emily; felicia; georgia]; (* Ann *) [ann; cynthia; helen]; (* Betty *) [ann; betty; diana; emily; helen]; (* Cynthia *) [cynthia; emily]; (* Diana *) [ann; cynthia; diana; felicia]; (* Emily *) [ann; emily]; (* Felicia *) [ann; helen]; (* Georgia *) [betty; cynthia; georgia] (* Helen *) ];;
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
![images/densmore.png](images/densmore.png)
> Figure 1. Interval graph for the murder of the Duke of Densmore.

With first names instead of numbers, this gives:

![images/densmore2.png](images/densmore2.png)
> Figure 2. Interval graph for the murder of the Duke of Densmore.

#### How to solve this problem?

> We must use the characterisation from theorem 2 of the text, and the definition of perfect graphs.

- Definition + Theorem 2 (point 1): We know that an interval graph is perfect, and hence so are all its induced subgraphs. The characterisation via chords on cycles of length $\geq 4$ tells us that a quadrilateral (a cycle of length $4$) is not an interval graph. Therefore a graph containing an induced quadrilateral cannot be an interval graph. So, in this example, since we have the two quadrilaterals $A B H G$ and $A G H C$, we deduce that $A$, $G$ or $H$ lied.
- Theorem 2 (point 2): Next, if we remove $G$ or $H$, the graph still does not become an interval graph, by the following considerations: its complement is not a comparability graph. Indeed, if we remove $G$ for example, then $A$, $H$ and $D$ form a clique in the complement $\overline{G}$ of $G$, and the irreflexivity of a possible relation $R$ makes this impossible. Likewise if we remove $H$, with $G$, $B$ and $D$ forming a clique in $\overline{G}$. On the other hand, if we remove $A$, the graph becomes chordal (and a comparability graph, but that is harder to see!). So only $A$ remains as a potential liar.

> "But... it seems hard to program an automatic resolution of this problem?"

In fact, it suffices to write a function that checks whether a graph is an interval graph, then to try removing each vertex as long as the graph is not an interval graph. If the graph becomes valid by removing a single vertex, and only one such vertex works, then there is a single liar in the graph, and hence a single culprit!

#### Solution

So it is $A$, i.e., Ann, the only liar and therefore the culprit.

> It is not a problem if you did not manage to answer this during the oral!
> On the contrary, you are allowed to step away from the text's initial problem!
> A well-explained solution can be found in [this video](https://youtu.be/ZGhSyVvOelg).

### The fridge problem

> In a large hospital, cuts in public funding push the manager of the immunology department to save on the number of fridges to buy for storing vaccines. Roughly speaking, the following vaccines must be stored:

| Number | Vaccine name | Storage temperature |
| :----: | :----------- | ------------------: |
| 0 | Measles-Mumps-Rubella (MMR) | $4 \cdots 12$ °C |
| 1 | BCG | $8 \cdots 15$ °C |
| 2 | Diphtheria-Tetanus-Pertussis (DTP) | $0 \cdots 20$ °C |
| 3 | Polio | $2 \cdots 3$ °C |
| 4 | Hepatitis B | $-3 \cdots 6$ °C |
| 5 | Yellow fever | $-10 \cdots 10$ °C |
| 6 | Smallpox | $6 \cdots 20$ °C |
| 7 | Chickenpox | $-5 \cdots 2$ °C |
| 8 | Anti-Haemophilus | $-2 \cdots 8$ °C |

> How many fridges must the manager buy, and at which temperatures should they be set?
let vaccins : intervalles = [ (4, 12); (8, 15); (0, 20); (2, 3); (-3, 6); (-10, 10); (6, 20); (-5, 2); (-2, 8) ]
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
Which we can easily visualise as a graph:
let graphe_vaccins = graphe_depuis_intervalles vaccins;;
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
![images/vaccins.png](images/vaccins.png)
> Figure 3. Interval graph for the fridges-and-vaccines problem.

With intervals instead of numbers:

![images/vaccins2.png](images/vaccins2.png)
> Figure 4. Interval graph for the fridges-and-vaccines problem.

We can get a minimal colouring for this graph:
coloriage_intervalles vaccins;;
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
The largest colour is `5`, so the chromatic number of this graph is `6`.
nombre_chromatique (coloriage_intervalles vaccins);;
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
However, the solution to the fridges-and-vaccines problem lies in the clique cover number $k(G)$, not in the chromatic number $\chi(G)$. It can be solved by answering question 7, which asked for an algorithm building a maximum stable set of an interval graph.

### The CSA problem

> The Conseil Supérieur de l'Audiovisuel must allocate new broadcast frequency bands for underground digital stereophony (SNS). As this cutting-edge technology is still experimental, the devices able to broadcast can only use the following FM frequency bands:

| Frequency band | Interval (kHz) |
| :------------: | -------------: |
| 0 | $32 \cdots 36$ |
| 1 | $24 \cdots 30$ |
| 2 | $28 \cdots 33$ |
| 3 | $22 \cdots 26$ |
| 4 | $20 \cdots 25$ |
| 5 | $30 \cdots 33$ |
| 6 | $31 \cdots 34$ |
| 7 | $27 \cdots 31$ |

> Which frequency bands should be kept to allow as many devices as possible to be used, knowing that two devices whose frequency bands properly intersect (not just at the endpoints) are incompatible?
let csa : intervalles = [ (32, 36); (24, 30); (28, 33); (22, 26); (20, 25); (30, 33); (31, 34); (27, 31) ];; let graphe_csa = graphe_depuis_intervalles csa;;
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
![images/csa.png](images/csa.png)
> Figure 5. Interval graph for the CSA problem.

With intervals instead of numbers:

![images/csa2.png](images/csa2.png)
> Figure 6. Interval graph for the CSA problem.

We can get a minimal colouring for this graph:
coloriage_intervalles csa;;
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
The largest colour is `3`, so the chromatic number of this graph is `4`.
nombre_chromatique (coloriage_intervalles csa);;
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
However, the solution to the CSA problem lies in the clique cover number $k(G)$, not in the chromatic number $\chi(G)$. It can be solved by answering question 7, which asked for an algorithm building a maximum stable set of an interval graph.

### The dining-car problem

> The head conductor of the Orient Express must fit out the dining car before the train's departure. This car is rather small and must be cluttered with as few tables as possible, but enough tables must be provided to seat everyone who has booked:

| Number | Character(s) | Dinner time | In minutes |
| :----- | :----------- | :---------: | :--------: |
| 0 | Baron and Baroness Von Haussplatz | 19:30 .. 20:14 | $1170 \cdots 1214$ |
| 1 | General Cook | 20:30 .. 21:59 | $1230 \cdots 1319$ |
| 2 | The Steinberg couple | 19:00 .. 19:59 | $1140 \cdots 1199$ |
| 3 | The Duchess of Colombart | 20:15 .. 20:59 | $1215 \cdots 1259$ |
| 4 | The Marquis de Carquamba | 21:00 .. 21:59 | $1260 \cdots 1319$ |
| 5 | La Vociafiore | 19:15 .. 20:29 | $1155 \cdots 1229$ |
| 6 | Colonel Ferdinand | 20:00 .. 20:59 | $1200 \cdots 1259$ |

> How many tables must the head conductor provide?
let restaurant = [ (1170, 1214); (1230, 1319); (1140, 1199); (1215, 1259); (1260, 1319); (1155, 1229); (1200, 1259) ];; let graphe_restaurant = graphe_depuis_intervalles restaurant;;
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
![images/restaurant.png](images/restaurant.png)
> Figure 7. Interval graph for the dining-car problem.

With intervals instead of numbers:

![images/restaurant2.png](images/restaurant2.png)
> Figure 8. Interval graph for the dining-car problem.
coloriage_intervalles restaurant;;
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
The largest colour is `2`, so the chromatic number of this graph is `3`.
nombre_chromatique (coloriage_intervalles restaurant);;
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
#### Solution via the interval graph colouring algorithm

For this problem, the solution is indeed given by the chromatic number. The colour is the table number of each passenger (or pair of passengers), so the minimal number of tables to install in the dining car is exactly the chromatic number.

One possible solution, with **3 tables**:

| Number | Character(s) | Dinner time | Table number |
| :----- | :----------- | :---------: | :----------: |
| 0 | Baron and Baroness Von Haussplatz | 19:30 .. 20:14 | 2 |
| 1 | General Cook | 20:30 .. 21:59 | 1 |
| 2 | The Steinberg couple | 19:00 .. 19:59 | 0 |
| 3 | The Duchess of Colombart | 20:15 .. 20:59 | 2 |
| 4 | The Marquis de Carquamba | 21:00 .. 21:59 | 0 |
| 5 | La Vociafiore | 19:15 .. 20:29 | 1 |
| 6 | Colonel Ferdinand | 20:00 .. 20:59 | 0 |

We check by hand that this solution works. Each passenger will, however, have to leave their table on the dot!

We can display the solution with a coloured graph. Table `0` will be red, `1` blue and `2` yellow:

![images/restaurant3.png](images/restaurant3.png)
> Figure 9. Solution to the dining-car problem.

----

## Bonus?

### Visualisation of the graphs defined in the examples

- I use a small, easy-to-write function that converts a graph (`int list list`) into a string in the [DOT Graph](http://www.graphviz.org/doc/info/lang.html) format.
- Then a command-line call to `dot -Tpng ...` converts this graph into an image, which I then include manually.
(** Transforme un [graph] en une chaîne représentant un graphe décrit par le langage DOT, voir http://en.wikipedia.org/wiki/DOT_language pour plus de détails sur ce langage. @param graphname Donne le nom du graphe tel que précisé pour DOT @param directed Vrai si le graphe doit être dirigé (c'est le cas ici) faux sinon. Change le style des arêtes ([->] ou [--]) @param verb Affiche tout dans le terminal. @param onetoone Si on veut afficher le graphe en mode carré (échelle 1:1). Parfois bizarre, parfois génial. *) let graph_to_dotgraph ?(graphname = "graphname") ?(directed = false) ?(verb = false) ?(onetoone = false) (glist : int list list) = let res = ref "" in let log s = if verb then print_string s; (* Si [verb] affiche dans le terminal le résultat du graphe. *) res := !res ^ s in log (if directed then "digraph " else "graph "); log graphname; log " {"; if onetoone then log "\n size=\"1,1\";"; let g = Array.of_list (List.map Array.of_list glist) in (* On affiche directement les arc, un à un. *) for i = 0 to (Array.length g) - 1 do for j = 0 to (Array.length g.(i)) - 1 do if i < g.(i).(j) then log ("\n \"" ^ (string_of_int i) ^ "\" " ^ (if directed then "->" else "--") ^ " \"" ^ (string_of_int g.(i).(j)) ^ "\"" ); done; done; log "\n}\n// generated by OCaml with the function graphe_to_dotgraph."; !res;; (** Fonction ecrire_sortie : plus pratique que output. *) let ecrire_sortie monoutchanel machaine = output monoutchanel machaine 0 (String.length machaine); flush monoutchanel;; (** Fonction ecrire_dans_fichier : pour écrire la chaine dans le fichier à l'adresse renseignée. *) let ecrire_dans_fichier ~chaine ~adresse = let mon_out_channel = open_out adresse in ecrire_sortie mon_out_channel chaine; close_out mon_out_channel;; let s_graphe_densmore = graph_to_dotgraph ~graphname:"densmore" ~directed:false ~verb:false graphe_densmore;; let s_graphe_vaccins = graph_to_dotgraph ~graphname:"vaccins" ~directed:false ~verb:false graphe_vaccins;; let s_graphe_csa = graph_to_dotgraph ~graphname:"csa" ~directed:false ~verb:false graphe_csa;; let s_graphe_restaurant = graph_to_dotgraph ~graphname:"restaurant" ~directed:false ~verb:false graphe_restaurant;; ecrire_dans_fichier ~chaine:s_graphe_densmore ~adresse:"/tmp/densmore.dot" ;; (* Sys.command "fdp -Tpng /tmp/densmore.dot > images/densmore.png";; *) ecrire_dans_fichier ~chaine:s_graphe_vaccins ~adresse:"/tmp/vaccins.dot" ;; (* Sys.command "fdp -Tpng /tmp/vaccins.dot > images/vaccins.png";; *) ecrire_dans_fichier ~chaine:s_graphe_csa ~adresse:"/tmp/csa.dot" ;; (* Sys.command "fdp -Tpng /tmp/csa.dot > images/csa.png";; *) ecrire_dans_fichier ~chaine:s_graphe_restaurant ~adresse:"/tmp/restaurant.dot" ;; (* Sys.command "fdp -Tpng /tmp/restaurant.dot > images/restaurant.png";; *)
_____no_output_____
MIT
agreg/Crime_parfait.ipynb
doc22940/notebooks-2
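The following is a possible sketch (not part of the original correction, which leaves question 7 as a FIXME) of the classic greedy algorithm for a maximum stable set of an interval graph: repeatedly keep the interval whose right endpoint is leftmost among the remaining intervals, then discard every interval intersecting it. For interval graphs this greedy choice is known to be optimal, and the size of the resulting stable set equals the clique cover number $k(G)$ used in the fridge and CSA problems. It reuses the types `intervalle` and `intervalles` defined above; the function name `stable_maximum` is my own.

```ocaml
(* Hedged sketch: greedy maximum stable set of an interval graph,
   given its interval representation. *)
let stable_maximum (intvls : intervalles) : intervalles =
  (* Sort intervals by increasing right endpoint. *)
  let tries = List.sort (fun (_, b) (_, b') -> compare b b') intvls in
  let rec aux acc derniere_fin = function
    | [] -> List.rev acc
    | (a, b) :: reste ->
        if a > derniere_fin
        (* Disjoint from every kept interval: keep it. *)
        then aux ((a, b) :: acc) b reste
        (* Otherwise it intersects a kept interval: discard it. *)
        else aux acc derniere_fin reste
  in
  aux [] min_int tries
;;

(* For example, [stable_maximum vaccins] would give a largest set of pairwise
   non-intersecting temperature ranges; its size is the number of fridges needed. *)
```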
# Matrix product with a sparse matrix

Dictionaries are a fairly convenient way to represent sparse matrices by keeping only the non-zero coefficients. How, then, do we write the matrix product?
from jyquickhelper import add_notebook_menu add_notebook_menu()
_____no_output_____
MIT
_doc/notebooks/td1a/matrix_dictionary.ipynb
Jerome-maker/ensae_teaching_cs
## Sparse matrix and dictionary

A [sparse matrix (fr)](https://fr.wikipedia.org/wiki/Matrice_creuse) or [sparse matrix (en)](https://en.wikipedia.org/wiki/Sparse_matrix) is made up mostly of zeros. We use a dictionary holding only the non-zero coefficients; for instance, the 2x2 identity matrix would be stored as `{(0, 0): 1, (1, 1): 1}`. The following function creates a random matrix.
import random def random_matrix(n, m, ratio=0.1): mat = {} nb = min(n * m, int(ratio * n * m + 0.5)) while len(mat) < nb: i = random.randint(0, n-1) j = random.randint(0, m-1) mat[i, j] = 1 return mat mat = random_matrix(3, 3, ratio=0.5) mat
_____no_output_____
MIT
_doc/notebooks/td1a/matrix_dictionary.ipynb
Jerome-maker/ensae_teaching_cs
## Computing the dimension

To obtain the dimension of the matrix, we have to iterate over all the keys of the dictionary.
def dimension(mat): maxi, maxj = 0, 0 for k in mat: maxi = max(maxi, k[0]) maxj = max(maxj, k[1]) return maxi + 1, maxj + 1 dimension(mat)
_____no_output_____
MIT
_doc/notebooks/td1a/matrix_dictionary.ipynb
Jerome-maker/ensae_teaching_cs
This function has the drawback of returning a wrong value when the matrix has no non-zero coefficient on its last row or last column; for instance, `dimension({(0, 0): 1})` returns `(1, 1)` even if the matrix was meant to be larger. Whether that matters depends on the intended use.

## Classic matrix product

We implement the classic, three-loop matrix product.
def produit_classique(m1, m2): dim1 = dimension(m1) dim2 = dimension(m2) if dim1[1] != dim2[0]: raise Exception("Impossible de multiplier {0}, {1}".format( dim1, dim2)) res = {} for i in range(dim1[0]): for j in range(dim2[1]): s = 0 for k in range(dim1[1]): s += m1.get((i, k), 0) * m2.get((k, j), 0) if s != 0: # Pour éviter de garder les coefficients non nuls. res[i, j] = s return res simple = {(0, 1): 1, (1, 0): 1} produit_classique(simple, simple)
_____no_output_____
MIT
_doc/notebooks/td1a/matrix_dictionary.ipynb
Jerome-maker/ensae_teaching_cs
On the random matrix...
produit_classique(mat, mat)
_____no_output_____
MIT
_doc/notebooks/td1a/matrix_dictionary.ipynb
Jerome-maker/ensae_teaching_cs
## A more elegant matrix product

Do we really need to know the dimensions of the matrices to compute the matrix product? Can't we simply loop over the non-zero coefficients?
def produit_elegant(m1, m2): res = {} for (i, k1), v1 in m1.items(): if v1 == 0: continue for (k2, j), v2 in m2.items(): if v2 == 0: continue if k1 == k2: if (i, j) in res: res[i, j] += v1 * v2 else : res[i, j] = v1 * v2 return res produit_elegant(simple, simple) produit_elegant(mat, mat)
_____no_output_____
MIT
_doc/notebooks/td1a/matrix_dictionary.ipynb
Jerome-maker/ensae_teaching_cs
## Timing

A priori, the second method is faster, since its cost is proportional to the product of the numbers of non-zero coefficients in the two matrices. Let's check.
bigmat = random_matrix(100, 100) %timeit produit_classique(bigmat, bigmat) %timeit produit_elegant(bigmat, bigmat)
157 ms ± 9.33 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
MIT
_doc/notebooks/td1a/matrix_dictionary.ipynb
Jerome-maker/ensae_teaching_cs
That is much better. But can we do better still?

## Dictionaries of dictionaries

It sounds a bit like [mille millions de mille sabords](https://fr.wikipedia.org/wiki/Vocabulaire_du_capitaine_Haddock) (Captain Haddock's "billions of blue blistering barnacles"), but the dictionary we created has coordinate pairs as keys and the coefficients as values. The ``produit_elegant`` function still performs many useless iterations, in a sense, since most of the pairs of non-zero coefficients it examines do not share the common index needed to contribute to the product. Can we avoid that? What if we used dictionaries of dictionaries: ``{ row : { column : value } }``?
def matrice_dicodico(mat): res = {} for (i, j), v in mat.items(): if i not in res: res[i] = {j: v} else: res[i][j] = v return res matrice_dicodico(simple)
_____no_output_____
MIT
_doc/notebooks/td1a/matrix_dictionary.ipynb
Jerome-maker/ensae_teaching_cs
Can we adapt the elegant matrix product? We still need to match the column indices of the first matrix with the row indices of the second. As it stands this is a problem, since the column indices cannot be reached without first knowing the row indices, unless we swap that order when building the dictionary for one of the two matrices (below, the first matrix is stored column-first with `ligne=False`).
def matrice_dicodico_lc(mat, ligne=True): res = {} if ligne: for (i, j), v in mat.items(): if i not in res: res[i] = {j: v} else: res[i][j] = v else: for (j, i), v in mat.items(): if i not in res: res[i] = {j: v} else: res[i][j] = v return res matrice_dicodico_lc(simple, ligne=False)
_____no_output_____
MIT
_doc/notebooks/td1a/matrix_dictionary.ipynb
Jerome-maker/ensae_teaching_cs
Now that this is done, we can think about the matrix product again.
def produit_elegant_rapide(m1, m2): res = {} for k, vs in m1.items(): if k in m2: for i, v1 in vs.items(): for j, v2 in m2[k].items(): if (i, j) in res: res[i, j] += v1 * v2 else : res[i, j] = v1 * v2 return res m1 = matrice_dicodico_lc(simple, ligne=False) m2 = matrice_dicodico_lc(simple) produit_elegant_rapide(m1, m2) m1 = matrice_dicodico_lc(mat, ligne=False) m2 = matrice_dicodico_lc(mat) produit_elegant_rapide(m1, m2)
_____no_output_____
MIT
_doc/notebooks/td1a/matrix_dictionary.ipynb
Jerome-maker/ensae_teaching_cs
We time it on a large matrix.
m1 = matrice_dicodico_lc(bigmat, ligne=False) m2 = matrice_dicodico_lc(bigmat) %timeit produit_elegant_rapide(m1, m2)
6.46 ms ± 348 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
MIT
_doc/notebooks/td1a/matrix_dictionary.ipynb
Jerome-maker/ensae_teaching_cs
Much faster: there is no longer any need to test for non-zero coefficients. The comparison is not entirely fair, though, since both matrices have to be converted before doing the computation. What if we included that conversion in the timing?
def produit_elegant_rapide_transformation(mat1, mat2): m1 = matrice_dicodico_lc(mat1, ligne=False) m2 = matrice_dicodico_lc(mat2) return produit_elegant_rapide(m1, m2) produit_elegant_rapide_transformation(simple, simple) %timeit produit_elegant_rapide_transformation(bigmat, bigmat)
7.17 ms ± 635 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
MIT
_doc/notebooks/td1a/matrix_dictionary.ipynb
Jerome-maker/ensae_teaching_cs
In the end it is worth it... but is that the case for all matrices?
%matplotlib inline import time mesures = [] for ratio in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.99]: big = random_matrix(100, 100, ratio=ratio) t1 = time.perf_counter() produit_elegant_rapide_transformation(big, big) t2 = time.perf_counter() dt = (t2 - t1) obs = {"dicodico": dt, "ratio": ratio} if ratio <= 0.3: # après c'est trop lent t1 = time.perf_counter() produit_elegant(big, big) t2 = time.perf_counter() dt = (t2 - t1) obs["dico"] = dt t1 = time.perf_counter() produit_classique(big, big) t2 = time.perf_counter() dt = (t2 - t1) obs["classique"] = dt mesures.append(obs) print(obs) from pandas import DataFrame df = DataFrame(mesures) ax = df.plot(x="ratio", y="dicodico", label="dico dico") df.plot(x="ratio", y="dico", label="dico", ax=ax) df.plot(x="ratio", y="classique", label="classique", ax=ax) ax.legend();
_____no_output_____
MIT
_doc/notebooks/td1a/matrix_dictionary.ipynb
Jerome-maker/ensae_teaching_cs
__Callbacks API__

A __callback__ is an object that can perform actions at various stages of training (e.g. at the start or end of an epoch, before or after a single batch, etc.).

_You can use callbacks to:_

- Write TensorBoard logs after every batch of training to monitor your metrics.
- Periodically save your model to disk.
- Do early stopping.
- Get a view on internal states and statistics of a model during training.
- ...and more.

__Usage of callbacks via the built-in `fit()` loop__

You can pass a list of callbacks (as the keyword argument `callbacks`) to the `.fit()` method of a model:

```
my_callbacks = [
    tf.keras.callbacks.EarlyStopping(patience=2),
    tf.keras.callbacks.ModelCheckpoint(filepath='model.{epoch:02d}-{val_loss:.2f}.h5'),
    tf.keras.callbacks.TensorBoard(log_dir='./logs'),
]
model.fit(dataset, epochs=10, callbacks=my_callbacks)
```

The relevant methods of the callbacks will then be called at each stage of the training.

__Using custom callbacks__

Creating new callbacks is a simple and powerful way to customize a training loop. Learn more about creating new callbacks in the guide [__Writing your own Callbacks__](https://keras.io/guides/writing_your_own_callbacks/), and refer to the documentation for the [__base Callback class__](https://keras.io/api/callbacks/base_callback/).

__Available callbacks__

- Base Callback class
- ModelCheckpoint
- TensorBoard
- EarlyStopping
- LearningRateScheduler
- ReduceLROnPlateau
- RemoteMonitor
- LambdaCallback
- TerminateOnNaN
- CSVLogger
- ProgbarLogger

> [__Writing your own callbacks__](https://www.tensorflow.org/guide/keras/custom_callback)
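To make the "various stages" concrete, here is a minimal sketch (not from the original notebook) of a custom callback hooking into several of them; the method names come from the `tf.keras.callbacks.Callback` base class, while the class name `StageLogger` is my own.

```python
import tensorflow as tf

class StageLogger(tf.keras.callbacks.Callback):
    """Hypothetical example: print a message at several stages of training."""

    def on_train_begin(self, logs=None):
        print("Training is starting")

    def on_epoch_begin(self, epoch, logs=None):
        print(f"Starting epoch {epoch}")

    def on_train_batch_end(self, batch, logs=None):
        # Print occasionally to avoid flooding the output.
        if batch % 500 == 0:
            print(f"  ...batch {batch} done, logs: {logs}")

    def on_epoch_end(self, epoch, logs=None):
        print(f"Finished epoch {epoch}, logs: {logs}")

# Usage: model.fit(x_train, y_train, epochs=2, callbacks=[StageLogger()])
```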
import tensorflow as tf # Defining the callback class class myCallback(tf.keras.callbacks.Callback): def on_epoch_end(self, epoch, logs={}): if(logs.get('accuracy')>0.6): print("\nReached 60% accuracy so cancelling training!") self.model.stop_training = True mnist = tf.keras.datasets.fashion_mnist (x_train, y_train),(x_test, y_test) = mnist.load_data() x_train, x_test = x_train / 255.0, x_test / 255.0 callbacks = myCallback() model = tf.keras.models.Sequential([ tf.keras.layers.Flatten(input_shape=(28, 28)), tf.keras.layers.Dense(512, activation=tf.nn.relu), tf.keras.layers.Dense(10, activation=tf.nn.softmax) ]) model.compile(optimizer=tf.optimizers.Adam(), loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.fit(x_train, y_train, epochs=10, callbacks=[callbacks])
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-labels-idx1-ubyte.gz 32768/29515 [=================================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/train-images-idx3-ubyte.gz 26427392/26421880 [==============================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-labels-idx1-ubyte.gz 8192/5148 [===============================================] - 0s 0us/step Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/t10k-images-idx3-ubyte.gz 4423680/4422102 [==============================] - 0s 0us/step Epoch 1/10 1866/1875 [============================>.] - ETA: 0s - loss: 0.4722 - accuracy: 0.8303 Reached 60% accuracy so cancelling training! 1875/1875 [==============================] - 7s 4ms/step - loss: 0.4723 - accuracy: 0.8302
MIT
03_callbacks.ipynb
mohd-faizy/03_TensorFlow_In_Practice
# Loan predictions

## Problem Statement

We want to automate the loan eligibility process based on customer details that are provided as online application forms are being filled. You can find the dataset [here](https://drive.google.com/file/d/1h_jl9xqqqHflI5PsuiQd_soNYxzFfjKw/view?usp=sharing). These details concern the customer's Gender, Marital Status, Education, Number of Dependents, Income, Loan Amount, Credit History and other things as well.

|Variable| Description|
|: ------------- |:-------------|
|Loan_ID| Unique Loan ID|
|Gender| Male/ Female|
|Married| Applicant married (Y/N)|
|Dependents| Number of dependents|
|Education| Applicant Education (Graduate/ Under Graduate)|
|Self_Employed| Self employed (Y/N)|
|ApplicantIncome| Applicant income|
|CoapplicantIncome| Coapplicant income|
|LoanAmount| Loan amount in thousands|
|Loan_Amount_Term| Term of loan in months|
|Credit_History| Credit history meets guidelines|
|Property_Area| Urban/ Semi Urban/ Rural|
|Loan_Status| Loan approved (Y/N)|

Explore the problem in the following stages:

1. Hypothesis Generation – understanding the problem better by brainstorming possible factors that can impact the outcome
2. Data Exploration – looking at categorical and continuous feature summaries and making inferences about the data
3. Data Cleaning – imputing missing values in the data and checking for outliers
4. Feature Engineering – modifying existing variables and creating new ones for analysis
5. Model Building – making predictive models on the data

## 1. Hypothesis Generation

Generating a hypothesis is a major step in the process of analyzing data. This involves understanding the problem and formulating a meaningful hypothesis about what could potentially have a good impact on the outcome. This is done BEFORE looking at the data, and we end up creating a laundry list of the different analyses which we can potentially perform if data is available.

### Possible hypotheses

Which applicants are more likely to get a loan?

1. Applicants having a credit history
2. Applicants with higher applicant and co-applicant incomes
3. Applicants with higher education level
4. Properties in urban areas with high growth perspectives

Do more brainstorming and create some hypotheses of your own. Remember that the data might not be sufficient to test all of these, but forming these enables a better understanding of the problem.
import pandas as pd import numpy as np from matplotlib import pyplot as plt df = pd.read_csv('data.csv') df.head(10) df.shape
_____no_output_____
MIT
clf.ipynb
Ruslion/Predicting-loan-eligibility
## 2. Data Exploration

Let's do some basic data exploration here and come up with some inferences about the data. Go ahead and try to figure out some irregularities and address them in the next section. One of the key challenges in any data set is missing values. Let's start by checking which columns contain missing values.
df.isnull().sum()
_____no_output_____
MIT
clf.ipynb
Ruslion/Predicting-loan-eligibility
Look at some basic statistics for numerical variables.
df.dtypes df.nunique()
_____no_output_____
MIT
clf.ipynb
Ruslion/Predicting-loan-eligibility
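The markdown above asks for basic statistics of the numerical variables, while the cell only shows dtypes and unique counts. A minimal way to get those summaries, including the mean and the median (50%) discussed just below (a sketch, assuming `df` is the DataFrame loaded earlier):

```python
# Summary statistics (count, mean, std, quartiles) for numerical columns.
df.describe()

# Including categorical columns as well (counts, unique values, top category).
df.describe(include='all')
```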
1. How many applicants have a `Credit_History`? (`Credit_History` has value 1 for those who have a credit history and 0 otherwise)
2. Is the `ApplicantIncome` distribution in line with your expectation? Similarly, what about `CoapplicantIncome`?
3. Tip: Can you see a possible skewness in the data by comparing the mean to the median, i.e. the 50% figure of a feature?

Let's discuss the nominal (categorical) variables. Look at the number of unique values in each of them. Explore further using the frequency of different categories in each nominal variable. Exclude the ID for obvious reasons.

### Distribution analysis

Study the distribution of various variables. Plot the histogram of `ApplicantIncome`, trying different numbers of bins. Look at box plots to understand the distributions. Look at the distribution of income segregated by `Education`. Look at the histogram and boxplot of `LoanAmount`. There might be some extreme values. Both `ApplicantIncome` and `LoanAmount` require some amount of data munging: `LoanAmount` has missing as well as extreme values, while `ApplicantIncome` has a few extreme values, which demand deeper understanding. A possible sketch of these plots is shown after this cell.

### Categorical variable analysis

Try to understand categorical variables in more detail using `pandas.DataFrame.pivot_table` and some visualizations.

## 3. Data Cleaning

This step typically involves imputing missing values and treating outliers.

### Imputing Missing Values

Missing values may not always be NaNs. For instance, the `Loan_Amount_Term` might be 0, which does not make sense. Impute missing values for all columns. Use the values which you find most meaningful (mean, mode, median, zero... maybe different mean values for different groups).
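Before the imputation-and-modeling pipeline below, here is a possible sketch of the exploration plots described above (not part of the original notebook; it assumes `df` is the DataFrame loaded earlier and uses the column names from the data dictionary):

```python
import matplotlib.pyplot as plt

# Histogram of ApplicantIncome and its box plot segregated by Education.
fig, axes = plt.subplots(1, 2, figsize=(12, 4))
df['ApplicantIncome'].hist(bins=50, ax=axes[0])
axes[0].set_title('ApplicantIncome histogram')
df.boxplot(column='ApplicantIncome', by='Education', ax=axes[1])
plt.show()

# Histogram and box plot of LoanAmount (note the missing and extreme values).
fig, axes = plt.subplots(1, 2, figsize=(12, 4))
df['LoanAmount'].hist(bins=50, ax=axes[0])
axes[0].set_title('LoanAmount histogram')
df.boxplot(column='LoanAmount', ax=axes[1])
plt.show()

# Categorical analysis with a pivot table: approval rate by credit history.
pivot = df.pivot_table(index='Credit_History', values='Loan_Status',
                       aggfunc=lambda s: (s == 'Y').mean())
print(pivot)
```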
from sklearn.impute import SimpleImputer from sklearn.preprocessing import StandardScaler from sklearn.pipeline import Pipeline from sklearn.preprocessing import FunctionTransformer from sklearn.metrics import accuracy_score from sklearn.compose import ColumnTransformer from sklearn.preprocessing import OneHotEncoder from sklearn.model_selection import GridSearchCV from sklearn.svm import SVC from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split from sklearn.compose import make_column_selector import pickle ohe=OneHotEncoder(drop='first', sparse=False) X= df.drop(columns=['Loan_Status', 'Loan_ID']) y=df['Loan_Status'] #y=ohe.fit_transform(df['Loan_Status'].to_numpy().reshape(-1, 1)) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) column_trans = ColumnTransformer( [('imp_most_frequent', SimpleImputer(strategy='most_frequent'), ['Gender', 'Married', 'Dependents', 'Self_Employed', 'Property_Area', 'Education', 'Credit_History']), ('imp_median', SimpleImputer(strategy='median'), ['LoanAmount', 'Loan_Amount_Term']), ('scaling', StandardScaler(), make_column_selector(dtype_include=np.number)) ] ) column_enc = ColumnTransformer([('one_hot_enc', OneHotEncoder(handle_unknown='ignore'), [0, 1, 2, 3, 4, 10])]) pipeline = Pipeline(steps=[('column_transf', column_trans), ('column_enc', column_enc), ('classifier', SVC(random_state = 17))]) # Find the best hyperparameters using GridSearchCV on the train set param_grid = [ {'classifier':(SVC(random_state = 17),), 'classifier__C': [0.5, 1, 2, 4], 'classifier__kernel': ['linear', 'poly', 'rbf', 'sigmoid'], 'classifier__degree': [2, 3], 'classifier__gamma':['scale', 'auto']}, {'classifier':(LogisticRegression(random_state = 17),), 'classifier__penalty':['l1', 'l2', 'elasticnet'], 'classifier__C': [0.5, 1, 2, 4], 'classifier__solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'] }] grid = GridSearchCV(pipeline, param_grid=param_grid, verbose=2, n_jobs=5, scoring='accuracy') grid.fit(X_train, y_train) grid.best_params_ grid.best_score_ y_pred=grid.predict(X_test) accuracy_score(y_test, y_pred) pipeline = Pipeline(steps=[('column_transf', column_trans), ('column_enc', column_enc), ('classifier', grid.best_params_['classifier'])]) pipeline.fit(X_train, y_train) with open('myfile.pickle', 'wb') as file_handle: pickle.dump(pipeline, file_handle)
_____no_output_____
MIT
clf.ipynb
Ruslion/Predicting-loan-eligibility
Looking at the randomness (or otherwise) of mouse behaviour

Also, the randomness (or otherwise) of trial types, to know when best to start looking at 'full task' behaviour.
# Import libraries import matplotlib.pyplot as plt %matplotlib inline import pandas as pd import seaborn as sns import random import copy import numpy as np from scipy.signal import resample from scipy.stats import zscore from scipy import interp from sklearn.linear_model import LogisticRegression from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score from sklearn import metrics from sklearn import cross_validation # Load data # data loading function def data_load_and_parse(mouse_name): tt = pd.read_csv('~/work/whiskfree/data/trialtype_' + mouse_name + '.csv',header=None) ch = pd.read_csv('~/work/whiskfree/data/choice_' + mouse_name + '.csv',header=None) sess = pd.read_csv('~/work/whiskfree/data/session_' + mouse_name + '.csv',header=None) AB = pd.read_csv('~/work/whiskfree/data/AB_' + mouse_name + '.csv',header=None) clean1 = np.nan_to_num(tt) !=0 clean2 = np.nan_to_num(ch) !=0 clean = clean1&clean2 tt_c = tt[clean].values ch_c = ch[clean].values s_c = sess[clean].values ab_c = AB[clean].values return tt_c, ch_c, clean, s_c, ab_c mouse_name = '36_r' tt, ch, clean, sess, AB = data_load_and_parse(mouse_name) # work out AB/ON trials AB_pol = np.nan_to_num(AB) !=0 ON_pol = np.nan_to_num(AB) ==0 cm_AB = confusion_matrix(tt[AB_pol],ch[AB_pol]) cm_ON = confusion_matrix(tt[ON_pol],ch[ON_pol]) print(cm_AB) print(cm_ON) print(accuracy_score(tt[AB_pol],ch[AB_pol])) print(accuracy_score(tt[ON_pol],ch[ON_pol])) # Format TT/ choice data and plot fig, ax = plt.subplots(2,1,figsize=(20,5)) _ = ax[0].plot(tt[ON_pol][:100],label='TT ON') _ = ax[0].plot(ch[ON_pol][:100],label='Ch ON') ax[0].legend() _ = ax[1].plot(tt[AB_pol][:100],label='TT AB') _ = ax[1].plot(ch[AB_pol][:100],label='Ch AB') ax[1].legend() # Measure randomness and plot that # First plot cumsum of trial types. Periods of bias (of choice 1 and 3, anyway) will be seen as deviations from the mean line plt.plot(np.cumsum(tt[AB_pol][:100]),label='Cumsum TT AB') plt.plot(np.cumsum(ch[AB_pol][:100]),label='Cumsum Ch AB') plt.plot([0,99],[0,np.sum(tt[AB_pol][:100])],label='Mean cumsum') plt.legend() # How about looking at the distribution of individual states, pairs, triples. 
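To go beyond eyeballing the cumulative sums, one simple quantitative check (a sketch, not from the original notebook, assuming `tt`, `ch` and `AB_pol` as defined in the cell above) is to compare the empirical entropy of the trial-type and choice sequences with the maximum possible entropy $\log_2 3$ for three states:

```python
import numpy as np

def sequence_entropy(seq, n_states=3):
    """Empirical entropy (in bits) of a sequence of states 1..n_states."""
    counts = np.array([np.sum(seq == s) for s in range(1, n_states + 1)], dtype=float)
    p = counts / counts.sum()
    p = p[p > 0]                      # ignore unused states
    return -np.sum(p * np.log2(p))

print("Trial types:", sequence_entropy(tt[AB_pol]))
print("Choices    :", sequence_entropy(ch[AB_pol]))
print("Uniform    :", np.log2(3))    # ~1.585 bits; values well below this indicate bias
```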
# Compare to random sequence (with no conditions) P_i = np.zeros(3) P_i[0] = len(tt[tt[AB_pol]==1]) P_i[1] = len(tt[tt[AB_pol]==2]) P_i[2] = len(tt[tt[AB_pol]==3]) with sns.axes_style("white"): _ = plt.imshow(np.expand_dims(P_i/sum(P_i),axis=0),interpolation='none') for j in range(0,3): plt.text(j, 0, P_i[j]/sum(P_i), va='center', ha='center',bbox=dict(facecolor='white',edgecolor='white', alpha=0.5)) # _ = ax[1].bar([0,1,2],P_i/sum(P_i)) # Pairs and triples (in dumb O(n) format) P_ij = np.zeros([3,3]) P_ijk = np.zeros([3,3,3]) for i in range(len(tt[AB_pol]) - 2): #p_i = tt[AB_pol][i] #p_j = tt[AB_pol][i+1] #p_k = tt[AB_pol][i+2] p_i = ch[AB_pol][i] p_j = ch[AB_pol][i+1] p_k = ch[AB_pol][i+2] P_ij[p_i-1,p_j-1] += 1 P_ijk[p_i-1,p_j-1,[p_k-1]] += 1 cmap = sns.diverging_palette(220,10, l=70, as_cmap=True, center="dark") # blue to green via black with sns.axes_style("white"): plt.imshow(P_ij/np.sum(P_ij),interpolation='none',cmap=cmap) for i in range(0,3): for j in range(0,3): plt.text(j, i, "{0:.2f}".format(P_ij[i,j]/np.sum(P_ij)*9), va='center', ha='center',bbox=dict(facecolor='white',edgecolor='white', alpha=0.5)) #plt.savefig('figs/graphs/state_transition_matrix_AB'+ mouse_name +'.png') plt.savefig('figs/graphs/choice_state_transition_matrix_AB'+ mouse_name +'.png') # Plot P(state) for all 27 triple states plt.plot(P_ijk_ON.flatten()/np.sum(P_ijk_ON)) plt.plot([0,26],[1/27,1/27],'--') 1/27 import graph_tool.all as gt # Transition probabilities between individual states, pairs, triples g = gt.Graph() g.add_edge_list(np.transpose(P_ij.nonzero())) with sns.axes_style("white"): plt.imshow(P_ij,interpolation='none') g = gt.Graph(directed = True) g.add_vertex(len(P_ij)) edge_weights = g.new_edge_property('double') edge_labels = g.new_edge_property('string') for i in range(P_ij.shape[0]): for j in range(P_ij.shape[1]): e = g.add_edge(i, j) edge_weights[e] = P_ij[i,j] edge_labels[e] = str(P_ij[i,j]) # Fancy drawing code where node colour/size is degree. Edge colour/size is betweenness deg = g.degree_property_map("in") # deg.a = 4 * (np.sqrt(deg.a) * 0.5 + 0.4) deg.a = deg.a*20 print(deg.a) ewidth = edge_weights.a / 10 #ebet.a /= ebet.a.max() / 10. 
#print(ebet.a) pos = gt.sfdp_layout(g) #control = g.new_edge_property("vector<double>") #for e in g.edges(): # d = np.sqrt(sum((pos[e.source()].a - pos[e.target()].a) ** 2)) # print(d) # control[e] = [10, d, 10,d] #[0.3, d, 0.7, d] cmap = sns.cubehelix_palette(as_cmap=True) # cubehelix cmap = sns.diverging_palette(220,10, l=70, as_cmap=True, center="dark") # blue to green via black # gt.graph_draw(g, pos=pos, vertex_size=deg, vertex_fill_color=deg, vorder=deg, # edge_color=ebet, eorder=eorder, edge_pen_width=ebet, # edge_control_points=control) # some curvy edges # output="graph-draw.pdf") gt.graph_draw(g, pos=pos, vertex_size=deg, vertex_color=deg, vertex_fill_color=deg, edge_color=edge_weights, edge_text=edge_labels, vcmap=cmap,ecmap=cmap, vertex_text=g.vertex_index, vertex_font_size=18,fit_view=0.5) #vcmap=plt.cm.Pastel1,ecmap=plt.cm.Pastel1 ) # edge_control_points=control) # some curvy edges # output="graph-draw.pdf") # Same as g but normalised so total trials/9 = 1 g_n = gt.Graph(directed = True) edge_weights_n = g_n.new_edge_property('double') edge_labels_n = g_n.new_edge_property('string') node_size_n = g_n.new_vertex_property('double') g_n.add_vertex(len(P_ij)) P_ij_n = P_ij /(P_ij.sum()/9) for i in range(P_ij.shape[0]): #v = g_n.add_vertex() node_size_n[i] = 3* sum(P_ij)[i] / np.sum(P_ij) for j in range(P_ij.shape[1]): e = g_n.add_edge(i, j) edge_weights_n[e] = P_ij_n[i,j] edge_labels_n[e] = "{0:.2f}".format(P_ij_n[i,j]) # Minimal drawing code, but with scaled colours/weights for network properties # Line width changes on each loop ATM. Needs fixing.. pos = gt.sfdp_layout(g_n) #deg_n = g_n.degree_property_map("in") # deg.a = 4 * (np.sqrt(deg.a) * 0.5 + 0.4) #deg_n.a = deg_n.a*20 n_size = copy.copy(node_size_n) n_size.a = 50* n_size.a/ max(n_size.a) edge_w = copy.copy(edge_weights_n) edge_w.a = edge_w.a*10 cmap = sns.cubehelix_palette(as_cmap=True) # cubehelix cmap = sns.diverging_palette(220,10, l=70, as_cmap=True, center="dark") # blue to green via black gt.graph_draw(g_n, pos=pos, vertex_color = n_size, vertex_fill_color = n_size, vertex_size = n_size, edge_pen_width=edge_w, edge_color=edge_weights_n, edge_text=edge_labels_n, vcmap=cmap,ecmap=cmap, vertex_text=g_n.vertex_index, vertex_font_size=18, output_size=(600,600), fit_view=0.4, output="figs/graphs/choice_1st_order_transition_AB.pdf") #vcmap=plt.cm.Pastel1,ecmap=plt.cm.Pastel1 ) # edge_control_points=control) # some curvy edges # output="graph-draw.pdf") current_palette = sns.color_palette("cubehelix") current_palette = sns.diverging_palette(220,10, l=50, n=7, center="dark") sns.palplot(current_palette) # Now write a loop to construct a tree-type graph # Same as g but normalised so total trials/9 = 1 t = gt.Graph(directed = False) P_ij_n = P_ij /(P_ij.sum()/9) P_ijk_n = P_ijk /(P_ijk.sum()/27) edge_weights_t = t.new_edge_property('double') edge_labels_t = t.new_edge_property('string') node_labels_t = t.new_vertex_property('string') node_size = t.new_vertex_property('double') h = t.add_vertex() node_labels_t[h] = "0" for i in range(P_ij.shape[0]): v = t.add_vertex() node_labels_t[v] = str(i) e = t.add_edge(h,v) node_size[v] = sum(P_ij_n)[i] *10 for j in range(P_ij.shape[1]): v2 = t.add_vertex() node_labels_t[v2] = str(i) + "-" + str(j) e = t.add_edge(v,v2) edge_weights_t[e] = P_ij_n[i,j]*10 edge_labels_t[e] = "{0:.2f}".format(P_ij_n[i,j]) node_size[v2] = P_ij_n[i,j]*20 for k in range(P_ijk.shape[2]): v3 = t.add_vertex() node_labels_t[v3] = str(i) + "-" + str(j) + "-" + str(k) e2 = t.add_edge(v2,v3) edge_weights_t[e2] = 
P_ijk_n[i,j,k]*10 edge_labels_t[e2] = "{0:.2f}".format(P_ijk_n[i,j,k]) node_size[v3] = P_ijk_n[i,j,k]*20 #pos = gt.sfdp_layout(t) #pos = gt.fruchterman_reingold_layout(t) pos = gt.radial_tree_layout(t,t.vertex(0)) cmap = sns.diverging_palette(220,10, l=70, as_cmap=True, center="dark") # blue to green via black gt.graph_draw(t,pos=pos,vertex_size=node_size,edge_pen_width=edge_weights_t, vertex_text = node_labels_t, edge_text=edge_labels_t, ecmap=cmap, edge_color = edge_weights_t, vcmap=cmap, vertex_color = node_size,vertex_fill_color = node_size, output_size=(1000, 1000), fit_view=0.8, output="figs/graphs/choice_3_step_statespace_AB.pdf") "{0:.2f}".format(P_ijk[1,1,1]) "{0:.2f}".format(P_ijk[1,1,1]) len(P_ij)
_____no_output_____
MIT
tf/.ipynb_checkpoints/Totally Random-checkpoint.ipynb
mathewzilla/whiskfree
Repeat the trick for ON policy trials
# P_ijk_ON P_ij_ON = np.zeros([3,3]) P_ijk_ON = np.zeros([3,3,3]) for i in range(len(tt[AB_pol]) - 2): # p_i = tt[ON_pol][i] # p_j = tt[ON_pol][i+1] # p_k = tt[ON_pol][i+2] p_i = ch[AB_pol][i] p_j = ch[AB_pol][i+1] p_k = ch[AB_pol][i+2] P_ij_ON[p_i-1,p_j-1] += 1 P_ijk_ON[p_i-1,p_j-1,[p_k-1]] += 1 # Make graph t_ON = gt.Graph(directed = False) P_ij_ON = P_ij_ON /(P_ij_ON.sum()/9) P_ijk_ON = P_ijk_ON /(P_ijk_ON.sum()/27) edge_weights_tON = t_ON.new_edge_property('double') edge_labels_tON = t_ON.new_edge_property('string') node_labels_tON = t_ON.new_vertex_property('string') node_size_ON = t_ON.new_vertex_property('double') h = t_ON.add_vertex() node_labels_tON[h] = "0" for i in range(P_ij_ON.shape[0]): v = t_ON.add_vertex() node_labels_tON[v] = str(i) e = t_ON.add_edge(h,v) node_size_ON[v] = sum(P_ij_ON)[i] *10 for j in range(P_ij_ON.shape[1]): v2 = t_ON.add_vertex() node_labels_tON[v2] = str(i) + "-" + str(j) e = t_ON.add_edge(v,v2) edge_weights_tON[e] = P_ij_ON[i,j]*10 edge_labels_tON[e] = "{0:.2f}".format(P_ij_ON[i,j]) node_size_ON[v2] = P_ij_ON[i,j]*20 for k in range(P_ijk_ON.shape[2]): v3 = t_ON.add_vertex() node_labels_tON[v3] = str(i) + "-" + str(j) + "-" + str(k) e2 = t_ON.add_edge(v2,v3) edge_weights_tON[e2] = P_ijk_ON[i,j,k]*10 edge_labels_tON[e2] = "{0:.2f}".format(P_ijk_ON[i,j,k]) node_size_ON[v3] = P_ijk_ON[i,j,k]*20 # Plot graph pos = gt.radial_tree_layout(t_ON,t_ON.vertex(0)) cmap = sns.diverging_palette(220,10, l=70, as_cmap=True, center="dark") # blue to green via black gt.graph_draw(t_ON,pos=pos,vertex_size=node_size_ON,edge_pen_width=edge_weights_tON, vertex_text = node_labels_tON, edge_text=edge_labels_tON, ecmap=cmap, edge_color = edge_weights_tON, vcmap=cmap, vertex_color = node_size_ON, vertex_fill_color = node_size_ON, output_size=(1000, 1000), fit_view=0.8) # output="figs/graphs/choice_3_step_statespace_AB_"+ mouse_name +".pdf") # image of ON trials transition matrix cmap = sns.diverging_palette(220,10, l=70, as_cmap=True, center="dark") # blue to green via black with sns.axes_style("white"): plt.imshow(P_ij_ON/np.sum(P_ij_ON),interpolation='none',cmap=cmap) for i in range(0,3): for j in range(0,3): plt.text(j, i, "{0:.2f}".format(P_ij_ON[i,j]), va='center', ha='center',bbox=dict(facecolor='white',edgecolor='white', alpha=0.5)) ylabels = ['Anterior','Posterior','No Go'] plt.xticks([0,1,2],ylabels) plt.yticks([0,1,2],ylabels) # plt.set_yticks([0,1,2]) # plt.set_yticklabels(ylabels) # plt.savefig('figs/graphs/choice_state_transition_matrix_AB_'+ mouse_name +'.png') # Just plot P(state) plt.figure(figsize=(16,2)) ax1 = plt.subplot2grid((1,4),(0,0)) ax1.plot(P_ij_ON.flatten()/np.sum(P_ij_ON) * 9) ax1.plot([0,8],[1,1],'--') state_names = np.empty([3,3],dtype=object) for i in range(0,3): for j in range(0,3): state_names[i,j] = str(i) + "-" + str(j) ax1.set_xticks(range(0,9)) ax1.set_xticklabels(state_names.flatten(),rotation=45) ax2 = plt.subplot2grid((1,4),(0,1),colspan=3) ax2.plot(P_ijk_ON.flatten()/np.sum(P_ijk_ON) * 27) ax2.plot([0,26],[1,1],'--') state_names = np.empty([3,3,3],dtype=object) for i in range(0,3): for j in range(0,3): for k in range(0,3): state_names[i,j,k] = str(i) + "-" + str(j) + "-" + str(k) _ = ax2.set_xticks(range(0,27)) _ = ax2.set_xticklabels(state_names.flatten(),rotation=45) plt.tight_layout() plt.savefig('figs/graphs/CH_state_prob_AB_'+ mouse_name +'.png') from scipy.stats import chisquare # chisquare(P_ij_ON.flatten()) chisquare? 
# First order transition graph g_ON = gt.Graph(directed = True) edge_weights_ON = g_ON.new_edge_property('double') edge_labels_ON = g_ON.new_edge_property('string') node_size_ON = g_ON.new_vertex_property('double') g_ON.add_vertex(len(P_ij_ON)) for i in range(P_ij_ON.shape[0]): #v = g_n.add_vertex() node_size_ON[i] = 3* sum(P_ij_ON)[i] / np.sum(P_ij_ON) for j in range(P_ij_ON.shape[1]): e = g_ON.add_edge(i, j) edge_weights_ON[e] = P_ij_ON[i,j] edge_labels_ON[e] = "{0:.2f}".format(P_ij_ON[i,j]) # Plot graph pos = gt.sfdp_layout(g_ON) n_size = copy.copy(node_size_ON) n_size.a = 50* n_size.a/ max(n_size.a) edge_w = copy.copy(edge_weights_ON) edge_w.a = edge_w.a*10 cmap = sns.cubehelix_palette(as_cmap=True) # cubehelix cmap = sns.diverging_palette(220,10, l=70, as_cmap=True, center="dark") # blue to red via black gt.graph_draw(g_ON, pos=pos, vertex_color = n_size, vertex_fill_color = n_size, vertex_size = n_size, edge_pen_width=edge_w, edge_color=edge_w, edge_text=edge_labels_ON, vcmap=cmap,ecmap=cmap, vertex_text=g_ON.vertex_index, vertex_font_size=18, output_size=(800, 800), fit_view=0.45, output="figs/graphs/choice_1st_order_transition_ON"+ mouse_name +".pdf")
_____no_output_____
MIT
tf/.ipynb_checkpoints/Totally Random-checkpoint.ipynb
mathewzilla/whiskfree
Finally, transition probabilities for choices - do they follow the trial types? (Actually, let's just re-run the code from above changing tt to ch) Now, let's use graphs to visualise confusion matrices
cm_AB = confusion_matrix(tt[AB_pol],ch[AB_pol]) cm_ON = confusion_matrix(tt[ON_pol],ch[ON_pol]) print(cm_AB) print(cm_ON) print(accuracy_score(tt[AB_pol],ch[AB_pol])) print(accuracy_score(tt[ON_pol],ch[ON_pol])) cmap = sns.diverging_palette(220,10, l=70, as_cmap=True, center="dark") # blue to red via black with sns.axes_style("white"): fig, ax = plt.subplots(1,2) ax[0].imshow(cm_ON/np.sum(cm_ON),interpolation='none',cmap=cmap) ax[1].imshow(cm_AB/np.sum(cm_AB),interpolation='none',cmap=cmap) for i in range(0,3): for j in range(0,3): ax[0].text(j, i, "{0:.2f}".format(cm_ON[i,j]), va='center', ha='center',bbox=dict(facecolor='white',edgecolor='white', alpha=0.5)) ax[1].text(j, i, "{0:.2f}".format(cm_AB[i,j]), va='center', ha='center',bbox=dict(facecolor='white',edgecolor='white', alpha=0.5)) ax[0].set_title('Mouse ON') ax[1].set_title('Mouse AB') # plt.savefig('figs/graphs/confusion_matrix_AB_'+ mouse_name +'.png')
_____no_output_____
MIT
tf/.ipynb_checkpoints/Totally Random-checkpoint.ipynb
mathewzilla/whiskfree
Should also look at patterns in licking wrt correct/incorrect
for v in g.vertices(): print(v) for e in g.edges(): print(e) 19.19 - 9.92 # gt.graph_draw(g,output_size=(400,400),fit_view=True,output='simple_graph.pdf') gt.graph_draw(g2,output_size=(400,400),fit_view=True) deg. # Stats... len(tt[tt[AB_pol]]) gt.graph_draw?
_____no_output_____
MIT
tf/.ipynb_checkpoints/Totally Random-checkpoint.ipynb
mathewzilla/whiskfree
Load and plot protraction/retraction trial data for one mouse
# quick load and classification of pro/ret data tt = pd.read_csv('~/work/whiskfree/data/tt_36_subset_sorted.csv',header=None) ch = pd.read_csv('~/work/whiskfree/data/ch_36_subset_sorted.csv',header=None) proret = pd.read_csv('~/work/whiskfree/data/proret_36_subset_sorted.csv',header=None) tt = tt.values.reshape(-1,1) ch = ch.values.reshape(-1,1) proret = proret.values.reshape(-1,1) cm = confusion_matrix(tt,ch) print(cm) cm_tt_t = confusion_matrix(tt,proret) cm_ch_t = confusion_matrix(ch,proret) print(cm_tt_t) print(cm_ch_t) plt.imshow(cm_tt_t,interpolation='none') with sns.axes_style("white"): fig, ax = plt.subplots(1,2,figsize=(10,6)) ax[0].imshow(cm_tt_t/np.sum(cm_tt_t),interpolation='none') ax[1].imshow(cm_ch_t/np.sum(cm_ch_t),interpolation='none') for i in range(0,3): for j in range(0,3): ax[0].text(j, i, "{0:.2f}".format(cm_tt_t[i,j]), va='center', ha='center',bbox=dict(facecolor='white',edgecolor='white', alpha=0.5)) ax[1].text(j, i, "{0:.2f}".format(cm_ch_t[i,j]), va='center', ha='center',bbox=dict(facecolor='white',edgecolor='white', alpha=0.5)) xlabels = ['Retraction','Protraction','No Touch'] ylabels = ['Posterior','Anterior','No Go'] ax[0].set_title('Trialtype | touch type' + '. ' + str(int(100 * accuracy_score(tt,proret))) + '%') ax[1].set_title('Choice | touch type' + '. ' + str(int(100 * accuracy_score(ch,proret))) + '%') ax[0].set_ylabel('Trial type') ax[1].set_ylabel('Choice') for i in range(0,2): ax[i].set_xlabel('Touch type') ax[i].set_xticks([0,1,2]) ax[i].set_xticklabels(xlabels) ax[i].set_yticks([0,1,2]) ax[i].set_yticklabels(ylabels) plt.tight_layout() # plt.savefig('../figs/classification/pro_ret/310816/touchtype_confmatrix_both_32.png') plt.savefig('../figs/classification/pro_ret/36/touchtype_confmatrix_both_36.png') lr_tt = LogisticRegression(solver='lbfgs',multi_class='multinomial') lr_tt.fit(proret,tt) c_tt = lr_tt.predict(proret) print('TT prediction accuracy =',accuracy_score(tt,c_tt)) lr_ch = LogisticRegression(solver='lbfgs',multi_class='multinomial') lr_ch.fit(proret,ch) c_ch = lr_ch.predict(proret) print('Choice prediction accuracy =',accuracy_score(ch,c_ch)) print('Mouse prediction accuracy =',accuracy_score(tt,ch)) print(confusion_matrix(ch,c_ch)) print(confusion_matrix(tt,c_tt)) print(accuracy_score(ch,proret)) print(accuracy_score(tt,proret)) plt.plot(c_ch) # Confusion matrix predicting trial type based on protraction/retraction cm = confusion_matrix(tt,c_tt) cm_m = confusion_matrix(tt,ch) # xlabels = ['Retraction','Protraction','No Touch'] ylabels = ['Posterior','Anterior','No Go'] with sns.axes_style("white"): fig, ax = plt.subplots(1,2,figsize=(10,6)) ax[0].imshow(cm,interpolation='none') for i in range(0,3): for j in range(0,3): ax[0].text(j, i, "{0:.2f}".format(cm[i,j]), va='center', ha='center',bbox=dict(facecolor='white',edgecolor='white', alpha=0.5)) ax[0].set_title('Logistic Regression - TT' + '. ' + str(int(100 * accuracy_score(tt,c_tt))) + '%') ax[1].imshow(cm_m,interpolation='none') for i in range(0,3): for j in range(0,3): ax[1].text(j, i, "{0:.2f}".format(cm_m[i,j]), va='center', ha='center',bbox=dict(facecolor='white',edgecolor='white', alpha=0.5)) ax[1].set_title('Mouse' + '. 
' + str(int(100 * accuracy_score(tt,ch))) + '%') for i in range(0,2): ax[i].set_ylabel('True label') ax[i].set_xlabel('Predicted label') ax[i].set_xticks([0,1,2]) ax[i].set_xticklabels(ylabels) ax[i].set_yticks([0,1,2]) ax[i].set_yticklabels(ylabels) plt.tight_layout() # plt.savefig('../figs/classification/pro_ret/310816/LR_confmatrix_TT_32.png') plt.savefig('../figs/classification/pro_ret/36/LR_confmatrix_TT_36.png') # Confusion matrix predicting choice based on protraction/retraction cm_ch = confusion_matrix(ch,c_ch) cm_m = confusion_matrix(ch,tt) # xlabels = ['Retraction','Protraction','No Touch'] ylabels = ['Posterior','Anterior','No Go'] with sns.axes_style("white"): fig, ax = plt.subplots(1,2,figsize=(10,6)) ax[0].imshow(cm_ch,interpolation='none') for i in range(0,3): for j in range(0,3): ax[0].text(j, i, "{0:.2f}".format(cm_ch[i,j]), va='center', ha='center',bbox=dict(facecolor='white',edgecolor='white', alpha=0.5)) ax[0].set_title('Logistic Regression - Ch' + '. ' + str(int(100 * accuracy_score(ch,c_ch))) + '%') ax[1].imshow(cm_m,interpolation='none') for i in range(0,3): for j in range(0,3): ax[1].text(j, i, "{0:.2f}".format(cm_m[i,j]), va='center', ha='center',bbox=dict(facecolor='white',edgecolor='white', alpha=0.5)) ax[1].set_title('Mouse' + '. ' + str(int(100 * accuracy_score(ch,tt))) + '%') for i in range(0,2): ax[i].set_ylabel('True label') ax[i].set_xlabel('Predicted label') ax[i].set_xticks([0,1,2]) ax[i].set_xticklabels(ylabels) ax[i].set_yticks([0,1,2]) ax[i].set_yticklabels(ylabels) plt.tight_layout() # plt.savefig('../figs/classification/pro_ret/310816/LR_confmatrix_Ch_32.png') plt.savefig('../figs/classification/pro_ret/36/LR_confmatrix_Ch_36.png') # Correct/incorrect correct = tt==ch errors = tt!=ch cm_c = confusion_matrix(ch[correct],proret[correct]) cm_ic = confusion_matrix(ch[errors],proret[errors]) xlabels = ['Retraction','Protraction','No Touch'] ylabels = ['Posterior','Anterior','No Go'] with sns.axes_style("white"): fig, ax = plt.subplots(1,2,figsize=(10,6)) ax[0].imshow(cm_c,interpolation='none') for i in range(0,3): for j in range(0,3): ax[0].text(j, i, "{0:.2f}".format(cm_c[i,j]), va='center', ha='center',bbox=dict(facecolor='white',edgecolor='white', alpha=0.5)) ax[0].set_title('Correct choice | touch type') ax[1].imshow(cm_ic,interpolation='none') for i in range(0,3): for j in range(0,3): ax[1].text(j, i, "{0:.2f}".format(cm_ic[i,j]), va='center', ha='center',bbox=dict(facecolor='white',edgecolor='white', alpha=0.5)) ax[1].set_title('Incorrect choice | touch type') for i in range(0,2): ax[i].set_ylabel('Choice') ax[i].set_xlabel('Touch Type') ax[i].set_xticks([0,1,2]) ax[i].set_xticklabels(xlabels) ax[i].set_yticks([0,1,2]) ax[i].set_yticklabels(ylabels) plt.tight_layout() # plt.savefig('../figs/classification/pro_ret/310816/Correct_incorrect_confmatrix_Ch_32.png') plt.savefig('../figs/classification/pro_ret/36/Correct_incorrect_confmatrix_Ch_36.png') # Try graph of trialtype/choice/touchtype plots # P_ijk_ON # import graph_tool.all as gt cm_3 = np.zeros([3,3,3]) for i in range(len(tt) - 2): cm_3[tt[i]-1,proret[i]-1 ,ch[i]-1] += 1 # Make graph cm_G = gt.Graph(directed = False) # trialtypes = ['P','A','NG'] # touchtypes = ['Ret','Pro','NT'] # choices = ['P','A','NG'] trialtypes = ['Posterior','Anterior','No Go'] touchtypes = ['Retraction','Protraction','No Touch'] choices = ['Posterior','Anterior','No Go'] edge_weights_cm_G = cm_G.new_edge_property('double') edge_labels_cm_G = cm_G.new_edge_property('string') node_labels_cm_G = 
cm_G.new_vertex_property('string') node_size_cm_G = cm_G.new_vertex_property('double') h = cm_G.add_vertex() node_labels_cm_G[h] = "0" for i in range(cm_3.shape[0]): v = cm_G.add_vertex() node_labels_cm_G[v] = trialtypes[i] e = cm_G.add_edge(h,v) node_size_cm_G[v] = np.sum(cm_3[i]) / 4 for j in range(cm_3.shape[1]): v2 = cm_G.add_vertex() node_labels_cm_G[v2] = touchtypes[j] e = cm_G.add_edge(v,v2) edge_weights_cm_G[e] = np.sum(cm_3[i,j]) /4 edge_labels_cm_G[e] = str(int(np.sum(cm_3[i,j]))) node_size_cm_G[v2] = np.sum(cm_3[i,j]) /4 for k in range(cm_3.shape[2]): v3 = cm_G.add_vertex() node_labels_cm_G[v3] = choices[k] e2 = cm_G.add_edge(v2,v3) edge_weights_cm_G[e2] = int(cm_3[i,j,k])/4 edge_labels_cm_G[e2] = str(int(cm_3[i,j,k])) node_size_cm_G[v3] = int(cm_3[i,j,k])/2 # Plot graph pos = gt.radial_tree_layout(cm_G,cm_G.vertex(0)) # cmap = sns.diverging_palette(220,10, l=70, as_cmap=True, center="dark") # blue to green via black cmap =plt.get_cmap('Greys') gt.graph_draw(cm_G,pos=pos,vertex_size=node_size_cm_G,edge_pen_width=edge_weights_cm_G, vertex_text = node_labels_cm_G, #vertex_text_position = 'centered', edge_text=edge_labels_cm_G, vertex_font_size = 22, vertex_font_family = 'sansserif', edge_font_size = 24, edge_font_family = 'sansserif', ecmap=cmap, vcmap=cmap, edge_color = edge_weights_cm_G, vertex_color = node_size_cm_G, vertex_fill_color = node_size_cm_G, output_size=(1500, 1500), fit_view=0.8, # output="../figs/classification/pro_ret/310816/tt_touch_ch_graph_BW_"+ mouse_name +".pdf") output="../figs/classification/pro_ret/36/tt_touch_ch_graph_BW_"+ mouse_name +".pdf") np.sum(cm_3) error_matrix choice_matrix with sns.axes_style("white"): cmap = sns.diverging_palette(220,10, l=70, as_cmap=True, center="dark") # blue to green via black fig, ax = plt.subplots(1,2) ax[0].imshow(error_matrix,interpolation='none',cmap=cmap) for i in range(0,3): for j in range(0,3): ax[0].text(j, i, "{0:.2f}".format(error_matrix[i,j]), va='center', ha='center',bbox=dict(facecolor='white',edgecolor='white', alpha=0.5)) ax[0].set_title('Error matrix') # + '. ' + str(int(100 * accuracy_score(tt,c_tt))) + '%') ax[0].set_ylabel('Trial type') ax[0].set_xlabel('Touch type') ax[1].imshow(choice_matrix,interpolation='none',cmap=cmap) for i in range(0,3): for j in range(0,3): ax[1].text(j, i, "{0:.2f}".format(choice_matrix[i,j]), va='center', ha='center',bbox=dict(facecolor='white',edgecolor='white', alpha=0.5)) ax[1].set_title('Choice matrix') # + '. ' + str(int(100 * accuracy_score(tt,ch))) + '%') ax[1].set_ylabel('Choice') ax[1].set_xlabel('Touch type') # plt.savefig('figs/graphs/pro_ret_confmatrix_TT_32_full.png') plt.plot(c_ch) print(confusion_matrix(ch,proret)) print(confusion_matrix(tt,proret))
[[164 54 77] [ 86 241 25] [ 21 114 133]] [[189 15 62] [ 80 236 25] [ 2 158 148]]
MIT
tf/.ipynb_checkpoints/Totally Random-checkpoint.ipynb
mathewzilla/whiskfree
Libraries used. Press Play to initialize the libraries
import networkx as nx import matplotlib.pyplot as plt import numpy as np
_____no_output_____
MIT
TRABALHO1GRAFOS.ipynb
haroldosfilho/Python
Enter the number of vertices of your graph and press Enter.
n = input("entre com o numero de vertices:" )
entre com o numero de vertices:5
MIT
TRABALHO1GRAFOS.ipynb
haroldosfilho/Python
Press play to convert your input into an integer.
num=int(str(n)) print(num)
5
MIT
TRABALHO1GRAFOS.ipynb
haroldosfilho/Python
Press play to generate the list of vertices of your graph
G = nx.path_graph(num) list(G.nodes) m = int(input("Entre com o número de arestas : "))
Entre com o número de arestas : 7
MIT
TRABALHO1GRAFOS.ipynb
haroldosfilho/Python
Enter your edges (which vertices are connected), pressing Enter after each edge you provide.
# creating an empty list lst = [] # iterating till the range for i in range(0, m): ele = str(input()) lst.append(ele) # adding the element print(lst)
01 12 13 23 24 34 02 ['01', '12', '13', '23', '24', '34', '02']
MIT
TRABALHO1GRAFOS.ipynb
haroldosfilho/Python
Press play to generate a drawing of your graph in the plane.
G = nx.Graph(lst) opts = { "with_labels": True, "node_color": 'y' } nx.draw(G, **opts)
_____no_output_____
MIT
TRABALHO1GRAFOS.ipynb
haroldosfilho/Python
Press play to generate the entries of your graph's adjacency matrix
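For reference, the matrix built in the next cells is the standard (unweighted) adjacency matrix of the graph; a compact way to state what it contains, for vertices $v_i, v_j$:

$$
A_{ij} = \begin{cases} 1 & \text{if } \{v_i, v_j\} \in E \\ 0 & \text{otherwise,} \end{cases}
$$

which is symmetric for an undirected graph, as the printed matrix below confirms.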
A = nx.adjacency_matrix(G) print(A)
(0, 1) 1 (0, 2) 1 (1, 0) 1 (1, 2) 1 (1, 3) 1 (2, 0) 1 (2, 1) 1 (2, 3) 1 (2, 4) 1 (3, 1) 1 (3, 2) 1 (3, 4) 1 (4, 2) 1 (4, 3) 1
MIT
TRABALHO1GRAFOS.ipynb
haroldosfilho/Python
Now just press play and your adjacency matrix is ready!
A = nx.adjacency_matrix(G).toarray() print(A)
[[0 1 1 0 0] [1 0 1 1 0] [1 1 0 1 1] [0 1 1 0 1] [0 0 1 1 0]]
MIT
TRABALHO1GRAFOS.ipynb
haroldosfilho/Python
ClarityViz Pipeline: .img -> histogram .nii -> graph represented as csv -> graph as graphml -> plotly To run: Step 1: First, run the following. This takes the .img, generates the localeq histogram as a .nii file, gets the nodes and edges as a csv, and converts the csv into a graphml
python runclarityviz.py --token Fear199Coronal --file-type img --source-directory /cis/project/clarity/data/clarity/isoCoronal
_____no_output_____
Apache-2.0
examples/Jupyter/ClarityViz Pipeline.ipynb
jonl1096/seelvizorg
Step 2: Then run this. This just converts the graphml into a plotly visualization
python runclarityviz.py --token Fear199Coronal --plotly yes
_____no_output_____
Apache-2.0
examples/Jupyter/ClarityViz Pipeline.ipynb
jonl1096/seelvizorg
Results
Starting pipeline for Fear199.img Generating Histogram... FINISHED GENERATING HISTOGRAM Loading: Fear199/Fear199localeq.nii Image Loaded: Fear199/Fear199localeq.nii FINISHED LOADING NII Coverting to points... token=Fear199 total=600735744 max=255.000000 threshold=0.300000 sample=0.500000 (This will take couple minutes) Above threshold=461409948 Samples=230718301 Finished FINISHED GETTING POINTS ~/clarityviztesting/Fear199Coronal$ ls Fear199Coronal.csv Fear199Coronal.graphml Fear199Coronal.nodes.csv Fear199Coronal.edges.csv Fear199Coronallocaleq.nii Fear199Coronalplotly.html
_____no_output_____
Apache-2.0
examples/Jupyter/ClarityViz Pipeline.ipynb
jonl1096/seelvizorg
Code runclarityviz.py:
from clarityviz import clarityviz import ... def get_args(): parser = argparse.ArgumentParser(description="Description") parser.add_argument("--token", type=str, required=True, help="The token.") parser.add_argument("--file-type", type=str, required=False, help="The file type.") parser.add_argument("--source-directory", type=str, required=False, help="Optional setting of the source directory.") parser.add_argument("--plotly", type=str, required=False, help="Optional method to generate the plotly graphs.") parser.add_argument("--generate-nii-from-csv", type=str, required=False, help="script to generate nii") args = parser.parse_args() return args def main(): print('ayyooooo') args = get_args() if args.plotly == 'yes': ## Type in the path to your csv file here thedata = np.genfromtxt(args.token + '/' + args.token + '.csv', delimiter=',', dtype='int', usecols = (0,1,2), names=['a','b','c']) trace1 = go.Scatter3d( x = thedata['a'], y = thedata['b'], z = thedata['c'], mode='markers', marker=dict( size=1.2, color='purple', # set color to an array/list of desired values colorscale='Viridis', # choose a colorscale opacity=0.15 ) ) data = [trace1] layout = go.Layout( margin=dict( l=0, r=0, b=0, t=0 ) ) fig = go.Figure(data=data, layout=layout) print args.token + "plotly" plotly.offline.plot(fig, filename= args.token + "/" + args.token + "plotly.html") else: print('Starting pipeline for %s' % (args.token + '.' + args.file_type)) if args.source_directory == None: c = clarityviz(args.token) else: c = clarityviz(args.token, args.source_directory) if args.file_type == 'img': #c.loadEqImg() c.generateHistogram() print('FINISHED GENERATING HISTOGRAM') c.loadNiiImg() print('FINISHED LOADING NII') elif args.file_type == 'nii': c.loadNiiImg() print('FINISHED LOADING NII') c.imgToPoints(0.3, 0.5) print("FINISHED GETTING POINTS") c.savePoints() c.plot3d() print("FINISHED PLOT3D") c.graphmlconvert() print("FINISHED GRAPHMLCONVERT") if __name__ == "__main__": main()
_____no_output_____
Apache-2.0
examples/Jupyter/ClarityViz Pipeline.ipynb
jonl1096/seelvizorg
clarityviz.py
def generateHistogram(self): print('Generating Histogram...') if self._source_directory == None: path = self._token + '.img' else: path = self._source_directory + "/" + self._token + ".img" im = nib.load(path) im = im.get_data() img = im[:,:,:] shape = im.shape #affine = im.get_affine() x_value = shape[0] y_value = shape[1] z_value = shape[2] ##################################################### imgflat = img.reshape(-1) #img_grey = np.array(imgflat * 255, dtype = np.uint8) #img_eq = exposure.equalize_hist(img_grey) #new_img = img_eq.reshape(x_value, y_value, z_value) #globaleq = nib.Nifti1Image(new_img, np.eye(4)) ###################################################### #clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8)) clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8)) img_grey = np.array(imgflat * 255, dtype = np.uint8) #threshed = cv2.adaptiveThreshold(img_grey, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, 0) cl1 = clahe.apply(img_grey) #cv2.imwrite('clahe_2.jpg',cl1) #cv2.startWindowThread() #cv2.namedWindow("adaptive") #cv2.imshow("adaptive", cl1) #cv2.imshow("adaptive", threshed) #plt.imshow(threshed) localimgflat = cl1 #cl1.reshape(-1) newer_img = localimgflat.reshape(x_value, y_value, z_value) localeq = nib.Nifti1Image(newer_img, np.eye(4)) nib.save(localeq, self._token + '/' + self._token + 'localeq.nii') def loadGeneratedNii(self, path=None, info=False): path = self._token + '/' + self._token + 'localeq.nii' print("Loading: %s"%(path)) #pathname = path+self._token+".nii" img = nib.load(path) if info: print(img) #self._img = img.get_data()[:,:,:,0] self._img = img.get_data() self._shape = self._img.shape self._max = np.max(self._img) print("Image Loaded: %s"%(path)) return self def imgToPoints(self, threshold=0.1, sample=0.5, optimize=True): """Method to extract points data from the img file.""" if not 0 <= threshold < 1: raise ValueError("Threshold should be within [0,1).") if not 0 < sample <= 1: raise ValueError("Sample rate should be within (0,1].") if self._img is None: raise ValueError("Img haven't loaded, please call loadImg() first.") total = self._shape[0]*self._shape[1]*self._shape[2] print("Coverting to points...\ntoken=%s\ntotal=%d\nmax=%f\nthreshold=%f\nsample=%f"\ %(self._token,total,self._max,threshold,sample)) print("(This will take couple minutes)") # threshold filt = self._img > threshold * self._max x, y, z = np.where(filt) v = self._img[filt] if optimize: self.discardImg() v = np.int16(255*(np.float32(v)/np.float32(self._max))) l = v.shape print("Above threshold=%d"%(l)) # sample if sample < 1.0: filt = np.random.random(size=l) < sample x = x[filt] y = y[filt] z = z[filt] v = v[filt] self._points = np.vstack([x,y,z,v]) self._points = np.transpose(self._points) print("Samples=%d"%(self._points.shape[0])) print("Finished") return self def plot3d(self, infile = None): """Method for plotting the Nodes and Edges""" filename = "" points_file = None if infile == None: points_file = self._points filename = self._token else: self.loadInitCsv(infile) infile = self._infile filename = self._filename # points is an array of arrays points = self._points outpath = self._token + '/' nodename = outpath + filename + '.nodes.csv' edgename = outpath + filename + '.edges.csv' with open(nodename, 'w') as nodefile: with open(edgename, 'w') as edgefile: for ind in range(len(points)): #temp = points[ind].strip().split(',') temp = points[ind] x = temp[0] y = temp[1] z = temp[2] v = temp[3] radius = 18 nodefile.write("s" + str(ind + 1) + "," + str(x) + "," + 
str(y) + "," + str(z) + "\n") for index in range(ind + 1, len(points)): tmp = points[index] distance = math.sqrt(math.pow(int(x) - int(tmp[0]), 2) + math.pow(int(y) - int(tmp[1]), 2) + math.pow(int(z) - int(tmp[2]), 2)) if distance < radius: edgefile.write("s" + str(ind + 1) + "," + "s" + str(index + 1) + "\n") self._nodefile = nodefile self._edgefile = edgefile def graphmlconvert(self, nodefilename = None, edgefilename = None): """Method for extracting the data to a graphml file, based on the node and edge files""" nodefile = None edgefile = None # If no nodefilename was entered, used the Clarity object's nodefile if nodefilename == None: #nodefile = self._nodefile #nodefile = open(self._nodefile, 'r') self.loadNodeCsv(self._token + "/" + self._token + ".nodes.csv") nodefile = self._nodefile else: self.loadNodeCsv(nodefilename) nodefile = self._nodefile # If no edgefilename was entered, used the Clarity object's edgefile if edgefilename == None: #edgefile = self._edgefile #edgefile = open(self._edgefile, 'r') self.loadEdgeCsv(self._token + "/" + self._token + ".edges.csv") edgefile = self._edgefile else: self.loadEdgeCsv(edgefilename) edgefile = self._edgefile # Start writing to the output graphml file path = self._token + "/" + self._token + ".graphml" with open(path, 'w') as outfile: outfile.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n") outfile.write("<graphml xmlns=\"http://graphml.graphdrawing.org/xmlns\"\n") outfile.write(" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n") outfile.write(" xsi:schemaLocation=\"http://graphml.graphdrawing.org/xmlns\n") outfile.write(" http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd\">\n") outfile.write(" <key id=\"d0\" for=\"node\" attr.name=\"attr\" attr.type=\"string\"/>\n") outfile.write(" <key id=\"e_weight\" for=\"edge\" attr.name=\"weight\" attr.type=\"double\"/>\n") outfile.write(" <graph id=\"G\" edgedefault=\"undirected\">\n") for line in nodefile: if len(line) == 0: continue line = line.strip().split(',') outfile.write(" <node id=\"" + line[0] + "\">\n") outfile.write(" <data key=\"d0\">[" + line[1] + ", " + line[2] + ", " + line[3] +"]</data>\n") outfile.write(" </node>\n") for line in edgefile: if len(line) == 0: continue line = line.strip().split(',') outfile.write(" <edge source=\"" + line[0] + "\" target=\"" + line[1] + "\">\n") outfile.write(" <data key=\"e_weight\">1</data>\n") outfile.write(" </edge>\n") outfile.write(" </graph>\n</graphml>") def graphmlToPlotly(self, path): ## Type in the path to your csv file here thedata = np.genfromtxt('../data/points/localeq.csv', delimiter=',', dtype='int', usecols = (0,1,2), names=['a','b','c']) trace1 = go.Scatter3d( x = thedata['a'], y = thedata['b'], z = thedata['c'], mode='markers', marker=dict( size=1.2, color='purple', # set color to an array/list of desired values colorscale='Viridis', # choose a colorscale opacity=0.15 ) ) data = [trace1] layout = go.Layout( margin=dict( l=0, r=0, b=0, t=0 ) ) fig = go.Figure(data=data, layout=layout) print "localeq" plotly.offline.plot(fig, filename= "localeq")
_____no_output_____
Apache-2.0
examples/Jupyter/ClarityViz Pipeline.ipynb
jonl1096/seelvizorg
Mixture Density Networks with Edward, Keras and TensorFlow. This notebook explains how to implement Mixture Density Networks (MDN) with Edward, Keras and TensorFlow. Keep in mind that if you want to use Keras and TensorFlow, like we do in this notebook, you need to set the backend of Keras to TensorFlow; [here](http://keras.io/backend/) it is explained how to do that. If you are not familiar with MDNs, have a look at the [following blog post](http://cbonnett.github.io/MDN.html) or at the original [paper](http://research.microsoft.com/en-us/um/people/cmbishop/downloads/Bishop-NCRG-94-004.pdf) by Bishop. Edward implements many probability distribution functions that are TensorFlow compatible, which makes it attractive to use Edward for MDNs. Here are all the distributions that are currently implemented in Edward; there are more to come:

1. [Bernoulli](https://github.com/blei-lab/edward/blob/master/edward/stats/distributions.pyL49)
2. [Beta](https://github.com/blei-lab/edward/blob/master/edward/stats/distributions.pyL58)
3. [Binomial](https://github.com/blei-lab/edward/blob/master/edward/stats/distributions.pyL68)
4. [Chi Squared](https://github.com/blei-lab/edward/blob/master/edward/stats/distributions.pyL79)
5. [Dirichlet](https://github.com/blei-lab/edward/blob/master/edward/stats/distributions.pyL89)
6. [Exponential](https://github.com/blei-lab/edward/blob/master/edward/stats/distributions.pyL109)
7. [Gamma](https://github.com/blei-lab/edward/blob/master/edward/stats/distributions.pyL118)
8. [Geometric](https://github.com/blei-lab/edward/blob/master/edward/stats/distributions.pyL129)
9. [Inverse Gamma](https://github.com/blei-lab/edward/blob/master/edward/stats/distributions.pyL138)
10. [log Normal](https://github.com/blei-lab/edward/blob/master/edward/stats/distributions.pyL155)
11. [Multinomial](https://github.com/blei-lab/edward/blob/master/edward/stats/distributions.pyL165)
12. [Multivariate Normal](https://github.com/blei-lab/edward/blob/master/edward/stats/distributions.pyL194)
13. [Negative Binomial](https://github.com/blei-lab/edward/blob/master/edward/stats/distributions.pyL283)
14. [Normal](https://github.com/blei-lab/edward/blob/master/edward/stats/distributions.pyL294)
15. [Poisson](https://github.com/blei-lab/edward/blob/master/edward/stats/distributions.pyL310)
16. [Student-t](https://github.com/blei-lab/edward/blob/master/edward/stats/distributions.pyL319)
17. [Truncated Normal](https://github.com/blei-lab/edward/blob/master/edward/stats/distributions.pyL333)
18. [Uniform](https://github.com/blei-lab/edward/blob/master/edward/stats/distributions.pyL352)

Let's start with the necessary imports.
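Before the code, it may help to write down the density an MDN models; this is the same mixture that appears in the `MixtureDensityNetwork` docstring further below, with $K$ components whose parameters are functions of the input $x$:

$$
p(y \mid x) = \sum_{k=1}^{K} \pi_k(x)\, \mathcal{N}\big(y;\, \mu_k(x),\, \sigma_k(x)\big), \qquad \pi_k(x) \ge 0, \quad \sum_{k=1}^{K} \pi_k(x) = 1.
$$

A neural network maps $x$ to the mixture weights $\pi_k$, means $\mu_k$ and standard deviations $\sigma_k$.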
# imports %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns import edward as ed import numpy as np import tensorflow as tf from edward.stats import norm # Normal distribution from Edward. from keras import backend as K from keras.layers import Dense from sklearn.cross_validation import train_test_split
_____no_output_____
Apache-2.0
docs/source/notebooks/MDN_Edward_Keras_TF.ipynb
caosenqi/Edward1
We will need some functions to plot the results later on; these are defined in the next code block.
from scipy.stats import norm as normal def plot_normal_mix(pis, mus, sigmas, ax, label='', comp=True): """ Plots the mixture of Normal models to axis=ax comp=True plots all components of mixture model """ x = np.linspace(-10.5, 10.5, 250) final = np.zeros_like(x) for i, (weight_mix, mu_mix, sigma_mix) in enumerate(zip(pis, mus, sigmas)): temp = normal.pdf(x, mu_mix, sigma_mix) * weight_mix final = final + temp if comp: ax.plot(x, temp, label='Normal ' + str(i)) ax.plot(x, final, label='Mixture of Normals ' + label) ax.legend(fontsize=13) def sample_from_mixture(x, pred_weights, pred_means, pred_std, amount): """ Draws samples from mixture model. Returns 2 d array with input X and sample from prediction of Mixture Model """ samples = np.zeros((amount, 2)) n_mix = len(pred_weights[0]) to_choose_from = np.arange(n_mix) for j,(weights, means, std_devs) in enumerate(zip(pred_weights, pred_means, pred_std)): index = np.random.choice(to_choose_from, p=weights) samples[j,1]= normal.rvs(means[index], std_devs[index], size=1) samples[j,0]= x[j] if j == amount -1: break return samples
_____no_output_____
Apache-2.0
docs/source/notebooks/MDN_Edward_Keras_TF.ipynb
caosenqi/Edward1
Making some toy-data to play with. This is the same toy-data problem set as used in the [blog post](http://blog.otoro.net/2015/11/24/mixture-density-networks-with-tensorflow/) by Otoro where he explains MDNs. This is an inverse problem: as you can see, for every ```X``` there are multiple ```y``` solutions.
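In equation form, the sampler in the next cell (with $\epsilon$ denoting the unit-variance Gaussian noise term) draws

$$
y \sim \mathcal{U}(-10.5,\, 10.5), \qquad \epsilon \sim \mathcal{N}(0, 1), \qquad x = 7\sin(0.75\,y) + 0.5\,y + \epsilon,
$$

and we then try to predict $y$ from $x$, which is what makes the problem multi-valued.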
def build_toy_dataset(nsample=40000): y_data = np.float32(np.random.uniform(-10.5, 10.5, (1, nsample))).T r_data = np.float32(np.random.normal(size=(nsample, 1))) # random noise x_data = np.float32(np.sin(0.75 * y_data) * 7.0 + y_data * 0.5 + r_data * 1.0) return train_test_split(x_data, y_data, random_state=42, train_size=0.1) X_train, X_test, y_train, y_test = build_toy_dataset() print("Size of features in training data: {:s}".format(X_train.shape)) print("Size of output in training data: {:s}".format(y_train.shape)) print("Size of features in test data: {:s}".format(X_test.shape)) print("Size of output in test data: {:s}".format(y_test.shape)) sns.regplot(X_train, y_train, fit_reg=False)
Size of features in training data: (4000, 1) Size of output in training data: (4000, 1) Size of features in test data: (36000, 1) Size of output in test data: (36000, 1)
Apache-2.0
docs/source/notebooks/MDN_Edward_Keras_TF.ipynb
caosenqi/Edward1
Building an MDN using Edward, Keras and TF. We will define a class that can be used to construct MDNs. In this notebook we will be using a mixture of Normal distributions. The advantage of defining a class is that we can easily reuse it to build other MDNs with different numbers of mixture components. Furthermore, this makes it play nicely with Edward.
class MixtureDensityNetwork: """ Mixture density network for outputs y on inputs x. p((x,y), (z,theta)) = sum_{k=1}^K pi_k(x; theta) Normal(y; mu_k(x; theta), sigma_k(x; theta)) where pi, mu, sigma are the output of a neural network taking x as input and with parameters theta. There are no latent variables z, which are hidden variables we aim to be Bayesian about. """ def __init__(self, K): self.K = K # here K is the amount of Mixtures def mapping(self, X): """pi, mu, sigma = NN(x; theta)""" hidden1 = Dense(15, activation='relu')(X) # fully-connected layer with 15 hidden units hidden2 = Dense(15, activation='relu')(hidden1) self.mus = Dense(self.K)(hidden2) # the means self.sigmas = Dense(self.K, activation=K.exp)(hidden2) # the variance self.pi = Dense(self.K, activation=K.softmax)(hidden2) # the mixture components def log_prob(self, xs, zs=None): """log p((xs,ys), (z,theta)) = sum_{n=1}^N log p((xs[n,:],ys[n]), theta)""" # Note there are no parameters we're being Bayesian about. The # parameters are baked into how we specify the neural networks. X, y = xs self.mapping(X) result = tf.exp(norm.logpdf(y, self.mus, self.sigmas)) result = tf.mul(result, self.pi) result = tf.reduce_sum(result, 1) result = tf.log(result) return tf.reduce_sum(result)
_____no_output_____
Apache-2.0
docs/source/notebooks/MDN_Edward_Keras_TF.ipynb
caosenqi/Edward1
We can set a seed in Edward so we can reproduce all the random components. The following line: ```ed.set_seed(42)``` sets the seed in Numpy and TensorFlow under the [hood](https://github.com/blei-lab/edward/blob/master/edward/util.pyL191). We use the class we defined above to instantiate the MDN with 20 mixtures; this can now be used as an Edward model.
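As a rough sketch of what the linked utility does (the exact Edward source may differ slightly), setting the seed amounts to seeding both global random number generators of the TF 1.x era:

```python
import numpy as np
import tensorflow as tf

# Approximate equivalent of ed.set_seed(42):
# seed NumPy's global RNG and TensorFlow's graph-level RNG.
np.random.seed(42)
tf.set_random_seed(42)
```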
ed.set_seed(42) model = MixtureDensityNetwork(20)
_____no_output_____
Apache-2.0
docs/source/notebooks/MDN_Edward_Keras_TF.ipynb
caosenqi/Edward1
In the following code cell we define the TensorFlow placeholders that are then used to define the Edward data model. The following line passes the ```model``` and ```data``` to ```MAP``` from Edward, which is then used to initialise the TensorFlow variables.

```inference = ed.MAP(model, data)```

MAP is a Bayesian concept and stands for Maximum A Posteriori; it tries to find the set of parameters which maximizes the posterior distribution. In the example here we don't have a prior, which in a Bayesian context means we have a flat prior. For a flat prior, MAP is equivalent to Maximum Likelihood Estimation. Edward is designed to be Bayesian about its statistical inference. The cool thing about MDNs with Edward is that we could easily include priors!
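To make the MAP remark concrete: with data $\mathcal{D}$ and parameters $\theta$,

$$
\hat{\theta}_{\text{MAP}} = \arg\max_{\theta} \big[ \log p(\mathcal{D} \mid \theta) + \log p(\theta) \big],
$$

and with a flat prior the $\log p(\theta)$ term is constant, so the MAP estimate coincides with the maximum likelihood estimate $\arg\max_{\theta} \log p(\mathcal{D} \mid \theta)$.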
X = tf.placeholder(tf.float32, shape=(None, 1)) y = tf.placeholder(tf.float32, shape=(None, 1)) data = ed.Data([X, y]) # Make Edward Data model inference = ed.MAP(model, data) # Make the inference model sess = tf.Session() # Start TF session K.set_session(sess) # Pass session info to Keras inference.initialize(sess=sess) # Initialize all TF variables using the Edward interface
_____no_output_____
Apache-2.0
docs/source/notebooks/MDN_Edward_Keras_TF.ipynb
caosenqi/Edward1
Having done that, we can train the MDN in TensorFlow just like we normally would, and we can get out the predictions we are interested in from ```model```, in this case:

* ```model.pi``` the mixture components,
* ```model.mus``` the means,
* ```model.sigmas``` the standard deviations.

This is done in the last line of the code cell: ```pred_weights, pred_means, pred_std = sess.run([model.pi, model.mus, model.sigmas], feed_dict={X: X_test})``` The default minimisation technique used is ADAM with a decaying scale factor. This can be seen [here](https://github.com/blei-lab/edward/blob/master/edward/inferences.pyL94) in the code base of Edward. Having a decaying scale factor is not the standard way of using ADAM; this is inspired by the Automatic Differentiation Variational Inference [(ADVI)](http://arxiv.org/abs/1603.00788) work, where it was used in the RMSPROP minimizer. The loss that is minimised in the ```MAP``` model from Edward is the negative log-likelihood; this calculation uses the ```log_prob``` method in the ```MixtureDensityNetwork``` class we defined above. The ```build_loss``` method in the ```MAP``` class can be found [here](https://github.com/blei-lab/edward/blob/master/edward/inferences.pyL396). However, the method ```inference.loss``` used below returns the log-likelihood, so we expect this quantity to be maximized.
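Written out, the quantity being maximized (the negative of the training loss) is the total log-likelihood computed by the `log_prob` method above:

$$
\log p(\mathcal{D} \mid \theta) = \sum_{n=1}^{N} \log \sum_{k=1}^{K} \pi_k(x_n)\, \mathcal{N}\big(y_n;\, \mu_k(x_n),\, \sigma_k(x_n)\big).
$$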
NEPOCH = 1000 train_loss = np.zeros(NEPOCH) test_loss = np.zeros(NEPOCH) for i in range(NEPOCH): _, train_loss[i] = sess.run([inference.train, inference.loss], feed_dict={X: X_train, y: y_train}) test_loss[i] = sess.run(inference.loss, feed_dict={X: X_test, y: y_test}) pred_weights, pred_means, pred_std = sess.run([model.pi, model.mus, model.sigmas], feed_dict={X: X_test})
_____no_output_____
Apache-2.0
docs/source/notebooks/MDN_Edward_Keras_TF.ipynb
caosenqi/Edward1
We can plot the log-likelihood of the training and test sample as a function of training epoch. Keep in mind that ```inference.loss``` returns the total log-likelihood, not the log-likelihood per data point, so in the plotting routine we divide by the size of the train and test data respectively. We see that it converges after 400 training steps.
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(16, 3.5)) plt.plot(np.arange(NEPOCH), test_loss/len(X_test), label='Test') plt.plot(np.arange(NEPOCH), train_loss/len(X_train), label='Train') plt.legend(fontsize=20) plt.xlabel('Epoch', fontsize=15) plt.ylabel('Log-likelihood', fontsize=15)
_____no_output_____
Apache-2.0
docs/source/notebooks/MDN_Edward_Keras_TF.ipynb
caosenqi/Edward1
Next we can have a look at how some individual examples perform. Keep in mind this is an inverse problem, so we can't get the answer exactly right; we can only hope that the truth lies in an area where the model has high probability. In the next plot the truth is the vertical grey line while the blue line is the prediction of the mixture density network. As you can see, we didn't do too badly.
obj = [0, 4, 6] fig, axes = plt.subplots(nrows=3, ncols=1, figsize=(16, 6)) plot_normal_mix(pred_weights[obj][0], pred_means[obj][0], pred_std[obj][0], axes[0], comp=False) axes[0].axvline(x=y_test[obj][0], color='black', alpha=0.5) plot_normal_mix(pred_weights[obj][2], pred_means[obj][2], pred_std[obj][2], axes[1], comp=False) axes[1].axvline(x=y_test[obj][2], color='black', alpha=0.5) plot_normal_mix(pred_weights[obj][1], pred_means[obj][1], pred_std[obj][1], axes[2], comp=False) axes[2].axvline(x=y_test[obj][1], color='black', alpha=0.5)
_____no_output_____
Apache-2.0
docs/source/notebooks/MDN_Edward_Keras_TF.ipynb
caosenqi/Edward1
We can check the ensemble by drawing samples from the predicted mixtures and plotting the density of those. It seems the MDN learned what it needed to.
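The sampling inside `sample_from_mixture` is plain ancestral sampling: for each test input $x$, first pick a component, then draw from it,

$$
k \sim \mathrm{Categorical}\big(\pi_1(x), \ldots, \pi_K(x)\big), \qquad y \sim \mathcal{N}\big(\mu_k(x),\, \sigma_k(x)\big).
$$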
a = sample_from_mixture(X_test, pred_weights, pred_means, pred_std, amount=len(X_test)) sns.jointplot(a[:,0], a[:,1], kind="hex", color="#4CB391", ylim=(-10,10), xlim=(-14,14))
_____no_output_____
Apache-2.0
docs/source/notebooks/MDN_Edward_Keras_TF.ipynb
caosenqi/Edward1
Importing Required Libraries
from pyspark.sql import SparkSession from pyspark.sql import functions as F
_____no_output_____
MIT
Data Cleaning with PySpark.ipynb
raziiq/python-pyspark-data-cleaning
Getting Spark Session
spark = SparkSession.builder.getOrCreate()
_____no_output_____
MIT
Data Cleaning with PySpark.ipynb
raziiq/python-pyspark-data-cleaning
Reading CSV
df = spark.read.csv("Big_Cities_Health_Data_Inventory.csv", header=True) df.show(10)
+------------------+--------------------+----+------+---------------+-----+--------------------+--------------------------+--------------------+-------+-----+ |Indicator Category| Indicator|Year|Gender|Race/ Ethnicity|Value| Place|BCHC Requested Methodology| Source|Methods|Notes| +------------------+--------------------+----+------+---------------+-----+--------------------+--------------------------+--------------------+-------+-----+ | HIV/AIDS|AIDS Diagnoses Ra...|2013| Both| All| 30.4|Atlanta (Fulton C...| AIDS cases diagno...|Diagnoses numbers...| null| null| | HIV/AIDS|AIDS Diagnoses Ra...|2012| Both| All| 39.6|Atlanta (Fulton C...| AIDS cases diagno...|Diagnoses numbers...| null| null| | HIV/AIDS|AIDS Diagnoses Ra...|2011| Both| All| 41.7|Atlanta (Fulton C...| AIDS cases diagno...|Diagnoses numbers...| null| null| | Cancer|All Types of Canc...|2013| Male| All|195.8|Atlanta (Fulton C...| 2012, 2013, 2014;...|National Center f...| null| null| | Cancer|All Types of Canc...|2013|Female| All|135.5|Atlanta (Fulton C...| 2012, 2013, 2014;...|National Center f...| null| null| | Cancer|All Types of Canc...|2013| Both| All|159.3|Atlanta (Fulton C...| 2012, 2013, 2014;...|National Center f...| null| null| | Cancer|All Types of Canc...|2012| Male| All|199.2|Atlanta (Fulton C...| 2012, 2013, 2014;...|National Center f...| null| null| | Cancer|All Types of Canc...|2012|Female| All|137.6|Atlanta (Fulton C...| 2012, 2013, 2014;...|National Center f...| null| null| | Cancer|All Types of Canc...|2012| Both| All|160.3|Atlanta (Fulton C...| 2012, 2013, 2014;...|National Center f...| null| null| | Cancer|All Types of Canc...|2011| Male| All|196.2|Atlanta (Fulton C...| 2012, 2013, 2014;...|National Center f...| null| null| +------------------+--------------------+----+------+---------------+-----+--------------------+--------------------------+--------------------+-------+-----+ only showing top 10 rows
MIT
Data Cleaning with PySpark.ipynb
raziiq/python-pyspark-data-cleaning
Printing Schema
df.printSchema()
root |-- Indicator Category: string (nullable = true) |-- Indicator: string (nullable = true) |-- Year: string (nullable = true) |-- Gender: string (nullable = true) |-- Race/ Ethnicity: string (nullable = true) |-- Value: string (nullable = true) |-- Place: string (nullable = true) |-- BCHC Requested Methodology: string (nullable = true) |-- Source: string (nullable = true) |-- Methods: string (nullable = true) |-- Notes: string (nullable = true)
MIT
Data Cleaning with PySpark.ipynb
raziiq/python-pyspark-data-cleaning
Dropping Unwanted Columns
df = df.drop("Notes", "Methods", "Source", "BCHC Requested Methodology") df.printSchema()
root |-- Indicator Category: string (nullable = true) |-- Indicator: string (nullable = true) |-- Year: string (nullable = true) |-- Gender: string (nullable = true) |-- Race/ Ethnicity: string (nullable = true) |-- Value: string (nullable = true) |-- Place: string (nullable = true)
MIT
Data Cleaning with PySpark.ipynb
raziiq/python-pyspark-data-cleaning
Counting Null Values
df.select([F.count(F.when(F.isnan(c) | F.col(c).isNull(), c)).alias(c) for c in df.columns]).show()
+------------------+---------+----+------+---------------+-----+-----+ |Indicator Category|Indicator|Year|Gender|Race/ Ethnicity|Value|Place| +------------------+---------+----+------+---------------+-----+-----+ | 0| 28| 28| 218| 212| 231| 218| +------------------+---------+----+------+---------------+-----+-----+
MIT
Data Cleaning with PySpark.ipynb
raziiq/python-pyspark-data-cleaning
Since there are several null values in the columns, as shown in the table above, the first step is to remove or replace the null values in each column. Working with Null Values
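For the "replace" option mentioned above, PySpark's `DataFrame.na.fill` / `fillna` is the usual route; below is a minimal sketch with hypothetical fill values (the notebook ultimately drops these rows instead, as shown further down):

```python
# Hypothetical imputation sketch (all columns are strings in this schema):
# fill categorical-looking columns with a placeholder and numeric-looking ones with "0".
df_filled = df.na.fill("Unknown", subset=["Indicator", "Gender", "Race/ Ethnicity", "Place"])
df_filled = df_filled.na.fill({"Year": "0", "Value": "0"})

# Re-check the null counts after filling.
df_filled.select([F.count(F.when(F.col(c).isNull(), c)).alias(c) for c in df_filled.columns]).show()
```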
df.filter(df["Indicator"].isNull()).show(28)
+--------------------+---------+----+------+---------------+-----+-----+ | Indicator Category|Indicator|Year|Gender|Race/ Ethnicity|Value|Place| +--------------------+---------+----+------+---------------+-----+-----+ | FOR THE POPULATI...| null|null| null| null| null| null| | 12 MONTHS (S1701)"| null|null| null| null| null| null| | (S1701)"| null|null| null| null| null| null| | (S1701)"| null|null| null| null| null| null| |from the flu shot...| null|null| null| null| null| null| |from the flu shot...| null|null| null| null| null| null| |from the flu shot...| null|null| null| null| null| null| |from the flu shot...| null|null| null| null| null| null| |from the flu shot...| null|null| null| null| null| null| |from the flu shot...| null|null| null| null| null| null| |from the flu shot...| null|null| null| null| null| null| |(percent of respo...| null|null| null| null| null| null| |(percent of respo...| null|null| null| null| null| null| |(percent of respo...| null|null| null| null| null| null| | your nose?"" "| null|null| null| null| null| null| | your nose?"" "| null|null| null| null| null| null| | your nose?"" "| null|null| null| null| null| null| | your nose?"" "| null|null| null| null| null| null| | your nose?"" "| null|null| null| null| null| null| | your nose?"" "| null|null| null| null| null| null| | your nose?"" "| null|null| null| null| null| null| |(percent of respo...| null|null| null| null| null| null| |(see note above a...| null|null| null| null| null| null| |(see note above a...| null|null| null| null| null| null| |(see note above a...| null|null| null| null| null| null| |(see note above a...| null|null| null| null| null| null| |(see note above a...| null|null| null| null| null| null| |(see note above a...| null|null| null| null| null| null| +--------------------+---------+----+------+---------------+-----+-----+
MIT
Data Cleaning with PySpark.ipynb
raziiq/python-pyspark-data-cleaning
Since all the rows that have null values in Indicator also have null values in the other columns (Year, Gender, Race, etc.), it would be better to remove these observations
# Counting total number of rows in the dataset to compare with the rows after null value rows are removed. rows_count_pre = df.count() print("Total number of rows before deleting: ",rows_count_pre) # deleting all the rows where there are null values in the columns mentioned below df = df.na.drop(subset=["Indicator", "Year", "Gender", "Race/ Ethnicity", "Value", "Place"]) rows_count_post = df.count() print("Total number of rows after deleting: ",rows_count_post) total_rows_removed = rows_count_pre - rows_count_post print("Total number of rows deleted: ", total_rows_removed) #Checking the null values again to see if the dataset is clean df.select([F.count(F.when(F.isnan(c) | F.col(c).isNull(), c)).alias(c) for c in df.columns]).show()
+------------------+---------+----+------+---------------+-----+-----+ |Indicator Category|Indicator|Year|Gender|Race/ Ethnicity|Value|Place| +------------------+---------+----+------+---------------+-----+-----+ | 0| 0| 0| 0| 0| 0| 0| +------------------+---------+----+------+---------------+-----+-----+
MIT
Data Cleaning with PySpark.ipynb
raziiq/python-pyspark-data-cleaning
The results above show that all the rows with null values have been deleted from the dataset. This completes the step of removing the null values. Splitting the Place Column into City and State Columns
split_col = F.split(df["Place"], ',')
df = df.withColumn("City_County", split_col.getItem(0))
df = df.withColumn("State", split_col.getItem(1))
df.select("City_County", "State").show(truncate=False)

# Create a user-defined function (UDF) to extract the city name from the
# City_County column by stripping the parenthesised county part with a regex.
import re
def extract_city(city_str):
    result = re.sub(r'\([^)]*\)', '', city_str)
    return result

from pyspark.sql.types import StringType
udfExtract = F.udf(extract_city, StringType())
df = df.withColumn("City", udfExtract(df["City_County"]))
df.select("City", "State").show(truncate=False)
+--------+-----+ |City |State| +--------+-----+ |Atlanta | GA | |Atlanta | GA | |Atlanta | GA | |Atlanta | GA | |Atlanta | GA | |Atlanta | GA | |Atlanta | GA | |Atlanta | GA | |Atlanta | GA | |Atlanta | GA | |Atlanta | GA | |Atlanta | GA | |Atlanta | GA | |Atlanta | GA | |Atlanta | GA | |Atlanta | GA | |Atlanta | GA | |Atlanta | GA | |Atlanta | GA | |Atlanta | GA | +--------+-----+ only showing top 20 rows
MIT
Data Cleaning with PySpark.ipynb
raziiq/python-pyspark-data-cleaning
This sums up the cleaning process of data using PySpark. Below is the final state of the dataset
df.show()
+--------------------+--------------------+----+------+---------------+-----+--------------------+--------------------+-----+--------+ | Indicator Category| Indicator|Year|Gender|Race/ Ethnicity|Value| Place| City_County|State| City| +--------------------+--------------------+----+------+---------------+-----+--------------------+--------------------+-----+--------+ | HIV/AIDS|AIDS Diagnoses Ra...|2013| Both| All| 30.4|Atlanta (Fulton C...|Atlanta (Fulton C...| GA|Atlanta | | HIV/AIDS|AIDS Diagnoses Ra...|2012| Both| All| 39.6|Atlanta (Fulton C...|Atlanta (Fulton C...| GA|Atlanta | | HIV/AIDS|AIDS Diagnoses Ra...|2011| Both| All| 41.7|Atlanta (Fulton C...|Atlanta (Fulton C...| GA|Atlanta | | Cancer|All Types of Canc...|2013| Male| All|195.8|Atlanta (Fulton C...|Atlanta (Fulton C...| GA|Atlanta | | Cancer|All Types of Canc...|2013|Female| All|135.5|Atlanta (Fulton C...|Atlanta (Fulton C...| GA|Atlanta | | Cancer|All Types of Canc...|2013| Both| All|159.3|Atlanta (Fulton C...|Atlanta (Fulton C...| GA|Atlanta | | Cancer|All Types of Canc...|2012| Male| All|199.2|Atlanta (Fulton C...|Atlanta (Fulton C...| GA|Atlanta | | Cancer|All Types of Canc...|2012|Female| All|137.6|Atlanta (Fulton C...|Atlanta (Fulton C...| GA|Atlanta | | Cancer|All Types of Canc...|2012| Both| All|160.3|Atlanta (Fulton C...|Atlanta (Fulton C...| GA|Atlanta | | Cancer|All Types of Canc...|2011| Male| All|196.2|Atlanta (Fulton C...|Atlanta (Fulton C...| GA|Atlanta | | Cancer|All Types of Canc...|2011|Female| All|147.0|Atlanta (Fulton C...|Atlanta (Fulton C...| GA|Atlanta | | Cancer|All Types of Canc...|2011| Both| All|165.2|Atlanta (Fulton C...|Atlanta (Fulton C...| GA|Atlanta | | Cancer|All Types of Canc...|2013| Both| Black|208.3|Atlanta (Fulton C...|Atlanta (Fulton C...| GA|Atlanta | | Cancer|All Types of Canc...|2012| Both| Black|202.7|Atlanta (Fulton C...|Atlanta (Fulton C...| GA|Atlanta | |Maternal and Chil...|Infant Mortality ...|2012| Both| White| 4.5|Atlanta (Fulton C...|Atlanta (Fulton C...| GA|Atlanta | | Cancer|All Types of Canc...|2011| Both| Black|216.0|Atlanta (Fulton C...|Atlanta (Fulton C...| GA|Atlanta | | Cancer|All Types of Canc...|2013| Both| White|128.8|Atlanta (Fulton C...|Atlanta (Fulton C...| GA|Atlanta | | Cancer|All Types of Canc...|2012| Both| White|133.7|Atlanta (Fulton C...|Atlanta (Fulton C...| GA|Atlanta | | Cancer|All Types of Canc...|2011| Both| White|132.0|Atlanta (Fulton C...|Atlanta (Fulton C...| GA|Atlanta | |Life Expectancy a...|All-Cause Mortali...|2012|Female| All|578.4|Atlanta (Fulton C...|Atlanta (Fulton C...| GA|Atlanta | +--------------------+--------------------+----+------+---------------+-----+--------------------+--------------------+-----+--------+ only showing top 20 rows
MIT
Data Cleaning with PySpark.ipynb
raziiq/python-pyspark-data-cleaning
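As the output shows, the State values keep a leading space from the comma split, and Place and City_County are now redundant. A possible final tidy-up, an assumption rather than part of the original notebook:

from pyspark.sql import functions as F  # assumed to be imported as F, as in the earlier cells

# Trim the whitespace left by the split and the UDF, then drop the columns no longer needed
df = df.withColumn("State", F.trim(df["State"]))
df = df.withColumn("City", F.trim(df["City"]))
df = df.drop("Place", "City_County")
df.select("City", "State").show(truncate=False)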
Exploratory Data Analysis

* Dataset taken from https://github.com/Tariq60/LIAR-PLUS

1. Import Libraries
import os

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

TRAIN_PATH = "../data/raw/dataset/tsv/train2.tsv"
VAL_PATH = "../data/raw/dataset/tsv/val2.tsv"
TEST_PATH = "../data/raw/dataset/tsv/test2.tsv"

columns = ["id", "statement_json", "label", "statement", "subject", "speaker",
           "speaker_title", "state_info", "party_affiliation", "barely_true_count",
           "false_count", "half_true_count", "mostly_true_count", "pants_fire_count",
           "context", "justification"]
_____no_output_____
MIT
notebooks/eda-notebook.ipynb
archity/fake-news
2. Read the dataset
train_df = pd.read_csv(TRAIN_PATH, sep="\t", names=columns)
val_df = pd.read_csv(VAL_PATH, sep="\t", names=columns)
test_df = pd.read_csv(TEST_PATH, sep="\t", names=columns)

print(f"Length of train set: {len(train_df)}")
print(f"Length of validation set: {len(val_df)}")
print(f"Length of test set: {len(test_df)}")

train_df.head()
_____no_output_____
MIT
notebooks/eda-notebook.ipynb
archity/fake-news
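Before cleaning, it can also help to look at the overall structure and class balance of the training split. A minimal sketch, not in the original notebook, assuming the train_df loaded above:

# Quick structural overview and class balance of the target column
train_df.info()
print(train_df["label"].value_counts())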
3. Data Cleaning

* Some of the most important columns are "label" and "statement".
* Now we should check if any of them have null values.
print("Do we have empty strings in `label`?") pd.isna(train_df["label"]).value_counts()
Do we have empty strings in `label`?
MIT
notebooks/eda-notebook.ipynb
archity/fake-news
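The same check can be run on both key columns in one go; a minimal sketch, assuming the column names listed above:

# Count missing values in the two key columns at once
print(train_df[["label", "statement"]].isna().sum())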
* 2 entries without any label
* What exactly are those 2 entries?
train_df.loc[pd.isna(train_df["label"]), :].index

train_df.loc[[2143]]

train_df.loc[[9377]]
_____no_output_____
MIT
notebooks/eda-notebook.ipynb
archity/fake-news
* All the columns of those 2 entries are blank
* Drop those 2 entries
train_df.dropna(subset=["label"], inplace=True)
len(train_df)
_____no_output_____
MIT
notebooks/eda-notebook.ipynb
archity/fake-news
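As a small sanity check, not part of the original notebook, one can confirm that no unlabeled rows remain after the dropna above:

# Should pass silently if the drop worked
assert train_df["label"].isna().sum() == 0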
4. Some Feature Analysis

4.1 Party Affiliation
print(train_df["party_affiliation"].value_counts()) if not os.path.exists("./img"): os.makedirs("./img") fig = plt.figure(figsize=(10, 6)) party_affil_plot = train_df["party_affiliation"].value_counts().plot.bar() plt.tight_layout(pad=1) plt.savefig("img/party_affil_plot.png", dpi=200)
republican 4497 democrat 3336 none 1744 organization 219 independent 147 newsmaker 56 libertarian 40 activist 39 journalist 38 columnist 35 talk-show-host 26 state-official 20 labor-leader 11 tea-party-member 10 business-leader 9 green 3 education-official 2 liberal-party-canada 1 government-body 1 Moderate 1 democratic-farmer-labor 1 ocean-state-tea-party-action 1 constitution-party 1 Name: party_affiliation, dtype: int64
MIT
notebooks/eda-notebook.ipynb
archity/fake-news
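The value counts above show a long tail of parties with only a handful of rows. A possible variation, an assumption rather than the notebook's own code, is to collapse the rare categories before plotting; the threshold of 30 rows is arbitrary:

# Group parties with fewer than 30 rows into "other" for a more readable bar chart
counts = train_df["party_affiliation"].value_counts()
rare = list(counts[counts < 30].index)
collapsed = train_df["party_affiliation"].replace(rare, "other").value_counts()

fig = plt.figure(figsize=(10, 6))
collapsed.plot.bar()
plt.tight_layout(pad=1)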
4.2 State Stats
print(train_df["state_info"].value_counts()) fig = plt.figure(figsize=(10, 6)) state_info_plot = train_df["state_info"].value_counts().plot.bar() plt.tight_layout(pad=1) plt.savefig("img/state_info_plot.png", dpi=200)
Texas 1009 Florida 997 Wisconsin 713 New York 657 Illinois 556 ... Qatar 1 Virginia 1 United Kingdom 1 China 1 Rhode Island 1 Name: state_info, Length: 84, dtype: int64
MIT
notebooks/eda-notebook.ipynb
archity/fake-news
* Apparently, we have a state_info entry with the value "Virginia director, Coalition to Stop Gun Violence". It should be replaced with just "Virginia" (a pattern-based alternative is sketched after this cell).
train_df[train_df["state_info"]=="Virginia director, Coalition to Stop Gun Violence"] indx = train_df[train_df["state_info"]=="Virginia director, Coalition to Stop Gun Violence"].index[0] train_df.loc[indx, "state_info"] = "Virginia" fig = plt.figure(figsize=(10, 6)) state_info_plot = train_df["state_info"].value_counts().plot.bar() plt.tight_layout(pad=1) plt.savefig("img/state_info_plot.png", dpi=200)
_____no_output_____
MIT
notebooks/eda-notebook.ipynb
archity/fake-news
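A minimal sketch of the pattern-based fix mentioned above, an assumption rather than the notebook's own code, which avoids hard-coding the row index in case the file is regenerated and indices shift:

# Replace any state_info value starting with "Virginia director" without
# relying on the hard-coded positional index used above.
mask = train_df["state_info"].str.startswith("Virginia director", na=False)
train_df.loc[mask, "state_info"] = "Virginia"
train_df["state_info"].value_counts().head()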