It would be nice to draw a bar chart showing the number of sides that each shape has. But drawing all those bars and axes would take quite a lot of code. Don't worry - someone has already done this, and we can use the code that they have written using the **import** command.
import matplotlib.pyplot as plt
%matplotlib inline
(`%matplotlib inline` is a Jupyter notebook command that makes the plots we create appear right here in our notebook.) Now we can plot our bar chart using only three lines of code:
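The lists `shapes` and `sides` were built earlier in the notebook. If you are running just this excerpt, hypothetical values like these will do:

shapes = ["triangle", "square", "pentagon", "hexagon"]  # hypothetical example data
sides = [3, 4, 5, 6]                                    # number of sides for each shape above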
plt.bar(shapes,sides)
plt.xlabel("Shape")
plt.ylabel("Number of sides");
Data Visualization
%matplotlib inline
import torch as pt
import matplotlib.pyplot as plt
x = pt.linspace(0, 10, 100)
fig = plt.figure()
plt.plot(x, pt.sin(x), '-')
plt.plot(x, pt.cos(x), '--')
plt.show() # not needed in a notebook, but needed when running as a script
You can save your plots...
fig.savefig('my_figure.png')
!ls -lh my_figure.png
# On Windows, comment out the line above and uncomment the one below
#!dir my_figure.png
...and reload saved images for display inside the notebook
from IPython.display import Image, display
display(Image('my_figure.png'))  # display() renders the image even when it's not the last expression
# matplotlib supports many different file types
fig.canvas.get_supported_filetypes()
MATLAB-Style Interface
plt.figure() # create a plot figure
# create the first of two panels and set current axis
plt.subplot(2, 1, 1) # (rows, columns, panel number)
plt.plot(x, pt.sin(x))
# create the second panel and set current axis
plt.subplot(2, 1, 2)
plt.plot(x, pt.cos(x));
Grids
plt.style.use('seaborn-v0_8-whitegrid')  # named 'seaborn-whitegrid' on matplotlib < 3.6
fig = plt.figure()
ax = plt.axes()
Draw a Function
plt.style.use('seaborn-v0_8-whitegrid')  # named 'seaborn-whitegrid' on matplotlib < 3.6
fig = plt.figure()
ax = plt.axes()
x = pt.linspace(0, 10, 1000)
ax.plot(x, pt.sin(x));
Specify axes limits...
plt.plot(x, pt.sin(x))
plt.xlim(-1, 11)
plt.ylim(-1.5, 1.5);
Flipping the Axes Limits
plt.plot(x, pt.sin(x))
plt.xlim(10, 0)
plt.ylim(1.2, -1.2);
Axis
plt.plot(x, pt.sin(x))
plt.axis([-1, 11, -1.5, 1.5]);
...or let matplotlib "tighten" the axes...
plt.plot(x, pt.sin(x))
plt.axis('tight');
...or make the limits equal
plt.plot(x, pt.sin(x))
plt.axis('equal');
Add titles and axis labels
plt.plot(x, pt.sin(x))
plt.title("A Sine Curve")
plt.xlabel("x")
plt.ylabel("sin(x)");
...and a legend
plt.plot(x, pt.sin(x), '-g', label='sin(x)')
plt.plot(x, pt.cos(x), ':b', label='cos(x)')
plt.axis('equal')
plt.legend();
Object-Oriented Interface
# First create a grid of plots
# ax will be an array of two Axes objects
fig, ax = plt.subplots(2)
# Call plot() method on the appropriate object
ax[0].plot(x, pt.sin(x))
ax[1].plot(x, pt.cos(x));
OO interface to axes
ax = plt.axes()
ax.plot(x, pt.sin(x))
ax.set(xlim=(0, 10), ylim=(-2, 2),
xlabel='x', ylabel='sin(x)',
title='A Simple Plot');
Interface Differences

| MATLAB-Style | OO Style |
|--------------|-----------------|
| plt.xlabel() | ax.set_xlabel() |
| plt.ylabel() | ax.set_ylabel() |
| plt.xlim()   | ax.set_xlim()   |
| plt.ylim()   | ax.set_ylim()   |
| plt.title()  | ax.set_title()  |
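A minimal sketch (not from the original notebook) contrasting the two rows of the table on the same task:

# MATLAB-style: stateful calls act on the "current" axes
plt.figure()
plt.plot([0, 1], [0, 1])
plt.xlabel("x")
plt.title("MATLAB-style")

# OO style: methods on an explicit Axes object
fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
ax.set_xlabel("x")
ax.set_title("OO style")

Custom legends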
x = pt.linspace(0, 10, 1000)
plt.style.use('classic')
plt.rc('xtick', labelsize=20)
plt.rc('ytick', labelsize=20)
fig, ax = plt.subplots(figsize=(12, 6))  # create figure and axes together (a separate plt.figure() would go unused)
ax.plot(x, pt.sin(x), '-b', label='Sine')
ax.plot(x, pt.cos(x), '--r', label='Cosine')
ax.axis('equal')
leg = ax.legend()
ax.legend(loc='upper left', frameon=False)
fig
ax.legend(frameon=False, loc='lower center', ncol=2)
fig
Many ways to specify color...
plt.plot(x, pt.sin(x - 0), color='blue') # specify color by name
plt.plot(x, pt.sin(x - 1), color='g') # short color code (rgbcmyk)
plt.plot(x, pt.sin(x - 2), color='0.75') # Grayscale between 0 and 1
plt.plot(x, pt.sin(x - 3), color='#FFDD44') # Hex code (RRGGBB from 00 to FF)
plt.plot(x, pt.sin(x - 4), color=(1.0,0.2,0.3)) # RGB tuple, values 0 to 1
plt.plot(x, pt.sin(x - 5), color='chartreuse'); # all HTML color names supported
Specifying different line styles...
plt.plot(x, x + 0, linestyle='solid')
plt.plot(x, x + 1, linestyle='dashed')
plt.plot(x, x + 2, linestyle='dashdot')
plt.plot(x, x + 3, linestyle='dotted');
# For short, you can use the following codes:
plt.plot(x, x + 4, linestyle='-') # solid
plt.plot(x, x + 5, linestyle='--') # dashed
plt.plot(x, x + 6, linestyle='-.') # dashdot
plt.plot(x, x + 7, linestyle=':'); # dotted
Specify different plot markers
rnd1 = pt.Generator().manual_seed(0)  # independent generators; pt.manual_seed() would just reseed the global one twice
rnd2 = pt.Generator().manual_seed(1)
for marker in 'o.,x+v^<>sd':
    plt.plot(pt.rand(5, generator=rnd1), pt.rand(5, generator=rnd2), marker,
             label='marker={}'.format(marker))
plt.legend(numpoints=1)
plt.xlim(0, 1.8);
Scatterplots with Colors and Sizes
pt.manual_seed(0);
x = pt.randn(100)
y = pt.randn(100)
colors = pt.rand(100)
sizes = 1000 * pt.rand(100)
plt.scatter(x, y, c=colors, s=sizes, alpha=0.3,
cmap='viridis')
plt.colorbar(); # show color scale
Visualizing Multiple Dimensions
from sklearn.datasets import load_iris
iris = load_iris()
features = iris.data.T
plt.scatter(features[0], features[1], alpha=0.2,
s=100*features[3], c=iris.target, cmap='viridis')
plt.xlabel(iris.feature_names[0])
plt.ylabel(iris.feature_names[1]);
Histograms
data = pt.randn(10000)
plt.hist(data);
plt.hist(data, bins=30, alpha=0.5,
histtype='stepfilled', color='steelblue',
edgecolor='none')
Display a grid of images
# load images of the digits 0 through 5 and visualize several of them
from sklearn.datasets import load_digits
digits = load_digits(n_class=6)
fig, ax = plt.subplots(8, 8, figsize=(6, 6))
for i, axi in enumerate(ax.flat):
    axi.imshow(digits.images[i], cmap='binary')
    axi.set(xticks=[], yticks=[])
Classification MNIST
from sklearn.datasets import fetch_openml
mnist = fetch_openml('mnist_784', version=1, as_frame=False)  # as_frame=False returns NumPy arrays, so X[0] below works
mnist.keys()
X, y = mnist['data'], mnist['target']
X.shape, y.shape
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
some_digit = X[0]
some_digit_img = some_digit.reshape(28, 28)
plt.imshow(some_digit_img, cmap='binary')
plt.axis('off')
plt.show()
y[0]
import numpy as np
y = y.astype(np.uint8)
y[0]
# MNIST is already split into training and test set
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
Training a Binary Classifier
For a start, let's make a binary classifier that identifies a single digit: the digit 5.
y_train_5, y_test_5 = (y_train == 5), (y_test == 5)
from sklearn.linear_model import SGDClassifier
sgd_clf = SGDClassifier(random_state=42)
sgd_clf.fit(X_train, y_train_5)
sgd_clf.predict([some_digit])
Performance Measures
Measuring Accuracy Using Cross-Validation
Implementing Cross-Validation
The following code is roughly equivalent to *Scikit-Learn*'s function `cross_val_score`.
from sklearn.model_selection import StratifiedKFold
from sklearn.base import clone
skfolds = StratifiedKFold(n_splits=3, shuffle=True, random_state=42)
for train_ix, test_ix in skfolds.split(X_train, y_train_5):
    clone_clf = clone(sgd_clf)
    X_train_folds = X_train[train_ix]
    y_train_folds = y_train_5[train_ix]
    X_test_folds = X_train[test_ix]
    y_test_folds = y_train_5[test_ix]
    clone_clf.fit(X_train_folds, y_train_folds)
    y_pred = clone_clf.predict(X_test_folds)
    n_correct = np.sum(y_pred == y_test_folds)
    print(n_correct / len(y_pred))
from sklearn.model_selection import cross_val_score
cross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring='accuracy')
This seems pretty good! However, let's check a classifier that always classifies an image as **not 5**.
from sklearn.base import BaseEstimator
class Never5Classifier(BaseEstimator):
    def fit(self, X, y=None):
        return self
    def predict(self, X):
        return np.zeros((len(X), 1), dtype=bool)
never_5_clf = Never5Classifier()
cross_val_score(never_5_clf, X_train, y_train_5, cv=3, scoring='accuracy')
Over 90% accuracy! Well, the problem is that only about 10% of the whole dataset are images of 5s (there are 10 digits in total), hence the 90% accuracy.

Confusion Matrix
The idea of a *confusion matrix* is to count the number of times class A is classified as class B, and so on. To compute the confusion matrix one must first get predictions (here on the training set; let's keep the test set aside). We can take predictions from a cross-validation with `cross_val_predict` and pass them to `confusion_matrix`. For binary classification the confusion matrix looks like this:

|     | N  | P  |
|-----|----|----|
| N   | TN | FP |
| P   | FN | TP |

Rows are the *actual* class and columns are the *predicted* class; furthermore:
* *P* - *positive* (class)
* *N* - *negative* (class)
* *TN* - *true negative*
* *TP* - *true positive*
* *FN* - *false negative*
* *FP* - *false positive*
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix
y_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3)
confusion_matrix(y_train_5, y_train_pred)
y_train_perfect_predictions = y_train_5 # pretend we reached perfection
confusion_matrix(y_train_5, y_train_perfect_predictions)
Precision and Recall
**Precision** is the accuracy of positive predictions and is defined as $\text{precision} = \frac{TP}{TP + FP}$.
*A trivial way to ensure 100% precision is to make a single prediction and make sure it's correct.*
**Recall (sensitivity, true positive rate)** is the ratio of positive instances that are correctly detected and is defined as $\text{recall} = \frac{TP}{TP + FN}$.
An intuitive notion of precision and recall:
* *precision* - how often the predictor is correct when it predicts the positive class
* *recall* - how likely the predictor is to detect an instance of the positive class
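To make the definitions concrete, here is a small sketch computing both metrics from hypothetical confusion-matrix counts (the numbers are invented for illustration):

tn, fp, fn, tp = 53057, 1522, 1325, 4096  # hypothetical [[TN, FP], [FN, TP]] entries
precision = tp / (tp + fp)  # correctness among predicted positives, ~0.729
recall = tp / (tp + fn)     # coverage of actual positives, ~0.756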
from sklearn.metrics import precision_score, recall_score
precision = precision_score(y_train_5, y_train_pred)
recall = recall_score(y_train_5, y_train_pred)
precision, recall
Precision and recall are handy, but it's even better to have a single score based on which we can compare classifiers. The $\mathbf{F_1}$ score is the *harmonic mean* of precision and recall. The regular mean puts the same weight on all values; the harmonic mean gives much more importance to low values. So in order to have a high $F_1$ score, both precision and recall must be high.$$F_1 = \frac{2}{\frac{1}{\text{precision}} + \frac{1}{\text{recall}}} = 2 \times \frac{\text{precision} \times \text{recall}}{\text{precision} + \text{recall}} = \frac{TP}{TP + \frac{FN + FP}{2}}$$
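A quick numeric sketch (values invented) of why the harmonic mean punishes an imbalance that the regular mean hides:

p, r = 1.0, 0.01          # hypothetical: perfect precision, terrible recall
mean = (p + r) / 2        # 0.505 - looks deceptively decent
f1 = 2 * p * r / (p + r)  # ~0.0198 - the low recall dominates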
from sklearn.metrics import f1_score
f1_score(y_train_5, y_train_pred)
Precision/Recall Trade-off
*Increasing precision reduces recall, and vice versa.*
How does the classification work? The `SGDClassifier`, for instance, computes for each instance a score based on a *decision function*. If this score is greater than the *decision threshold*, it assigns the instance to the positive class. Shifting this threshold will likely result in a change in precision and recall.
y_scores = sgd_clf.decision_function([some_digit])
y_scores
def predict_some_digit(threshold):
    return (y_scores > threshold)
# Raising the threshold decreases recall
predict_some_digit(threshold=0), predict_some_digit(threshold=8000)
From the example above, increasing the decision threshold decreases recall (`some_digit` is actually a 5, and with the increased threshold it is no longer recognized). But how do we decide which threshold to use?
y_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3, method='decision_function')
from sklearn.metrics import precision_recall_curve
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
    plt.plot(thresholds, precisions[:-1], 'b--', label='Precision')
    plt.plot(thresholds, recalls[:-1], 'g-', label='Recall')
    plt.xlabel('Threshold')
    plt.legend(loc='center right', fontsize=16)
    plt.grid(True)
    plt.axis([-50000, 50000, 0, 1])
precisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores)
recall_90_precision = recalls[np.argmax(precisions >= 0.9)]
threshold_90_precision = thresholds[np.argmax(precisions >= 0.9)]
plt.figure(figsize=(8, 4))
# plot precision and recall curves vs decision threshold
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
# plot threshold corresponding to 90% precision
plt.plot([threshold_90_precision, threshold_90_precision], [0., 0.9], 'r:')
# plot precision level up to 90% precision threshold
plt.plot([-50000, threshold_90_precision], [0.9, 0.9], 'r:')
# plot recall level up to 90% precision threshold
plt.plot([-50000, threshold_90_precision], [recall_90_precision, recall_90_precision], 'r:')
# plot points on precision and recall curves corresponding to 90% precision threshold
plt.plot([threshold_90_precision], [0.9], 'ro')
plt.plot([threshold_90_precision], [recall_90_precision], 'ro')
plt.show()
plt.figure(figsize=(8, 6))
# plot precision vs recall
plt.plot(recalls, precisions, "b-", linewidth=2)
plt.xlabel('Precision', fontsize=16)
plt.ylabel('Recall', fontsize=16)
# style the plot
plt.axis([0, 1, 0, 1])
plt.grid(True)
plt.title('Precision vs Recall')
# plot 90% precision point
plt.plot([recall_90_precision], [0.9], 'ro')
plt.plot([recall_90_precision, recall_90_precision], [0., 0.9], 'r:')
plt.plot([0.0, recall_90_precision], [0.9, 0.9], 'r:')
plt.show()
y_train_pred_90 = (y_scores >= threshold_90_precision)
precision_90 = precision_score(y_train_5, y_train_pred_90)
recall_90_precision = recall_score(y_train_5, y_train_pred_90)
precision_90, recall_90_precision
The ROC Curve
The **receiver operating characteristic** curve is similar to the precision-recall curve, but instead plots the *true positive rate (recall, sensitivity)* against the *false positive rate* (FPR). The FPR is one minus the *true negative rate* (*specificity*). I.e., the ROC curve plots *sensitivity* against 1 - *specificity*.
from sklearn.metrics import roc_curve
def plot_roc_curve(fpr, tpr, label=None):
    plt.plot(fpr, tpr, linewidth=2, label=label)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.axis([0, 1, 0, 1])
    plt.xlabel('False Positive Rate', fontsize=16)
    plt.ylabel('True Positive Rate', fontsize=16)
    plt.grid(True)
fpr, tpr, thresholds = roc_curve(y_train_5, y_scores)
fpr_90 = fpr[np.argmax(tpr >= recall_90_precision)]
plt.figure(figsize=(8, 6))
# plot the ROC curve
plot_roc_curve(fpr, tpr)
# plot point of 90% precision on the ROC curve
plt.plot([fpr_90], [recall_90_precision], 'ro')
plt.show()
Another way to compare classifiers is to measure the **area under the curve (AUC)**. A perfect classifier would have an AUC score of 1, whereas a completely random one would have 0.5 (this corresponds to the diagonal line in the ROC plot).
from sklearn.metrics import roc_auc_score
roc_auc_score(y_train_5, y_scores)
As a rule of thumb, prefer the PR curve when
* the positive class is rare, or
* you care more about the false positives;
otherwise the ROC curve might be better.
*For instance, in the plot above it might seem that the AUC is quite good, but that's just because there are only a few examples of the positive class (5s). In this case, the PR curve presents a much more realistic view.*
The following example uses a `RandomForestClassifier`, which does not have a `decision_function` method. Instead, it has a `predict_proba` method returning class probabilities. In general, *Scikit-Learn* models will have one or the other method, or both.
from sklearn.ensemble import RandomForestClassifier
forest_clf = RandomForestClassifier(random_state=42)
y_proba_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3, method='predict_proba')
y_scores_forest = y_proba_forest[:, 1] # score = probability of the positive class
fpr_forest, tpr_forest, thresholds_forest = roc_curve(y_train_5, y_scores_forest)
recall_90_precision_forest = tpr_forest[np.argmax(fpr_forest >= fpr_90)]
plt.figure(figsize=(8, 6))
# plot the ROC curve of the SGD
plot_roc_curve(fpr, tpr, label='SGD')
# plot the ROC curve of the Random Forest
plot_roc_curve(fpr_forest, tpr_forest, label='Random Forest')
# plot point of 90% precision on the SGD ROC curve
plt.plot([fpr_90], [recall_90_precision], 'ro')
# plot point of 90% precision on the Random Forest ROC curve
plt.plot([fpr_90], [recall_90_precision_forest], 'ro')
plt.legend(loc='lower right', fontsize=16)
plt.show()
Multiclass Classification
**Multiclass (multinomial) classifiers**:
* *Logistic Regression*
* *Random Forest*
* *Naive Bayes*
**Binary classifiers**:
* *SGD*
* *SVM*
Strategies to turn binary classifiers into multiclass ones:
* **One-versus-the-rest (OvR)**: Train one classifier per class. When predicting the class for a new instance, get the score from each one and choose the class with the highest score.
* **One-versus-one (OvO)**: Train one classifier for each pair of classes (for $N$ classes that's $N \times (N - 1) / 2$ classifiers, e.g. 45 for the 10 MNIST digits). When predicting, run the instance through all classifiers and choose the class which wins the most duels. The main advantage is that each classifier needs only the portion of the training set containing its pair of classes, which is good for classifiers that don't scale well (e.g. SVM).
from sklearn.svm import SVC
svm_clf = SVC(gamma="auto", random_state=42)
svm_clf.fit(X_train[:1000], y_train[:1000])
svm_clf.predict([some_digit])
some_digit_scores = svm_clf.decision_function([some_digit])
some_digit_scores
some_digit_class = np.argmax(some_digit_scores)
svm_clf.classes_[some_digit_class]
One can manually select the strategy by wrapping the model class into `OneVsRestClassifier` or `OneVsOneClassifier`.
from sklearn.multiclass import OneVsRestClassifier
ovr_clf = OneVsRestClassifier(SVC(gamma="auto", random_state=42))
ovr_clf.fit(X_train[:1000], y_train[:1000])
ovr_clf.predict([some_digit])
len(ovr_clf.estimators_)
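The `OneVsOneClassifier` wrapper works the same way; a minimal sketch (not in the original notebook) that also confirms the $N \times (N - 1) / 2$ count:

from sklearn.multiclass import OneVsOneClassifier
ovo_clf = OneVsOneClassifier(SVC(gamma="auto", random_state=42))
ovo_clf.fit(X_train[:1000], y_train[:1000])
ovo_clf.predict([some_digit])
len(ovo_clf.estimators_)  # 45 pairwise classifiers for the 10 digit classes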
`SGDClassifier` uses *OvR* under the hood
sgd_clf.fit(X_train, y_train)
sgd_clf.predict([some_digit])
sgd_clf.decision_function([some_digit])
cross_val_score(sgd_clf, X_train, y_train, cv=3, scoring='accuracy')
Cross-validation on the SGD classifier shows pretty good accuracy compared to a dummy (random) classifier, which would score around 10%. This can be improved even further by simply scaling the input.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train.astype(np.float64))
cross_val_score(sgd_clf, X_train_scaled, y_train, cv=3, scoring='accuracy')
Error Analysis
y_train_pred = cross_val_predict(sgd_clf, X_train_scaled, y_train, cv=3)
conf_mx = confusion_matrix(y_train, y_train_pred)
conf_mx
plt.matshow(conf_mx, cmap=plt.cm.gray)
plt.title('Training set confusion matrix for the SGD classifier')
plt.show()
Let's transform the confusion matrix a bit to focus on the errors:
1. divide each value by the number of instances (images in this case) in that class
2. fill the diagonal with zeros to keep just the errors
row_sums = conf_mx.sum(axis=1, keepdims=True)
norm_conf_mx = conf_mx / row_sums
np.fill_diagonal(norm_conf_mx, 0)
plt.matshow(norm_conf_mx, cmap=plt.cm.gray)
plt.title('Class-normalized confusion matrix with 0 on diagonal')
plt.show()
Multilabel Classification
*Multilabel classification* refers to a classification task where the classifier predicts multiple classes at once (the output is a boolean vector).
from sklearn.neighbors import KNeighborsClassifier
y_train_large = (y_train >= 7)
y_train_odd = (y_train % 2 == 1)
y_multilabel = np.c_[y_train_large, y_train_odd]
knn_clf = KNeighborsClassifier()
knn_clf.fit(X_train, y_multilabel)
knn_clf.predict([some_digit])
# This takes too long to evaluate but normally it would output the F1 score
# y_train_knn_pred = cross_val_predict(knn_clf, X_train, y_multilabel, cv=3, n_jobs=-1)
# f1_score(y_multilabel, y_train_knn_pred, average='macro')
Multioutput Classification
*Multioutput-multiclass* (or just *multioutput*) classification is a generalization of multilabel classification where each label can be multiclass (categorical, not just boolean).
The following example removes noise from images. In this setup the output is one label per pixel (multilabel) and each pixel's label can take multiple values - pixel intensities (multioutput).
# modified training set
noise = np.random.randint(0, 100, (len(X_train), 784))
X_train_mod = X_train + noise
# modified test set
noise = np.random.randint(0, 100, (len(X_test), 784))
X_test_mod = X_test + noise
# targets are original images
y_train_mod = X_train
y_test_mod = X_test
some_index = 0
# noisy image
plt.subplot(121)
plt.imshow(X_test_mod[some_index].reshape(28, 28), cmap='binary')
plt.axis('off')
# original image
plt.subplot(122)
plt.imshow(y_test_mod[some_index].reshape(28, 28), cmap='binary')
plt.axis('off')
plt.show()
knn_clf.fit(X_train_mod, y_train_mod)
clean_digit = knn_clf.predict([X_test_mod[some_index]])
plt.imshow(clean_digit.reshape(28, 28), cmap='binary')
plt.axis('off')
plt.show()
Extra Material
Dummy Classifier
from sklearn.dummy import DummyClassifier
dummy_clf = DummyClassifier(strategy='prior')
y_probas_dummy = cross_val_predict(dummy_clf, X_train, y_train_5, cv=3, method='predict_proba')
y_scores_dummy = y_probas_dummy[:, 1]
fprr, tprr, thresholdsr = roc_curve(y_train_5, y_scores_dummy)
plot_roc_curve(fprr, tprr)
Exercises
Data Augmentation

from scipy.ndimage import shift  # scipy.ndimage.interpolation is deprecated in modern SciPy
def shift_image(image, dx, dy):
    image = image.reshape((28, 28))
    shifted_image = shift(image, [dy, dx], cval=0, mode='constant')
    return shifted_image.reshape([-1])
image = X_train[1000]
shifted_image_down = shift_image(image, 0, 5)
shifted_image_left = shift_image(image, -5, 0)
plt.figure(figsize=(12, 3))
# original image
plt.subplot(131)
plt.title('Original', fontsize=14)
plt.imshow(image.reshape(28, 28), interpolation='nearest', cmap='Greys')
# image shifted down
plt.subplot(132)
plt.title('Shifted down', fontsize=14)
plt.imshow(shifted_image_down.reshape(28, 28), interpolation='nearest', cmap='Greys')
# image shifted left
plt.subplot(133)
plt.title('Shifted left', fontsize=14)
plt.imshow(shifted_image_left.reshape(28, 28), interpolation='nearest', cmap='Greys')
plt.show()
from sklearn.metrics import accuracy_score
X_train_augmented = [image for image in X_train]
y_train_augmented = [label for label in y_train]
shifts = ((1, 0), (-1, 0), (0, 1), (0, -1))
for dx, dy in shifts:
    for image, label in zip(X_train, y_train):
        X_train_augmented.append(shift_image(image, dx, dy))
        y_train_augmented.append(label)
X_train_augmented = np.array(X_train_augmented)
y_train_augmented = np.array(y_train_augmented)
shuffle_idx = np.random.permutation(len(X_train_augmented))
X_train_augmented = X_train_augmented[shuffle_idx]
y_train_augmented = y_train_augmented[shuffle_idx]
# Best params without augmentation
knn_clf = KNeighborsClassifier(n_neighbors=4, weights='distance')
knn_clf.fit(X_train_augmented, y_train_augmented)
# Accuracy without augmentation: 0.9714
y_pred = knn_clf.predict(X_test)
accuracy_score(y_test, y_pred)
**Hands-on Lab: Web Scraping**
Estimated time needed: **30 to 45** minutes
Objectives
In this lab you will perform the following:
* Extract information from a given web site
* Write the scraped data into a CSV file.
Extract information from the given web site
You will extract the data from the below web site:
#this url contains the data you need to scrape
url = "https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DA0321EN-SkillsNetwork/labs/datasets/Programming_Languages.html"
The data you need to scrape is the **name of the programming language** and the **average annual salary**. It is a good idea to open the URL in your web browser and study the contents of the web page before you start to scrape.
Import the required libraries
# Your code here
from bs4 import BeautifulSoup
import requests
import pandas as pd
Download the webpage at the url
#your code goes here
data = requests.get(url).text
Create a soup object
#your code goes here
soup = BeautifulSoup(data, 'html5lib')
Scrape the `Language name` and `annual average salary`.
#your code goes here
table = soup.find('table')
rows = []
for row in table.find_all('tr'):
    cols = row.find_all('td')
    if len(cols) < 4:
        continue  # skip rows without the expected cells (e.g. the header row)
    lang_name = cols[1].getText()
    avg_salary = cols[3].getText()
    rows.append({"Language": lang_name, "Avg_Salary": avg_salary})
    #print("{}----------{}".format(lang_name, avg_salary))
# DataFrame.append was removed in pandas 2.0, so build the frame from a list of rows
lang_data = pd.DataFrame(rows, columns=['Language', 'Avg_Salary'])
Save the scraped data into a file named *popular-languages.csv*.
# your code goes here
#Drop the first row
#lang_data.drop(0, axis=0, inplace=True)
lang_data.to_csv('popular-languages.csv', index=False)
Datashader provides a flexible series of processing stages that map from raw data into viewable images. As shown in the [Introduction](1-Introduction.ipynb), using datashader can be as simple as calling ``datashade()``, but understanding each of these stages will help you get the most out of the library.

The stages in a datashader pipeline are similar to those in a [3D graphics shading pipeline](https://en.wikipedia.org/wiki/Graphics_pipeline). Here the computational steps are listed across the top of the diagram, while the data structures or objects are listed along the bottom. Breaking up the computations in this way is what makes Datashader able to handle arbitrarily large datasets, because only one stage (Aggregation) requires access to the entire dataset. The remaining stages use a fixed-sized data structure regardless of the input dataset, allowing you to use any visualization or embedding methods you prefer without running into performance limitations.

In this notebook, we'll first put together a simple, artificial example to get some data, and then show how to configure and customize each of the data-processing stages involved:

1. [Projection](#Projection)
2. [Aggregation](#Aggregation)
3. [Transformation](#Transformation)
4. [Colormapping](#Colormapping)
5. [Embedding](#Embedding)

Data
For an example, we'll construct a dataset made of five overlapping 2D Gaussian distributions with different σs (spatial scales). By default we'll have 10,000 datapoints from each category, but you should see sub-second response times even for 1 million datapoints per category if you increase `num`.
import pandas as pd
import numpy as np
from collections import OrderedDict as odict
num=10000
np.random.seed(1)
dists = {cat: pd.DataFrame(odict([('x', np.random.normal(x, s, num)),
                                  ('y', np.random.normal(y, s, num)),
                                  ('val', val),
                                  ('cat', cat)]))
         for x, y, s, val, cat in
         [(2, 2, 0.03, 10, "d1"),
          (2, -2, 0.10, 20, "d2"),
          (-2, -2, 0.50, 30, "d3"),
          (-2, 2, 1.00, 40, "d4"),
          (0, 0, 3.00, 50, "d5")]}
df = pd.concat(dists,ignore_index=True)
df["cat"]=df["cat"].astype("category")
Datashader can work with many different data objects provided by different data libraries, depending on the type of data involved, such as columnar data in [Pandas](http://pandas.pydata.org) or [Dask](http://dask.pydata.org) dataframes, gridded multidimensional array data using [xarray](http://xarray.pydata.org), columnar data on GPUs using [cuDF](https://github.com/rapidsai/cudf), multidimensional arrays on GPUs using [CuPy](https://cupy.chainer.org/), and ragged arrays using [SpatialPandas](https://github.com/holoviz/spatialpandas) (see the [Performance User Guide](../10_Performance.ipynb) for a guide to selecting an appropriate library). Here, we're using a Pandas dataframe, with 50,000 rows by default:
df.tail()
To illustrate this dataset, we'll make a quick-and-dirty Datashader plot that dumps these x,y coordinates into an image:
import datashader as ds
import datashader.transfer_functions as tf
%time tf.shade(ds.Canvas().points(df,'x','y'))
Without any special tweaking, datashader is able to reveal the overall shape of this distribution faithfully: four summed 2D normal distributions of different variances, arranged at the corners of a square, overlapping another very high-variance 2D normal distribution centered in the square. This immediately obvious structure makes a great starting point for exploring the data, and you can then customize each of the various stages involved as described below.

Of course, this is just a static plot, and you can't see what the axes are, so we can instead embed this data into an interactive plot if we prefer. Here, if you are running a live Python process, you can enable the "wheel zoom" tool on the right, zoom in anywhere in the distribution, and datashader will render a new image that shows the full distribution at that new location. If you are viewing this on a static web site, zooming will simply make the existing set of pixels larger, because this dynamic updating requires Python.

Now that you can see the overall result, we'll unpack each of the steps in the Datashader pipeline and show how this image is constructed from the data.

Projection
Datashader is designed to render datasets projected on to a 2D rectangular grid, eventually generating an image where each pixel corresponds to one cell in that grid. The ***Projection*** stage is primarily conceptual, as it consists of you deciding what you want to plot and how you want to plot it:

- **Variables**: Select which variable you want to have on the *x* axis, and which one for the *y* axis. If those variables are not already columns in your dataframe (e.g. if you want to do a coordinate transformation), you'll need to create suitable columns mapping directly to *x* and *y* for use in the next step. For this example, the "x" and "y" columns are conveniently named `x` and `y` already, but any column name can be used for these axes.
- **Ranges**: Decide what ranges of those values you want to map onto the scene. If you omit the ranges, datashader will calculate the ranges from the data values, but you will often wish to supply explicit ranges for three reasons:
  1. Calculating the ranges requires a complete pass over the data, which takes nearly as much time as actually aggregating the data, so your plots will be about twice as fast if you specify the ranges.
  2. Real-world datasets often have some outliers with invalid values, which can make it difficult to see the real data, so after your first plot you will often want to specify only the range that appears to have valid data.
  3. Over the valid range of data, you will often be mainly interested in a specific region, allowing you to zoom in to that area (though with an interactive plot you can always do that as needed).
- **Axis types**: Decide whether you want `'linear'` or `'log'` axes.
- **Resolution**: Decide what size of aggregate array you are going to want.

Here's an example of specifying a ``Canvas`` (a.k.a. "Scene") object for a 300x300-pixel image covering the range +/-8.0 on both axes:
canvas = ds.Canvas(plot_width=300, plot_height=300,
x_range=(-8,8), y_range=(-8,8),
x_axis_type='linear', y_axis_type='linear')
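The `'log'` axis type mentioned above works the same way; a minimal variant sketch (the ranges here are hypothetical, and log axes require positive bounds):

canvas_log = ds.Canvas(plot_width=300, plot_height=300,
                       x_range=(0.1, 10), y_range=(0.1, 10),
                       x_axis_type='log', y_axis_type='log')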
At this stage, no computation has actually been done -- the `canvas` object is purely declarative, recording your preferences to be applied in the next stage.

Aggregation
Once a `Canvas` object has been specified, it can then be used to guide aggregating the data into a fixed-sized grid. Data is assumed to consist of a series of items, each of which has some visible representation (its rendering as a "glyph") that is combined with the representation of other items to produce an aggregate representation of the whole set of items in the rectangular grid. The available glyph types for representing a data item are currently:

- **Canvas.points**: each data item is a coordinate location (an x,y pair), mapping into the single closest grid cell to that datapoint's location.
- **Canvas.line**: each data item is a coordinate location, mapping into every grid cell falling between this point's location and the next in a straight line segment.
- **Canvas.area**: each data item is a coordinate location, rendered as a shape filling the axis-aligned area between this point, the next point, and a baseline (e.g. zero, filling the area between a line and a base).
- **Canvas.trimesh**: each data item is a triple of coordinate locations specifying a triangle, filling in the region bounded by that triangle.
- **Canvas.polygons**: each data item is a sequence of coordinate locations specifying a polygon, filling in the region bounded by that polygon (minus holes if specified separately).
- **Canvas.raster**: the collection of data items is an array specifying regularly spaced axis-aligned rectangles forming a regular grid; each cell in this array is rendered as a filled rectangle.
- **Canvas.quadmesh**: the collection of data items is an array specifying irregularly spaced quadrilaterals forming a grid that is regular in the input space but can have arbitrary rectilinear or curvilinear shapes in the aggregate grid; each cell in this array is rendered as a filled quadrilateral.

These types are each covered in detail in the [User Guide](../user_guide/). Datashader can be extended to add additional types here and in each section below; see [Extending Datashader](../user_guide/9-Extending.ipynb) for more details. Many other plots like time series and network graphs can be constructed out of these basic primitives.

Reductions
Once you have determined your mapping, you'll next need to choose a reduction operator to use when aggregating multiple datapoints into a given pixel. For points, each datapoint is mapped into a single pixel, while the other glyphs have spatial extent and can thus map into multiple pixels, each of which operates the same way. All glyphs act like points if the entire glyph is contained within that pixel. Here we will talk only about "datapoints" for simplicity, which for an area-based glyph should be interpreted as "the part of that glyph that falls into this pixel".

All of the currently supported reduction operators are incremental, which means that we can efficiently process datasets in a single pass. Given an aggregate bin to update (typically corresponding to one eventual pixel) and a new datapoint, the reduction operator updates the state of the bin in some way. (Actually, datapoints are normally processed in batches for efficiency, but it's simplest to think about the operator as being applied per data point, and the mathematical result should be the same.)

A large number of useful [reduction operators](https://datashader.org/api.html#reductions) are supplied in `ds.reductions`, including:

**`count(column=None)`**: increment an integer count each time a datapoint maps to this bin. The resulting aggregate array will be an unsigned integer type, allowing counts to be distinguished from the other types that are normally floating point.

**`any(column=None)`**: the bin is set to 1 if any datapoint maps to it, and 0 otherwise.

**`sum(column)`**: add the value of the given column for this datapoint to a running total for this bin.

**`by(column, reduction)`**: given a bin with categorical data (i.e., [Pandas' `categorical` datatype](https://pandas-docs.github.io/pandas-docs-travis/categorical.html)), aggregate each category separately, accumulating the given datapoint in an appropriate category within this bin. These categories can later be collapsed into a single aggregate if needed; see examples below.

**`summary(name1=op1,name2=op2,...)`**: allows multiple reduction operators to be computed in a single pass over the data; just provide a name for each resulting aggregate and the corresponding reduction operator to use when creating that aggregate. If multiple aggregates are needed for the same dataset and the same Canvas, using `summary` will generally be much more efficient than making multiple separate passes over the dataset.

The API documentation contains the complete list of [reduction operators](https://datashader.org/api.html#reductions) provided, including `mean`, `min`, `max`, `var` (variance), and `std` (standard deviation). The reductions are also imported into the ``datashader`` namespace for convenience, so that they can be accessed like ``ds.mean()`` here.

For the operators above, those accepting a `column` argument will only do the operation if the value of that column for this datapoint is not `NaN`. E.g. `count` with a column specified will count the datapoints having non-`NaN` values for that column.

Once you have selected your reduction operator, you can compute the aggregation for each pixel-sized aggregate bin:
canvas.points(df, 'x', 'y', agg=ds.count())
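The `summary` reduction described above is not otherwise demonstrated in this notebook; a minimal sketch (the aggregate names `cnt` and `avg` are arbitrary):

multi_agg = canvas.points(df, 'x', 'y',
                          agg=ds.summary(cnt=ds.count(), avg=ds.mean('val')))
# one pass over df; multi_agg['cnt'] and multi_agg['avg'] hold the two aggregates

Note that `summary` returns an xarray `Dataset` of named aggregates, while a single reduction like `count()` returns the `DataArray` described next.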
The result will be an [xarray](http://xarray.pydata.org) `DataArray` data structure containing the bin values (typically one value per bin, but more for multiple category or multiple-aggregate operators) along with axis range and type information. We can visualize this array in many different ways by customizing the pipeline stages described in the following sections, but for now we'll render images with `tf.shade()` using the default parameters to show the effects of a few different aggregate operators:
tf.Images(tf.shade( canvas.points(df,'x','y', ds.count()), name="count()"),
tf.shade( canvas.points(df,'x','y', ds.any()), name="any()"),
tf.shade( canvas.points(df,'x','y', ds.mean('y')), name="mean('y')"),
tf.shade(50-canvas.points(df,'x','y', ds.mean('val')), name="50- mean('val')"))
Here ``count()`` renders each bin's count in a different color, to show the true distribution, while ``any()`` turns on a pixel if any point lands in that bin, and ``mean('y')`` averages the `y` column for every datapoint that falls in that bin. Of course, since every datapoint falling into a given bin has nearly the same `y` value, the mean reduction with `y` simply scales each pixel by its `y` location. For the last image above, we specified that the `val` column should be used for the `mean` reduction, which in this case results in each category being assigned a different color, because in our dataset all items in the same category happen to have the same `val`. Here we also manipulated the result of the aggregation before displaying it by subtracting it from 50, as detailed in the next section.

Transformation
Now that the data has been projected and aggregated into a gridded data structure, it can be processed in any way you like, before converting it to an image as will be described in the following section. At this stage, the data is still stored as bin data, not pixels, which makes a very wide variety of operations and transformations simple to express. For instance, instead of plotting all the data, we can easily plot only those bins in the 99th percentile by count (left), or apply any [NumPy ufunc](http://docs.scipy.org/doc/numpy/reference/ufuncs.html) to the bin values (whether or not it makes any sense!):
agg = canvas.points(df, 'x', 'y')
tf.Images(tf.shade(agg.where(agg>=np.percentile(agg,99)), name="99th Percentile"),
tf.shade(np.power(agg,2), name="Numpy square ufunc"),
tf.shade(np.sin(agg), name="Numpy sin ufunc"))
The [xarray documentation](http://xarray.pydata.org/en/stable/computation.html) describes all the various transformations you can apply from within xarray, and of course you can always extract the data values and operate on them outside of xarray for any transformation not directly supported by xarray, then construct a suitable xarray object for use in the following stage. Once the data is in the aggregate array, you generally don't have to worry much about optimization, because it's a fixed-sized grid regardless of your data size, and so it is very straightforward to apply arbitrary transformations to the aggregates.The above examples focus on a single aggregate, but there are many ways that you can use multiple data values per bin as well. For instance, you can apply any aggregation "categorically", aggregating `by` some categorical value so that datapoints for each unique value are aggregated independently:
aggc = canvas.points(df, 'x', 'y', ds.by('cat', ds.count()))
aggc
Here the `count()` aggregate has been collected into not just a 2D aggregate array, but a whole stack of aggregate arrays, one per `cat` value, making the aggregate three dimensional (x, y, cat) rather than just two (x, y). With this 3D aggregate of counts per category, you can then select a specific category or subset of them for further processing, where `.sum(dim='cat')` will collapse across such a subset to give a single aggregate array:
agg_d3_d5=aggc.sel(cat=['d3', 'd5']).sum(dim='cat')
tf.Images(tf.shade(aggc.sel(cat='d3'), name="Category d3"),
tf.shade(agg_d3_d5, name="Categories d3 and d5"))
You can also combine multiple aggregates however you like, as long as they were all constructed using the same Canvas object (which ensures that their aggregate arrays are the same size) and cover the same axis ranges:
tf.Images(tf.shade(agg_d3_d5.where(aggc.sel(cat='d3') == aggc.sel(cat='d5')), name="d3+d5 where d3==d5"),
tf.shade( agg.where(aggc.sel(cat='d3') == aggc.sel(cat='d5')), name="d1+d2+d3+d4+d5 where d3==d5"))
The above two results are using the same mask (only those bins `where` the counts for 'd3' and 'd5' are equal), but applied to different aggregates (either just the `d3` and `d5` categories, or the entire set of counts).

Colormapping
As you can see above, the usual way to visualize an aggregate array is to map from each array bin into a color for a corresponding pixel in an image. The above examples use the `tf.shade()` method, which maps a scalar aggregate bin value into an RGB (color) triple and an alpha (opacity) value. By default, the colors are chosen from the colormap ['lightblue','darkblue'] (i.e., `#ADD8E6` to `#00008B`), with intermediate colors chosen as a linear interpolation independently for the red, green, and blue color channels (e.g. `AD` to `00` for the red channel, in this case). The alpha (opacity) value is set to 0 for empty bins and 1 for non-empty bins, allowing the page background to show through wherever there is no data. You can supply any colormap you like, including Bokeh palettes, Matplotlib colormaps, or a list of colors (using the color names from `ds.colors`, integer triples, or hexadecimal strings):
from bokeh.palettes import RdBu9
tf.Images(tf.shade(agg,cmap=["darkred", "yellow"], name="darkred, yellow"),
tf.shade(agg,cmap=[(230,230,0), "orangered", "#300030"], name="yellow, orange red, dark purple"),
tf.shade(agg,cmap=list(RdBu9), name="Bokeh RdBu9"),
tf.shade(agg,cmap="black", name="Black"))
As a special case ("Black", above), if you supply only a single color, the color will be kept constant at the given value but the alpha (opacity) channel will vary with the data.

Colormapping categorical data
If you want to use `tf.shade` with a categorical aggregate, you can use a colormap just as for a non-categorical aggregate if you first select a single category using something like `aggc.sel(cat='d3')` or else collapse all categories into a single aggregate using something like `aggc.sum(dim='cat')`. If you want to visualize all the categories in one image, you can use `tf.shade` with the categorical aggregate directly, which will assign a color to each category and then calculate the transparency and color of each pixel according to each category's contribution to that pixel:
color_key = dict(d1='blue', d2='green', d3='red', d4='orange', d5='purple')
tf.Images(tf.shade(aggc, name="Default color key"),
tf.shade(aggc, color_key=color_key, name="Custom color key"))
Here the different colors mix not just visually due to blurring, but are actually mixed mathematically per pixel, with pixels that include data from multiple categories taking intermediate color values. The total (summed) data values across all categories are used to calculate the alpha channel, with the previously computed color being revealed to a greater or lesser extent depending on the value of the aggregate for that bin. See [Colormapping with negative values](#Colormapping-with-negative-values) below for more details on how these colors and transparencies are calculated.

The default color key for categorical data provides distinguishable colors for a couple of dozen categories, but you can provide an explicit color_key if you prefer. Choosing colors for different categories is more of an art than a science, because the colors not only need to be distinguishable, their combinations also need to be distinguishable if those categories ever overlap in nearby pixels, or else the results will be ambiguous. In practice, only a few categories can be reliably distinguished in this way, but [zooming in](3_Interactivity.ipynb) can be used to help disambiguate overlapping colors, as long as the basic set of colors is itself distinguishable.

Transforming data values for colormapping
In each of the above examples, you may have noticed that we were never required to specify any parameters about the data values; the plots just appear like magic. That magic is implemented in `tf.shade`. What `tf.shade` does for a 2D aggregate (non-categorical) is:

1. **Mask** out all bins with a `NaN` value (for floating-point arrays) or a zero value (for the unsigned integer arrays that are returned from `count`); these bins will not have any effect on subsequent computations.
2. **Transform** the bin values using a specified scalar function `how`. Calculates the value of that function for the difference between each bin value and the minimum non-masked bin value. E.g. for `how="linear"`, simply returns the difference unchanged. Other `how` functions are discussed below.
3. **Map** the resulting transformed data array into the provided colormap. First finds the value span (*l*,*h*) for the resulting transformed data array -- what are the lowest and highest non-masked values? -- and then maps the range (*l*,*h*) into the full range of the colormap provided. If a colormap is used, masked values are given a fully transparent alpha value, and non-masked ones are given a fully opaque alpha value. If a single color is used, the alpha value starts at `min_alpha` and increases proportionally to the mapped data value up to the full `alpha` value.

The result is thus auto-ranged to show whatever data values are found in the aggregate bins, with the `span` argument (described below) allowing you to override the range explicitly if you need to. A simplified NumPy sketch of these three steps is shown after the next plot. As described in [Plotting Pitfalls](../user_guide/1_Plotting_Pitfalls.ipynb), auto-ranging is only part of what is required to reveal the structure of the dataset; it's also crucial to automatically and potentially nonlinearly map from the aggregate values (e.g. bin counts) into the colormap. If we used a linear mapping, we'd see very little of the structure of the data:
tf.shade(agg,how='linear')
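For reference, here is the simplified NumPy sketch of the three `tf.shade` steps promised above (not Datashader's actual implementation; it assumes a count aggregate with no NaNs):

vals = agg.values.astype(float)    # bin values from the count aggregate
mask = vals > 0                    # step 1: mask out empty (zero-count) bins
shifted = vals - vals[mask].min()  # step 2: the 'linear' how-transform
lo, hi = shifted[mask].min(), shifted[mask].max()
norm = (shifted - lo) / (hi - lo)  # step 3: map the span (l, h) onto the colormap range
# norm values in [0, 1] would then index into the colormap; masked bins stay transparent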
In the linear version, you can see that the bins that have zero count show the background color, since they have been masked out using the alpha channel of the image, and that the rest of the pixels have been mapped to colors near the bottom of the colormap. If you peer closely at it, you may even be able to see that one pixel (from the smallest Gaussian) has been mapped to the highest color in the colormap (here dark blue). But no other structure is visible, because the highest-count bin is so much higher than all of the other bins:
top15=agg.values.flat[np.argpartition(agg.values.flat, -15)[-15:]]
print(sorted(top15))
print(sorted(np.round(top15*255.0/agg.values.max()).astype(int)))
I.e., if using a colormap with 255 colors, the largest bin (`agg.values.max()`) is mapped to the highest color, but with a linear scale all of the other bins map to only the first 24 colors, leaving all intermediate colors unused. If we want to see any structure for these intermediate ranges, we need to transform these numerical values somehow before displaying them. For instance, if we take the logarithm of these large values, they will be mapped into a more tractable range:
print(np.log1p(sorted(top15)))
So we can plot the logarithms of the values (``how='log'``, below), which is an arbitrary transform but is appropriate for many types of data. Alternatively, we can make a histogram of the numeric values, then assign a pixel color to each equal-sized histogram bin to ensure even usage of every displayable color (``how='eq_hist'``; see [plotting pitfalls](../user_guide/1_Plotting_Pitfalls.ipynb)). We can even supply any arbitrary transformation to the colormapper as a callable, such as a twenty-third root:
tf.Images(tf.shade(agg,how='log', name="log"),
tf.shade(agg,how='eq_hist', name="eq_hist"),
tf.shade(agg,how=lambda d, m: np.where(m, np.nan, d)**(1/23.), name="23rd root"))
Usually, however, such custom operations are done directly on the aggregate during the ***Transformation*** stage; the `how` operations are meant for simple, well-defined transformations solely for the final steps of visualization, which allows the main aggregate array to stay in the original units and scale in which it was measured. Using `how` also helps simplify the subsequent ***Embedding*** stage, letting it provide one of a fixed set of legend types, either linear (for `how=linear`), logarithmic (for `how=log`) or percentile (for `how=eq_hist`). See the [shade docs](https://datashader.org/api.html#datashader.transfer_functions.shade) for more details on the `how` functions.

For categorical aggregates, the `shade` function works similarly to providing a single color to a non-categorical aggregate, with the alpha (opacity) calculated from the total value across all categories (and the color calculated as a weighted mixture of the colors for each category).

Controlling ranges for colormapping
By default, `shade` will autorange on the aggregate array, mapping the lowest and highest values of the aggregate array into the lowest and highest values of the colormap (or the available alpha values, for single colors). You can instead focus on a specific `span` of the aggregate data values, mapping that span into the available colors or the available alpha values:
tf.Images(tf.shade(agg,cmap=["grey", "blue"], name="gb 0 20", span=[0,20], how="linear"),
tf.shade(agg,cmap=["grey", "blue"], name="gb 50 200", span=[50,200], how="linear"),
tf.shade(agg,cmap="green", name="Green 10 20", span=[10,20], how="linear"))
On the left, all counts above 20 are mapped to the highest value in the colormap (blue in this case), losing the ability to distinguish between values above 20 but providing the maximum color precision for the specific range 0 to 20. In the middle, all values 0 to 50 map to the first color in the colormap (grey in this case), and the colors are then linearly interpolated up to 200, with all values 200 and above mapping to the highest value in the colormap (blue in this case). With the single color mapping to alpha on the right, counts up to 10 are all mapped to `min_alpha`, counts 20 and above are all mapped to the specified `alpha` (255 in this case), and alpha is scaled linearly in between.

For plots that scale with alpha (i.e., categorical or single-color non-categorical plots), you can control the range of alpha values generated by setting `min_alpha` (lower bound) and `alpha` (upper bound), on a scale of 0 to 255:
tf.Images(tf.shade(agg,cmap="green", name="Green"),
tf.shade(agg,cmap="green", name="No min_alpha", min_alpha=0),
tf.shade(agg,cmap="green", name="Small alpha range", min_alpha=50, alpha=80))
|
_____no_output_____
|
BSD-3-Clause
|
examples/getting_started/2_Pipeline.ipynb
|
odidev/datashader
|
Here you can see that the faintest pixels are more visible with the default `min_alpha` (normally 40, left) than if you explicitly set `min_alpha=0` (middle), which is why the `min_alpha` default is non-zero; otherwise low values would be indistinguishable from the background (see [Plotting Pitfalls](../user_guide/1_Plotting_Pitfalls.ipynb)).

You can combine `span` and `alpha` ranges to specifically control the data value range that maps to an opacity range, for single-color and categorical plotting:
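First, as a rough sketch of the alpha mapping just described (illustrative only — `count_to_alpha` is an assumed name, not datashader's internal code):

```python
import numpy as np

def count_to_alpha(count, span=(10, 20), min_alpha=40, alpha=255):
    """Linearly map counts within `span` onto [min_alpha, alpha], clipping outside."""
    lo, hi = span
    t = np.clip((count - lo) / (hi - lo), 0.0, 1.0)
    return min_alpha + t * (alpha - min_alpha)

count_to_alpha(np.array([5, 10, 15, 20, 25]))   # -> [ 40., 40., 147.5, 255., 255.]
```

And the combined `span`/`min_alpha` examples: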
|
tf.Images(tf.shade(agg,cmap="green", name="g 0,20", span=[ 0,20], how="linear"),
tf.shade(agg,cmap="green", name="g 10,20", span=[10,20], how="linear"),
tf.shade(agg,cmap="green", name="g 10,20 0", span=[10,20], how="linear", min_alpha=0))
tf.Images(tf.shade(aggc, name="eq_hist"),
tf.shade(aggc, name="linear", how='linear'),
tf.shade(aggc, name="span 0,10", how='linear', span=(0,10)),
tf.shade(aggc, name="span 0,20", how='linear', span=(0,20), min_alpha=0))
|
_____no_output_____
|
BSD-3-Clause
|
examples/getting_started/2_Pipeline.ipynb
|
odidev/datashader
|
The categorical examples above focus on counts, but `ds.by` works on other aggregate types as well, colorizing by category but aggregating by sum, mean, etc. (but see the [following section](Colormapping-with-negative-values) for details on how to interpret such colors):
|
agg_c = canvas.points(df,'x','y', ds.by('cat', ds.count()))
agg_s = canvas.points(df,'x','y', ds.by("cat", ds.sum("val")))
agg_m = canvas.points(df,'x','y', ds.by("cat", ds.mean("val")))
tf.Images(tf.shade(agg_c), tf.shade(agg_s), tf.shade(agg_m))
|
_____no_output_____
|
BSD-3-Clause
|
examples/getting_started/2_Pipeline.ipynb
|
odidev/datashader
|
### Colormapping with negative values

The above examples all use positive data values to avoid confusion when there is no colorbar or other explicit indication of a z (color) axis range. Negative values are also supported, in which case for a non-categorical plot you should normally use a [diverging colormap](https://colorcet.holoviz.org/user_guide/Continuous.html#Diverging-colormaps,-for-plotting-magnitudes-increasing-or-decreasing-from-a-central-point:):
|
from colorcet import coolwarm, CET_D8
dfn = df.copy()
dfn.val.replace({20:-20, 30:0, 40:-40}, inplace=True)
aggn = ds.Canvas().points(dfn,'x','y', agg=ds.mean("val"))
tf.Images(tf.shade(aggn, name="Sequential", cmap=["lightblue","blue"], how="linear"),
tf.shade(aggn, name="DivergingW", cmap=coolwarm[::-1], span=(-50,50), how="linear"),
tf.shade(aggn, name="DivergingB", cmap=CET_D8[::-1], span=(-50,50), how="linear"))
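# (Added illustration, not from the original notebook:) the symmetric span used
# above can also be derived directly from the aggregate itself, e.g.:
#   m = float(np.nanmax(np.abs(aggn.data)))
#   tf.shade(aggn, cmap=CET_D8[::-1], span=(-m, m), how="linear")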
|
_____no_output_____
|
BSD-3-Clause
|
examples/getting_started/2_Pipeline.ipynb
|
odidev/datashader
|
In both of the above plots, values with no data are transparent as usual, showing white. With a sequential lightblue-to-blue colormap, increasing `val` numeric values are mapped to the colormap in order, with the smallest values (-40; large blob in the top left) getting the lowest color value (lightblue), less negative values (-20, blob in the bottom right) getting an intermediate color, and the largest average values (50, large distribution in the background) getting the highest color. Looking at such a plot, viewers have no easy way to determine which values are negative. Using a diverging colormap (right two plots) and forcing the span to be symmetric around zero ensures that negative values are plotted in one color range (reds) and positive values are plotted in a clearly different range (blues). Note that when using a diverging colormap with transparent values, you should carefully consider what you want to happen around the zero point; here values with nearly zero average (blob in bottom left) disappear when using a white-centered diverging map ("coolwarm"), while they show up in a neutral color when using a diverging map with a contrasting central color ("CET_D8").

For categorical plots of values that can be negative, the results are often quite difficult to interpret, for the same reason as for the sequential case above:
|
agg_c = canvas.points(dfn,'x','y', ds.by('cat', ds.count()))
agg_s = canvas.points(dfn,'x','y', ds.by("cat", ds.sum("val")))
agg_m = canvas.points(dfn,'x','y', ds.by("cat", ds.mean("val")))
tf.Images(tf.shade(agg_c, name="count"),
tf.shade(agg_s, name="sum"),
tf.shade(agg_s, name="sum baseline=0", color_baseline=0))
|
_____no_output_____
|
BSD-3-Clause
|
examples/getting_started/2_Pipeline.ipynb
|
odidev/datashader
|
Here a `count` aggregate ignores the negative values and thus works the same as when values were positive, but `sum` and other aggregates like `mean` take the negative values into account. By default, a pixel with the lowest value (whether negative or positive) maps to `min_alpha`, and the highest maps to `alpha`. The color is determined by how different each category's value is from the minimum value across all categories; categories with high values relative to the minimum contribute more to the color. There is not currently any way to tell which data values are positive or negative, as you can using a diverging colormap in the non-categorical case.

Instead of using the default of the data minimum, you can pass a specific `color_baseline`, which is appropriate if your data has a well-defined reference value such as zero. Here, when we pass `color_baseline=0` the negative values are essentially ignored for color calculations, which can be seen on the green blob, where any orange data point is fully orange despite the presence of green-category datapoints; the middle plot `sum` shows a more appropriate color mixture in that case.

### Spreading

Once an image has been created, it can be further transformed with a set of functions from `ds.transfer_functions`. For instance, because it can be difficult to see individual dots, particularly for zoomed-in plots, you can transform the image to replace each non-transparent pixel with a shape, such as a circle (default) or square. This process is called spreading:
|
img = tf.shade(aggc, name="Original image")
tf.Images(img,
tf.spread(img, name="spread 1px"),
tf.spread(img, px=2, name="spread 2px"),
tf.spread(img, px=3, shape='square', name="spread square"))
|
_____no_output_____
|
BSD-3-Clause
|
examples/getting_started/2_Pipeline.ipynb
|
odidev/datashader
|
As you can see, spreading is very effective for isolated datapoints, which is what it's normally used for, but it has overplotting-like effects for closely spaced points like in the green and purple regions above, and so it would not normally be used when the datapoints are dense.

Spreading can be used with a custom mask, as long as it is square and an odd width and height (so that it will be centered over the original pixel):
|
mask = np.array([[1, 1, 1, 1, 1],
[1, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[1, 1, 1, 1, 1]])
tf.spread(img, mask=mask)
|
_____no_output_____
|
BSD-3-Clause
|
examples/getting_started/2_Pipeline.ipynb
|
odidev/datashader
|
To support interactive zooming, where spreading would be needed only in sparse regions of the dataset, we provide the dynspread function. `dynspread` will dynamically calculate the spreading size to use by counting the fraction of non-masked bins that have non-masked neighbors; see the [dynspread docs](https://datashader.org/api.html#datashader.transfer_functions.dynspread) for more details, and the short usage sketch below.

### Other image transfer_functions

Other useful image operations are also provided, such as setting the background color or combining images:
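First, though, a quick illustration of the `dynspread` function described above (the parameter values here are illustrative assumptions; `threshold` and `max_px` are its documented parameters):

```python
# Spread dynamically: grow the spread size (up to max_px pixels) while the
# fraction of non-empty pixels having non-empty neighbors stays below threshold.
tf.dynspread(img, threshold=0.5, max_px=3)
```

And now the background-setting and image-combining operations: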
|
tf.Images(tf.set_background(img,"black", name="Black bg"),
tf.stack(img,tf.shade(aggc.sel(cat=['d2', 'd3']).sum(dim='cat')), name="Sum d2 and d3 colors"),
tf.stack(img,tf.shade(aggc.sel(cat=['d2', 'd3']).sum(dim='cat')), how='saturate', name="d2+d3 saturated"))
|
_____no_output_____
|
BSD-3-Clause
|
examples/getting_started/2_Pipeline.ipynb
|
odidev/datashader
|
# Lambda School Data Science, Unit 2: Predictive Modeling

## Applied Modeling, Module 1

You will use your portfolio project dataset for all assignments this sprint.

## Assignment

Complete these tasks for your project, and document your decisions.

- [ ] Choose your target. Which column in your tabular dataset will you predict?
- [ ] Choose which observations you will use to train, validate, and test your model. And which observations, if any, to exclude.
- [ ] Determine whether your problem is regression or classification.
- [ ] Choose your evaluation metric.
- [ ] Begin with baselines: majority class baseline for classification, or mean baseline for regression, with your metric of choice.
- [ ] Begin to clean and explore your data.
- [ ] Begin to choose which features, if any, to exclude. Would some features "leak" information from the future?

## Reading

### ROC AUC

- [Machine Learning Meets Economics](http://blog.mldb.ai/blog/posts/2016/01/ml-meets-economics/)
- [ROC curves and Area Under the Curve explained](https://www.dataschool.io/roc-curves-and-auc-explained/)
- [The philosophical argument for using ROC curves](https://lukeoakdenrayner.wordpress.com/2018/01/07/the-philosophical-argument-for-using-roc-curves/)

### Imbalanced Classes

- [imbalance-learn](https://github.com/scikit-learn-contrib/imbalanced-learn)
- [Learning from Imbalanced Classes](https://www.svds.com/tbt-learning-imbalanced-classes/)

### Last lesson

- [Attacking discrimination with smarter machine learning](https://research.google.com/bigpicture/attacking-discrimination-in-ml/), by Google Research, with interactive visualizations. _"A threshold classifier essentially makes a yes/no decision, putting things in one category or another. We look at how these classifiers work, ways they can potentially be unfair, and how you might turn an unfair classifier into a fairer one. As an illustrative example, we focus on loan granting scenarios where a bank may grant or deny a loan based on a single, automatically computed number such as a credit score."_
- [How Shopify Capital Uses Quantile Regression To Help Merchants Succeed](https://engineering.shopify.com/blogs/engineering/how-shopify-uses-machine-learning-to-help-our-merchants-grow-their-business)
- [Maximizing Scarce Maintenance Resources with Data: Applying predictive modeling, precision at k, and clustering to optimize impact](https://towardsdatascience.com/maximizing-scarce-maintenance-resources-with-data-8f3491133050), by Lambda DS3 student Michael Brady. His blog post extends the Tanzania Waterpumps scenario, far beyond what's in the lecture notebook.
- [Notebook about how to calculate expected value from a confusion matrix by treating it as a cost-benefit matrix](https://github.com/podopie/DAT18NYC/blob/master/classes/13-expected_value_cost_benefit_analysis.ipynb)
- [Simple guide to confusion matrix terminology](https://www.dataschool.io/simple-guide-to-confusion-matrix-terminology/) by Kevin Markham, with video
- [Visualizing Machine Learning Thresholds to Make Better Business Decisions](https://blog.insightdatascience.com/visualizing-machine-learning-thresholds-to-make-better-business-decisions-4ab07f823415)

Import the final Liverpool Football Club data file.
|
# import pandas library as pd.
import pandas as pd
# read in the LiverpoolFootballClub_all csv file.
LPFC = pd.read_csv('https://raw.githubusercontent.com/CVanchieri/LSDS-DataSets/master/EnglishPremierLeagueData/LiverpoolFootballClubData_EPL.csv')
# show the data frame shape.
print(LPFC.shape)
# show the data frame with headers.
LPFC.head()
|
(1003, 161)
|
MIT
|
module1/1.Assignment/1.Assignment_AppliedModeling_Module1.ipynb
|
CVanchieri/DS-Unit2-Sprint3-AppliedModeling
|
Organizing & cleaning.
|
# group the columns we want to use.
columns = ["Div", "Date", "HomeTeam", "AwayTeam", "FTHG", "FTAG", "FTR",
"HTHG", "HTAG", "HTR", "HS", "AS", "HST", "AST", "HHW", "AHW",
"HC", "AC", "HF", "AF", "HO", "AO", "HY", "AY", "HR", "AR", "HBP", "ABP"]
# create a new data frame with just the grouped columns.
LPFC = LPFC[columns]
# show the data frame shape.
print(LPFC.shape)
# show the data frame with headers.
LPFC.head()
# relabeling columns for better understanding.
LPFC = LPFC.rename(columns={"Div": "Division", "Date": "GameDate", "FTHG": "FullTimeHomeGoals", "FTAG": "FullTimeAwayGoals", "FTR": "FullTimeResult", "HTHG": "HalfTimeHomeGoals",
"HTAG": "HalfTimeAwayGoals", "HTR": "HalfTimeResult", "HS": "HomeShots", "AS": "AwayShots",
"HST": "HomeShotsOnTarget", "AST": "AwayShotsOnTarget", "HHW": "HomeShotsHitFrame",
"AHW": "AwayShotsHitFrame", "HC": "HomeCorners", "AC": "AwayCorners", "HF": "HomeFouls",
"AF": "AwayFouls", "HO": "HomeOffSides", "AO": "AwayOffSides", "HY": "HomeYellowCards",
"AY": "AwayYellowCards", "HR": "HomeRedCards", "AR": "AwayRedCards", "HBP": "HomeBookingPoints_Y5_R10",
"ABP": "AwayBookingPoints_Y5_R10"})
# show the data frame with headers.
LPFC.head()
|
_____no_output_____
|
MIT
|
module1/1.Assignment/1.Assignment_AppliedModeling_Module1.ipynb
|
CVanchieri/DS-Unit2-Sprint3-AppliedModeling
|
Baseline accuracy score.
|
# import accuracy_score from sklearn.metrics library.
from sklearn.metrics import accuracy_score
# determine 'majority class' baseline starting point for every prediction.
# single out the target, 'FullTimeResult' column.
target = LPFC['FullTimeResult']
# create the majority class with setting the 'mode' on the target data.
majority_class = target.mode()[0]
# create the y_pred data.
y_pred = [majority_class] * len(target)
# accuracy score for the majority class baseline = frequency of the majority class.
ac = accuracy_score(target, y_pred)
print("'Majority Baseline' Accuracy Score =", ac)
|
'Majority Baseline' Accuracy Score = 0.4745762711864407
|
MIT
|
module1/1.Assignment/1.Assignment_AppliedModeling_Module1.ipynb
|
CVanchieri/DS-Unit2-Sprint3-AppliedModeling
|
Train/validation split of the data frame (no separate test set is held out here).
|
df = LPFC.copy()
# import train_test_split from sklearn.model_selection library.
from sklearn.model_selection import train_test_split
# single out the target column (as a list, so y stays a DataFrame).
target = ['FullTimeResult']
y = df[target]
# split data into train, test.
X_train, X_val, y_train, y_val = train_test_split(df, y, test_size=0.20,
stratify=y, random_state=42)
# show the data frame shapes.
print("train =", X_train.shape, y_train.shape, "val =", X_val.shape, y_val.shape)
|
train = (802, 28) (802, 1) val = (201, 28) (201, 1)
|
MIT
|
module1/1.Assignment/1.Assignment_AppliedModeling_Module1.ipynb
|
CVanchieri/DS-Unit2-Sprint3-AppliedModeling
|
LogisticRegression model.
|
import numpy as np
from datetime import datetime
def wrangle(X):
    """Wrangle train, validate, and test sets in the same way"""
    # prevent SettingWithCopyWarning with a copy.
    X = X.copy()
    # make 'GameDate' useable with datetime.
    X['GameDate'] = pd.to_datetime(X['GameDate'], infer_datetime_format=True)
    # create new columns for 'YearOfGame', 'MonthOfGame', 'DayOfGame'.
    X['YearOfGame'] = X['GameDate'].dt.year
    X['MonthOfGame'] = X['GameDate'].dt.month
    X['DayOfGame'] = X['GameDate'].dt.day
    # remove 'FullTimeHomeGoals' and 'FullTimeAwayGoals' (they directly
    # determine the result), plus 'Division' and 'GameDate'.
    dropped_columns = ['FullTimeHomeGoals', 'FullTimeAwayGoals', 'Division', 'GameDate']
    X = X.drop(columns=dropped_columns)
    # return the wrangled dataframe
    return X
X_train = wrangle(X_train)
X_val = wrangle(X_val)
# set the target column.
target = 'FullTimeResult'
# set the features: every column except the target.
train_features = X_train.drop(columns=[target])
# group all the numeric features.
numeric_features = train_features.select_dtypes(include='number').columns.tolist()
# group the cardinality of the nonnumeric features.
cardinality = train_features.select_dtypes(exclude='number').nunique()
# group all categorical features with cardinality <= 500.
categorical_features = cardinality[cardinality <= 500].index.tolist()
# create features with numeric + categorical
features = numeric_features + categorical_features
# create the new vaules with the new features/target data.
X_train = X_train[features]
X_val = X_val[features]
!pip install category_encoders
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
pipeline = make_pipeline(
ce.OneHotEncoder(use_cat_names=True),
SimpleImputer(strategy='median'),
StandardScaler(),
LogisticRegression(solver='lbfgs', multi_class='auto', max_iter=1000, n_jobs=-1)
)
# Fit on train, score on val
pipeline.fit(X_train, y_train)
print ('Training Accuracy', pipeline.score(X_train, y_train))
print('Validation Accuracy', pipeline.score(X_val, y_val))
y_pred = pipeline.predict(X_val)
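# Note (added remark, not in the original notebook): the DataConversionWarning
# shown below is triggered because y_train is a single-column DataFrame;
# passing y_train.values.ravel() to pipeline.fit() would silence it without
# changing the fitted model.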
|
/usr/local/lib/python3.6/dist-packages/sklearn/utils/validation.py:724: DataConversionWarning: A column-vector y was passed when a 1d array was expected. Please change the shape of y to (n_samples, ), for example using ravel().
y = column_or_1d(y, warn=True)
|
MIT
|
module1/1.Assignment/1.Assignment_AppliedModeling_Module1.ipynb
|
CVanchieri/DS-Unit2-Sprint3-AppliedModeling
|
___
___

# Matplotlib Exercises

Welcome to the exercises for reviewing matplotlib! Take your time with these; Matplotlib can be tricky to understand at first. These are relatively simple plots, but they can be hard if this is your first time with matplotlib, so feel free to reference the solutions as you go along.

Also don't worry if you find the matplotlib syntax frustrating. We actually won't be using it that often throughout the course; we will switch to using seaborn and pandas built-in visualization capabilities. But those are built off of matplotlib, which is why it is still important to get exposure to it!

**NOTE: ALL THE COMMANDS FOR PLOTTING A FIGURE SHOULD ALL GO IN THE SAME CELL. SEPARATING THEM OUT INTO MULTIPLE CELLS MAY CAUSE NOTHING TO SHOW UP.**

## Exercises

Follow the instructions to recreate the plots using this data:

### Data
|
import numpy as np
x = np.arange(0,100)
y = x*2
z = x**2
|
_____no_output_____
|
Apache-2.0
|
udemy-ds-bc/Py_DS_ML_bootcamp/00-my-practice/05-Data-Visualization-with-Matplotlib/02_my_matplotlib_exercise.ipynb
|
JennEYoon/python-ml
|
** Import matplotlib.pyplot as plt and set %matplotlib inline if you are using the jupyter notebook. What command do you use if you aren't using the jupyter notebook?**
|
import matplotlib.pyplot as plt
%matplotlib inline
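# Outside a Jupyter notebook, you would call plt.show() to display the figure.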
|
_____no_output_____
|
Apache-2.0
|
udemy-ds-bc/Py_DS_ML_bootcamp/00-my-practice/05-Data-Visualization-with-Matplotlib/02_my_matplotlib_exercise.ipynb
|
JennEYoon/python-ml
|
## Exercise 1

**Follow along with these steps:**
* **Create a figure object called fig using plt.figure()**
* **Use add_axes to add an axis to the figure canvas at [0,0,1,1]. Call this new axis ax.**
* **Plot (x,y) on that axes and set the labels and titles to match the plot below:**
|
# Functional Method
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.plot(x, y)
ax.set_title('title')
ax.set_xlabel('X')
ax.set_ylabel('Y')
|
_____no_output_____
|
Apache-2.0
|
udemy-ds-bc/Py_DS_ML_bootcamp/00-my-practice/05-Data-Visualization-with-Matplotlib/02_my_matplotlib_exercise.ipynb
|
JennEYoon/python-ml
|
## Exercise 2

**Create a figure object and put two axes on it, ax1 and ax2, located at [0,0,1,1] and [0.2,0.5,.2,.2] respectively.**
|
# create figure canvas
fig = plt.figure()
# create axes
ax1 = fig.add_axes([0,0,1,1])
ax2 = fig.add_axes([0.2,0.5,.2,.2])
plt.xticks(np.arange(0, 1.2, step=0.2))
plt.yticks(np.arange(0, 1.2, step=0.2))
|
_____no_output_____
|
Apache-2.0
|
udemy-ds-bc/Py_DS_ML_bootcamp/00-my-practice/05-Data-Visualization-with-Matplotlib/02_my_matplotlib_exercise.ipynb
|
JennEYoon/python-ml
|
** Now plot (x,y) on both axes. And call your figure object to show it.**
|
# create figure canvas
fig = plt.figure()
# create axes
ax1 = fig.add_axes([0,0,1,1])
ax2 = fig.add_axes([0.2,0.5,.2,.2])
ax1.set_xlabel('x1')
ax1.set_ylabel('y1')
ax2.set_xlabel('x2')
ax2.set_ylabel('y2')
ax1.plot(x, y, 'r-')
ax2.plot(x, y, 'b--')
plt.xticks(np.arange(0, 120, step=20))
plt.yticks(np.arange(0, 220, step=50))
|
_____no_output_____
|
Apache-2.0
|
udemy-ds-bc/Py_DS_ML_bootcamp/00-my-practice/05-Data-Visualization-with-Matplotlib/02_my_matplotlib_exercise.ipynb
|
JennEYoon/python-ml
|
## Exercise 3

**Create the plot below by adding two axes to a figure object at [0,0,1,1] and [0.2,0.5,.4,.4]**
|
fig = plt.figure()
ax1 = fig.add_axes([0,0,1,1])
ax2 = fig.add_axes([0.2,0.5,.4,.4])
|
_____no_output_____
|
Apache-2.0
|
udemy-ds-bc/Py_DS_ML_bootcamp/00-my-practice/05-Data-Visualization-with-Matplotlib/02_my_matplotlib_exercise.ipynb
|
JennEYoon/python-ml
|
** Now use x,y, and z arrays to recreate the plot below. Notice the xlimits and y limits on the inserted plot:**
|
fig = plt.figure()
ax1 = fig.add_axes([0,0,1,1])
ax2 = fig.add_axes([0.2,0.5,.4,.4])
ax1.plot(x, z)
ax2.plot(x, y, 'r--') # zoom using xlimit (20, 22), ylimit (30, 50)
ax2.set_xlim([20, 22])
ax2.set_ylim([30, 50])
ax2.set_title('zoom')
ax2.set_xlabel('X')
ax2.set_ylabel('Y')
ax1.set_xlabel('X')
ax1.set_ylabel('Z')
|
_____no_output_____
|
Apache-2.0
|
udemy-ds-bc/Py_DS_ML_bootcamp/00-my-practice/05-Data-Visualization-with-Matplotlib/02_my_matplotlib_exercise.ipynb
|
JennEYoon/python-ml
|
## Exercise 4

**Use plt.subplots(nrows=1, ncols=2) to create the plot below.**
|
fig, axes = plt.subplots(nrows=1, ncols=2)
# axes object is an array of subplot axis.
plt.tight_layout() # add space between rows & columns.
|
_____no_output_____
|
Apache-2.0
|
udemy-ds-bc/Py_DS_ML_bootcamp/00-my-practice/05-Data-Visualization-with-Matplotlib/02_my_matplotlib_exercise.ipynb
|
JennEYoon/python-ml
|
** Now plot (x,y) and (x,z) on the axes. Play around with the linewidth and style**
|
fig, axes = plt.subplots(nrows=1, ncols=2)
# axes object is an array of subplot axis.
axes[0].plot(x, y, 'b--', lw=3)
axes[1].plot(x, z, 'r-.', lw=2)
plt.tight_layout() # add space between rows & columns.
|
_____no_output_____
|
Apache-2.0
|
udemy-ds-bc/Py_DS_ML_bootcamp/00-my-practice/05-Data-Visualization-with-Matplotlib/02_my_matplotlib_exercise.ipynb
|
JennEYoon/python-ml
|
** See if you can resize the plot by adding the figsize() argument in plt.subplots() and copying and pasting your previous code.**
|
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 7))
# axes object is an array of subplot axis.
axes[0].plot(x, y, 'b--', lw=3)
axes[1].plot(x, z, 'r-.', lw=2)
plt.tight_layout() # add space between rows & columns.
|
_____no_output_____
|
Apache-2.0
|
udemy-ds-bc/Py_DS_ML_bootcamp/00-my-practice/05-Data-Visualization-with-Matplotlib/02_my_matplotlib_exercise.ipynb
|
JennEYoon/python-ml
|
# Implementing the Gradient Descent Algorithm

In this lab, we'll implement the basic functions of the Gradient Descent algorithm to find the boundary in a small dataset. First, we'll start with some functions that will help us plot and visualize the data.
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Some helper functions for plotting and drawing lines

def plot_points(X, y):
    admitted = X[np.argwhere(y==1)]
    rejected = X[np.argwhere(y==0)]
    plt.scatter([s[0][0] for s in rejected], [s[0][1] for s in rejected], s = 25, color = 'blue', edgecolor = 'k')
    plt.scatter([s[0][0] for s in admitted], [s[0][1] for s in admitted], s = 25, color = 'red', edgecolor = 'k')

def display(m, b, color='g--'):
    plt.xlim(-0.05,1.05)
    plt.ylim(-0.05,1.05)
    x = np.arange(-10, 10, 0.1)
    plt.plot(x, m*x+b, color)
|
_____no_output_____
|
MIT
|
intro-neural-networks/gradient-descent/GradientDescent.ipynb
|
basilcea/deep-learning-v2-pytorch
|
Reading and plotting the data
|
data = pd.read_csv('data.csv', header=None)
X = np.array(data[[0,1]])
y = np.array(data[2])
plot_points(X,y)
plt.show()
|
_____no_output_____
|
MIT
|
intro-neural-networks/gradient-descent/GradientDescent.ipynb
|
basilcea/deep-learning-v2-pytorch
|
## TODO: Implementing the basic functions

Here is your turn to shine. Implement the following formulas, as explained in the text.

- Sigmoid activation function

$$\sigma(x) = \frac{1}{1+e^{-x}}$$

- Output (prediction) formula

$$\hat{y} = \sigma(w_1 x_1 + w_2 x_2 + b)$$

- Error function

$$Error(y, \hat{y}) = - y \log(\hat{y}) - (1-y) \log(1-\hat{y})$$

- The function that updates the weights

$$ w_i \longrightarrow w_i + \alpha (y - \hat{y}) x_i$$

$$ b \longrightarrow b + \alpha (y - \hat{y})$$
|
# Implement the following functions

# Activation (sigmoid) function
def sigmoid(x):
    exp = np.exp(-x)
    return 1/(1+exp)

# Output (prediction) formula
def output_formula(features, weights, bias):
    return sigmoid(np.dot(features, weights) + bias)

# Error (log-loss) formula
def error_formula(y, output):
    return -y*np.log(output) - (1 - y)*np.log(1 - output)

# Gradient descent step
def update_weights(x, y, weights, bias, learnrate):
    output = output_formula(x, weights, bias)
    error = y - output
    weights += learnrate*x*error
    bias += learnrate*error
    return weights, bias
# # Activation (sigmoid) function
# def sigmoid(x):
# return 1 / (1 + np.exp(-x))
# def output_formula(features, weights, bias):
# return sigmoid(np.dot(features, weights) + bias)
# def error_formula(y, output):
# return - y*np.log(output) - (1 - y) * np.log(1-output)
# def update_weights(x, y, weights, bias, learnrate):
# output = output_formula(x, weights, bias)
# d_error = y - output
# weights += learnrate * d_error * x
# bias += learnrate * d_error
# return weights, bias
|
_____no_output_____
|
MIT
|
intro-neural-networks/gradient-descent/GradientDescent.ipynb
|
basilcea/deep-learning-v2-pytorch
|
Training functionThis function will help us iterate the gradient descent algorithm through all the data, for a number of epochs. It will also plot the data, and some of the boundary lines obtained as we run the algorithm.
|
np.random.seed(44)
epochs = 100
learnrate = 0.01
def train(features, targets, epochs, learnrate, graph_lines=False):
    errors = []
    n_records, n_features = features.shape
    last_loss = None
    weights = np.random.normal(scale=1 / n_features**.5, size=n_features)
    bias = 0
    for e in range(epochs):
        # one pass of stochastic gradient descent over all points
        for x, y in zip(features, targets):
            weights, bias = update_weights(x, y, weights, bias, learnrate)

        # Printing out the log-loss error on the training set
        out = output_formula(features, weights, bias)
        loss = np.mean(error_formula(targets, out))
        errors.append(loss)
        if e % (epochs / 10) == 0:
            print("\n========== Epoch", e, "==========")
            if last_loss and last_loss < loss:
                print("Train loss: ", loss, " WARNING - Loss Increasing")
            else:
                print("Train loss: ", loss)
            last_loss = loss
            predictions = out > 0.5
            accuracy = np.mean(predictions == targets)
            print("Accuracy: ", accuracy)
        if graph_lines and e % (epochs / 100) == 0:
            display(-weights[0]/weights[1], -bias/weights[1])

    # Plotting the solution boundary
    plt.title("Solution boundary")
    display(-weights[0]/weights[1], -bias/weights[1], 'black')

    # Plotting the data
    plot_points(features, targets)
    plt.show()

    # Plotting the error
    plt.title("Error Plot")
    plt.xlabel('Number of epochs')
    plt.ylabel('Error')
    plt.plot(errors)
    plt.show()
|
_____no_output_____
|
MIT
|
intro-neural-networks/gradient-descent/GradientDescent.ipynb
|
basilcea/deep-learning-v2-pytorch
|
Time to train the algorithm!When we run the function, we'll obtain the following:- 10 updates with the current training loss and accuracy- A plot of the data and some of the boundary lines obtained. The final one is in black. Notice how the lines get closer and closer to the best fit, as we go through more epochs.- A plot of the error function. Notice how it decreases as we go through more epochs.
|
train(X, y, epochs, learnrate, True)
|
========== Epoch 0 ==========
Train loss: 0.7135845195381633
Accuracy: 0.4
========== Epoch 10 ==========
Train loss: 0.6225835210454962
Accuracy: 0.59
========== Epoch 20 ==========
Train loss: 0.5548744083669508
Accuracy: 0.74
========== Epoch 30 ==========
Train loss: 0.501606141872473
Accuracy: 0.84
========== Epoch 40 ==========
Train loss: 0.4593334641861401
Accuracy: 0.86
========== Epoch 50 ==========
Train loss: 0.42525543433469976
Accuracy: 0.93
========== Epoch 60 ==========
Train loss: 0.39734615716713984
Accuracy: 0.93
========== Epoch 70 ==========
Train loss: 0.3741469765239074
Accuracy: 0.93
========== Epoch 80 ==========
Train loss: 0.3545997336816197
Accuracy: 0.94
========== Epoch 90 ==========
Train loss: 0.3379273658879921
Accuracy: 0.94
|
MIT
|
intro-neural-networks/gradient-descent/GradientDescent.ipynb
|
basilcea/deep-learning-v2-pytorch
|
Expected numbers on Table 3.
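For a uniform distribution over $C$ latent classes, the expected number of draws needed to see every class at least once (the coupon-collector expectation evaluated below) is

$$\mathbb{E}[K+1] = \sum_{k=1}^{C} \frac{C}{k} = C\,H_C,$$

and for a non-uniform class distribution $p$ (the ImageNet case), Poissonization gives

$$\mathbb{E}[K+1] = \int_0^{\infty} \Big(1 - \prod_{i}\big(1 - e^{-p_i t}\big)\Big)\,dt,$$

which is the integrand passed to `quad` in the cell below. (These are the standard coupon-collector results that the code implements.)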
|
# imports needed by this cell (an assumption: in the original notebook these
# likely appeared in an earlier cell)
import json

import numpy as np
from scipy.integrate import quad
from tabulate import tabulate

rows = []
datasets = {
    'Binary': 2,
    'AG news': 4,
    'CIFAR10': 10,
    'CIFAR100': 100,
    'Wiki3029': 3029,
}

def expectations(C: int) -> float:
    """
    C is the number of latent classes.
    """
    e = 0.
    for k in range(1, C + 1):
        e += C / k
    return e

for dataset_name, C in datasets.items():
    e = expectations(C)
    rows.append((dataset_name, C, np.ceil(e)))

# ImageNet has a non-uniform label distribution on the training dataset
data = json.load(open("./imagenet_count.json"))
counts = np.array(list(data.values()))
total_num = np.sum(counts)
prob = counts / total_num

def integrand(t: float, prob: np.ndarray) -> float:
    return 1. - np.prod(1 - np.exp(-prob * t))

rows.append(("ImageNet", len(prob), np.ceil(quad(integrand, 0, np.inf, args=(prob))[0])))

print(tabulate(rows, headers=["Dataset", "\# classes", "\mathbb{E}[K+1]"]))
|
Dataset \# classes \mathbb{E}[K+1]
--------- ------------ -----------------
Binary 2 3
AG news 4 9
CIFAR10 10 30
CIFAR100 100 519
Wiki3029 3029 26030
ImageNet 1000 7709
|
MIT
|
code/notebooks/coupon.ipynb
|
nzw0301/Understanding-Negative-Samples-in-Instance-Discriminative-Self-supervised-Representation-Learning
|
Probability $\upsilon$
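For reference, the inner sum in the cell below is the standard coupon-collector probability that the collection of all $C$ classes is completed exactly at draw $n$:

$$P(T = n) = \sum_{m=0}^{C-2} \binom{C-1}{m} (-1)^m \left(1 - \frac{m+1}{C}\right)^{n-1},$$

so summing over $n = C, \dots, N$ gives the probability that $N$ draws contain every class at least once.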
|
from scipy.special import comb  # import added so the cell is self-contained (assumption)

def prob(C, N):
    """
    C: the number of latent classes
    N: the number of samples to draw
    """
    theoretical = []
    for n in range(C, N + 1):
        p = 0.
        for m in range(C - 1):
            p += comb(C - 1, m) * ((-1) ** m) * np.exp((n - 1) * np.log(1. - (m + 1) / C))
        theoretical.append((n, max(p, 0.)))
    return np.array(theoretical)

# example of CIFAR-10
C = 10
for N in [32, 63, 128, 256, 512]:
    p = np.sum(prob(C, N).T[1])
    print("{:3d} {:.7f}".format(N, p))

# example of CIFAR-100
C = 100
ps = []
ns = []
for N in 128 * np.arange(1, 9):
    p = np.sum(prob(C, N).T[1])
    print("{:4d} {}".format(N, p))
    ps.append(p)
    ns.append(N)
|
128 0.0004517171443332115
256 0.0005750103110269027
384 0.10845377001311465
512 0.5531327628081966
640 0.8510308810769567
768 0.956899070354311
896 0.9882414056661265
1024 0.9970649738141432
|
MIT
|
code/notebooks/coupon.ipynb
|
nzw0301/Understanding-Negative-Samples-in-Instance-Discriminative-Self-supervised-Representation-Learning
|