markdown | code | output | license | path | repo_name
---|---|---|---|---|---
Train the cDCGAN model
|
%%bash
gsutil -m rm -rf ${OUTPUT_DIR}
export PYTHONPATH=$PYTHONPATH:$PWD/cdcgan_module
python3 -m trainer.task \
--train_file_pattern=${TRAIN_FILE_PATTERN} \
--eval_file_pattern=${EVAL_FILE_PATTERN} \
--output_dir=${OUTPUT_DIR} \
--job-dir=./tmp \
\
--train_batch_size=${TRAIN_BATCH_SIZE} \
--train_steps=${TRAIN_STEPS} \
--save_summary_steps=${SAVE_SUMMARY_STEPS} \
--save_checkpoints_steps=${SAVE_CHECKPOINTS_STEPS} \
--keep_checkpoint_max=${KEEP_CHECKPOINT_MAX} \
--input_fn_autotune=${INPUT_FN_AUTOTUNE} \
\
--eval_batch_size=${EVAL_BATCH_SIZE} \
--eval_steps=${EVAL_STEPS} \
--start_delay_secs=${START_DELAY_SECS} \
--throttle_secs=${THROTTLE_SECS} \
\
--height=${HEIGHT} \
--width=${WIDTH} \
--depth=${DEPTH} \
\
--num_classes=${NUM_CLASSES} \
--label_embedding_dimension=${LABEL_EMBEDDING_DIMENSION} \
\
--latent_size=${LATENT_SIZE} \
--generator_projection_dims=${GENERATOR_PROJECTION_DIMS} \
--generator_use_labels=${GENERATOR_USE_LABELS} \
--generator_embed_labels=${GENERATOR_EMBED_LABELS} \
--generator_concatenate_labels=${GENERATOR_CONCATENATE_LABELS} \
--generator_num_filters=${GENERATOR_NUM_FILTERS} \
--generator_kernel_sizes=${GENERATOR_KERNEL_SIZES} \
--generator_strides=${GENERATOR_STRIDES} \
--generator_final_num_filters=${GENERATOR_FINAL_NUM_FILTERS} \
--generator_final_kernel_size=${GENERATOR_FINAL_KERNEL_SIZE} \
--generator_final_stride=${GENERATOR_FINAL_STRIDE} \
--generator_leaky_relu_alpha=${GENERATOR_LEAKY_RELU_ALPHA} \
--generator_final_activation=${GENERATOR_FINAL_ACTIVATION} \
--generator_l1_regularization_scale=${GENERATOR_L1_REGULARIZATION_SCALE} \
--generator_l2_regularization_scale=${GENERATOR_L2_REGULARIZATION_SCALE} \
--generator_optimizer=${GENERATOR_OPTIMIZER} \
--generator_learning_rate=${GENERATOR_LEARNING_RATE} \
--generator_adam_beta1=${GENERATOR_ADAM_BETA1} \
--generator_adam_beta2=${GENERATOR_ADAM_BETA2} \
--generator_adam_epsilon=${GENERATOR_ADAM_EPSILON} \
--generator_clip_gradients=${GENERATOR_CLIP_GRADIENTS} \
--generator_train_steps=${GENERATOR_TRAIN_STEPS} \
\
--discriminator_use_labels=${DISCRIMINATOR_USE_LABELS} \
--discriminator_embed_labels=${DISCRIMINATOR_EMBED_LABELS} \
--discriminator_concatenate_labels=${DISCRIMINATOR_CONCATENATE_LABELS} \
--discriminator_num_filters=${DISCRIMINATOR_NUM_FILTERS} \
--discriminator_kernel_sizes=${DISCRIMINATOR_KERNEL_SIZES} \
--discriminator_strides=${DISCRIMINATOR_STRIDES} \
--discriminator_dropout_rates=${DISCRIMINATOR_DROPOUT_RATES} \
--discriminator_leaky_relu_alpha=${DISCRIMINATOR_LEAKY_RELU_ALPHA} \
--discriminator_l1_regularization_scale=${DISCRIMINATOR_L1_REGULARIZATION_SCALE} \
--discriminator_l2_regularization_scale=${DISCRIMINATOR_L2_REGULARIZATION_SCALE} \
--discriminator_optimizer=${DISCRIMINATOR_OPTIMIZER} \
--discriminator_learning_rate=${DISCRIMINATOR_LEARNING_RATE} \
--discriminator_adam_beta1=${DISCRIMINATOR_ADAM_BETA1} \
--discriminator_adam_beta2=${DISCRIMINATOR_ADAM_BETA2} \
--discriminator_adam_epsilon=${DISCRIMINATOR_ADAM_EPSILON} \
--discriminator_clip_gradients=${DISCRIMINATOR_CLIP_GRADIENTS} \
--discriminator_train_steps=${DISCRIMINATOR_TRAIN_STEPS} \
--label_smoothing=${LABEL_SMOOTHING}
|
_____no_output_____
|
Apache-2.0
|
machine_learning/gan/cdcgan/tf_cdcgan/tf_cdcgan_run_module_local.ipynb
|
ryangillard/artificial_intelligence
|
Prediction
|
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
!gsutil ls gs://machine-learning-1234-bucket/gan/cdcgan/trained_model2/export/exporter
predict_fn = tf.contrib.predictor.from_saved_model(
"gs://machine-learning-1234-bucket/gan/cdcgan/trained_model2/export/exporter/1592859903"
)
# NOTE: num_classes is not defined in this cell; 10 is an assumed value
# (the number of label classes the model was trained with).
num_classes = 10
predictions = predict_fn(
    {
        "Z": np.random.normal(size=(num_classes, 512)),
        "label": np.arange(num_classes)
    }
)
print(list(predictions.keys()))
|
['generated_images']
|
Apache-2.0
|
machine_learning/gan/cdcgan/tf_cdcgan/tf_cdcgan_run_module_local.ipynb
|
ryangillard/artificial_intelligence
|
Convert image back to the original scale.
|
generated_images = np.clip(
    a=((predictions["generated_images"] + 1.0) * (255. / 2)).astype(np.int32),
    a_min=0,
    a_max=255
)
print(generated_images.shape)

def plot_images(images):
    """Plots images.

    Args:
        images: np.array, array of images of
            [num_images, height, width, depth].
    """
    num_images = len(images)
    plt.figure(figsize=(20, 20))
    for i in range(num_images):
        image = images[i]
        plt.subplot(1, num_images, i + 1)
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        plt.imshow(image, cmap=plt.cm.binary)
    plt.show()

plot_images(generated_images)
|
_____no_output_____
|
Apache-2.0
|
machine_learning/gan/cdcgan/tf_cdcgan/tf_cdcgan_run_module_local.ipynb
|
ryangillard/artificial_intelligence
|
Check surface fluxes of CO$_2$
|
# imports assumed by this cell
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr

# check the data folder to switch to other mixing conditions
#ds = xr.open_dataset('data/results_so4_adv/5_po75-25_di10e-9/water.nc')
ds = xr.open_dataset('data/results_so4_adv/9_po75-25_di30e-9/water.nc')
#ds = xr.open_dataset('data/no_denitrification/water.nc')
dicflux_df = ds['B_C_DIC _flux'].to_dataframe()
oxyflux_df = ds['B_BIO_O2 _flux'].to_dataframe()
dicflux_surface = dicflux_df.groupby('z_faces').get_group(0)
oxyflux_surface = oxyflux_df.groupby('z_faces').get_group(0)
dicflux_surface_year = dicflux_surface.loc['2011-01-01':'2011-12-31']
oxyflux_surface_year = oxyflux_surface.loc['2011-01-01':'2011-12-31']
ox = np.arange(1, 366, 1)
plt.plot(ox, dicflux_surface_year); plt.gcf().set_size_inches(10, 2);
plt.title('Air-sea CO$_2$ flux, positive means upwards');
plt.xlabel('Day'); plt.ylabel('Flux, mmol m$^{-2}$ d$^{-1}$');
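# Follow-up sketch (not in the original notebook): integrate the daily
# surface flux over the year to get the annual air-sea exchange.
annual_dic = dicflux_surface_year['B_C_DIC _flux'].sum()
print('Annual air-sea CO2 flux: {:.1f} mmol m-2 yr-1'.format(annual_dic))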
|
_____no_output_____
|
CC-BY-3.0
|
s_6_air-sea_and_advective_fluxes_WS.ipynb
|
limash/ws_notebook
|
Advective TA exchange. These data show how alkalinity in the Wadden Sea changes due to mixing with the North Sea. Positive means alkalinity comes from the North Sea; negative means it goes to the North Sea.
|
nh4ta_df = ds['TA_due_to_NH4'].to_dataframe()
no3ta_df = ds['TA_due_to_NO3'].to_dataframe()
po4ta_df = ds['TA_due_to_PO4'].to_dataframe()
so4ta_df = ds['TA_due_to_SO4'].to_dataframe()
nh4ta_year = nh4ta_df.loc['2011-01-01':'2011-12-31']
no3ta_year = no3ta_df.loc['2011-01-01':'2011-12-31']
po4ta_year = po4ta_df.loc['2011-01-01':'2011-12-31']
so4ta_year = so4ta_df.loc['2011-01-01':'2011-12-31']
nh4ta = np.array(nh4ta_year.TA_due_to_NH4.values)
no3ta = np.array(no3ta_year.TA_due_to_NO3.values)
po4ta = np.array(po4ta_year.TA_due_to_PO4.values)
so4ta = np.array(so4ta_year.TA_due_to_SO4.values)
total = nh4ta+no3ta+po4ta+so4ta
plt.plot(ox, total); plt.gcf().set_size_inches(10, 2);
plt.title('WS - NS alkalinity flux, positive means to the WS');
plt.xlabel('Day'); plt.ylabel('Flux, mmol m$^{-2}$ d$^{-1}$');
year = (('2011-01-01','2011-01-31'), ('2011-02-01','2011-02-28'), ('2011-03-01','2011-03-31'), ('2011-04-01','2011-04-30'),
('2011-05-01','2011-05-31'), ('2011-06-01','2011-06-30'), ('2011-07-01','2011-07-31'), ('2011-08-01','2011-08-31'),
('2011-09-01','2011-09-30'), ('2011-10-01','2011-10-31'), ('2011-11-01','2011-11-30'), ('2011-12-01','2011-12-31'))
nh4ta_year = []
no3ta_year = []
po4ta_year = []
so4ta_year = []
for month in year:
    nh4ta_month = nh4ta_df.loc[month[0]:month[1]]
    no3ta_month = no3ta_df.loc[month[0]:month[1]]
    po4ta_month = po4ta_df.loc[month[0]:month[1]]
    so4ta_month = so4ta_df.loc[month[0]:month[1]]
    nh4ta_year.append(nh4ta_month['TA_due_to_NH4'].sum())
    no3ta_year.append(no3ta_month['TA_due_to_NO3'].sum())
    po4ta_year.append(po4ta_month['TA_due_to_PO4'].sum())
    so4ta_year.append(so4ta_month['TA_due_to_SO4'].sum())
nh4ta = np.array(nh4ta_year)
no3ta = np.array(no3ta_year)
po4ta = np.array(po4ta_year)
so4ta = np.array(so4ta_year)
total = nh4ta + no3ta + po4ta + so4ta
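# An equivalent monthly aggregation via pandas resampling (a sketch,
# assuming the dataframes above carry a datetime index):
monthly_total = (
    nh4ta_df.loc['2011']['TA_due_to_NH4'].resample('M').sum()
    + no3ta_df.loc['2011']['TA_due_to_NO3'].resample('M').sum()
    + po4ta_df.loc['2011']['TA_due_to_PO4'].resample('M').sum()
    + so4ta_df.loc['2011']['TA_due_to_SO4'].resample('M').sum()
)
print(monthly_total.values)  # should match `total` above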
|
_____no_output_____
|
CC-BY-3.0
|
s_6_air-sea_and_advective_fluxes_WS.ipynb
|
limash/ws_notebook
|
Here and below, units: mmol m$^{-2}$
|
nh4ta
sum(nh4ta)
no3ta
sum(no3ta)
po4ta
sum(po4ta)
so4ta
sum(so4ta)
total
sum(total)
|
_____no_output_____
|
CC-BY-3.0
|
s_6_air-sea_and_advective_fluxes_WS.ipynb
|
limash/ws_notebook
|
Scatter Plot with Minimap. This example shows how to create a miniature version of a plot such that making a selection in the miniature adjusts the axis limits in another, more detailed view.
|
import altair as alt
from vega_datasets import data
source = data.seattle_weather()
zoom = alt.selection_interval(encodings=["x", "y"])
minimap = (
    alt.Chart(source)
    .mark_point()
    .add_selection(zoom)
    .encode(
        x="date:T",
        y="temp_max:Q",
        color=alt.condition(zoom, "weather", alt.value("lightgray")),
    )
    .properties(
        width=200,
        height=200,
        title="Minimap -- click and drag to zoom in the detail view",
    )
)
detail = (
    alt.Chart(source)
    .mark_point()
    .encode(
        x=alt.X(
            "date:T", scale=alt.Scale(domain={"selection": zoom.name, "encoding": "x"})
        ),
        y=alt.Y(
            "temp_max:Q",
            scale=alt.Scale(domain={"selection": zoom.name, "encoding": "y"}),
        ),
        color="weather",
    )
    .properties(width=600, height=400, title="Seattle weather -- detail view")
)
detail | minimap
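# Usage sketch (not in the original example): export the linked charts as a
# self-contained interactive HTML file; the filename is arbitrary.
chart = detail | minimap
chart.save("scatter_with_minimap.html")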
|
_____no_output_____
|
MIT
|
doc/gallery/scatter_with_minimap.ipynb
|
mattijn/altdoc
|
Using Ray for Highly Parallelizable Tasks. While Ray can be used for very complex parallelization tasks, often we just want to do something simple in parallel. For example, we may have 100,000 time series to process with exactly the same algorithm, each taking a minute of processing. Running them on a single processor is prohibitive: it would take about 70 days. Even with 8 processors on a single machine, that only comes down to about 9 days. But with 8 machines of 16 cores each, it can be done in about 12 hours. How can we use Ray for these types of tasks? We take the simple example of estimating pi by Monte Carlo sampling. The algorithm is simple: generate random x and y; if ``x^2 + y^2 < 1``, the point falls inside the quarter circle and we count it as in. The in-fraction converges to pi/4 (remembering your high school math). The following code (and this notebook) assumes you have already set up your Ray cluster and that you are running on the head node. For more details on how to set up a Ray cluster, please see the [Ray Cluster Quickstart Guide](https://docs.ray.io/en/master/cluster/quickstart.html).
|
import ray
import random
import time
import math
from fractions import Fraction
# Let's start Ray
ray.init(address='auto')
|
INFO:anyscale.snapshot_util:Synced git objects for /home/ray/workspace-project-waleed_test1 to /efs/workspaces/shared_objects in 0.07651424407958984s.
INFO:anyscale.snapshot_util:Created snapshot for /home/ray/workspace-project-waleed_test1 at /tmp/snapshot_2022-05-16T16:38:57.388956_otbjcv41.zip of size 1667695 in 0.014925718307495117s.
INFO:anyscale.snapshot_util:Content hashes b'f4fcea43e90a69d561bf323a07691536' vs b'f4fcea43e90a69d561bf323a07691536'
INFO:anyscale.snapshot_util:Content hash unchanged, not saving new snapshot.
INFO:ray.worker:Connecting to existing Ray cluster at address: 172.31.78.11:9031
2022-05-16 16:38:57,451 INFO packaging.py:269 -- Pushing file package 'gcs://_ray_pkg_bf4a08129b7b19b96a1701be1151f9a8.zip' (1.59MiB) to Ray cluster...
2022-05-16 16:38:57,470 INFO packaging.py:278 -- Successfully pushed file package 'gcs://_ray_pkg_bf4a08129b7b19b96a1701be1151f9a8.zip'.
|
Apache-2.0
|
doc/source/ray-core/examples/highly_parallel.ipynb
|
minds-ai/ray
|
We use the ``@ray.remote`` decorator to create a Ray task. A task is like a function, except the result is returned asynchronously. It also may not run on the local machine; it may run elsewhere in the cluster. This way you can run multiple tasks in parallel, beyond the limit of the number of processors in a single machine.
|
@ray.remote
def pi4_sample(sample_count):
    """pi4_sample runs sample_count experiments, and returns the
    fraction of time it was inside the circle.
    """
    in_count = 0
    for i in range(sample_count):
        x = random.random()
        y = random.random()
        if x*x + y*y <= 1:
            in_count += 1
    return Fraction(in_count, sample_count)
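# A quick non-Ray sanity check of the same estimator (a sketch using only
# the imports above; not part of the original notebook):
def pi4_local(sample_count):
    """Same Monte Carlo estimator as pi4_sample, run in-process."""
    in_count = 0
    for _ in range(sample_count):
        x = random.random()
        y = random.random()
        if x*x + y*y <= 1:
            in_count += 1
    return Fraction(in_count, sample_count)

print(float(pi4_local(1000 * 1000) * 4), 'vs', math.pi)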
|
_____no_output_____
|
Apache-2.0
|
doc/source/ray-core/examples/highly_parallel.ipynb
|
minds-ai/ray
|
To get the result of a future, we use ray.get(), which blocks until the result is complete.
|
SAMPLE_COUNT = 1000 * 1000
start = time.time()
future = pi4_sample.remote(sample_count = SAMPLE_COUNT)
pi4 = ray.get(future)
end = time.time()
dur = end - start
print(f'Running {SAMPLE_COUNT} tests took {dur} seconds')
|
Running 1000000 tests took 1.4935967922210693 seconds
|
Apache-2.0
|
doc/source/ray-core/examples/highly_parallel.ipynb
|
minds-ai/ray
|
Now let's see how good our approximation is.
|
pi = pi4 * 4
float(pi)
abs(pi-math.pi)/pi
|
_____no_output_____
|
Apache-2.0
|
doc/source/ray-core/examples/highly_parallel.ipynb
|
minds-ai/ray
|
Meh. A little off -- that's barely 4 decimal places. Why don't we do 100,000 times as many samples? Let's do 100 billion!
|
FULL_SAMPLE_COUNT = 100 * 1000 * 1000 * 1000  # 100 billion samples!
BATCHES = int(FULL_SAMPLE_COUNT / SAMPLE_COUNT)
print(f'Doing {BATCHES} batches')
results = []
for _ in range(BATCHES):
    # each task needs its sample_count argument (missing in the original)
    results.append(pi4_sample.remote(sample_count=SAMPLE_COUNT))
output = ray.get(results)
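# Sketch: consume results incrementally with ray.wait instead of a single
# blocking ray.get, so progress can be reported as batches finish.
pending = list(results)
fractions = []
while pending:
    done, pending = ray.wait(pending, num_returns=min(100, len(pending)))
    fractions.extend(ray.get(done))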
|
Doing 100000 batches
|
Apache-2.0
|
doc/source/ray-core/examples/highly_parallel.ipynb
|
minds-ai/ray
|
Notice that in the above, we generated a list with 100,000 futures. Now all we have to do is wait for the result. Depending on your Ray cluster's size, this might take a few minutes. To give you some idea, a single batch took 0.4 seconds when I ran it on one machine; on a single core, that means we're looking at 0.4 * 100,000 = about 11 hours. Here's what the Dashboard looks like: So now, rather than just a single core working on this, I have 168 working on the task together, and it's ~80% efficient.
|
pi = sum(output)*4/len(output)
float(pi)
abs(pi-math.pi)/pi
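# Standard Monte Carlo error analysis (a sketch, not from the original
# notebook): the standard error of the estimate is 4*sqrt(p*(1-p)/N) with
# p = pi/4, so the error shrinks as 1/sqrt(N).
for n in (10**6, 10**11):
    sigma = 4 * math.sqrt((math.pi / 4) * (1 - math.pi / 4) / n)
    print('N = {:.0e}: expected error ~ {:.1e}'.format(n, sigma))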
|
_____no_output_____
|
Apache-2.0
|
doc/source/ray-core/examples/highly_parallel.ipynb
|
minds-ai/ray
|
Lambda School Data Science

*Unit 2, Sprint 1, Module 3*

---

Ridge Regression Assignment

We're going back to our other **New York City** real estate dataset. Instead of predicting apartment rents, you'll predict property sales prices. But not just for condos in Tribeca...

- [ ] Use a subset of the data where `BUILDING_CLASS_CATEGORY` == `'01 ONE FAMILY DWELLINGS'` and the sale price was more than 100 thousand and less than 2 million.
- [ ] Do train/test split. Use data from January – March 2019 to train. Use data from April 2019 to test.
- [ ] Do one-hot encoding of categorical features.
- [ ] Do feature selection with `SelectKBest`.
- [ ] Fit a ridge regression model with multiple features. Use the `normalize=True` parameter (or do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html) beforehand – use the scaler's `fit_transform` method with the train set, and the scaler's `transform` method with the test set).
- [ ] Get mean absolute error for the test set.
- [ ] As always, commit your notebook to your fork of the GitHub repo.

The [NYC Department of Finance](https://www1.nyc.gov/site/finance/taxes/property-rolling-sales-data.page) has a glossary of property sales terms and NYC Building Class Code Descriptions. The data comes from the [NYC OpenData](https://data.cityofnewyork.us/browse?q=NYC%20calendar%20sales) portal.

Stretch Goals

Don't worry, you aren't expected to do all these stretch goals! These are just ideas to consider and choose from.

- [ ] Add your own stretch goal(s)!
- [ ] Instead of `Ridge`, try `LinearRegression`. Depending on how many features you select, your errors will probably blow up! 💥
- [ ] Instead of `Ridge`, try [`RidgeCV`](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html).
- [ ] Learn more about feature selection:
  - ["Permutation importance"](https://www.kaggle.com/dansbecker/permutation-importance)
  - [scikit-learn's User Guide for Feature Selection](https://scikit-learn.org/stable/modules/feature_selection.html)
  - [mlxtend](http://rasbt.github.io/mlxtend/) library
  - scikit-learn-contrib libraries: [boruta_py](https://github.com/scikit-learn-contrib/boruta_py) & [stability-selection](https://github.com/scikit-learn-contrib/stability-selection)
  - [_Feature Engineering and Selection_](http://www.feat.engineering/) by Kuhn & Johnson.
- [ ] Try [statsmodels](https://www.statsmodels.org/stable/index.html) if you're interested in a more inferential statistical approach to linear regression and feature selection, looking at p values and 95% confidence intervals for the coefficients.
- [ ] Read [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapters 1-3, for more math & theory, but in an accessible, readable way.
- [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html).
|
import numpy as np
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
    !pip install category_encoders==2.*
# If you're working locally:
else:
    DATA_PATH = '../data/'
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
import pandas as pd
import pandas_profiling
# Read New York City property sales data
df = pd.read_csv(DATA_PATH+'condos/NYC_Citywide_Rolling_Calendar_Sales.csv',
parse_dates=['SALE DATE'],
index_col=('SALE DATE'))
# Changing space to underscore in index name
df.index.name = 'SALE_DATE'
# Change column names: replace spaces with underscores
df.columns = [col.replace(' ', '_') for col in df]
# SALE_PRICE was read as strings.
# Remove symbols, convert to integer
df['SALE_PRICE'] = (
    df['SALE_PRICE']
    .str.replace('$', '', regex=False)  # regex=False so '$' is matched literally
    .str.replace('-', '', regex=False)
    .str.replace(',', '', regex=False)
    .astype(int)
)
# BOROUGH is a numeric column, but arguably should be a categorical feature,
# so convert it from a number to a string
df['BOROUGH'] = df['BOROUGH'].astype(str)
# Reduce cardinality for NEIGHBORHOOD feature
# Get a list of the top 10 neighborhoods
top10 = df['NEIGHBORHOOD'].value_counts()[:10].index
# At locations where the neighborhood is NOT in the top 10,
# replace the neighborhood with 'OTHER'
df.loc[~df['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER'
print(df.shape)
df.head()
# Getting rid of commas from land square ft and converting all values to floats
df['LAND_SQUARE_FEET'] = df['LAND_SQUARE_FEET'].str.replace(',','')
df['LAND_SQUARE_FEET'] = df['LAND_SQUARE_FEET'].replace({'': np.NaN, '########': np.NaN})
df['LAND_SQUARE_FEET'] = df['LAND_SQUARE_FEET'].astype(float)
df['LAND_SQUARE_FEET'].value_counts()
df.info()
def wrangle(df):
    # Making a copy of the dataset
    df = df.copy()
    # Making a subset of the data where BUILDING_CLASS_CATEGORY == '01 ONE FAMILY
    # DWELLINGS' and the sale price was more than 100 thousand and less than 2 million
    df = df[(df['BUILDING_CLASS_CATEGORY'] == '01 ONE FAMILY DWELLINGS') &
            (df['SALE_PRICE'] > 100000) &
            (df['SALE_PRICE'] < 2000000)]
    # Dropping high-cardinality categorical columns
    hc_cols = [col for col in df.select_dtypes('object').columns
               if df[col].nunique() > 11]
    df.drop(columns=hc_cols, inplace=True)
    return df
df = wrangle(df)
df['TAX_CLASS_AT_TIME_OF_SALE'].value_counts()
df.info()
# Dropping NaN columns, building class column since now they are all the same,
# and tax class at time of sale column since they are also all identical
df = df.drop(['BUILDING_CLASS_CATEGORY', 'EASE-MENT', 'APARTMENT_NUMBER', 'TAX_CLASS_AT_TIME_OF_SALE'], axis=1)
print(df.shape)
df.head()
df.info()
# Splitting Data
# splitting into target and feature matrix
target = 'SALE_PRICE'
y = df[target]
X = df.drop(columns=target)
# splitting into training and test sets:
# Using data from January – March 2019 to train. Using data from April 2019 to test
cutoff = '2019-04-01'
mask = X.index < cutoff
X_train, y_train = X.loc[mask], y.loc[mask]
X_test, y_test = X.loc[~mask], y.loc[~mask]
# Establishing Baseline
y_pred = [y_train.mean()] * len(y_train)
from sklearn.metrics import mean_absolute_error
print('Baseline MAE:', mean_absolute_error(y_train, y_pred))
# Applying transformer: OneHotEncoder
# Step 1: Importing the transformer class
from category_encoders import OneHotEncoder, OrdinalEncoder
# Step 2: Instantiating the transformer
ohe = OneHotEncoder(use_cat_names=True)
# Step 3: Fitting my TRAINING data to the transfomer
ohe.fit(X_train)
# Step 4: Transforming
XT_train = ohe.transform(X_train)
print(len(XT_train.columns))
XT_train.columns
print(XT_train.shape)
XT_train.head()
# Performing feature selection with SelectKBest
# Importing the feature selector utility:
from sklearn.feature_selection import SelectKBest, f_regression
# Creating the selector object with the best k=1 features:
selector = SelectKBest(score_func=f_regression, k=1)
# Running the selector on the training data:
XT_train_selected = selector.fit_transform(XT_train, y_train)
# Finding the features that were selected:
selected_mask = selector.get_support()
all_features = XT_train.columns
selected_feature = all_features[selected_mask]
print('The selected feature: ', selected_feature[0])
# Scaling the ohe data with StandardScaler:
from sklearn.preprocessing import StandardScaler
ss = StandardScaler()
ss.fit(XT_train)
XTT_train = ss.transform(XT_train)
# Building Ridge Regression Model:
from sklearn.linear_model import Ridge
model = Ridge(alpha=150)
model.fit(XTT_train, y_train)
# Checking metrics:
XT_test = ohe.transform(X_test)
XTT_test = ss.transform(XT_test)
print('RIDGE train MAE', mean_absolute_error(y_train, model.predict(XTT_train)))
print('RIDGE test MAE', mean_absolute_error(y_test, model.predict(XTT_test)))
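# RidgeCV stretch-goal sketch (assumes the XTT_train/XTT_test matrices and
# targets built above): pick alpha by cross-validation instead of hand-tuning.
from sklearn.linear_model import RidgeCV
ridge_cv = RidgeCV(alphas=[1, 10, 50, 100, 150, 300, 1000])
ridge_cv.fit(XTT_train, y_train)
print('Best alpha:', ridge_cv.alpha_)
print('RIDGECV test MAE', mean_absolute_error(y_test, ridge_cv.predict(XTT_test)))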
|
RIDGE train MAE 151103.0875222934
RIDGE test MAE 155194.34287168915
|
MIT
|
module3-ridge-regression/LS_DS_213_assignment.ipynb
|
Collin-Campbell/DS-Unit-2-Linear-Models
|
Zircon model training notebook; (extensively) modified from the Detectron2 training tutorial. This Colab notebook will allow users to train new models to detect and segment detrital zircon from RL images using Detectron2 and the training dataset provided in the colab_zirc_dims repo. It is set up to train a Mask R-CNN model (ResNet depth 101), but could be modified for other instance segmentation models, provided that they are supported by Detectron2. The training dataset should be uploaded to the user's Google Drive before running this notebook. Install detectron2
|
!pip install pyyaml==5.1
import torch
TORCH_VERSION = ".".join(torch.__version__.split(".")[:2])
CUDA_VERSION = torch.__version__.split("+")[-1]
print("torch: ", TORCH_VERSION, "; cuda: ", CUDA_VERSION)
# Install detectron2 that matches the above pytorch version
# See https://detectron2.readthedocs.io/tutorials/install.html for instructions
!pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/$CUDA_VERSION/torch$TORCH_VERSION/index.html
exit(0) # Automatically restarts runtime after installation
# Some basic setup:
# Setup detectron2 logger
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import os, json, cv2, random
from google.colab.patches import cv2_imshow
import copy
import time
import datetime
import logging
import random
import shutil
import torch
# import some common detectron2 utilities
from detectron2.engine.hooks import HookBase
from detectron2 import model_zoo
from detectron2.evaluation import inference_context, COCOEvaluator
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.utils.logger import log_every_n_seconds
from detectron2.data import MetadataCatalog, DatasetCatalog, build_detection_train_loader, DatasetMapper, build_detection_test_loader
import detectron2.utils.comm as comm
from detectron2.data import detection_utils as utils
from detectron2.config import LazyConfig
import detectron2.data.transforms as T
|
_____no_output_____
|
Apache-2.0
|
training dataset/ResNet_colab_zirc_dims_train_model.ipynb
|
MCSitar/colab-zirc-dims
|
Define Augmentations. The cell below defines the augmentations used while training, to ensure that models never see exactly the same image twice. This mitigates overfitting and allows models to achieve substantially higher accuracy in their segmentations/measurements. A small sketch for previewing these augmentations follows the cell.
|
custom_transform_list = [
    T.ResizeShortestEdge([800, 800]),  # resize shortest edge of image to 800 pixels
    T.RandomCrop('relative', (0.95, 0.95)),  # randomly crop an area (95% size of original) from image
    T.RandomLighting(100),  # minor lighting randomization
    T.RandomContrast(.85, 1.15),  # minor contrast randomization
    T.RandomFlip(prob=.5, horizontal=False, vertical=True),  # random vertical flipping
    T.RandomFlip(prob=.5, horizontal=True, vertical=False),  # and horizontal flipping
    T.RandomApply(T.RandomRotation([-30, 30], False), prob=.8),  # random (80% probability) rotation up to 30 degrees;
                                                                 # more rotation does not seem to improve results
    T.ResizeShortestEdge([800, 800])  # resize image again for uniformity
]
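# Sanity-check sketch for the augmentation pipeline (not in the original
# notebook): apply the list to one image via Detectron2's AugInput. The
# random array below is a stand-in for a real image (e.g. from cv2.imread).
import numpy as np
import detectron2.data.transforms as T
dummy_image = np.random.randint(0, 256, size=(600, 800, 3), dtype=np.uint8)
aug_input = T.AugInput(dummy_image)
transforms = T.AugmentationList(custom_transform_list)(aug_input)  # applied in place
print(aug_input.image.shape)  # shape after resize/crop/rotation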
|
_____no_output_____
|
Apache-2.0
|
training dataset/ResNet_colab_zirc_dims_train_model.ipynb
|
MCSitar/colab-zirc-dims
|
Mount Google Drive, set paths to dataset, model saving directories
|
from google.colab import drive
drive.mount('/content/drive')
#@markdown ### Add path to training dataset directory
dataset_dir = '/content/drive/MyDrive/training_dataset' #@param {type:"string"}
#@markdown ### Add path to model saving directory (automatically created if it does not yet exist)
model_save_dir = '/content/drive/MyDrive/NAME FOR MODEL SAVING FOLDER HERE' #@param {type:"string"}
os.makedirs(model_save_dir, exist_ok=True)
|
_____no_output_____
|
Apache-2.0
|
training dataset/ResNet_colab_zirc_dims_train_model.ipynb
|
MCSitar/colab-zirc-dims
|
Define dataset mapper, training, loss eval functions
|
from detectron2.engine import DefaultTrainer
from detectron2.data import DatasetMapper
from detectron2.structures import BoxMode
# a function to convert Via image annotation .json dict format to Detectron2
# training input dict format
def get_zircon_dicts(img_dir):
    json_file = os.path.join(img_dir, "via_region_data.json")
    with open(json_file) as f:
        imgs_anns = json.load(f)['_via_img_metadata']
    dataset_dicts = []
    for idx, v in enumerate(imgs_anns.values()):
        record = {}
        filename = os.path.join(img_dir, v["filename"])
        height, width = cv2.imread(filename).shape[:2]
        record["file_name"] = filename
        record["image_id"] = idx
        record["height"] = height
        record["width"] = width
        #annos = v["regions"]
        annos = {}
        for n, eachitem in enumerate(v['regions']):
            annos[str(n)] = eachitem
        objs = []
        for _, anno in annos.items():
            #assert not anno["region_attributes"]
            anno = anno["shape_attributes"]
            px = anno["all_points_x"]
            py = anno["all_points_y"]
            poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
            poly = [p for x in poly for p in x]
            obj = {
                "bbox": [np.min(px), np.min(py), np.max(px), np.max(py)],
                "bbox_mode": BoxMode.XYXY_ABS,
                "segmentation": [poly],
                "category_id": 0,
            }
            objs.append(obj)
        record["annotations"] = objs
        dataset_dicts.append(record)
    return dataset_dicts

# loss eval hook for getting validation loss, copying to metrics.json;
# from https://gist.github.com/ortegatron/c0dad15e49c2b74de8bb09a5615d9f6b
class LossEvalHook(HookBase):
    def __init__(self, eval_period, model, data_loader):
        self._model = model
        self._period = eval_period
        self._data_loader = data_loader

    def _do_loss_eval(self):
        # Copying inference_on_dataset from evaluator.py
        total = len(self._data_loader)
        num_warmup = min(5, total - 1)
        start_time = time.perf_counter()
        total_compute_time = 0
        losses = []
        for idx, inputs in enumerate(self._data_loader):
            if idx == num_warmup:
                start_time = time.perf_counter()
                total_compute_time = 0
            start_compute_time = time.perf_counter()
            if torch.cuda.is_available():
                torch.cuda.synchronize()
            total_compute_time += time.perf_counter() - start_compute_time
            iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup)
            seconds_per_img = total_compute_time / iters_after_start
            if idx >= num_warmup * 2 or seconds_per_img > 5:
                total_seconds_per_img = (time.perf_counter() - start_time) / iters_after_start
                eta = datetime.timedelta(seconds=int(total_seconds_per_img * (total - idx - 1)))
                log_every_n_seconds(
                    logging.INFO,
                    "Loss on Validation done {}/{}. {:.4f} s / img. ETA={}".format(
                        idx + 1, total, seconds_per_img, str(eta)
                    ),
                    n=5,
                )
            loss_batch = self._get_loss(inputs)
            losses.append(loss_batch)
        mean_loss = np.mean(losses)
        self.trainer.storage.put_scalar('validation_loss', mean_loss)
        comm.synchronize()
        return losses

    def _get_loss(self, data):
        # How loss is calculated on train_loop
        metrics_dict = self._model(data)
        metrics_dict = {
            k: v.detach().cpu().item() if isinstance(v, torch.Tensor) else float(v)
            for k, v in metrics_dict.items()
        }
        total_losses_reduced = sum(loss for loss in metrics_dict.values())
        return total_losses_reduced

    def after_step(self):
        next_iter = self.trainer.iter + 1
        is_final = next_iter == self.trainer.max_iter
        if is_final or (self._period > 0 and next_iter % self._period == 0):
            self._do_loss_eval()

# trainer for zircons which incorporates augmentation, hooks for eval
class ZirconTrainer(DefaultTrainer):

    @classmethod
    def build_train_loader(cls, cfg):
        # return a custom train loader with augmentations; recompute_boxes
        # is important given cropping, rotation augs
        return build_detection_train_loader(
            cfg,
            mapper=DatasetMapper(cfg, is_train=True, recompute_boxes=True,
                                 augmentations=custom_transform_list),
        )

    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
        return COCOEvaluator(dataset_name, cfg, True, output_folder)

    # set up validation loss eval hook
    def build_hooks(self):
        hooks = super().build_hooks()
        hooks.insert(-1, LossEvalHook(
            cfg.TEST.EVAL_PERIOD,  # note: reads EVAL_PERIOD from the global cfg
            self.model,
            build_detection_test_loader(
                self.cfg,
                self.cfg.DATASETS.TEST[0],
                DatasetMapper(self.cfg, True)
            )
        ))
        return hooks
|
_____no_output_____
|
Apache-2.0
|
training dataset/ResNet_colab_zirc_dims_train_model.ipynb
|
MCSitar/colab-zirc-dims
|
Import train, val catalogs
|
# registers training, val datasets (converts annotations using get_zircon_dicts)
for d in ["train", "val"]:
    DatasetCatalog.register("zircon_" + d, lambda d=d: get_zircon_dicts(dataset_dir + "/" + d))
    MetadataCatalog.get("zircon_" + d).set(thing_classes=["zircon"])
zircon_metadata = MetadataCatalog.get("zircon_train")
train_cat = DatasetCatalog.get("zircon_train")
|
_____no_output_____
|
Apache-2.0
|
training dataset/ResNet_colab_zirc_dims_train_model.ipynb
|
MCSitar/colab-zirc-dims
|
Visualize train dataset
|
# visualize random sample from training dataset
dataset_dicts = get_zircon_dicts(os.path.join(dataset_dir, 'train'))
for d in random.sample(dataset_dicts, 4):  # change int here to change sample size
    img = cv2.imread(d["file_name"])
    visualizer = Visualizer(img[:, :, ::-1], metadata=zircon_metadata, scale=0.5)
    out = visualizer.draw_dataset_dict(d)
    cv2_imshow(out.get_image()[:, :, ::-1])
|
_____no_output_____
|
Apache-2.0
|
training dataset/ResNet_colab_zirc_dims_train_model.ipynb
|
MCSitar/colab-zirc-dims
|
Define save to Drive function
|
# a function to save models (with iteration number in name) and metrics to Drive;
# important in case training crashes or is left unattended and disconnects.
def save_outputs_to_drive(model_name, iters):
    root_output_dir = os.path.join(model_save_dir, model_name)  # output_dir = save dir from user input
    # creates individual model output directory if it does not already exist
    os.makedirs(root_output_dir, exist_ok=True)
    # creates a name for this version of model; include iteration number
    curr_iters_str = str(round(iters/1000, 1)) + 'k'
    curr_model_name = model_name + '_' + curr_iters_str + '.pth'
    model_save_pth = os.path.join(root_output_dir, curr_model_name)
    # get most recent model, current metrics, copy to drive
    model_path = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
    metrics_path = os.path.join(cfg.OUTPUT_DIR, 'metrics.json')
    shutil.copy(model_path, model_save_pth)
    shutil.copy(metrics_path, root_output_dir)
|
_____no_output_____
|
Apache-2.0
|
training dataset/ResNet_colab_zirc_dims_train_model.ipynb
|
MCSitar/colab-zirc-dims
|
Build and train the model. Set some parameters for training:
|
#@markdown ### Add a base name for the model
model_save_name = 'your model name here' #@param {type:"string"}
#@markdown ### Final iteration before training stops
final_iteration = 8000 #@param {type:"slider", min:3000, max:15000, step:1000}
|
_____no_output_____
|
Apache-2.0
|
training dataset/ResNet_colab_zirc_dims_train_model.ipynb
|
MCSitar/colab-zirc-dims
|
Actually build and train model
|
#train from a pre-trained Mask RCNN model
cfg = get_cfg()
# train from base model: Default Mask RCNN
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml"))
# Load starting weights (COCO trained) from Detectron2 model zoo.
cfg.MODEL.WEIGHTS = "https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x/138205316/model_final_a3ec72.pkl"
cfg.DATASETS.TRAIN = ("zircon_train",) #load training dataset
cfg.DATASETS.TEST = ("zircon_val",) # load validation dataset
cfg.DATALOADER.NUM_WORKERS = 2
cfg.SOLVER.IMS_PER_BATCH = 2 #2 ims per batch seems to be good for model generalization
cfg.SOLVER.BASE_LR = 0.00025 # low but reasonable learning rate given pre-training; \
# by default initializes with a 1000 iteration warmup
cfg.SOLVER.MAX_ITER = 2000 #train for 2000 iterations before 1st save
cfg.SOLVER.GAMMA = 0.5
#decay learning rate by factor of GAMMA every 1000 iterations after 2000 iterations \
# and until 10000 iterations This works well for current version of training \
# dataset but should be modified (probably a longer interval) if dataset is ever\
# extended.
cfg.SOLVER.STEPS = (1999, 2999, 3999, 4999, 5999, 6999, 7999, 8999, 9999)
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512 # use default ROI heads batch size
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1 # only class here is zircon
cfg.MODEL.RPN.NMS_THRESH = 0.1 #sets NMS threshold lower than default; should(?) eliminate overlapping regions
cfg.TEST.EVAL_PERIOD = 200 # validation eval every 200 iterations
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
trainer = ZirconTrainer(cfg) #our zircon trainer, w/ built-in augs and val loss eval
trainer.resume_or_load(resume=False)
trainer.train() #start training
# stop training and save for the 1st time after 2000 iterations
save_outputs_to_drive(model_save_name, 2000)
# Saves, cold restarts training from saved model weights every 1000 iterations
# until final iteration. This should probably be done via hooks without stopping
# training but *seems* to produce faster decrease in validation loss.
for each_iters in [iter * 1000 for iter in
                   list(range(3, int(final_iteration / 1000) + 1, 1))]:
    # reload model with last iteration model weights
    resume_model_path = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
    cfg.MODEL.WEIGHTS = resume_model_path
    cfg.SOLVER.MAX_ITER = each_iters  # increase max iterations
    trainer = ZirconTrainer(cfg)
    trainer.resume_or_load(resume=True)
    trainer.train()  # restart training
    # save again
    save_outputs_to_drive(model_save_name, each_iters)
# open tensorboard training metrics curves (metrics.json):
%load_ext tensorboard
%tensorboard --logdir output
|
_____no_output_____
|
Apache-2.0
|
training dataset/ResNet_colab_zirc_dims_train_model.ipynb
|
MCSitar/colab-zirc-dims
|
Inference & evaluation with the final trained model. Initialize the model from saved weights:
|
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth") # final model; modify path to other non-final model to view their segmentations
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set a custom testing threshold
cfg.MODEL.RPN.NMS_THRESH = 0.1
predictor = DefaultPredictor(cfg)
|
_____no_output_____
|
Apache-2.0
|
training dataset/ResNet_colab_zirc_dims_train_model.ipynb
|
MCSitar/colab-zirc-dims
|
View model segmentations for random sample of images from zircon validation dataset:
|
from detectron2.utils.visualizer import ColorMode
dataset_dicts = get_zircon_dicts(os.path.join(dataset_dir, 'val'))
for d in random.sample(dataset_dicts, 5):
    im = cv2.imread(d["file_name"])
    outputs = predictor(im)  # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format
    v = Visualizer(im[:, :, ::-1],
                   metadata=zircon_metadata,
                   scale=1.5,
                   instance_mode=ColorMode.IMAGE_BW  # remove the colors of unsegmented pixels; only available for segmentation models
                   )
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    cv2_imshow(out.get_image()[:, :, ::-1])
|
_____no_output_____
|
Apache-2.0
|
training dataset/ResNet_colab_zirc_dims_train_model.ipynb
|
MCSitar/colab-zirc-dims
|
Validation eval with COCO API metric:
|
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
evaluator = COCOEvaluator("zircon_val", ("bbox", "segm"), False, output_dir="./output/")
val_loader = build_detection_test_loader(cfg, "zircon_val")
print(inference_on_dataset(trainer.model, val_loader, evaluator))
|
_____no_output_____
|
Apache-2.0
|
training dataset/ResNet_colab_zirc_dims_train_model.ipynb
|
MCSitar/colab-zirc-dims
|
Analysis
|
# imports assumed by this cell
import numpy as np
import pandas as pd

# Prepare data
demographic = pd.read_csv('../data/processed/demographic.csv')
severity = pd.read_csv('../data/processed/severity.csv', index_col=0)
features = demographic.columns
X = demographic.astype(np.float64)
y = (severity >= 4).sum(axis=1)
needs_to_label = {0: 'no needs', 1: 'low needs', 2: 'moderate needs', 3: 'high needs', 4: 'very high needs'}
labels = ["no needs", "low needs", "moderate needs", "high needs", "very high needs"]
severity_to_needs = {0:0, 1:1, 2:1, 3:2, 4:2, 5:3, 6:3, 7:4, 8:4}
y = np.array([severity_to_needs[i] for i in y])
# Color vector, for illustration purposes
colors = {0:'b', 1:'r', 2:'g', 3:'c', 4:'y'}
y_c = np.array([colors[i] for i in y])
|
_____no_output_____
|
RSA-MD
|
notebooks/.ipynb_checkpoints/Analysis-checkpoint.ipynb
|
mmData/Hack4Good
|
Understanding the features
|
from yellowbrick.features import Rank2D
from yellowbrick.features.manifold import Manifold
from yellowbrick.features.pca import PCADecomposition
from yellowbrick.style import set_palette
set_palette('flatui')
|
_____no_output_____
|
RSA-MD
|
notebooks/.ipynb_checkpoints/Analysis-checkpoint.ipynb
|
mmData/Hack4Good
|
Feature covariance plot
|
visualizer = Rank2D(algorithm='covariance')
visualizer.fit(X, y)
visualizer.transform(X)
visualizer.poof()
|
/home/muhadriy/.conda/envs/ml/lib/python3.6/site-packages/yellowbrick/features/rankd.py:262: FutureWarning: Method .as_matrix will be removed in a future version. Use .values instead.
X = X.as_matrix()
|
RSA-MD
|
notebooks/.ipynb_checkpoints/Analysis-checkpoint.ipynb
|
mmData/Hack4Good
|
Principal Component Projection
|
visualizer = PCADecomposition(scale=True, color = y_c, proj_dim=3)
visualizer.fit_transform(X, y)
visualizer.poof()
|
_____no_output_____
|
RSA-MD
|
notebooks/.ipynb_checkpoints/Analysis-checkpoint.ipynb
|
mmData/Hack4Good
|
Manifold projections
|
visualizer = Manifold(manifold='tsne', target='discrete')
visualizer.fit_transform(X, y)
visualizer.poof()
visualizer = Manifold(manifold='modified', target='discrete')
visualizer.fit_transform(X, y)
visualizer.poof()
|
_____no_output_____
|
RSA-MD
|
notebooks/.ipynb_checkpoints/Analysis-checkpoint.ipynb
|
mmData/Hack4Good
|
No apparent structure from the PCA and Manifold projections. Class Balance
|
# imports assumed by this cell
import matplotlib.pyplot as plt
import seaborn as sb

categories, counts = np.unique(y, return_counts=True)
fig, ax = plt.subplots(figsize=(9, 7))
sb.set(style="whitegrid")
sb.barplot(labels, counts, ax=ax, tick_label=labels)
ax.set(xlabel='Need Categories',
       ylabel='Number of HHs');
|
_____no_output_____
|
RSA-MD
|
notebooks/.ipynb_checkpoints/Analysis-checkpoint.ipynb
|
mmData/Hack4Good
|
Heavy class imbalances. Use appropriate scoring metrics/measures. Learning and Validation
|
from sklearn.model_selection import StratifiedKFold
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import RidgeClassifier
from yellowbrick.model_selection import LearningCurve
cv = StratifiedKFold(10)
sizes = np.linspace(0.1, 1., 20)
visualizer = LearningCurve(RidgeClassifier(), cv=cv, train_sizes=sizes,
scoring='balanced_accuracy', n_jobs=-1)
visualizer.fit(X,y)
visualizer.poof()
visualizer = LearningCurve(GaussianNB(), cv=cv, train_sizes=sizes,
scoring='balanced_accuracy', n_jobs=-1)
visualizer.fit(X,y)
visualizer.poof()
|
_____no_output_____
|
RSA-MD
|
notebooks/.ipynb_checkpoints/Analysis-checkpoint.ipynb
|
mmData/Hack4Good
|
Classification
|
from sklearn.linear_model import RidgeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.utils.class_weight import compute_class_weight
from imblearn.metrics import classification_report_imbalanced
from sklearn.metrics import balanced_accuracy_score
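# NOTE: plot_confusion_matrix is called below but never defined in the cells
# shown; a minimal sketch of such a helper (assumed to mirror the old
# scikit-learn docs example) is:
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

def plot_confusion_matrix(y_true, y_pred, classes, normalize=False, cmap=plt.cm.Blues):
    """Plot a confusion matrix, optionally row-normalized."""
    cm = confusion_matrix(y_true, y_pred)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print('Normalized confusion matrix')
    plt.figure()
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)
    fmt = '.2f' if normalize else 'd'
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt), ha='center',
                 color='white' if cm[i, j] > cm.max() / 2 else 'black')
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()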
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42, stratify=y)
cv_ = StratifiedKFold(5)
class_weights = compute_class_weight(class_weight='balanced', classes= np.unique(y), y=y)
clf = RidgeClassifier()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print('Balanced accuracy: {:.2f}'.format(balanced_accuracy_score(y_test,y_pred)))
print('Classification report: ')
print(classification_report_imbalanced(y_test, y_pred, target_names=labels))
plot_confusion_matrix(y_test, y_pred, classes=np.unique(y), normalize=True)
clf = KNeighborsClassifier()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print('Balanced accuracy: {:.2f}'.format(balanced_accuracy_score(y_test,y_pred)))
print('Classification report: ')
print(classification_report_imbalanced(y_test, y_pred, target_names=labels))
plot_confusion_matrix(y_test, y_pred, classes=np.unique(y), normalize=True)
clf = GaussianNB()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print('Balanced accuracy: {:.2f}'.format(balanced_accuracy_score(y_test,y_pred)))
print('Classification report: ')
print(classification_report_imbalanced(y_test, y_pred, target_names=labels))
plot_confusion_matrix(y_test, y_pred, classes=np.unique(y), normalize=True)
clf = ExtraTreesClassifier()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print('Balanced accuracy: {:.2f}'.format(balanced_accuracy_score(y_test,y_pred)))
print('Classification report: ')
print(classification_report_imbalanced(y_test, y_pred, target_names=labels))
plot_confusion_matrix(y_test, y_pred, classes=np.unique(y), normalize=True)
clf = GradientBoostingClassifier()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print('Balanced accuracy: {:.2f}'.format(balanced_accuracy_score(y_test,y_pred)))
print('Classification report: ')
print(classification_report_imbalanced(y_test, y_pred, target_names=labels))
plot_confusion_matrix(y_test, y_pred, classes=np.unique(y), normalize=True)
|
Balanced accuracy: 0.25
Classification report:
                  pre   rec   spe   f1    geo   iba   sup
no needs          0.20  0.02  1.00  0.03  0.13  0.01    63
low needs         0.52  0.18  0.95  0.27  0.42  0.16   594
moderate needs    0.51  0.84  0.24  0.64  0.45  0.21  1258
high needs        0.47  0.22  0.92  0.30  0.45  0.19   655
very high needs   0.00  0.00  1.00  0.00  0.00  0.00    25
avg / total       0.49  0.51  0.60  0.45  0.43  0.19  2595
Normalized confusion matrix
|
RSA-MD
|
notebooks/.ipynb_checkpoints/Analysis-checkpoint.ipynb
|
mmData/Hack4Good
|
Voting Classifier: Hard and Soft Voting
|
clf1 = KNeighborsClassifier(weights='distance')
clf2 = GaussianNB()
clf3 = ExtraTreesClassifier(class_weight='balanced_subsample')
clf4 = GradientBoostingClassifier()
vote = VotingClassifier(estimators=[('knn', clf1), ('gnb', clf2), ('ext', clf3), ('gb', clf4)], voting='hard')
params = {'knn__n_neighbors': [2,3,4], 'gb__n_estimators':[50,100,200],
'gb__max_depth':[3,5,7], 'ext__n_estimators': [50,100,200]}
scoring_fns = ['f1_weighted', 'balanced_accuracy']
grid = GridSearchCV(estimator=vote, param_grid=params, cv=cv_,
verbose=2, n_jobs=-1, scoring=scoring_fns, refit='balanced_accuracy')
grid.fit(X_train, y_train)
y_pred = grid.predict(X_test)
print('Balanced accuracy: {:.2f}'.format(balanced_accuracy_score(y_test,y_pred)))
print('Classification report: ')
print(classification_report_imbalanced(y_test, y_pred, target_names=labels))
plot_confusion_matrix(y_test, y_pred, classes=np.unique(y), normalize=True)
clf1 = KNeighborsClassifier(weights='distance')
clf2 = GaussianNB()
clf3 = ExtraTreesClassifier(class_weight='balanced_subsample')
clf4 = GradientBoostingClassifier()
vote = VotingClassifier(estimators=[('knn', clf1), ('gnb', clf2), ('ext', clf3), ('gb', clf4)], voting='soft')
params = {'knn__n_neighbors': [2,3,4], 'gb__n_estimators':[50,100,200],
'gb__max_depth':[3,5,7], 'ext__n_estimators': [50,100,200]}
scoring_fns = ['f1_weighted', 'balanced_accuracy']
grid_soft = GridSearchCV(estimator=vote, param_grid=params, cv=cv_,
verbose=2, n_jobs=-1, scoring=scoring_fns, refit='balanced_accuracy')
grid_soft.fit(X_train, y_train)
y_pred = grid_soft.predict(X_test)
print('Balanced accuracy: {:.2f}'.format(balanced_accuracy_score(y_test,y_pred)))
print('Classification report: ')
print(classification_report_imbalanced(y_test, y_pred, target_names=labels))
plot_confusion_matrix(y_test, y_pred, classes=np.unique(y), normalize=True)
|
Fitting 5 folds for each of 81 candidates, totalling 405 fits
|
RSA-MD
|
notebooks/.ipynb_checkpoints/Analysis-checkpoint.ipynb
|
mmData/Hack4Good
|
Import packages
|
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
# general packages
import numpy as np
import matplotlib.pyplot as plt
import os
import seaborn as sns
# sklearn models
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
# mne
import mne
import pickle
from mne.datasets import sample
from mne.decoding import (SlidingEstimator, GeneralizingEstimator,
cross_val_multiscore, LinearModel, get_coef)
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
sklearn models
|
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.metrics import confusion_matrix
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
Load preprocessed data
|
with open(os.path.join('data','Xdict.pickle'),'rb') as handle1:
Xdict = pickle.load(handle1)
with open(os.path.join('data','ydict.pickle'),'rb') as handle2:
ydict = pickle.load(handle2)
subjects = list(set(Xdict.keys()))
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
FEATURE ENGINEERING. We first need to make a master dataframe compiling, for all subjects, the trials whose first number is 5 or 6 together with the corresponding result. A loop-based equivalent of the per-subject blocks is sketched after the cell below.
|
s01 = ydict[1]
df1 = pd.DataFrame(s01, columns=['Result'])
df1['Subject'] = 1
df1['Time Series'] = [series[:-52] for series in Xdict[1].tolist()]
df1['Psd'] = [series[950:] for series in Xdict[1].tolist()]
df1
s02 = ydict[2]
df2 = pd.DataFrame(s02, columns=['Result'])
df2['Subject'] = 2
df2['Time Series'] = [series[:-52] for series in Xdict[2].tolist()]
df2['Psd'] = [series[950:] for series in Xdict[2].tolist()]
df2
s03 = ydict[3]
df3 = pd.DataFrame(s03, columns=['Result'])
df3['Subject'] = 3
df3['Time Series'] = [series[:-52] for series in Xdict[3].tolist()]
df3['Psd'] = [series[950:] for series in Xdict[3].tolist()]
df3
s04 = ydict[4]
df4 = pd.DataFrame(s04, columns=['Result'])
df4['Subject'] = 4
df4['Time Series'] = [series[:-52] for series in Xdict[4].tolist()]
df4['Psd'] = [series[950:] for series in Xdict[4].tolist()]
df4
s05 = ydict[5]
df5 = pd.DataFrame(s05, columns=['Result'])
df5['Subject'] = 5
df5['Time Series'] = [series[:-52] for series in Xdict[5].tolist()]
df5['Psd'] = [series[950:] for series in Xdict[5].tolist()]
df5
s06 = ydict[6]
df6 = pd.DataFrame(s06, columns=['Result'])
df6['Subject'] = 6
df6['Time Series'] = [series[:-52] for series in Xdict[6].tolist()]
df6['Psd'] = [series[950:] for series in Xdict[6].tolist()]
df6
s07 = ydict[7]
df7 = pd.DataFrame(s07, columns=['Result'])
df7['Subject'] = 7
df7['Time Series'] = [series[:-52] for series in Xdict[7].tolist()]
df7['Psd'] = [series[950:] for series in Xdict[7].tolist()]
df7
s08 = ydict[8]
df8 = pd.DataFrame(s08, columns=['Result'])
df8['Subject'] = 8
df8['Time Series'] = [series[:-52] for series in Xdict[8].tolist()]
df8['Psd'] = [series[950:] for series in Xdict[8].tolist()]
df8
s09 = ydict[9]
df9 = pd.DataFrame(s09, columns=['Result'])
df9['Subject'] = 9
df9['Time Series'] = [series[:-52] for series in Xdict[9].tolist()]
df9['Psd'] = [series[950:] for series in Xdict[9].tolist()]
df9
s10 = ydict[10]
df10 = pd.DataFrame(s10, columns=['Result'])
df10['Subject'] = 10
df10['Time Series'] = [series[:-52] for series in Xdict[10].tolist()]
df10['Psd'] = [series[950:] for series in Xdict[10].tolist()]
frames = [df1, df2, df3, df4, df5, df6, df7, df8, df9, df10]
resultframe = pd.concat(frames)
resultframe = resultframe.reset_index().drop('index', axis=1)
resultframe
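# The ten per-subject blocks above could be built in a loop; an equivalent
# sketch (same slicing logic, assuming Xdict/ydict as loaded earlier):
frames = []
for subj in range(1, 11):
    df = pd.DataFrame(ydict[subj], columns=['Result'])
    df['Subject'] = subj
    df['Time Series'] = [series[:-52] for series in Xdict[subj].tolist()]
    df['Psd'] = [series[950:] for series in Xdict[subj].tolist()]
    frames.append(df)
resultframe = pd.concat(frames).reset_index(drop=True)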
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
Splitting the psd into 52 different columns so each value can be used as a feature:
|
resultframe[['psd'+str(i) for i in range(1,53)]] = pd.DataFrame(resultframe.Psd.values.tolist(), index= resultframe.index)
resultframe = resultframe.drop('Psd', axis=1)
resultframe.head()
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
Assuming the merged table is formed correctly, we now have our outcomes ('Result'), their corresponding first 950 time-series data points, and subject information. We no longer have information regarding which electrode collected the data (irrelevant, since there is no biological correspondence); however, if needed, we can still filter by subject, as we retain that data. NOTE: This table covers only the trials whose first number is 5 or 6, as it is in that scenario that the patient has the ability to "Gamble". NOTE: One of the disadvantages of compiling all patient data and not separating by subject is that we ignore behavioral characteristics (risk aversion and risk seeking) and instead find common trends in the time-series data regardless of personal characteristics. NEED TO CHECK: Is all electrode data included for each patient? Is each Result matched with its respective time series? For now, I will proceed relying on the dictionary Kata made and assume the order and correspondence are proper. Dataset Characteristics / Confirming the master dataframe created above:
|
countframe = resultframe.groupby("Subject").count().drop('Time Series', axis=1).drop(['psd'+str(i) for i in range(1,53)], axis=1)
countframe
plt.bar(countframe.index, countframe['Result'])
plt.xlabel('Subject')
plt.ylabel('Count')
plt.title('Number of Entries per subject')
plt.show();
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
Note: Number of Entries = Number of trials with first number 5 or 6 * Number of electrodes for the subject. In the preprocessing notebook, we determined the number of electrodes per subject to be as follows:
|
subject = [1,2,3,4,5,6,7,8,9,10]
electrodes = [5,6,59,5,61,7,11,10,19,16]
elecframe = pd.DataFrame(data={'Subject': subject, 'Num Electrode' : electrodes})
elecframe
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
In the preprocessing notebook, we also determined the number of trials with 5 and 6 as the first number (in the cleaned table, excluding all types of bad trials):
|
subject = [1,2,3,4,5,6,7,8,9,10]
num5 = [23, 24, 24, 12, 21, 22, 21, 24, 24, 16]
num6 = [20, 23, 24, 18, 21, 24, 22, 24, 24, 18]
trialframe = pd.DataFrame(data={'Subject': subject, 'Num 5': num5, 'Num 6': num6})
trialframe['Num Total Trials'] = trialframe['Num 5'] + trialframe['Num 6']
trialframe = trialframe.drop(['Num 5', 'Num 6'], axis=1)
trialframe
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
Merging the two tables together:
|
confframe = pd.concat([elecframe, trialframe.drop('Subject', axis=1)], axis=1)
confframe['Expected Entries'] = confframe['Num Electrode'] * confframe['Num Total Trials']
confframe
checkframe = pd.merge(confframe, countframe, how='inner', left_on='Subject', right_index=True)
checkframe
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
We have now confirmed that the expected number of entries per subject matches the actual number of entries in the master dataframe created above. This indicates that the table was likely created properly and is safe to use for further analysis. Next, we need to understand the characteristics of our dataset, mainly the probability of obtaining a correct prediction by chance.
|
outframe = resultframe.groupby('Result').count().drop('Time Series', axis=1).drop(['psd'+str(i) for i in range(1,53)], axis=1).rename(index=str, columns={'Subject':'Count'})
outframe
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
We can observe that the distribution between the two possible outcomes is not even, so we need to be careful when assessing the performance of our model. We will next calculate the predictive power of chance:
|
total = sum(outframe['Count'])
outframe['Probability'] = outframe['Count']/total
outframe
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
We can observe that the probability of getting a correct prediction due to pure chance is 56.988% (~57%), so we need to design a prediction model that performs better than this. We will now move on to feature engineering. Making new features: we currently have 52 power spectral density (psd) features obtained from the preprocessed file, and we need to create new features from our time-series data.
|
resultframe.head()
resultframe['Max'] = [max(i) for i in resultframe['Time Series']]
resultframe['Min'] = [min(i) for i in resultframe['Time Series']]
resultframe['Std'] = [np.std(i) for i in resultframe['Time Series']]
resultframe['Mean'] = [np.mean(i) for i in resultframe['Time Series']]
resultframe['p2.5'] = [np.percentile(i, 2.5) for i in resultframe['Time Series']]
resultframe['p97.5'] = [np.percentile(i, 97.5) for i in resultframe['Time Series']]
resultframe.head()
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
Changing entries of "Result" (Safebet = 0, Gamble = 1):
|
resultframe['Result'] = resultframe['Result'].map({'Safebet': 0, 'Gamble': 1})
resultframe.head()
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
We should center all our data at 0.0, since we care about relative wave form and not baseline amplitude. The difference in baseline amplitude can be ascribed to hardware differences (electrode readings) and should not be considered in our predictive model. Thus, we need to adapt the features above by centering their values around 0.0. Hence, mean is dropped as a feature and a new feature, "Interval", is introduced: Interval = Max - Min. Percentile 2.5 and percentile 97.5 values were determined as features above. Now a new feature, "Percentile Interval", is introduced as the difference between the two: Percentile Interval = p97.5 - p2.5.
|
resultframe['Max'] = resultframe['Max'] - resultframe['Mean']
resultframe['Min'] = resultframe['Min'] - resultframe['Mean']
resultframe['p2.5'] = resultframe['p2.5'] - resultframe['Mean']
resultframe['p97.5'] = resultframe['p97.5'] - resultframe['Mean']
resultframe['Mean'] = resultframe['Mean'] - resultframe['Mean']
resultframe['Interval'] = resultframe['Max'] - resultframe['Min']
resultframe['Percentile Interval'] = resultframe['p97.5'] - resultframe['p2.5']
#resultframe = resultframe[['Subject', 'Time Series', 'Max', 'Min', 'Std', 'Interval', 'p2.5', 'p97.5', 'Percentile Interval', 'Result']]
resultframe
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
Since all the features currently in place are statistics that do not respect the temporal nature of our data (time-series data), we need to introduce features that also respect the morphology of the waves in the data. An example feature is the number of peaks. Number of peaks = the number of interior data points i where series[i] > series[i-1] and series[i] > series[i+1]; the endpoint entries i=0 and i=949 are excluded.
|
peaks = []
for series in resultframe['Time Series']:
no_peaks = 0
indices = range(1, 949)  # interior points 1..948 only; endpoints i=0 and i=949 are excluded
for index in indices:
if series[index] > series[index-1] and series[index] > series[index+1]:
no_peaks += 1
peaks.append(no_peaks)
len(peaks)
resultframe['Num Peaks'] = peaks
resultframe.head()
#resultframe = resultframe[['Subject', 'Time Series', 'Max', 'Min', 'Interval', 'Std', 'p2.5', 'p97.5', 'Percentile Interval', 'Num Peaks', 'Result']]
#resultframe.head()
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
Categorizing all our data (a short illustration of the difference between `pd.cut` and `pd.qcut` follows after this cell)
|
resultframe['Num Peaks Cat'] = pd.cut(resultframe['Num Peaks'], 4,labels=[1,2,3,4])
#resultframe = resultframe[['Subject', 'Time Series', 'Max', 'Min', 'Interval', 'Std', 'p2.5', 'p97.5', 'Percentile Interval', 'Num Peaks', 'Num Peaks Cat', 'Result']]
resultframe.head()
resultframe['p2.5 Cat'] = pd.qcut(resultframe['p2.5'], 3,labels=[1,2,3])
resultframe['p97.5 Cat'] = pd.qcut(resultframe['p97.5'], 3,labels=[1,2,3])
resultframe['Std Cat'] = pd.qcut(resultframe['Std'], 3,labels=[1,2,3])
resultframe['Percentile Interval Cat'] = pd.qcut(resultframe['Percentile Interval'], 3,labels=[1,2,3])
#resultframe = resultframe[['Subject', 'Time Series', 'Max', 'Min', 'Interval', 'Std', 'p2.5', 'p97.5', 'Percentile Interval', 'Num Peaks', 'Num Peaks Cat', 'p2.5 Cat', 'p97.5 Cat', 'Std Cat', 'Percentile Interval Cat', 'Result']]
resultframe
resultframe['Num Peaks Cat'] = resultframe['Num Peaks Cat'].astype(int)
resultframe['p2.5 Cat'] = resultframe['p2.5 Cat'].astype(int)
resultframe['p97.5 Cat'] = resultframe['p97.5 Cat'].astype(int)
resultframe['Std Cat'] = resultframe['Std Cat'].astype(int)
resultframe['Percentile Interval Cat'] = resultframe['Percentile Interval Cat'].astype(int)
resultframe.head()
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
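For reference, here is a minimal illustration (my addition, not from the original notebook) of the difference between the two binning calls used above: `pd.cut` splits the value *range* into equal-width bins, while `pd.qcut` splits at quantiles so that the bins have roughly equal counts.

import pandas as pd

s = pd.Series([1, 2, 3, 4, 100])
pd.cut(s, 2, labels=[1, 2])    # equal-width bins over [1, 100]: 1,2,3,4 -> bin 1, 100 -> bin 2
pd.qcut(s, 2, labels=[1, 2])   # quantile (median) split: 1,2,3 -> bin 1, 4,100 -> bin 2

With an outlier like 100 present, `pd.cut` piles almost everything into one bin, which is why the quantile-based `pd.qcut` is used above for the skewed continuous features.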
Checking our X and y matrices (selecting only features we want to pass into the model)
|
resultframe.loc[:,["Subject", "Result"]][resultframe['Subject']==1].drop('Subject', axis=1).head()
#resultframe.iloc[:,[1,3]][resultframe['Subject']==1].drop("Subject", axis=1).head()
resultframe.drop(["Subject", "Time Series", "Result"], axis=1)
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
Modeling: Logistic Regression. Initialize a dataframe to track model performance per subject
|
performance_logistic = pd.DataFrame(index = Xdict.keys(), # subject
columns=['naive_train_accuracy',
'naive_test_accuracy',
'model_train_accuracy',
'model_test_accuracy'])
performance_logistic
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
Train model
|
coefficients = dict()
# initialize dataframes to log predicted choice and true choice for each trial
predictions_logistic_train_master = pd.DataFrame(columns=['predicted_choice',
'true_choice'])
predictions_logistic_test_master = pd.DataFrame(columns=['predicted_choice',
'true_choice'])
LogisticRegressionModel = linear_model.LogisticRegression()
# two subclasses to start
for subject in subjects:
print(subject)
#X = resultframe.iloc[:,[0,5,8,10,11,12]][resultframe['Subject']==subject].drop("Subject", axis=1)
#y = resultframe.iloc[:,[0,-1]][resultframe['Subject']==subject].drop('Subject', axis=1)
X = resultframe.drop(["Time Series", "Result"], axis=1)[resultframe['Subject']==subject].drop("Subject", axis=1)
y = resultframe.loc[:,["Subject", "Result"]][resultframe['Subject']==subject].drop('Subject', axis=1)
# train-test split
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=100)
# naive baseline: always guess the most frequent class
# ('Result' was mapped to Safebet=0 / Gamble=1 above, so compare against 0/1 rather than the original strings)
performance_logistic.loc[subject,'naive_train_accuracy'] = max(float(np.mean(ytrain==1)),float(np.mean(ytrain==0)))
performance_logistic.loc[subject, 'naive_test_accuracy'] = max(float(np.mean(ytest==1)),float(np.mean(ytest==0)))
# make df to track predicted vs real choice for each subject
predictions_logistic_train = pd.DataFrame(columns=['predicted_choice',
'true_choice'])
predictions_logistic_test = pd.DataFrame(columns=['predicted_choice',
'true_choice'])
predictions_logistic_train['true_choice'] = ytrain['Result']
predictions_logistic_test['true_choice'] = ytest['Result']
# logistic regression
LogisticRegressionModel.fit(Xtrain, ytrain)
# store coefficients
coefficients[subject] = LogisticRegressionModel.coef_[0]
performance_logistic.loc[subject,'model_train_accuracy'] = LogisticRegressionModel.score(Xtrain,ytrain)
performance_logistic.loc[subject,'model_test_accuracy'] = LogisticRegressionModel.score(Xtest,ytest)
# complete the guesses for each person
predictions_logistic_train['predicted_choice'] = LogisticRegressionModel.predict(Xtrain)
predictions_logistic_test['predicted_choice'] = LogisticRegressionModel.predict(Xtest)
# concatenate dfs
predictions_logistic_train_master = pd.concat([predictions_logistic_train_master, predictions_logistic_train])
predictions_logistic_test_master = pd.concat([predictions_logistic_test_master, predictions_logistic_test])
%matplotlib inline
performance_logistic
train_accuracy_total = np.mean(predictions_logistic_train_master['true_choice'] == predictions_logistic_train_master['predicted_choice'])
test_accuracy_total = np.mean(predictions_logistic_test_master['true_choice'] == predictions_logistic_test_master['predicted_choice'])
train_accuracy_total, test_accuracy_total
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
FEATURE SELECTION: since not much improvement was seen in iter5, I will attempt to selectively include features from our current feature set that demonstrate strong predictive power. I will first check for collinear features
|
train, test = train_test_split(resultframe, test_size=0.2, random_state=100)
train_df = train.iloc[:, 2:]
train_df.head()
train_df.corr()
colormap = plt.cm.viridis
plt.figure(figsize=(12,12))
plt.title('Pearson Correlation of Features', y=1.05, size=15)
sns.heatmap(train_df.corr().round(2)\
,linewidths=0.1,vmax=1.0, square=True, cmap=colormap, \
linecolor='white', annot=True);
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
As seen in the chart above, the correlation between different features is generally quite high. Thus, we need to be more selective in choosing features for this model, as uncorrelated features are generally more powerful predictors. Will try these features: num peaks cat, percentile interval, std, p97.5 cat, p2.5 cat. Random Forest: initialize a dataframe to track model performance per subject
|
performance_forest = pd.DataFrame(index = Xdict.keys(), # subject
columns=['naive_train_accuracy',
'naive_test_accuracy',
'model_train_accuracy',
'model_test_accuracy'])
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
Initialize dataframes to log predicted choice and true choice for each trial
|
feature_importances = dict()
predictions_forest_train_master = pd.DataFrame(columns=['predicted_choice',
'true_choice'])
predictions_forest_test_master = pd.DataFrame(columns=['predicted_choice',
'true_choice'])
random_forest = RandomForestClassifier()
# two subclasses to start
for subject in subjects:
print(subject)
X = resultframe.iloc[:,[0,4,6,7,8]][resultframe['Subject']==subject].drop("Subject", axis=1)
y = resultframe.iloc[:,[0,-1]][resultframe['Subject']==subject].drop('Subject', axis=1)
# train-test split
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=100)
# naive baseline: always guess the most frequent class ('Result' is coded Safebet=0 / Gamble=1)
performance_forest.loc[subject,'naive_train_accuracy'] = max(float(np.mean(ytrain==1)),float(np.mean(ytrain==0)))
performance_forest.loc[subject,'naive_test_accuracy'] = max(float(np.mean(ytest==1)),float(np.mean(ytest==0)))
# make df to track predicted vs real choice for each subject
predictions_forest_train = pd.DataFrame(columns=['predicted_choice',
'true_choice'])
predictions_forest_test = pd.DataFrame(columns=['predicted_choice',
'true_choice'])
predictions_forest_train['true_choice'] = ytrain['Result']
predictions_forest_test['true_choice'] = ytest['Result']
# model
random_forest.fit(Xtrain, ytrain)
performance_forest.loc[subject,'model_train_accuracy'] = random_forest.score(Xtrain,ytrain)
performance_forest.loc[subject,'model_test_accuracy'] = random_forest.score(Xtest,ytest)
# store feature importances
feature_importances[subject] = random_forest.feature_importances_
# complete the guesses for each person
predictions_forest_train['predicted_choice'] = random_forest.predict(Xtrain)
predictions_forest_test['predicted_choice'] = random_forest.predict(Xtest)
# concatenate dfs
predictions_forest_train_master = pd.concat([predictions_forest_train_master, predictions_forest_train])
predictions_forest_test_master = pd.concat([predictions_forest_test_master, predictions_forest_test])
performance_forest
train_accuracy_total = np.mean(predictions_forest_train_master['true_choice'] == predictions_forest_train_master['predicted_choice'])
test_accuracy_total = np.mean(predictions_forest_test_master['true_choice'] == predictions_forest_test_master['predicted_choice'])
train_accuracy_total, test_accuracy_total
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
Overfits a lot. Logistic regression modified with StandardScaler(), i.e., z-scoring the data before fitting the model (a quick reminder sketch of z-scoring follows). Initialize a dataframe to track model performance per subject
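As a reminder of what z-scoring does (a tiny sketch, my addition rather than part of the original notebook): each feature is shifted by its mean and divided by its standard deviation, which is exactly what StandardScaler applies column-wise.

import numpy as np

x = np.array([2.0, 4.0, 6.0])
z = (x - x.mean()) / x.std()   # z-scored values have mean 0 and unit standard deviation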
|
performance_logistic = pd.DataFrame(index = Xdict.keys(), # subject
columns=['naive_train_accuracy',
'naive_test_accuracy',
'model_train_accuracy',
'model_test_accuracy'])
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
initialize dataframes to log predicted choice and true choice for each trial
|
predictions_logistic_train_master = pd.DataFrame(columns=['predicted_choice',
'true_choice'])
predictions_logistic_test_master = pd.DataFrame(columns=['predicted_choice',
'true_choice'])
LogisticRegressionModel = linear_model.LogisticRegression()
from sklearn.feature_selection import SelectKBest, f_classif # f_classif scores classification targets; use f_regression for regression targets
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
# pipe = make_pipeline(SelectKBest(k=50), StandardScaler(), linear_model.LogisticRegressionCV())
pipe = make_pipeline(StandardScaler(), linear_model.LogisticRegressionCV())
LogisticRegressionModel = pipe
# two subclasses to start
for subject in subjects:
print(subject)
X = resultframe.iloc[:,[0,4,6,7,8]][resultframe['Subject']==subject].drop("Subject", axis=1)
y = resultframe.iloc[:,[0,-1]][resultframe['Subject']==subject].drop('Subject', axis=1)
# train-test split
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=100)
# naive baseline: always guess the most frequent class ('Result' is coded Safebet=0 / Gamble=1)
performance_logistic.loc[subject,'naive_train_accuracy'] = max(float(np.mean(ytrain==1)),float(np.mean(ytrain==0)))
performance_logistic.loc[subject,'naive_test_accuracy'] = max(float(np.mean(ytest==1)),float(np.mean(ytest==0)))
# make df to track predicted vs real choice for each subject
predictions_logistic_train = pd.DataFrame(columns=['predicted_choice',
'true_choice'])
predictions_logistic_test = pd.DataFrame(columns=['predicted_choice',
'true_choice'])
predictions_logistic_train['true_choice'] = ytrain['Result']
predictions_logistic_test['true_choice'] = ytest['Result']
# logistic regression
LogisticRegressionModel.fit(Xtrain, ytrain)
performance_logistic.loc[subject,'model_train_accuracy'] = LogisticRegressionModel.score(Xtrain,ytrain)
performance_logistic.loc[subject,'model_test_accuracy'] = LogisticRegressionModel.score(Xtest,ytest)
# complete the guesses for each person
predictions_logistic_train['predicted_choice'] = LogisticRegressionModel.predict(Xtrain)
predictions_logistic_test['predicted_choice'] = LogisticRegressionModel.predict(Xtest)
# concatenate dfs
predictions_logistic_train_master = pd.concat([predictions_logistic_train_master, predictions_logistic_train])
predictions_logistic_test_master = pd.concat([predictions_logistic_test_master, predictions_logistic_test])
performance_logistic
train_accuracy_total = np.mean(predictions_logistic_train_master['true_choice'] == predictions_logistic_train_master['predicted_choice'])
test_accuracy_total = np.mean(predictions_logistic_test_master['true_choice'] == predictions_logistic_test_master['predicted_choice'])
train_accuracy_total, test_accuracy_total
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
Random forest with StandardScaler(). Initialize a dataframe to track model performance per subject. (Note that tree-based models are invariant to monotonic feature scaling, so standardization is not expected to change the forest's predictions.)
|
performance_forest = pd.DataFrame(index = Xdict.keys(), # subject
columns=['naive_train_accuracy',
'naive_test_accuracy',
'model_train_accuracy',
'model_test_accuracy'])
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
initialize dataframes to log predicted choice and true choice for each trial
|
feature_importances = dict()
predictions_forest_train_master = pd.DataFrame(columns=['predicted_choice',
'true_choice'])
predictions_forest_test_master = pd.DataFrame(columns=['predicted_choice',
'true_choice'])
random_forest = RandomForestClassifier()
# two subclasses to start
for subject in subjects:
print(subject)
X = resultframe.iloc[:,[0,4,6,7,8]][resultframe['Subject']==subject].drop("Subject", axis=1)
y = resultframe.iloc[:,[0,-1]][resultframe['Subject']==subject].drop('Subject', axis=1)
# train-test split first, then standardize: fit the scaler on the training split only
# (fitting on all of X before splitting would leak test-set statistics into the scaler)
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=100)
scaler.fit(Xtrain)
Xtrain = scaler.transform(Xtrain)
Xtest = scaler.transform(Xtest)
# naive baseline: always guess the most frequent class ('Result' is coded Safebet=0 / Gamble=1)
performance_forest.loc[subject,'naive_train_accuracy'] = max(float(np.mean(ytrain==1)),float(np.mean(ytrain==0)))
performance_forest.loc[subject,'naive_test_accuracy'] = max(float(np.mean(ytest==1)),float(np.mean(ytest==0)))
# make df to track predicted vs real choice for each subject
predictions_forest_train = pd.DataFrame(columns=['predicted_choice',
'true_choice'])
predictions_forest_test = pd.DataFrame(columns=['predicted_choice',
'true_choice'])
predictions_forest_train['true_choice'] = ytrain['Result']
predictions_forest_test['true_choice'] = ytest['Result']
# model
random_forest.fit(Xtrain, ytrain)
performance_forest.loc[subject,'model_train_accuracy'] = random_forest.score(Xtrain,ytrain)
performance_forest.loc[subject,'model_test_accuracy'] = random_forest.score(Xtest,ytest)
# store feature importances
feature_importances[subject] = random_forest.feature_importances_
# complete the guesses for each person
predictions_forest_train['predicted_choice'] = random_forest.predict(Xtrain)
predictions_forest_test['predicted_choice'] = random_forest.predict(Xtest)
# concatenate dfs
predictions_forest_train_master = pd.concat([predictions_forest_train_master, predictions_forest_train])
predictions_forest_test_master = pd.concat([predictions_forest_test_master, predictions_forest_test])
performance_forest
train_accuracy_total = np.mean(predictions_forest_train_master['true_choice'] == predictions_forest_train_master['predicted_choice'])
test_accuracy_total = np.mean(predictions_forest_test_master['true_choice'] == predictions_forest_test_master['predicted_choice'])
train_accuracy_total, test_accuracy_total
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
Logistic regression with StandardScaler() *and* selecting the K best features (reducing the number of features should reduce overfitting; a standalone sketch of SelectKBest follows). Initialize a dataframe to track model performance per subject
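A quick standalone sketch of what `SelectKBest` does (illustrative only, and it assumes `X` and `y` hold one subject's feature matrix and labels as in the loops above): it scores each feature with the `f_classif` ANOVA F-test and keeps only the top `k` columns.

from sklearn.feature_selection import SelectKBest, f_classif
import numpy as np

selector = SelectKBest(f_classif, k=2)           # keep the 2 highest-scoring features
X_top2 = selector.fit_transform(X, np.ravel(y))  # assumes X, y as defined in the cells above
selector.scores_                                 # per-feature F-scores used for the ranking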
|
performance_logistic = pd.DataFrame(index = Xdict.keys(), # subject
columns=['naive_train_accuracy',
'naive_test_accuracy',
'model_train_accuracy',
'model_test_accuracy'])
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
initialize dataframes to log predicted choice and true choice for each trial
|
predictions_logistic_train_master = pd.DataFrame(columns=['predicted_choice',
'true_choice'])
predictions_logistic_test_master = pd.DataFrame(columns=['predicted_choice',
'true_choice'])
LogisticRegressionModel = linear_model.LogisticRegression()
from sklearn.feature_selection import SelectKBest, f_classif # f_classif scores classification targets; use f_regression for regression targets
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
|
_____no_output_____
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
Try different values of k (num_k below lists the candidate values)
|
num_k = [1,2,3,4] # max number of features is 4
for k in num_k:
# re-initialize the master prediction frames for each k; otherwise predictions
# accumulate across k values and the reported accuracies get mixed together
predictions_logistic_train_master = pd.DataFrame(columns=['predicted_choice', 'true_choice'])
predictions_logistic_test_master = pd.DataFrame(columns=['predicted_choice', 'true_choice'])
pipe = make_pipeline(SelectKBest(k=k), StandardScaler(), linear_model.LogisticRegressionCV())
LogisticRegressionModel = pipe
# two subclasses to start
for subject in subjects:
print(subject)
X = resultframe.iloc[:,[0,4,6,7,8]][resultframe['Subject']==subject].drop("Subject", axis=1)
y = resultframe.iloc[:,[0,-1]][resultframe['Subject']==subject].drop('Subject', axis=1)
# train-test split
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.2, random_state=100)
# naive baseline: always guess the most frequent class ('Result' is coded Safebet=0 / Gamble=1)
performance_logistic.loc[subject,'naive_train_accuracy'] = max(float(np.mean(ytrain==1)),float(np.mean(ytrain==0)))
performance_logistic.loc[subject,'naive_test_accuracy'] = max(float(np.mean(ytest==1)),float(np.mean(ytest==0)))
# make df to track predicted vs real choice for each subject
predictions_logistic_train = pd.DataFrame(columns=['predicted_choice',
'true_choice'])
predictions_logistic_test = pd.DataFrame(columns=['predicted_choice',
'true_choice'])
predictions_logistic_train['true_choice'] = ytrain['Result']
predictions_logistic_test['true_choice'] = ytest['Result']
# logistic regression
LogisticRegressionModel.fit(Xtrain, ytrain)
performance_logistic.loc[subject,'model_train_accuracy'] = LogisticRegressionModel.score(Xtrain,ytrain)
performance_logistic.loc[subject,'model_test_accuracy'] = LogisticRegressionModel.score(Xtest,ytest)
# complete the guesses for each person
predictions_logistic_train['predicted_choice'] = LogisticRegressionModel.predict(Xtrain)
predictions_logistic_test['predicted_choice'] = LogisticRegressionModel.predict(Xtest)
# concatenate dfs
predictions_logistic_train_master = pd.concat([predictions_logistic_train_master, predictions_logistic_train])
predictions_logistic_test_master = pd.concat([predictions_logistic_test_master, predictions_logistic_test])
train_accuracy_total = np.mean(predictions_logistic_train_master['true_choice'] == predictions_logistic_train_master['predicted_choice'])
test_accuracy_total = np.mean(predictions_logistic_test_master['true_choice'] == predictions_logistic_test_master['predicted_choice'])
print(k, train_accuracy_total, test_accuracy_total)
|
1
2
3
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
Trying other models
|
X = resultframe.iloc[:,[4,6,7,8]]
y = resultframe.iloc[:,-1]
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=100)
print ('Number of samples in training data:',len(x_train))
print ('Number of samples in test data:',len(x_test))
perceptron = Perceptron(max_iter=100)
perceptron.fit(x_train, y_train)
perceptron_train_acc = perceptron.score(x_train, y_train)
perceptron_test_acc = perceptron.score(x_test, y_test)
print('perceptron training accuracy= ', perceptron_train_acc)
print('perceptron test accuracy= ',perceptron_test_acc)
adaboost = AdaBoostClassifier()
adaboost.fit(x_train, y_train)
adaboost_train_acc = adaboost.score(x_train, y_train)
adaboost_test_acc = adaboost.score(x_test, y_test)
print('adaboost training accuracy= ', adaboost_train_acc)
print('adaboost test accuracy= ',adaboost_test_acc)
random_forest = RandomForestClassifier()
random_forest.fit(x_train, y_train)
random_forest_train_acc = random_forest.score(x_train, y_train)
random_forest_test_acc = random_forest.score(x_test, y_test)
print('random_forest training accuracy= ', random_forest_train_acc)
print('random_forest test accuracy= ',random_forest_test_acc)
|
random_forest training accuracy=  0.7377796779250392
random_forest test accuracy= 0.5162393162393163
|
MIT
|
Model History/Adi_iter6/Adi Iter 6.ipynb
|
dattasiddhartha/DataX-NeuralDecisionMaking
|
> ------ Gaussian boson sampling tutorial To get a feel for how Strawberry Fields works, let's try coding a quantum program, Gaussian boson sampling. Background information: Gaussian states---A Gaussian state is one that can be described by a [Gaussian function](https://en.wikipedia.org/wiki/Gaussian_function) in phase space. For example, a single mode Gaussian state squeezed in the $x$ quadrature by the squeezing operator $S(r)$ can be described by the following [Wigner quasiprobability distribution](https://en.wikipedia.org/wiki/Wigner_quasiprobability_distribution):$$W(x,p) = \frac{2}{\pi}e^{-2\sigma^2(x-\bar{x})^2 - 2(p-\bar{p})^2/\sigma^2}$$where $\sigma$ represents the **squeezing**, and $\bar{x}$ and $\bar{p}$ are the mean **displacements** in $x$ and $p$ respectively. For multimode states containing $N$ modes, this can be generalised: Gaussian states are uniquely defined by a [multivariate Gaussian function](https://en.wikipedia.org/wiki/Multivariate_normal_distribution), defined in terms of the **vector of means** ${\mu}$ and a **covariance matrix** $\sigma$. The position and momentum basis: consider, for example, a single mode in the position and momentum quadrature basis (the default for Strawberry Fields). A Gaussian state with displacement $\alpha = \bar{x}+i\bar{p}$ and squeezing $\xi = r e^{i\phi}$ in phase space has a vector of means and a covariance matrix given by:$$ \mu = (\bar{x},\bar{p}),~~~~~~\sigma = SS^\dagger=R(\phi/2)\begin{bmatrix}e^{-2r} & 0 \\0 & e^{2r} \\\end{bmatrix}R(\phi/2)^T$$where $S$ is the squeezing operator, and $R(\phi)$ is the standard two-dimensional rotation matrix. For multiple modes, in Strawberry Fields we use the convention $$ \mu = (\bar{x}_1,\bar{x}_2,\dots,\bar{x}_N,\bar{p}_1,\bar{p}_2,\dots,\bar{p}_N)$$and therefore, taking $\phi=0$ for convenience, the multimode covariance matrix is simply$$\sigma = \text{diag}(e^{-2r_1},\dots,e^{-2r_N},e^{2r_1},\dots,e^{2r_N})\in\mathbb{C}^{2N\times 2N}$$If a continuous-variable state *cannot* be represented in the above form (for example, a single photon Fock state or a cat state), then it is non-Gaussian. The annihilation and creation operator basis: if we are instead working in the creation and annihilation operator basis, we can use the transformation of the single mode squeezing operator$$ S(\xi) \left[\begin{matrix}\hat{a}\\\hat{a}^\dagger\end{matrix}\right] = \left[\begin{matrix}\cosh(r)&-e^{i\phi}\sinh(r)\\-e^{-i\phi}\sinh(r)&\cosh(r)\end{matrix}\right] \left[\begin{matrix}\hat{a}\\\hat{a}^\dagger\end{matrix}\right]$$resulting in$$\sigma = SS^\dagger = \left[\begin{matrix}\cosh(2r)&-e^{i\phi}\sinh(2r)\\-e^{-i\phi}\sinh(2r)&\cosh(2r)\end{matrix}\right]$$For multiple Gaussian states with non-zero squeezing, the covariance matrix in this basis simply generalises to$$\sigma = \text{diag}(S_1S_1^\dagger,\dots,S_NS_N^\dagger)\in\mathbb{C}^{2N\times 2N}$$ Introduction to Gaussian boson sampling---"If you need to wait exponential time for \[your single photon sources to emit simultaneously\], then there would seem to be no advantage over classical computation. This is the reason why so far, boson sampling has only been demonstrated with 3-4 photons.
When faced with these problems, until recently, all we could do was shrug our shoulders." - [Scott Aaronson](https://www.scottaaronson.com/blog/?p=1579)While [boson sampling](https://en.wikipedia.org/wiki/Boson_sampling) allows the experimental implementation of a quantum sampling problem that is computationally hard classically, one of its main issues in experimental setups is **scalability**, due to its dependence on an array of simultaneously emitting single photon sources.Currently, most physical implementations of boson sampling make use of a process known as [Spontaneous Parametric Down-Conversion](http://en.wikipedia.org/wiki/Spontaneous_parametric_down-conversion) to generate the single photon source inputs. Unfortunately, this method is non-deterministic - as the number of modes in the apparatus increases, the average time required until every photon source emits a photon simultaneously increases *exponentially*.In order to simulate a *deterministic* single photon source array, several variations on boson sampling have been proposed, the most well known being scattershot boson sampling ([Lund, 2014](https://link.aps.org/doi/10.1103/PhysRevLett.113.100502)). However, a recent boson sampling variation by [Hamilton et al.](https://link.aps.org/doi/10.1103/PhysRevLett.119.170501) negates the need for single photon Fock states altogether, by showing that **incident Gaussian states** - in this case, single mode squeezed states - can produce problems in the same computational complexity class as boson sampling. Even more significantly, this removes the scalability problem with single photon sources, as single mode squeezed states can easily be generated simultaneously in experiments.Aside from changing the input states from single photon Fock states to Gaussian states, the Gaussian boson sampling scheme appears quite similar to that of boson sampling:1. $N$ single mode squeezed states $\left|{\xi_i}\right\rangle$, with squeezing parameters $\xi_i=r_ie^{i\phi_i}$, enter an $N$ mode linear interferometer with unitary $U$. 2. The output of the interferometer is denoted $\left|{\psi'}\right\rangle$. Each output mode is then measured in the Fock basis, $\bigotimes_i \left|{n_i}\middle\rangle\middle\langle{n_i}\right|$.Without loss of generality, we can absorb the squeezing phase $\phi$ into the interferometer, and set $\phi=0$ for convenience. The covariance matrix **in the creation and annihilation operator basis** at the output of the interferometer is then given by:$$\sigma_{out} = \frac{1}{2} \left[ \begin{matrix}U&0\\0&U^*\end{matrix} \right]\sigma_{in} \left[ \begin{matrix}U^\dagger&0\\0&U^T\end{matrix} \right]$$Using phase space methods, [Hamilton et al.](https://link.aps.org/doi/10.1103/PhysRevLett.119.170501) showed that the probability of measuring a Fock state is given by$$\left|\left\langle{n_1,n_2,\dots,n_N}\middle|{\psi'}\right\rangle\right|^2 = \frac{\left|\text{Haf}[(U\bigoplus_i\tanh(r_i)U^T)]_{st}\right|^2}{n_1!n_2!\cdots n_N!\sqrt{|\sigma_{out}+I/2|}},$$i.e. the sampled single photon probability distribution is proportional to the **Hafnian** of a submatrix of $U\bigoplus_i\tanh(r_i)U^T$, dependent upon the output covariance matrix.**The Hafnian**The Hafnian of a matrix is defined by$$\text{Haf}(A) = \frac{1}{N!2^N}\sum_{\sigma\in S_{2N}}\prod_{i=1}^N A_{\sigma(2i-1)\sigma(2i)}$$where $S_{2N}$ is the set of all permutations of $2N$ elements.
In graph theory, the Hafnian calculates the number of perfect matchings in an **arbitrary graph** with adjacency matrix $A$.Compare this to the permanent, which calculates the number of perfect matchings on a *bipartite* graph - the Hafnian turns out to be a generalisation of the permanent, with the relationship$$\begin{align}\text{Per}(A) = \text{Haf}\left(\left[\begin{matrix}0&A\\A^T&0\end{matrix}\right]\right)\end{align}$$As any algorithm that could calculate (or even approximate) the Hafnian could also calculate the permanent - a #P-hard problem - it follows that calculating or approximating the Hafnian must also be a classically hard problem. Equally squeezed input states: in the case where all the input states are squeezed equally with squeezing factor $\xi=r$ (i.e. $\phi=0$), we can simplify the denominator into a much nicer form. It can easily be seen that, due to the unitarity of $U$,$$\left[ \begin{matrix}U&0\\0&U^*\end{matrix} \right] \left[ \begin{matrix}U^\dagger&0\\0&U^T\end{matrix} \right] = \left[ \begin{matrix}UU^\dagger&0\\0&U^*U^T\end{matrix} \right] =I$$Thus, we have $$\begin{align}\sigma_{out} +\frac{1}{2}I &= \sigma_{out} + \frac{1}{2} \left[ \begin{matrix}U&0\\0&U^*\end{matrix} \right] \left[ \begin{matrix}U^\dagger&0\\0&U^T\end{matrix} \right] = \left[ \begin{matrix}U&0\\0&U^*\end{matrix} \right] \frac{1}{2} \left(\sigma_{in}+I\right) \left[ \begin{matrix}U^\dagger&0\\0&U^T\end{matrix} \right]\end{align}$$where we have substituted in the expression for $\sigma_{out}$. Taking the determinants of both sides: the two block diagonal matrices containing $U$ are unitary, and thus have determinant 1, resulting in$$\left|\sigma_{out} +\frac{1}{2}I\right| =\left|\frac{1}{2}\left(\sigma_{in}+I\right)\right|=\left|\frac{1}{2}\left(SS^\dagger+I\right)\right| $$By expanding out the right hand side, and using hyperbolic identities, it is easy to see that this reduces to $\cosh^{2N}(r)$ where $N$ is the number of modes; thus the Gaussian boson sampling problem in the case of equally squeezed input modes reduces to$$\left|\left\langle{n_1,n_2,\dots,n_N}\middle|{\psi'}\right\rangle\right|^2 = \frac{\left|\text{Haf}[(UU^T\tanh(r))]_{st}\right|^2}{n_1!n_2!\cdots n_N!\cosh^N(r)},$$ The Gaussian boson sampling circuit---The multimode linear interferometer can be decomposed into two-mode beamsplitters (`BSgate`) and single-mode phase shifters (`Rgate`) (Reck, 1994), allowing for an almost trivial translation into a continuous-variable quantum circuit. For example, in the case of a 4 mode interferometer with arbitrary $4\times 4$ unitary $U$, the continuous-variable quantum circuit for Gaussian boson sampling is given by the circuit diagram shown in the original notebook (figure not reproduced here).In the above,* the single mode squeezed states all apply identical squeezing $\xi=r$,* the detectors perform Fock state measurements (i.e. measuring the photon number of each mode),* the parameters of the beamsplitters and the rotation gates determine the unitary $U$.For $N$ input modes, we must have a minimum of $N$ columns in the beamsplitter array ([Clements, 2016](https://arxiv.org/abs/1603.08788)). Simulating boson sampling in Strawberry Fields---
|
import numpy as np  # numpy is used in the Hafnian analysis below
import strawberryfields as sf
from strawberryfields.ops import *
from strawberryfields.utils import random_interferometer
|
_____no_output_____
|
Apache-2.0
|
examples/GaussianBosonSampling.ipynb
|
cclauss/strawberryfields
|
Strawberry Fields makes this easy; there is an `Interferometer` quantum operation, and a utility function that allows us to generate the matrix representing a random interferometer.
|
U = random_interferometer(4)
|
_____no_output_____
|
Apache-2.0
|
examples/GaussianBosonSampling.ipynb
|
cclauss/strawberryfields
|
The lack of Fock states and non-linear operations means we can use the Gaussian backend to simulate Gaussian boson sampling. In this example program, we are using input states with squeezing parameter $\xi=1$, and the randomly chosen interferometer generated above.
|
eng, q = sf.Engine(4)
with eng:
# prepare the input squeezed states
S = Sgate(1)
All(S) | q
# interferometer
Interferometer(U) | q
state = eng.run('gaussian')
|
_____no_output_____
|
Apache-2.0
|
examples/GaussianBosonSampling.ipynb
|
cclauss/strawberryfields
|
We can see the decomposed beamsplitters and rotation gates, by calling `eng.print_applied()`:
|
eng.print_applied()
|
Run 0:
Sgate(1, 0) | (q[0])
Sgate(1, 0) | (q[1])
Sgate(1, 0) | (q[2])
Sgate(1, 0) | (q[3])
Rgate(-1.77) | (q[0])
BSgate(0.3621, 0) | (q[0], q[1])
Rgate(0.4065) | (q[2])
BSgate(0.7524, 0) | (q[2], q[3])
Rgate(-0.5894) | (q[1])
BSgate(0.9441, 0) | (q[1], q[2])
Rgate(0.2868) | (q[0])
BSgate(0.8913, 0) | (q[0], q[1])
Rgate(-1.631) | (q[0])
Rgate(-1.74) | (q[1])
Rgate(3.074) | (q[2])
Rgate(-0.9618) | (q[3])
BSgate(-1.482, 0) | (q[2], q[3])
Rgate(2.383) | (q[2])
BSgate(-0.9124, 0) | (q[1], q[2])
Rgate(2.188) | (q[1])
|
Apache-2.0
|
examples/GaussianBosonSampling.ipynb
|
cclauss/strawberryfields
|
**Available decompositions**: check out our documentation to see the CV decompositions available in Strawberry Fields. Analysis---Let's now verify the Gaussian boson sampling result by comparing the output Fock state probabilities to the Hafnian, using the relationship$$\left|\left\langle{n_1,n_2,\dots,n_N}\middle|{\psi'}\right\rangle\right|^2 = \frac{\left|\text{Haf}[(UU^T\tanh(r))]_{st}\right|^2}{n_1!n_2!\cdots n_N!\cosh^N(r)}$$ Calculating the Hafnian: for the right-hand-side numerator, we first calculate the submatrix $[(UU^T\tanh(r))]_{st}$:
|
B = (np.dot(U, U.T) * np.tanh(1))
|
_____no_output_____
|
Apache-2.0
|
examples/GaussianBosonSampling.ipynb
|
cclauss/strawberryfields
|
In Gaussian boson sampling, we determine the submatrix by taking the rows and columns corresponding to the measured Fock state. For example, to calculate the submatrix in the case of the output measurement $\left|{1,1,0,0}\right\rangle$,
|
B[:,[0,1]][[0,1]]
|
_____no_output_____
|
Apache-2.0
|
examples/GaussianBosonSampling.ipynb
|
cclauss/strawberryfields
|
To calculate the Hafnian in Python, we can use the direct definition$$\text{Haf}(A) = \frac{1}{n!2^n} \sum_{\sigma \in S_{2n}} \prod_{j=1}^n A_{\sigma(2j - 1), \sigma(2j)}$$Notice that this sum ranges over all $(2n)!$ permutations and therefore counts each perfect matching multiple times; the factor $\frac{1}{n!2^n}$ removes the multiple counting. **This function is extremely slow!** (A quick numerical sanity check of the permanent relation follows after this cell.)
|
from itertools import permutations
from scipy.special import factorial
def Haf(M):
# Hafnian straight from the definition: iterate over all permutations of the
# 2m indices, multiplying matrix entries over the matched pairs (very slow)
n=len(M)
m=int(n/2)
haf=0.0
for i in permutations(range(n)):
prod=1.0
for j in range(m):
prod*=M[i[2*j],i[2*j+1]]
haf+=prod
return haf/(factorial(m)*(2**m))  # divide out the multiple counting
|
_____no_output_____
|
Apache-2.0
|
examples/GaussianBosonSampling.ipynb
|
cclauss/strawberryfields
|
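Before comparing to Strawberry Fields, here is a quick numerical sanity check (my addition, not part of the original notebook) of the permanent relation stated earlier, $\text{Per}(A) = \text{Haf}\left(\left[\begin{matrix}0&A\\A^T&0\end{matrix}\right]\right)$, using a brute-force permanent:

import numpy as np
from itertools import permutations

def Per(A):
    # brute-force permanent: sum over permutations of the column indices
    n = len(A)
    return sum(np.prod([A[i, p[i]] for i in range(n)]) for p in permutations(range(n)))

A = np.random.rand(3, 3)
block = np.block([[np.zeros((3, 3)), A], [A.T, np.zeros((3, 3))]])
print(Per(A), Haf(block))  # the two values should agree up to floating point error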
Comparing to the SF result: in Strawberry Fields, both Fock and Gaussian states have the method `fock_prob()`, which returns the probability of measuring that particular Fock state. Let's compare the case of measuring the output state $\left|0,1,0,1\right\rangle$:
|
B = (np.dot(U,U.T) * np.tanh(1))[:, [1,3]][[1,3]]
np.abs(Haf(B))**2 / np.cosh(1)**4
state.fock_prob([0,1,0,1])
|
_____no_output_____
|
Apache-2.0
|
examples/GaussianBosonSampling.ipynb
|
cclauss/strawberryfields
|
For the measurement result $\left|2,0,0,0\right\rangle$:
|
B = (np.dot(U,U.T) * np.tanh(1))[:, [0,0]][[0,0]]
np.abs(Haf(B))**2 / (2*np.cosh(1)**4)
state.fock_prob([2,0,0,0])
|
_____no_output_____
|
Apache-2.0
|
examples/GaussianBosonSampling.ipynb
|
cclauss/strawberryfields
|
For the measurement result $\left|1,1,0,0\right\rangle$:
|
B = (np.dot(U,U.T) * np.tanh(1))[:, [0,1]][[0,1]]
np.abs(Haf(B))**2 / np.cosh(1)**4
state.fock_prob([1,1,0,0])
|
_____no_output_____
|
Apache-2.0
|
examples/GaussianBosonSampling.ipynb
|
cclauss/strawberryfields
|
For the measurement result $\left|1,1,1,1\right\rangle$, this corresponds to the full matrix $B$:
|
B = (np.dot(U,U.T) * np.tanh(1))
np.abs(Haf(B))**2 / np.cosh(1)**4
state.fock_prob([1,1,1,1])
|
_____no_output_____
|
Apache-2.0
|
examples/GaussianBosonSampling.ipynb
|
cclauss/strawberryfields
|
For the measurement result $\left|0,0,0,0\right\rangle$, this corresponds to a **null** submatrix, which has a Hafnian of 1:
|
1/np.cosh(1)**4
state.fock_prob([0,0,0,0])
|
_____no_output_____
|
Apache-2.0
|
examples/GaussianBosonSampling.ipynb
|
cclauss/strawberryfields
|
Pytorch: An automatic differentiation tool. Using `Pytorch`, you can compute the derivatives of complicated functions both easily and efficiently! When training a complex deep neural network with `Pytorch`, the partial derivatives of the loss function with respect to the parameters can be computed with minimal effort. First encounter with Pytorch: suppose we are given the simple linear expression below:$$ y = wx $$ How can we compute $\frac{\partial y}{\partial w}$? Differentiating by hand gives $\frac{\partial y}{\partial w} = x$. Starting from this simple expression, let's look at how to compute this value with `pytorch`!
|
import torch          # assumed imported at the top of the original notebook
from torch import nn  # used by the later cells in this notebook

# create a rank-1, size-1 pytorch tensor whose value is 1*2
x = torch.ones(1) * 2
# create a rank-1, size-1 pytorch tensor whose value is 1, with gradient tracking
w = torch.ones(1, requires_grad=True)
y = w * x
y
|
_____no_output_____
|
MIT
|
[Preliminary] 00 Linear regression with pytorch.ipynb
|
Junyoungpark/2021-lg-AI-camp
|
Computing the partial derivative! In pytorch, you call `.backward()` on the tensor whose derivative you want; this computes the partial derivatives with respect to every tensor in its computation graph that requires a `gradient`. `requires_grad=True` is how you designate which tensors should have their gradients computed.
|
y.backward()
|
_____no_output_____
|
MIT
|
[Preliminary] 00 Linear regression with pytorch.ipynb
|
Junyoungpark/2021-lg-AI-camp
|
Checking the partial derivative! You can inspect a particular tensor's gradient via `tensor.grad`. Shall we use `w.grad` to check the partial derivative of `y` with respect to `w`?
|
w.grad
|
_____no_output_____
|
MIT
|
[Preliminary] 00 Linear regression with pytorch.ipynb
|
Junyoungpark/2021-lg-AI-camp
|
Then what about the case requires_grad = False? (Such a tensor is not tracked by autograd, so its `.grad` attribute stays `None`.)
|
x.grad
|
_____no_output_____
|
MIT
|
[Preliminary] 00 Linear regression with pytorch.ipynb
|
Junyoungpark/2021-lg-AI-camp
|
`torch.nn`, the Neural Network package. `pytorch` already implements a wide range of neural network modules. Let's look at `nn.Linear` - the simplest, yet one of the most frequently used - and learn about `pytorch`'s `nn.Module` along the way. A look inside `nn.Linear`: `nn.Linear` holds the parameters $w$, $b$ corresponding to one layer of the linear regression and multilayer perceptron models we studied earlier. As an example, let's create an `nn.Linear` module whose input dimension is 10 and output dimension is 1!
|
lin = nn.Linear(in_features=10, out_features=1)
for p in lin.parameters():
print(p)
print(p.shape)
print('\n')
|
Parameter containing:
tensor([[ 0.0561, 0.1509, 0.0586, -0.0598, -0.1934, 0.2985, -0.0112, 0.0390,
0.2597, -0.1488]], requires_grad=True)
torch.Size([1, 10])
Parameter containing:
tensor([-0.2357], requires_grad=True)
torch.Size([1])
|
MIT
|
[Preliminary] 00 Linear regression with pytorch.ipynb
|
Junyoungpark/2021-lg-AI-camp
|
Computing $y = Wx+b$ with the `Linear` module. Just as in linear regression, remember that a single layer of a multilayer perceptron also computes the formula below:$$y = Wx+b$$Shall we compute this formula using `nn.Linear`? To make checking the result easy, we set every entry of W to 1.0 and b to 5.0.
|
lin.weight.data = torch.ones_like(lin.weight.data)
lin.bias.data = torch.ones_like(lin.bias.data) * 5.0
for p in lin.parameters():
print(p)
print(p.shape)
print('\n')
x = torch.ones(3, 10) # create a rank-2 tensor: mini-batch size = 3
y_hat = lin(x)
print(y_hat.shape)
print(y_hat)
|
torch.Size([3, 1])
tensor([[15.],
[15.],
[15.]], grad_fn=<AddmmBackward>)
|
MIT
|
[Preliminary] 00 Linear regression with pytorch.ipynb
|
Junyoungpark/2021-lg-AI-camp
|
What just happened? >Q1. Why do we use a rank-2 tensor as the input? >A1. The classes defined in pytorch's `nn` interpret the first dimension of the input as the `batch size`. >Q2. What on earth is lin(x)? >A2. Those familiar with Python will know that `object()` executes the function defined in `object.__call__()`. Pytorch's `nn.Module` __recommends__ implementing `forward()`, which is invoked through its override of `__call__()`. In general, `forward()` is where the actual computation of a given layer, using the parameters and the input, is implemented. Among other reasons, pytorch performs additional work before/after running forward() internally in order to provide a user-friendly environment; a minimal illustration follows below. We will explain this part in more detail in the next practice session while building a multilayer perceptron model! Simple with Pytorch! Implementing linear regression: shall we re-implement the linear regression model we built with numpy in the previous session, this time with pytorch? It only takes a few lines :)
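To make A2 concrete, here is a minimal custom module sketch (my illustration, not from the original notebook): `nn.Module` subclasses implement `forward()`, and calling the module object dispatches to it through `__call__()`.

import torch
from torch import nn

class MyLinear(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.W = nn.Parameter(torch.randn(out_features, in_features))
        self.b = nn.Parameter(torch.zeros(out_features))

    def forward(self, x):
        # x: (batch, in_features) - the batch is the first dimension
        return x @ self.W.t() + self.b

layer = MyLinear(10, 1)
out = layer(torch.ones(3, 10))  # __call__ runs pytorch's bookkeeping, then forward()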
|
import numpy as np  # assumed imported earlier in the full notebook; used just below

def generate_samples(n_samples: int,
w: float = 1.0,
b: float = 0.5,
x_range=[-1.0,1.0]):
xs = np.random.uniform(low=x_range[0], high=x_range[1], size=n_samples)
ys = w * xs + b
xs = torch.tensor(xs).view(-1,1).float() # pytorch nn.Module expects the batch as the first dimension!
ys = torch.tensor(ys).view(-1,1).float()
return xs, ys
w = 1.0
b = 0.5
xs, ys = generate_samples(30, w=w, b=b)
lin_model = nn.Linear(in_features=1, out_features=1) # create lin_model
for p in lin_model.parameters():
print(p)
print(p.grad)
ys_hat = lin_model(xs) # predict with lin_model
|
_____no_output_____
|
MIT
|
[Preliminary] 00 Linear regression with pytorch.ipynb
|
Junyoungpark/2021-lg-AI-camp
|
Which loss function? MSE! `pytorch` ships pre-implemented versions of the commonly used loss functions. In this practice, shall we use the MSE that appeared in __building a linear regression model with numpy__ as our loss function? (MSE: $\frac{1}{N}\sum_{i}(\hat{y}_i - y_i)^2$)
|
criteria = nn.MSELoss()
loss = criteria(ys_hat, ys)
|
_____no_output_____
|
MIT
|
[Preliminary] 00 Linear regression with pytorch.ipynb
|
Junyoungpark/2021-lg-AI-camp
|
Updating the parameters with gradient descent! `pytorch` implements a variety of optimizers for you. Let's start with the simplest one, stochastic gradient descent (SGD). Each optimizer takes its own set of arguments, but in general you only need to specify `params` and `lr`; the remaining arguments default to values known to work well for each optimizer, so an optimizer can be created very easily.
|
opt = torch.optim.SGD(params=lin_model.parameters(), lr=0.01)
|
_____no_output_____
|
MIT
|
[Preliminary] 00 Linear regression with pytorch.ipynb
|
Junyoungpark/2021-lg-AI-camp
|
Don't forget! opt.zero_grad(). Before computing partial derivatives with `pytorch`, it is strongly recommended to reset the gradients of the tensors that require them using `opt.zero_grad()` - pytorch accumulates gradients in `.grad` across backward passes, so stale values would otherwise leak into the next update.
|
opt.zero_grad()
for p in lin_model.parameters():
print(p)
print(p.grad)
loss.backward()
opt.step()
for p in lin_model.parameters():
print(p)
print(p.grad)
|
Parameter containing:
tensor([[-0.5666]], requires_grad=True)
tensor([[-1.1548]])
Parameter containing:
tensor([0.6042], requires_grad=True)
tensor([-0.1280])
|
MIT
|
[Preliminary] 00 Linear regression with pytorch.ipynb
|
Junyoungpark/2021-lg-AI-camp
|
Let's find the optimal parameters using gradient descent!
|
def run_sgd(n_steps: int = 1000,
report_every: int = 100,
verbose=True):
lin_model = nn.Linear(in_features=1, out_features=1)
opt = torch.optim.SGD(params=lin_model.parameters(), lr=0.01)
sgd_losses = []
for i in range(n_steps):
ys_hat = lin_model(xs)
loss = criteria(ys_hat, ys)
opt.zero_grad()
loss.backward()
opt.step()
if i % report_every == 0:
if verbose:
print('\n')
print("{}th update: {}".format(i,loss))
for p in lin_model.parameters():
print(p)
sgd_losses.append(loss.log10().detach().numpy())
return sgd_losses
_ = run_sgd()
|
0th update: 0.8393566012382507
Parameter containing:
tensor([[0.1211]], requires_grad=True)
Parameter containing:
tensor([-0.1363], requires_grad=True)
100th update: 0.060856711119413376
Parameter containing:
tensor([[0.6145]], requires_grad=True)
Parameter containing:
tensor([0.4634], requires_grad=True)
200th update: 0.012306183576583862
Parameter containing:
tensor([[0.8169]], requires_grad=True)
Parameter containing:
tensor([0.5173], requires_grad=True)
300th update: 0.002916797064244747
Parameter containing:
tensor([[0.9110]], requires_grad=True)
Parameter containing:
tensor([0.5130], requires_grad=True)
400th update: 0.0006996632437221706
Parameter containing:
tensor([[0.9565]], requires_grad=True)
Parameter containing:
tensor([0.5069], requires_grad=True)
500th update: 0.00016797447460703552
Parameter containing:
tensor([[0.9787]], requires_grad=True)
Parameter containing:
tensor([0.5035], requires_grad=True)
600th update: 4.032816650578752e-05
Parameter containing:
tensor([[0.9896]], requires_grad=True)
Parameter containing:
tensor([0.5017], requires_grad=True)
700th update: 9.681772098701913e-06
Parameter containing:
tensor([[0.9949]], requires_grad=True)
Parameter containing:
tensor([0.5008], requires_grad=True)
800th update: 2.3242985207616584e-06
Parameter containing:
tensor([[0.9975]], requires_grad=True)
Parameter containing:
tensor([0.5004], requires_grad=True)
900th update: 5.579695425694808e-07
Parameter containing:
tensor([[0.9988]], requires_grad=True)
Parameter containing:
tensor([0.5002], requires_grad=True)
|
MIT
|
[Preliminary] 00 Linear regression with pytorch.ipynb
|
Junyoungpark/2021-lg-AI-camp
|
Shall we try a different optimizer as well? What result do we get if we optimize with Adam, which we covered in class?
|
def run_adam(n_steps: int = 1000,
report_every: int = 100,
verbose=True):
lin_model = nn.Linear(in_features=1, out_features=1)
opt = torch.optim.Adam(params=lin_model.parameters(), lr=0.01)
adam_losses = []
for i in range(n_steps):
ys_hat = lin_model(xs)
loss = criteria(ys_hat, ys)
opt.zero_grad()
loss.backward()
opt.step()
if i % report_every == 0:
if verbose:
print('\n')
print("{}th update: {}".format(i,loss))
for p in lin_model.parameters():
print(p)
adam_losses.append(loss.log10().detach().numpy())
return adam_losses
_ = run_adam()
|
0th update: 1.2440284490585327
Parameter containing:
tensor([[0.4118]], requires_grad=True)
Parameter containing:
tensor([-0.4825], requires_grad=True)
100th update: 0.05024972930550575
Parameter containing:
tensor([[1.0383]], requires_grad=True)
Parameter containing:
tensor([0.2774], requires_grad=True)
200th update: 0.0004788984660990536
Parameter containing:
tensor([[1.0159]], requires_grad=True)
Parameter containing:
tensor([0.4793], requires_grad=True)
300th update: 4.6914931317587616e-07
Parameter containing:
tensor([[1.0005]], requires_grad=True)
Parameter containing:
tensor([0.4994], requires_grad=True)
400th update: 3.263671667988466e-12
Parameter containing:
tensor([[1.0000]], requires_grad=True)
Parameter containing:
tensor([0.5000], requires_grad=True)
500th update: 4.133082697160666e-14
Parameter containing:
tensor([[1.0000]], requires_grad=True)
Parameter containing:
tensor([0.5000], requires_grad=True)
600th update: 4.133082697160666e-14
Parameter containing:
tensor([[1.0000]], requires_grad=True)
Parameter containing:
tensor([0.5000], requires_grad=True)
700th update: 4.133082697160666e-14
Parameter containing:
tensor([[1.0000]], requires_grad=True)
Parameter containing:
tensor([0.5000], requires_grad=True)
800th update: 4.133082697160666e-14
Parameter containing:
tensor([[1.0000]], requires_grad=True)
Parameter containing:
tensor([0.5000], requires_grad=True)
900th update: 4.133082697160666e-14
Parameter containing:
tensor([[1.0000]], requires_grad=True)
Parameter containing:
tensor([0.5000], requires_grad=True)
|
MIT
|
[Preliminary] 00 Linear regression with pytorch.ipynb
|
Junyoungpark/2021-lg-AI-camp
|
Shall we compare in a bit more detail? Unless told otherwise, `nn.Linear` and many other `pytorch` modules initialize their internal parameters to random values - and they do it __well!__ > We did not cover what "well" means in class, but it is certainly one of the important ingredients that make modern deep learning work. These techniques are called parameter initialization, and most `pytorch` modules are coded so that their parameters are initialized in a way known to generally work well for that type of module. As a result, the initial parameter values differ every time a module is created. This time, for a fairer comparison, let's repeat the earlier experiment several times and check whether Adam is also better on average.
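If you want to control the initialization yourself, `torch.nn.init` provides the standard schemes; a small sketch (not from the original notebook):

import torch
from torch import nn

lin = nn.Linear(10, 1)
nn.init.xavier_uniform_(lin.weight)  # Glorot/Xavier uniform initialization
nn.init.zeros_(lin.bias)             # zero the bias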
|
import numpy as np               # assumed already imported earlier in the notebook
import matplotlib.pyplot as plt  # assumed already imported earlier in the notebook

sgd_losses = [run_sgd(verbose=False) for _ in range(50)]
sgd_losses = np.stack(sgd_losses)
sgd_loss_mean = np.mean(sgd_losses, axis=0)
sgd_loss_std = np.std(sgd_losses, axis=0)
adam_losses = [run_adam(verbose=False) for _ in range(50)]
adam_losses = np.stack(adam_losses)
adam_loss_mean = np.mean(adam_losses, axis=0)
adam_loss_std = np.std(adam_losses, axis=0)
fig, ax = plt.subplots(1,1, figsize=(10,5))
ax.grid()
ax.fill_between(x=range(sgd_loss_mean.shape[0]),
y1=sgd_loss_mean + sgd_loss_std,
y2=sgd_loss_mean - sgd_loss_std,
alpha=0.3)
ax.plot(sgd_loss_mean, label='SGD')
ax.fill_between(x=range(adam_loss_mean.shape[0]),
y1=adam_loss_mean + adam_loss_std,
y2=adam_loss_mean - adam_loss_std,
alpha=0.3)
ax.plot(adam_loss_mean, label='Adam')
ax.legend()
|
_____no_output_____
|
MIT
|
[Preliminary] 00 Linear regression with pytorch.ipynb
|
Junyoungpark/2021-lg-AI-camp
|
Analyzing IMDB Data in Keras
|
# Imports
import numpy as np
import keras
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.preprocessing.text import Tokenizer
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed(42)
|
Using TensorFlow backend.
|
MIT
|
4. Deep Learning/IMDB_In_Keras.ipynb
|
Arwa-Ibrahim/ML_Nano_Projects
|
1. Loading the data. This dataset comes preloaded with Keras, so one simple command will get us training and testing data. There is a parameter for how many words we want to look at. We've set it at 1000, but feel free to experiment.
|
# Loading the data (it's preloaded in Keras)
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=1000)
print(x_train.shape)
print(x_test.shape)
|
(25000,)
(25000,)
|
MIT
|
4. Deep Learning/IMDB_In_Keras.ipynb
|
Arwa-Ibrahim/ML_Nano_Projects
|
2. Examining the data. Notice that the data has already been pre-processed: all the words are mapped to numbers, and each review comes in as a vector of the words it contains. For example, if the word 'the' is the first one in our dictionary and a review contains the word 'the', then there is a 1 in the corresponding vector. The output comes as a vector of 1's and 0's, where 1 is a positive sentiment for the review and 0 is negative.
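If you want to see the words behind the numbers, Keras ships the word index used for the encoding; a small sketch (my addition, not part of the original notebook - note the indices are offset by 3, since 0/1/2 are reserved for padding/start/unknown):

from keras.datasets import imdb

word_index = imdb.get_word_index()                 # word -> integer rank
inv_index = {v: k for k, v in word_index.items()}  # integer -> word
decoded = ' '.join(inv_index.get(i - 3, '?') for i in x_train[0])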
|
print(x_train[0])
print(y_train[0])
|
[1, 11, 2, 11, 4, 2, 745, 2, 299, 2, 590, 2, 2, 37, 47, 27, 2, 2, 2, 19, 6, 2, 15, 2, 2, 17, 2, 723, 2, 2, 757, 46, 4, 232, 2, 39, 107, 2, 11, 4, 2, 198, 24, 4, 2, 133, 4, 107, 7, 98, 413, 2, 2, 11, 35, 781, 8, 169, 4, 2, 5, 259, 334, 2, 8, 4, 2, 10, 10, 17, 16, 2, 46, 34, 101, 612, 7, 84, 18, 49, 282, 167, 2, 2, 122, 24, 2, 8, 177, 4, 392, 531, 19, 259, 15, 934, 40, 507, 39, 2, 260, 77, 8, 162, 2, 121, 4, 65, 304, 273, 13, 70, 2, 2, 8, 15, 745, 2, 5, 27, 322, 2, 2, 2, 70, 30, 2, 88, 17, 6, 2, 2, 29, 100, 30, 2, 50, 21, 18, 148, 15, 26, 2, 12, 152, 157, 10, 10, 21, 19, 2, 46, 50, 5, 4, 2, 112, 828, 6, 2, 4, 162, 2, 2, 517, 6, 2, 7, 4, 2, 2, 4, 351, 232, 385, 125, 6, 2, 39, 2, 5, 29, 69, 2, 2, 6, 162, 2, 2, 232, 256, 34, 718, 2, 2, 8, 6, 226, 762, 7, 2, 2, 5, 517, 2, 6, 2, 7, 4, 351, 232, 37, 9, 2, 8, 123, 2, 2, 2, 188, 2, 857, 11, 4, 86, 22, 121, 29, 2, 2, 10, 10, 2, 61, 514, 11, 14, 22, 9, 2, 2, 14, 575, 208, 159, 2, 16, 2, 5, 187, 15, 58, 29, 93, 6, 2, 7, 395, 62, 30, 2, 493, 37, 26, 66, 2, 29, 299, 4, 172, 243, 7, 217, 11, 4, 2, 2, 22, 4, 2, 2, 13, 70, 243, 7, 2, 19, 2, 11, 15, 236, 2, 136, 121, 29, 5, 2, 26, 112, 2, 180, 34, 2, 2, 5, 320, 4, 162, 2, 568, 319, 4, 2, 2, 2, 269, 8, 401, 56, 19, 2, 16, 142, 334, 88, 146, 243, 7, 11, 2, 2, 150, 11, 4, 2, 2, 10, 10, 2, 828, 4, 206, 170, 33, 6, 52, 2, 225, 55, 117, 180, 58, 11, 14, 22, 48, 50, 16, 101, 329, 12, 62, 30, 35, 2, 2, 22, 2, 11, 4, 2, 2, 35, 735, 18, 118, 204, 881, 15, 291, 10, 10, 2, 82, 93, 52, 361, 7, 4, 162, 2, 2, 5, 4, 785, 2, 49, 7, 4, 172, 2, 7, 665, 26, 303, 343, 11, 23, 4, 2, 11, 192, 2, 11, 4, 2, 9, 44, 84, 24, 2, 54, 36, 66, 144, 11, 68, 205, 118, 602, 55, 729, 174, 8, 23, 4, 2, 10, 10, 2, 11, 4, 2, 127, 316, 2, 37, 16, 2, 19, 12, 150, 138, 426, 2, 2, 79, 49, 542, 162, 2, 2, 84, 11, 4, 392, 555]
1
|
MIT
|
4. Deep Learning/IMDB_In_Keras.ipynb
|
Arwa-Ibrahim/ML_Nano_Projects
|
3. One-hot encoding the output. Here, we'll turn the input vectors into (0,1)-vectors. For example, if the pre-processed vector contains the number 14, then in the processed vector, the 14th entry will be 1.
|
# One-hot encoding the output into vector mode, each of length 1000
tokenizer = Tokenizer(num_words=1000)
x_train = tokenizer.sequences_to_matrix(x_train, mode='binary')
x_test = tokenizer.sequences_to_matrix(x_test, mode='binary')
print(x_train[0])
print(x_train.shape)
x_train[1]
|
(25000, 1000)
|
MIT
|
4. Deep Learning/IMDB_In_Keras.ipynb
|
Arwa-Ibrahim/ML_Nano_Projects
|
And we'll also one-hot encode the output.
|
# One-hot encoding the output
num_classes = 2
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
print(y_train.shape)
print(y_test.shape)
|
(25000, 2)
(25000, 2)
|
MIT
|
4. Deep Learning/IMDB_In_Keras.ipynb
|
Arwa-Ibrahim/ML_Nano_Projects
|
4. Building the model architecture. Build a model here using sequential. Feel free to experiment with different layers and sizes! Also, experiment with adding dropout to reduce overfitting.
|
# TODO: Build the model architecture
model = Sequential()
model.add(Dense(128, input_dim = x_train.shape[1]))
model.add(Activation('relu'))
model.add(Dense(2))
model.add(Activation('softmax'))
# TODO: Compile the model using a loss function and an optimizer.
model.compile(loss = 'categorical_crossentropy', optimizer = 'Adam', metrics = ['accuracy'])
|
_____no_output_____
|
MIT
|
4. Deep Learning/IMDB_In_Keras.ipynb
|
Arwa-Ibrahim/ML_Nano_Projects
|
5. Training the model. Run the model here. Experiment with different batch sizes and numbers of epochs!
|
# TODO: Run the model. Feel free to experiment with different batch sizes and number of epochs.
model.fit(x_train, y_train, batch_size=10000, verbose=0)
|
_____no_output_____
|
MIT
|
4. Deep Learning/IMDB_In_Keras.ipynb
|
Arwa-Ibrahim/ML_Nano_Projects
|
6. Evaluating the model. This will give you the accuracy of the model, as evaluated on the testing set. Can you get something over 85%?
|
score = model.evaluate(x_test, y_test, verbose=0)
print("Accuracy: ", score[1])
|
Accuracy: 0.85832
|
MIT
|
4. Deep Learning/IMDB_In_Keras.ipynb
|
Arwa-Ibrahim/ML_Nano_Projects
|
Graded Assessment. In this assessment you will write a full end-to-end training process using gluon and MXNet. We will train the LeNet-5 classifier network on the MNIST dataset. The network will be defined for you, but you have to fill in code to prepare the dataset, train the network, and evaluate its performance on a held-out dataset.
|
#Check CUDA version
!nvcc --version
#Install appropriate MXNet version
'''
For eg if CUDA version is 10.0 choose mxnet cu100mkl
where cu adds CUDA GPU support
and mkl adds Intel CPU Math Kernel Library support
'''
!pip install mxnet-cu101mkl gluoncv
from pathlib import Path
from mxnet import gluon, metric, autograd, init, nd
import os
import mxnet as mx
#I downloaded the files from Coursera and hosted on my gdrive:
from google.colab import drive
drive.mount('/content/drive')
# M5_DATA = Path(os.getenv('DATA_DIR', '../../data'), 'module_5')
M5_DATA = Path('/content/drive/My Drive/CourseraWork/MXNetAWS/data/module_5')
M5_IMAGES = Path(M5_DATA, 'images')
|
_____no_output_____
|
MIT
|
Module_5_LeNet_on_MNIST (1).ipynb
|
vigneshb-it19/AWS-Computer-Vision-GluonCV
|
--- Question 1: Prepare the data and construct the dataloader.* First, get the MNIST dataset from `gluon.data.vision.datasets`. Use `MNIST`.* Don't forget the ToTensor and normalize transformations; use `0.13` and `0.31` as the mean and standard deviation respectively (a sketch using gluon's built-in transforms follows below).* Construct the dataloader with the batch size provided. Ensure that the train_dataloader is shuffled.**CAUTION!**: Although the notebook interface has internet connectivity, the **autograders are not permitted to access the internet**. We have already downloaded the correct models and data for you to use so you don't need access to the internet. Set the `root` parameter to `M5_IMAGES` when using a preset dataset. Usually, in the real world, you have internet access, so setting the `root` parameter isn't required (and it's set to `~/.mxnet` by default).
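The hints point at gluon's built-in transforms; for reference, the normalization below could equivalently be expressed like this (a sketch - the graded cell that follows implements the same thing by hand):

from mxnet.gluon.data.vision import transforms

transform_fn = transforms.Compose([
    transforms.ToTensor(),             # HWC uint8 [0,255] -> CHW float32 [0,1]
    transforms.Normalize(0.13, 0.31),  # (x - mean) / std
])
# applied with: dataset.transform_first(transform_fn)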
|
import os
from pathlib import Path
from mxnet.gluon.data.vision import transforms
import numpy as np
def get_mnist_data(batch=128):
"""
Should construct a dataloader with the MNIST Dataset with the necessary transforms applied.
:param batch: batch size for the DataLoader.
:type batch: int
:return: a tuple of the training and validation DataLoaders
:rtype: (gluon.data.DataLoader, gluon.data.DataLoader)
"""
def transformer(data, label):
data = data.flatten().expand_dims(0).astype(np.float32)/255
data = (data - 0.13) / 0.31  # normalize with mean 0.13 and std 0.31 (the parentheses matter here)
label = label.astype(np.float32)
return data, label
train_dataset = gluon.data.vision.datasets.MNIST(root=M5_IMAGES, train=True, transform=transformer)
validation_dataset = gluon.data.vision.datasets.MNIST(root=M5_IMAGES, train=False, transform=transformer)
train_dataloader = gluon.data.DataLoader(train_dataset, batch_size=batch, last_batch='keep',shuffle=True)
validation_dataloader = gluon.data.DataLoader(validation_dataset, batch_size=batch, last_batch='keep')
return train_dataloader, validation_dataloader
t, v = get_mnist_data()
assert isinstance(t, gluon.data.DataLoader)
assert isinstance(v, gluon.data.DataLoader)
d, l = next(iter(t))
assert d.shape == (128, 1, 28, 28) #check Channel First and Batch Size
assert l.shape == (128,)
assert nd.max(d).asscalar() <= 2.9 # check for normalization
assert nd.min(d).asscalar() >= -0.5 # check for normalization
|
_____no_output_____
|
MIT
|
Module_5_LeNet_on_MNIST (1).ipynb
|
vigneshb-it19/AWS-Computer-Vision-GluonCV
|