Dataset schema (one row per column: name, dtype, and observed range, given as string lengths, numeric min .. max, or number of distinct classes):

column                                      dtype           range / classes
hexsha                                      stringlengths   40 .. 40
size                                        int64           6 .. 14.9M
ext                                         stringclasses   1 value
lang                                        stringclasses   1 value
max_stars_repo_path                         stringlengths   6 .. 260
max_stars_repo_name                         stringlengths   6 .. 119
max_stars_repo_head_hexsha                  stringlengths   40 .. 41
max_stars_repo_licenses                     list            -
max_stars_count                             int64           1 .. 191k
max_stars_repo_stars_event_min_datetime     stringlengths   24 .. 24
max_stars_repo_stars_event_max_datetime     stringlengths   24 .. 24
max_issues_repo_path                        stringlengths   6 .. 260
max_issues_repo_name                        stringlengths   6 .. 119
max_issues_repo_head_hexsha                 stringlengths   40 .. 41
max_issues_repo_licenses                    list            -
max_issues_count                            int64           1 .. 67k
max_issues_repo_issues_event_min_datetime   stringlengths   24 .. 24
max_issues_repo_issues_event_max_datetime   stringlengths   24 .. 24
max_forks_repo_path                         stringlengths   6 .. 260
max_forks_repo_name                         stringlengths   6 .. 119
max_forks_repo_head_hexsha                  stringlengths   40 .. 41
max_forks_repo_licenses                     list            -
max_forks_count                             int64           1 .. 105k
max_forks_repo_forks_event_min_datetime     stringlengths   24 .. 24
max_forks_repo_forks_event_max_datetime     stringlengths   24 .. 24
avg_line_length                             float64         2 .. 1.04M
max_line_length                             int64           2 .. 11.2M
alphanum_fraction                           float64         0 .. 1
cells                                       list            -
cell_types                                  list            -
cell_type_groups                            list            -
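The last three scalar columns (avg_line_length, max_line_length, alphanum_fraction) are plain statistics over a file's text. Below is a minimal sketch of how such values could be recomputed from a record's source string; it assumes alphanum_fraction counts alphanumeric characters over all characters, and the dataset's exact definition (for example, whether newlines are included in the denominator) is an assumption here, not documented in this dump.

    def content_stats(source: str) -> dict:
        # Per-line lengths; guard against an empty file.
        lines = source.splitlines() or [""]
        lengths = [len(line) for line in lines]
        # Alphanumeric characters counted over the whole text
        # (assumption: newlines stay in the denominator).
        alnum = sum(ch.isalnum() for ch in source)
        return {
            "avg_line_length": sum(lengths) / len(lengths),
            "max_line_length": max(lengths),
            "alphanum_fraction": alnum / max(len(source), 1),
        }

    print(content_stats("import os\nprint('hello')\n"))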
Row 1
  hexsha: cb950994d7256de165e16ac5194eecdcd87d2f52
  size: 1,044,119
  ext: ipynb
  lang: Jupyter Notebook
  max_stars_repo_path: overview.ipynb
  max_stars_repo_name: ktetz/the-machine
  max_stars_repo_head_hexsha: 4b15ad0cb75e4c76dd8e24a0bd2234a76be4b7a0
  max_stars_repo_licenses: [ "MIT" ]
  max_stars_count: null
  max_stars_repo_stars_event_min_datetime: null
  max_stars_repo_stars_event_max_datetime: null
  max_issues_repo_path: overview.ipynb
  max_issues_repo_name: ktetz/the-machine
  max_issues_repo_head_hexsha: 4b15ad0cb75e4c76dd8e24a0bd2234a76be4b7a0
  max_issues_repo_licenses: [ "MIT" ]
  max_issues_count: null
  max_issues_repo_issues_event_min_datetime: null
  max_issues_repo_issues_event_max_datetime: null
  max_forks_repo_path: overview.ipynb
  max_forks_repo_name: ktetz/the-machine
  max_forks_repo_head_hexsha: 4b15ad0cb75e4c76dd8e24a0bd2234a76be4b7a0
  max_forks_repo_licenses: [ "MIT" ]
  max_forks_count: null
  max_forks_repo_forks_event_min_datetime: null
  max_forks_repo_forks_event_max_datetime: null
  avg_line_length: 7,457.992857
  max_line_length: 1,041,492
  alphanum_fraction: 0.956417
[ [ [ "import os, sys\nimport pandas as pd\nfrom IPython.display import Image", "_____no_output_____" ] ], [ [ "**[scikit-learn estimators](http://scikit-learn.org/stable/tutorial/machine_learning_map/index.html)**", "_____no_output_____" ] ], [ [ "Image(data='http://scikit-learn.org/stable/_static/ml_map.png') ", "_____no_output_____" ] ], [ [ "# frameworks", "_____no_output_____" ], [ "## not python", "_____no_output_____" ], [ "## python", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
Row 2
  hexsha: cb951b787d839372bc7c4225585ce8ec4c4df0d4
  size: 1,022,808
  ext: ipynb
  lang: Jupyter Notebook
  max_stars_repo_path: Predict Beer .ipynb
  max_stars_repo_name: MOAZ47/Predict-the-score-of-Beer
  max_stars_repo_head_hexsha: d78b5a4ab121b351ea4c76b49e38967e44e156a8
  max_stars_repo_licenses: [ "MIT" ]
  max_stars_count: null
  max_stars_repo_stars_event_min_datetime: null
  max_stars_repo_stars_event_max_datetime: null
  max_issues_repo_path: Predict Beer .ipynb
  max_issues_repo_name: MOAZ47/Predict-the-score-of-Beer
  max_issues_repo_head_hexsha: d78b5a4ab121b351ea4c76b49e38967e44e156a8
  max_issues_repo_licenses: [ "MIT" ]
  max_issues_count: null
  max_issues_repo_issues_event_min_datetime: null
  max_issues_repo_issues_event_max_datetime: null
  max_forks_repo_path: Predict Beer .ipynb
  max_forks_repo_name: MOAZ47/Predict-the-score-of-Beer
  max_forks_repo_head_hexsha: d78b5a4ab121b351ea4c76b49e38967e44e156a8
  max_forks_repo_licenses: [ "MIT" ]
  max_forks_count: null
  max_forks_repo_forks_event_min_datetime: null
  max_forks_repo_forks_event_max_datetime: null
  avg_line_length: 469.824529
  max_line_length: 799,748
  alphanum_fraction: 0.924294
  cells (serialized notebook JSON, preserved verbatim; the value is cut off at the end of this dump):
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport seaborn as sns\nimport nltk", "_____no_output_____" ], [ "train = pd.read_csv(\"C:\\\\Users\\\\Moaz\\\\Desktop\\\\moaz\\\\Jupyter Python NB\\\\Machine Hack Practice\\\\Beer Train Data Set.csv\")\ntest = pd.read_csv(\"C:\\\\Users\\\\Moaz\\\\Desktop\\\\moaz\\\\Jupyter Python NB\\\\Machine Hack Practice\\\\Beer Test Data Set.csv\")", "_____no_output_____" ], [ "train.head()", "_____no_output_____" ], [ "train.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 185643 entries, 0 to 185642\nData columns (total 10 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 ABV 170513 non-null float64\n 1 Brewing Company 185643 non-null int64 \n 2 Food Paring 185643 non-null object \n 3 Glassware Used 185643 non-null object \n 4 Beer Name 185643 non-null int64 \n 5 Ratings 185643 non-null object \n 6 Style Name 185643 non-null object \n 7 Cellar Temperature 178862 non-null object \n 8 Serving Temperature 185450 non-null object \n 9 Score 185643 non-null float64\ndtypes: float64(2), int64(2), object(6)\nmemory usage: 14.2+ MB\n" ], [ "train.isnull().sum()", "_____no_output_____" ], [ "train[[\"Minimum Temperature\", \"Maximum Temperature\"]]=train[\"Cellar Temperature\"].str.split(\"-\", expand=True, n=1).astype(float)", "_____no_output_____" ], [ "train[[\"Minimum Serving Temperature\", \"Maximum Serving Temperature\"]]=train[\"Serving Temperature\"].str.split(\"-\", expand=True, n=1).astype(float)", "_____no_output_____" ], [ "# Filling empty vaues with MEAN value\navg_abv = train[\"ABV\"].astype(\"float\").mean(axis=0)\ntrain[\"ABV\"].replace(np.nan, avg_abv, inplace=True)\n\navg_min_temp = train[\"Minimum Temperature\"].astype(\"float\").mean(axis=0)\ntrain[\"Minimum Temperature\"].replace(np.nan, avg_min_temp, inplace=True)\n\navg_min_temp = train[\"Maximum Temperature\"].astype(\"float\").mean(axis=0)\ntrain[\"Maximum Temperature\"].replace(np.nan, avg_min_temp, inplace=True)\n\navg_minserv_temp = train[\"Minimum Serving Temperature\"].astype(\"float\").mean(axis=0)\ntrain[\"Minimum Serving Temperature\"].replace(np.nan, avg_minserv_temp, inplace=True)\n\navg_minserv_temp = train[\"Maximum Serving Temperature\"].astype(\"float\").mean(axis=0)\ntrain[\"Maximum Serving Temperature\"].replace(np.nan, avg_minserv_temp, inplace=True)", "_____no_output_____" ], [ "train.isnull().sum()", "_____no_output_____" ], [ "freq = nltk.FreqDist(train['Food Paring'])\nfor key,value in freq.items():\n print(str(key)+' : '+str(value))", "(Curried,Thai)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar,pungentGorgonzola,Limburger)Meat(Poultry,Fish,Shellfish,Salmon) : 25577\n(PanAsian)Cheese(earthyCamembert,Fontina,nuttyAsiago,Colby,Parmesan,tangyBrick,Edam,Feta)General(Salad)Meat(Poultry) : 12648\nMeat(Pork,Poultry) : 1280\n(Indian,LatinAmerican,PanAsian)General(Aperitif) : 247\nMeat(Poultry,Fish,Shellfish) : 2444\n(Italian,German)Cheese(nuttyAsiago,Colby,Parmesan)Meat(Fish,Shellfish,Salmon) : 816\nCheese(earthyCamembert,Fontina,nuttyAsiago,Colby,Parmesan)Meat(Pork,GrilledMeat) : 1301\nCheese(butteryBrie,Gouda,Havarti,Swiss)General(Chocolate,Digestive)Meat(Beef,SmokedMeat,Game,GrilledMeat) : 5125\n(Barbecue,Indian,LatinAmerican,Thai,PanAsian)Cheese(pepperyMontereyPepperJack)Meat(Shellfish) : 1064\n(Barbecue)Cheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger)Meat(Beef,GrilledMeat) : 
1229\n(Curried,Thai)Cheese(earthyCamembert,Fontina,nuttyAsiago,Colby,Parmesan,pungentGorgonzola,Limburger)General(Salad,Aperitif)Meat(Poultry,Fish,Shellfish) : 8982\n(Salad) : 4088\n(Barbecue,Curried,Indian,LatinAmerican,Italian,Thai,Chinese,Japanese,PanAsian,Mediterranean,MiddleEastern) : 731\nCheese(tangyBrick,Edam,Feta) : 2164\nCheese(sharpBlue,Cheddar)Meat(Beef,Poultry,Fish) : 5924\n(Barbecue,German)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar)Meat(Beef,SmokedMeat,Game,GrilledMeat,Salmon) : 394\nCheese(butteryBrie,Gouda,Havarti,Swiss)Meat(SmokedMeat,Salmon) : 1379\nCheese(pepperyMontereyPepperJack,pungentGorgonzola,Limburger)General(Salad) : 5523\n(German)General(Chocolate,Dessert)Meat(GrilledMeat) : 517\n(Curried,Indian)Cheese(nuttyAsiago,Colby,Parmesan,sharpBlue,Cheddar)Meat(Shellfish) : 1451\n(Barbecue,LatinAmerican)Cheese(earthyCamembert,Fontina)General(Chocolate,Dessert)Meat(Beef,Shellfish,SmokedMeat,GrilledMeat) : 1364\n(German) : 3979\n(Barbecue)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar)Meat(Beef,SmokedMeat,Game,GrilledMeat,Salmon) : 725\nCheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger)General(Digestive) : 682\n(Barbecue,Italian)Cheese(earthyCamembert,Fontina)Meat(Pork,Poultry,Fish,Shellfish) : 2160\n(Curried)Cheese(nuttyAsiago,Colby,Parmesan,pepperyMontereyPepperJack)Meat(Poultry,Fish) : 331\n(German)Cheese(tangyBrick,Edam,Feta)General(Salad)Meat(Poultry,Fish,Shellfish) : 3670\nCheese(butteryBrie,Gouda,Havarti,Swiss,pungentGorgonzola,Limburger)General(Chocolate)Meat(Beef) : 1283\n(Barbecue)Cheese(butteryBrie,Gouda,Havarti,Swiss)General(Chocolate,Dessert)Meat(Beef,SmokedMeat,GrilledMeat) : 11992\n(Thai)Cheese(tangyBrick,Edam,Feta)General(Salad,Aperitif)Meat(Fish) : 2909\n(LatinAmerican,German)Meat(Pork,Poultry) : 918\n(Barbecue)Cheese(butteryBrie,Gouda,Havarti,Swiss,earthyCamembert,Fontina)General(Chocolate,Dessert)Meat(Beef,Shellfish,SmokedMeat,Game,GrilledMeat) : 4911\nCheese(butteryBrie,Gouda,Havarti,Swiss,sharpBlue,Cheddar)General(Digestive)Meat(Beef,SmokedMeat,Game) : 774\n(Dessert,Aperitif,Digestive) : 1578\n(Barbecue)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar,pungentGorgonzola,Limburger)Meat(Game,GrilledMeat,Salmon) : 10147\n(German)Cheese(butteryBrie,Gouda,Havarti,Swiss,sharpBlue,Cheddar)General(Salad,Aperitif) : 2376\n(German)Cheese(butteryBrie,Gouda,Havarti,Swiss,earthyCamembert,Fontina)General(Chocolate)Meat(Game) : 977\n(Aperitif,Digestive)Meat(Game,Salmon) : 1419\n(Barbecue)Cheese(earthyCamembert,Fontina,nuttyAsiago,Colby,Parmesan)General(Chocolate)Meat(Beef) : 3958\n(Indian,MiddleEastern)Cheese(nuttyAsiago,Colby,Parmesan,tangyBrick,Edam,Feta)General(Salad,Aperitif)Meat(Fish,Shellfish) : 628\n(German)Cheese(earthyCamembert,Fontina)General(Chocolate)Meat(Game) : 944\nCheese(pepperyMontereyPepperJack,tangyBrick,Edam,Feta)General(Salad)Meat(Poultry,Fish,Shellfish) : 3265\n(Barbecue,LatinAmerican)General(Chocolate)Meat(SmokedMeat,GrilledMeat) : 1161\n(German)Cheese(butteryBrie,Gouda,Havarti,Swiss)General(Salad)Meat(Pork,Fish,Shellfish) : 1110\n(Dessert)Meat(Poultry) : 1381\n(German)General(Salad)Meat(Fish) : 1681\nNone,yet : 5417\n(Mediterranean)Cheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger)Meat(Pork,Poultry) : 1987\n(Barbecue,Curried,Indian,LatinAmerican,Chinese)Cheese(sharpBlue,Cheddar)General(Aperitif,Digestive)Meat(Shellfish,Game) : 833\n(German)Cheese(pepperyMontereyPepperJack)General(Salad)Meat(Pork) : 1438\n(Italian,MiddleEastern)Cheese(pepperyMontereyPepperJack)General(Salad)Meat(Fish) : 
4234\n(Japanese,German)Cheese(pepperyMontereyPepperJack)General(Aperitif)Meat(Poultry,Fish) : 2955\n(LatinAmerican,German)Meat(Beef,SmokedMeat,GrilledMeat) : 590\n(Chocolate,Salad,Dessert,Aperitif) : 396\n(Barbecue)Cheese(earthyCamembert,Fontina)Meat(Beef,SmokedMeat,Game,GrilledMeat) : 788\n(German)General(Salad)Meat(Pork,Fish,Shellfish) : 1888\n(German)Cheese(butteryBrie,Gouda,Havarti,Swiss)General(Aperitif)Meat(Pork,Poultry,Fish,Shellfish) : 431\nCheese(earthyCamembert,Fontina,sharpBlue,Cheddar)Meat(GrilledMeat) : 328\n(Curried,Indian,Thai,Chinese,Japanese,PanAsian)Cheese(sharpBlue,Cheddar) : 1778\nCheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger)General(Dessert,Digestive) : 1322\n(LatinAmerican)Meat(Beef,Poultry) : 608\n(German)Meat(SmokedMeat,Game,GrilledMeat) : 838\nCheese(nuttyAsiago,Colby,Parmesan)General(Digestive) : 910\n(Barbecue)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar,tangyBrick,Edam,Feta)General(Salad)Meat(Pork,Poultry,Fish,Shellfish) : 507\n(Indian,Mediterranean,MiddleEastern)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar)Meat(Fish,Shellfish) : 2176\nCheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger,tangyBrick,Edam,Feta)General(Salad) : 1175\n(German)Cheese(earthyCamembert,Fontina)Meat(SmokedMeat,Game,GrilledMeat) : 864\nCheese(earthyCamembert,Fontina)General(Chocolate)Meat(GrilledMeat) : 266\nCheese(sharpBlue,Cheddar,tangyBrick,Edam,Feta)General(Aperitif,Digestive) : 68\nCheese(pepperyMontereyPepperJack)General(Chocolate)Meat(GrilledMeat) : 713\n(Curried,German)Cheese(nuttyAsiago,Colby,Parmesan)General(Digestive)Meat(Salmon) : 917\n(German)Cheese(butteryBrie,Gouda,Havarti,Swiss,sharpBlue,Cheddar)General(Dessert,Digestive) : 75\nCheese(earthyCamembert,Fontina)General(Aperitif) : 660\n(German)General(Salad)Meat(Poultry,Fish) : 161\n(Japanese) : 58\n(German)Cheese(sharpBlue,Cheddar)General(Salad)Meat(Pork) : 195\nCheese(pungentGorgonzola,Limburger,tangyBrick,Edam,Feta)General(Digestive)Meat(Shellfish,Game,GrilledMeat) : 219\n(Barbecue,LatinAmerican)Cheese(pepperyMontereyPepperJack)Meat(Fish,SmokedMeat) : 365\n(Salad)Meat(Poultry,Game) : 240\n(Curried,Thai,PanAsian)Cheese(sharpBlue,Cheddar)Meat(Game,GrilledMeat) : 334\nCheese(butteryBrie,Gouda,Havarti,Swiss,pungentGorgonzola,Limburger)General(Dessert,Digestive)Meat(Game) : 167\nCheese(butteryBrie,Gouda,Havarti,Swiss,sharpBlue,Cheddar,pungentGorgonzola,Limburger) : 104\n(Aperitif)Meat(Fish,Shellfish,Salmon) : 51\n(Thai,Chinese,Japanese,PanAsian)Meat(Pork,Poultry,Fish,Shellfish) : 89\n(Barbecue,LatinAmerican)Cheese(nuttyAsiago,Colby,Parmesan)General(Chocolate)Meat(Salmon) : 179\nCheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger)General(Aperitif,Digestive) : 93\n(Dessert,Aperitif) : 18\n(Chocolate,Salad,Dessert,Apritif) : 1\n" ], [ "train['Food Paring'] = train['Food Paring'].replace(\"(Curried,Thai)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar,pungentGorgonzola,Limburger)Meat(Poultry,Fish,Shellfish,Salmon)\" , \"Thai, Cheese, Meat\" )\ntrain['Food Paring'] = train['Food Paring'].replace(\"(PanAsian)Cheese(earthyCamembert,Fontina,nuttyAsiago,Colby,Parmesan,tangyBrick,Edam,Feta)General(Salad)Meat(Poultry)\" , \"Pan-Asian, Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Meat(Pork,Poultry)\" , \"Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Indian,LatinAmerican,PanAsian)General(Aperitif)\" , \"Indian, Latin-American, Pan-Asian, General Food\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Meat(Poultry,Fish,Shellfish)\" , \"Meat\")\ntrain['Food 
Paring'] = train['Food Paring'].replace(\"(Italian,German)Cheese(nuttyAsiago,Colby,Parmesan)Meat(Fish,Shellfish,Salmon)\" , \"Italian, German, Cheese, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(earthyCamembert,Fontina,nuttyAsiago,Colby,Parmesan)Meat(Pork,GrilledMeat)\" , \"Cheese, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(butteryBrie,Gouda,Havarti,Swiss)General(Chocolate,Digestive)Meat(Beef,SmokedMeat,Game,GrilledMeat)\" , \"Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Barbecue,Indian,LatinAmerican,Thai,PanAsian)Cheese(pepperyMontereyPepperJack)Meat(Shellfish)\" , \"Barbecue, Indian, Latin-American, Thai, Pan-Asian, Cheese, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Barbecue)Cheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger)Meat(Beef,GrilledMeat)\" , \"Barbecue, Cheese, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Curried,Thai)Cheese(earthyCamembert,Fontina,nuttyAsiago,Colby,Parmesan,pungentGorgonzola,Limburger)General(Salad,Aperitif)Meat(Poultry,Fish,Shellfish)\" , \"Thai, Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Salad)\" , \"General Food\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Barbecue,Curried,Indian,LatinAmerican,Italian,Thai,Chinese,Japanese,PanAsian,Mediterranean,MiddleEastern)\" , \"Barbecue, Indian, Latin-American, Italian, Thai, Japanese, Pan-Asian, Mediterranean, Middle-East\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(tangyBrick,Edam,Feta)\" , \"Cheese\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(sharpBlue,Cheddar)Meat(Beef,Poultry,Fish)\" , \"Cheese\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Barbecue,German)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar)Meat(Beef,SmokedMeat,Game,GrilledMeat,Salmon)\" , \"Barbecue, German, Cheese, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(butteryBrie,Gouda,Havarti,Swiss)Meat(SmokedMeat,Salmon)\" , \"Cheese, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(pepperyMontereyPepperJack,pungentGorgonzola,Limburger)General(Salad)\" , \"Cheese, General Food\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(German)General(Chocolate,Dessert)Meat(GrilledMeat)\" , \"German, General Food\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Curried,Indian)Cheese(nuttyAsiago,Colby,Parmesan,sharpBlue,Cheddar)Meat(Shellfish)\" , \"Indian, Cheese, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Barbecue,LatinAmerican)Cheese(earthyCamembert,Fontina)General(Chocolate,Dessert)Meat(Beef,Shellfish,SmokedMeat,GrilledMeat)\" , \"Barbecue, Latin-American, Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(German)\" , \"German\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Barbecue)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar)Meat(Beef,SmokedMeat,Game,GrilledMeat,Salmon)\" , \"Barbecue, Cheese, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger)General(Digestive)\" , \"Cheese, General Food\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Barbecue,Italian)Cheese(earthyCamembert,Fontina)Meat(Pork,Poultry,Fish,Shellfish)\" , \"Barbecue, Italian, Cheese, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Curried)Cheese(nuttyAsiago,Colby,Parmesan,pepperyMontereyPepperJack)Meat(Poultry,Fish)\" , 
\"Cheese, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(German)Cheese(tangyBrick,Edam,Feta)General(Salad)Meat(Poultry,Fish,Shellfish)\" , \"German, Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(butteryBrie,Gouda,Havarti,Swiss,pungentGorgonzola,Limburger)General(Chocolate)Meat(Beef)\" , \"Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Barbecue)Cheese(butteryBrie,Gouda,Havarti,Swiss)General(Chocolate,Dessert)Meat(Beef,SmokedMeat,GrilledMeat)\" , \"Barbecue, Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Thai)Cheese(tangyBrick,Edam,Feta)General(Salad,Aperitif)Meat(Fish)\" , \"Thai, Cheese, General Food, Meat\")\n\n", "_____no_output_____" ], [ "train['Food Paring'] = train['Food Paring'].replace(\"(LatinAmerican,German)Meat(Pork,Poultry)\" , \"Latin-American, German, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Barbecue)Cheese(butteryBrie,Gouda,Havarti,Swiss,earthyCamembert,Fontina)General(Chocolate,Dessert)Meat(Beef,Shellfish,SmokedMeat,Game,GrilledMeat)\" , \"Barbecue, Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(butteryBrie,Gouda,Havarti,Swiss,sharpBlue,Cheddar)General(Digestive)Meat(Beef,SmokedMeat,Game)\" , \"Cheese, General Food\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Dessert,Aperitif,Digestive)\" , \"Dessert\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Barbecue)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar,pungentGorgonzola,Limburger)Meat(Game,GrilledMeat,Salmon)\" , \"Barbecue, Cheese, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(German)Cheese(butteryBrie,Gouda,Havarti,Swiss,sharpBlue,Cheddar)General(Salad,Aperitif)\" , \"German, Cheese, General Food\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(German)Cheese(butteryBrie,Gouda,Havarti,Swiss,earthyCamembert,Fontina)General(Chocolate)Meat(Game)\" , \"German, Cheese, General Food\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Aperitif,Digestive)Meat(Game,Salmon)\" , \"Dessert, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Barbecue)Cheese(earthyCamembert,Fontina,nuttyAsiago,Colby,Parmesan)General(Chocolate)Meat(Beef)\" , \"Barbecue, Cheese, General Food\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Indian,MiddleEastern)Cheese(nuttyAsiago,Colby,Parmesan,tangyBrick,Edam,Feta)General(Salad,Aperitif)Meat(Fish,Shellfish)\" , \"Indian, Middle-East, Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(German)Cheese(earthyCamembert,Fontina)General(Chocolate)Meat(Game)\" , \"German, Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(pepperyMontereyPepperJack,tangyBrick,Edam,Feta)General(Salad)Meat(Poultry,Fish,Shellfish)\" , \"Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Barbecue,LatinAmerican)General(Chocolate)Meat(SmokedMeat,GrilledMeat)\" , \"Barbecue, Latin-American, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(German)Cheese(butteryBrie,Gouda,Havarti,Swiss)General(Salad)Meat(Pork,Fish,Shellfish)\" , \"German, Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Dessert)Meat(Poultry)\" , \"Dessert, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(German)General(Salad)Meat(Fish)\" , \"German, General Food, Meat\")\ntrain['Food Paring'] = 
train['Food Paring'].replace(\"None,yet\" , \"None yet\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Mediterranean)Cheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger)Meat(Pork,Poultry)\" , \"Mediterranean, Cheese, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Barbecue,Curried,Indian,LatinAmerican,Chinese)Cheese(sharpBlue,Cheddar)General(Aperitif,Digestive)Meat(Shellfish,Game)\" , \"Barbecue, Indian, Latin-American, Chinese, Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(German)Cheese(pepperyMontereyPepperJack)General(Salad)Meat(Pork)\" , \"German, Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Italian,MiddleEastern)Cheese(pepperyMontereyPepperJack)General(Salad)Meat(Fish)\" , \"Italian, Middle-East, Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Japanese,German)Cheese(pepperyMontereyPepperJack)General(Aperitif)Meat(Poultry,Fish)\" , \"Japanese, German, Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(LatinAmerican,German)Meat(Beef,SmokedMeat,GrilledMeat)\" , \"Latin-American, German, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Chocolate,Salad,Dessert,Aperitif)\" , \"Dessert\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Barbecue)Cheese(earthyCamembert,Fontina)Meat(Beef,SmokedMeat,Game,GrilledMeat)\" , \"Barbecue, Cheese, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(German)General(Salad)Meat(Pork,Fish,Shellfish)\" , \"German, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(German)Cheese(butteryBrie,Gouda,Havarti,Swiss)General(Aperitif)Meat(Pork,Poultry,Fish,Shellfish)\" , \"German, Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(earthyCamembert,Fontina,sharpBlue,Cheddar)Meat(GrilledMeat)\" , \"Cheese, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Curried,Indian,Thai,Chinese,Japanese,PanAsian)Cheese(sharpBlue,Cheddar)\" , \"Indian, Thai, Chinese, Japanese, Pan-Aisan, Cheese\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger)General(Dessert,Digestive)\" , \"Cheese, General Food\")\n\n", "_____no_output_____" ], [ "train['Food Paring'] = train['Food Paring'].replace(\"(LatinAmerican)Meat(Beef,Poultry)\" , \"Latin-American, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(German)Meat(SmokedMeat,Game,GrilledMeat)\" , \"German, Meat \")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(nuttyAsiago,Colby,Parmesan)General(Digestive)\" , \"Cheese, General Food\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Barbecue)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar,tangyBrick,Edam,Feta)General(Salad)Meat(Pork,Poultry,Fish,Shellfish)\" , \"Barbecue, Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Indian,Mediterranean,MiddleEastern)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar)Meat(Fish,Shellfish)\" , \"Indian, Mediterranean, Middle-East, Cheese, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger,tangyBrick,Edam,Feta)General(Salad)\" , \"Cheese, General\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(German)Cheese(earthyCamembert,Fontina)Meat(SmokedMeat,Game,GrilledMeat)\" , \"German, Cheese, Meat\")\ntrain['Food Paring'] = train['Food 
Paring'].replace(\"Cheese(earthyCamembert,Fontina)General(Chocolate)Meat(GrilledMeat)\" , \"Cheese, General Food\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(sharpBlue,Cheddar,tangyBrick,Edam,Feta)General(Aperitif,Digestive)\" , \"Cheese, General Food\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(pepperyMontereyPepperJack)General(Chocolate)Meat(GrilledMeat)\" , \"Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Curried,German)Cheese(nuttyAsiago,Colby,Parmesan)General(Digestive)Meat(Salmon)\" , \"German, Cheese, General Food\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(German)Cheese(butteryBrie,Gouda,Havarti,Swiss,sharpBlue,Cheddar)General(Dessert,Digestive)\" , \"German, Cheese, General Food\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(earthyCamembert,Fontina)General(Aperitif)\" , \"Cheese, General Food\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(German)General(Salad)Meat(Poultry,Fish)\" , \"German, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Japanese)\" , \"Japanese\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(German)Cheese(sharpBlue,Cheddar)General(Salad)Meat(Pork)\" , \"German, Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(pungentGorgonzola,Limburger,tangyBrick,Edam,Feta)General(Digestive)Meat(Shellfish,Game,GrilledMeat)\" , \"Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Barbecue,LatinAmerican)Cheese(pepperyMontereyPepperJack)Meat(Fish,SmokedMeat)\" , \"Barbecue, Latin-American, Cheese, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Salad)Meat(Poultry,Game)\" , \"General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Curried,Thai,PanAsian)Cheese(sharpBlue,Cheddar)Meat(Game,GrilledMeat)\" , \"Thai, Cheese, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(butteryBrie,Gouda,Havarti,Swiss,pungentGorgonzola,Limburger)General(Dessert,Digestive)Meat(Game)\" , \"Cheese, General Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(butteryBrie,Gouda,Havarti,Swiss,sharpBlue,Cheddar,pungentGorgonzola,Limburger)\" , \"Cheese\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Aperitif)Meat(Fish,Shellfish,Salmon)\" , \"Dessert, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Thai,Chinese,Japanese,PanAsian)Meat(Pork,Poultry,Fish,Shellfish)\" , \"Thai, Chinese, Japanese, Pan-Asian, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Barbecue,LatinAmerican)Cheese(nuttyAsiago,Colby,Parmesan)General(Chocolate)Meat(Salmon)\" , \"Barbecue, Latin-American, Geberal Food, Meat\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"Cheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger)General(Aperitif,Digestive)\" , \"Cheese, General Food\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Dessert,Aperitif)\" , \"Dessert\")\ntrain['Food Paring'] = train['Food Paring'].replace(\"(Chocolate,Salad,Dessert,Apritif)\" , \"Dessert\")\n", "_____no_output_____" ], [ "train['Food Paring'].nunique()", "_____no_output_____" ], [ "freq = nltk.FreqDist(train['Glassware Used'])\nfor key,value in freq.items():\n print(str(key)+' : '+str(value))", "PintGlass(orBecker,Nonic,Tumbler),Mug(orSeidel,Stein) : 91275\nPintGlass(orBecker,Nonic,Tumbler),PilsenerGlass(orPokal),Mug(orSeidel,Stein) : 5217\nPilsenerGlass(orPokal) : 
5758\nFlute,PilsenerGlass(orPokal),Mug(orSeidel,Stein) : 4689\nPintGlass(orBecker,Nonic,Tumbler),Snifter,OversizedWineGlass : 5807\nPintGlass(orBecker,Nonic,Tumbler),PilsenerGlass(orPokal) : 1560\nSnifter,Tulip,Goblet(orChalice),OversizedWineGlass : 1229\nPintGlass(orBecker,Nonic,Tumbler),Tulip,OversizedWineGlass : 8982\nMug(orSeidel,Stein),Stange(SlenderCylinder) : 394\nPintGlass(orBecker,Nonic,Tumbler),Snifter,Tulip : 1379\nFlute,Tulip,OversizedWineGlass : 5523\nFlute,WeizenGlass : 517\nFlute,PilsenerGlass(orPokal) : 4254\nPintGlass(orBecker,Nonic,Tumbler) : 3888\nWeizenGlass : 4748\nGoblet(orChalice) : 1283\nSnifter,Tulip,OversizedWineGlass : 15135\nSnifter,Tulip,Goblet(orChalice) : 774\nMug(orSeidel,Stein) : 918\nPintGlass(orBecker,Nonic,Tumbler),Goblet(orChalice) : 2376\nPilsenerGlass(orPokal),Mug(orSeidel,Stein) : 977\nTulip,OversizedWineGlass : 628\nFlute,PilsenerGlass(orPokal),Mug(orSeidel,Stein),Stange(SlenderCylinder) : 2722\nStange(SlenderCylinder),WeizenGlass : 1681\nSnifter,Goblet(orChalice) : 1987\nPintGlass(orBecker,Nonic,Tumbler),Stange(SlenderCylinder) : 1438\nFlute,Snifter,Tulip,Stange(SlenderCylinder) : 501\nStange(SlenderCylinder) : 2752\nPintGlass(orBecker,Nonic,Tumbler),Mug(orSeidel,Stein),OversizedWineGlass : 2255\nFlute,Snifter,Tulip : 594\nPintGlass(orBecker,Nonic,Tumbler),Snifter : 1322\nPintGlass(orBecker,Nonic,Tumbler),Snifter,Mug(orSeidel,Stein) : 910\nTulip,Goblet(orChalice),OversizedWineGlass : 1175\nNone,yet : 193\nFlute,Snifter,OversizedWineGlass : 75\nSnifter,OversizedWineGlass : 386\nFlute : 51\nPintGlass(orBecker,Nonic,Tumbler),Mug(orSeidel,Stein),WeizenGlass : 179\nFlute,Stange(SlenderCylinder) : 111\n" ], [ "train['Glassware Used'] = train['Glassware Used'].replace('PintGlass(orBecker,Nonic,Tumbler),Mug(orSeidel,Stein)','Pint Glass, Mug')\ntrain['Glassware Used'] = train['Glassware Used'].replace('PintGlass(orBecker,Nonic,Tumbler),PilsenerGlass(orPokal),Mug(orSeidel,Stein)','Pint Glass, Pilsener Glass, Mug')\ntrain['Glassware Used'] = train['Glassware Used'].replace('PilsenerGlass(orPokal)','Pilsener Glass')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Flute,PilsenerGlass(orPokal),Mug(orSeidel,Stein)','Flute, Pint Glass, Mug')\ntrain['Glassware Used'] = train['Glassware Used'].replace('PintGlass(orBecker,Nonic,Tumbler),Snifter,OversizedWineGlass','Pint Glass, Snifter, Over-sized Wine Glass')\ntrain['Glassware Used'] = train['Glassware Used'].replace('PintGlass(orBecker,Nonic,Tumbler),PilsenerGlass(orPokal)','Pint Glass, Pilsener Glass')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Snifter,Tulip,Goblet(orChalice),OversizedWineGlass','Snifter, Tulip, Goblet, Over-sized Wine Glass')\ntrain['Glassware Used'] = train['Glassware Used'].replace('PintGlass(orBecker,Nonic,Tumbler),Tulip,OversizedWineGlass','Pint Glass, Tulip, Over-sized Wine Glass')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Mug(orSeidel,Stein),Stange(SlenderCylinder)','Mug, Stange')\ntrain['Glassware Used'] = train['Glassware Used'].replace('PintGlass(orBecker,Nonic,Tumbler),Snifter,Tulip','Pint Glass, Nonic, Tumbler')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Flute,Tulip,OversizedWineGlass','Flute, Tulip, Over-sized Wine Glass')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Flute,WeizenGlass','Flute, Weizen Glass')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Flute,PilsenerGlass(orPokal)','Flute, Pilsener Glass')\ntrain['Glassware Used'] = train['Glassware 
Used'].replace('PintGlass(orBecker,Nonic,Tumbler)','Pint Glass')\ntrain['Glassware Used'] = train['Glassware Used'].replace('WeizenGlass','Weizen Glass')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Goblet(orChalice)','Goblet')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Snifter,Tulip,OversizedWineGlass','Snifter, Tulip, Over-sized Wine Glass')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Snifter,Tulip,Goblet(orChalice)','Snifter, Tulip, Goblet')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Mug(orSeidel,Stein)','Mug')\ntrain['Glassware Used'] = train['Glassware Used'].replace('PintGlass(orBecker,Nonic,Tumbler),Goblet(orChalice)','Pint Glass, Goblet')\ntrain['Glassware Used'] = train['Glassware Used'].replace('PilsenerGlass(orPokal),Mug(orSeidel,Stein)','Pilsener Glass, Mug')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Tulip,OversizedWineGlass','Tulip, Over-sized Wine Glass')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Flute,PilsenerGlass(orPokal),Mug(orSeidel,Stein),Stange(SlenderCylinder)','Flute, Mug, Stange')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Stange(SlenderCylinder),WeizenGlass','Stange, Weizen Glass')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Snifter,Goblet(orChalice)','Snifter, Goblet')\ntrain['Glassware Used'] = train['Glassware Used'].replace('PintGlass(orBecker,Nonic,Tumbler),Stange(SlenderCylinder)','Pint Glass, Stange')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Flute,Snifter,Tulip,Stange(SlenderCylinder)','Flute, Snifter, Tulip, Stange ')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Stange(SlenderCylinder)','Stange ')\ntrain['Glassware Used'] = train['Glassware Used'].replace('PintGlass(orBecker,Nonic,Tumbler),Mug(orSeidel,Stein),OversizedWineGlass','Pint Glass, Mug, Over-sized Wine Glass')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Flute,Snifter,Tulip','Flute, Snifter, Tulip ')\ntrain['Glassware Used'] = train['Glassware Used'].replace('PintGlass(orBecker,Nonic,Tumbler),Snifter','Pint Glass, Snifter ')\ntrain['Glassware Used'] = train['Glassware Used'].replace('PintGlass(orBecker,Nonic,Tumbler),Snifter,Mug(orSeidel,Stein)','Pint Glass, Snifter, Mug ')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Tulip,Goblet(orChalice),OversizedWineGlass','Tulip, Over-sized Wine Glass')\ntrain['Glassware Used'] = train['Glassware Used'].replace('None,yet','None yet ')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Flute,Snifter,OversizedWineGlass','Flute, Snifter, Over-sized Wine Glass ')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Snifter,OversizedWineGlass','Snifter, Over-sized Wine Glass ')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Flute','Flute ')\ntrain['Glassware Used'] = train['Glassware Used'].replace('PintGlass(orBecker,Nonic,Tumbler),Mug(orSeidel,Stein),WeizenGlass','Pint Glass, Mug, Weizen Glass ')\ntrain['Glassware Used'] = train['Glassware Used'].replace('Flute,Stange(SlenderCylinder)','Flute, Stange ')\n\n", "_____no_output_____" ], [ "train['Glassware Used'].nunique()", "_____no_output_____" ], [ "train['Style Name'].nunique()", "_____no_output_____" ], [ "from sklearn.preprocessing import LabelEncoder\nlabel_encoder = LabelEncoder()", "_____no_output_____" ], [ "train['Food Paring label']= label_encoder.fit_transform(train['Food Paring']) \ntrain['Glassware Used label']= label_encoder.fit_transform(train['Glassware Used']) 
\ntrain['Style Name label']= label_encoder.fit_transform(train['Style Name']) ", "_____no_output_____" ], [ "train['Ratings'] = pd.to_numeric(train['Ratings'],errors='coerce')\ntrain['Beer Name'] = train['Beer Name'].astype(float)\ntrain['Brewing Company'] = train['Brewing Company'].astype(float)", "_____no_output_____" ], [ "train.head()", "_____no_output_____" ], [ "train.dtypes", "_____no_output_____" ], [ "train1 = train[['ABV', 'Ratings', 'Minimum Temperature','Maximum Temperature','Minimum Serving Temperature','Maximum Serving Temperature', 'Food Paring label', 'Glassware Used label', 'Style Name label', 'Score']]\n", "_____no_output_____" ], [ "train1.isnull().sum()", "_____no_output_____" ], [ "# Replace empty values by mean rating values\navg_rating = train1[\"Ratings\"].astype(\"float\").mean(axis=0)\ntrain1[\"Ratings\"].replace(np.nan, avg_rating, inplace=True)", "_____no_output_____" ], [ "train1.isnull().sum()", "_____no_output_____" ], [ "sns.set(style=\"ticks\", color_codes=True)\nsns.pairplot(train1)", "_____no_output_____" ], [ "#A simple correlation plot usong seaborn. The below plot shows how the different variables correlate with each other\n\ncorr = train1.corr()\nfig, ax = plt.subplots(figsize=(10,10))\nax = sns.heatmap(\n corr, \n vmin=-1, vmax=1 , center=2,\n square=True,\n annot=True,\n linewidths=.5,\n cmap=\"YlGnBu\" )\n\n#Rotating labels on x axis\nax.set_xticklabels(\n ax.get_xticklabels(),\n rotation=55,\n horizontalalignment='right'\n)", "_____no_output_____" ] ], [ [ "## TEST SET", "_____no_output_____" ] ], [ [ "test.head()", "_____no_output_____" ], [ "test.isnull().sum()", "_____no_output_____" ], [ "test[[\"Minimum Temperature\", \"Maximum Temperature\"]] = test[\"Cellar Temperature\"].str.split(\"-\", expand=True, n=1).astype(float)\ntest[[\"Minimum Serving Temperature\", \"Maximum Serving Temperature\"]] = test[\"Serving Temperature\"].str.split(\"-\", expand=True, n=1).astype(float)", "_____no_output_____" ], [ "avg_abv1 = test[\"ABV\"].astype(\"float\").mean(axis=0)\ntest[\"ABV\"].replace(np.nan, avg_abv1, inplace=True)\n\navg_min_temp1 = test[\"Minimum Temperature\"].astype(\"float\").mean(axis=0)\ntest[\"Minimum Temperature\"].replace(np.nan, avg_min_temp1, inplace=True)\n\navg_max_temp1 = test[\"Maximum Temperature\"].astype(\"float\").mean(axis=0)\ntest[\"Maximum Temperature\"].replace(np.nan, avg_max_temp1, inplace=True)\n\navg_minserv_temp1 = test[\"Minimum Serving Temperature\"].astype(\"float\").mean(axis=0)\ntest[\"Minimum Serving Temperature\"].replace(np.nan, avg_minserv_temp1, inplace=True)\n\navg_maxserv_temp1 = test[\"Maximum Serving Temperature\"].astype(\"float\").mean(axis=0)\ntest[\"Maximum Serving Temperature\"].replace(np.nan, avg_maxserv_temp1, inplace=True)", "_____no_output_____" ], [ "freq = nltk.FreqDist(test['Food Paring'])\nfor key,value in freq.items():\n print(str(key)+' : '+str(value))", "(Curried,Thai)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar,pungentGorgonzola,Limburger)Meat(Poultry,Fish,Shellfish,Salmon) : 2842\n(Barbecue)Cheese(butteryBrie,Gouda,Havarti,Swiss)General(Chocolate,Dessert)Meat(Beef,SmokedMeat,GrilledMeat) : 1332\nCheese(earthyCamembert,Fontina)General(Aperitif) : 73\n(LatinAmerican,German)Meat(Pork,Poultry) : 102\n(Barbecue)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar,pungentGorgonzola,Limburger)Meat(Game,GrilledMeat,Salmon) : 1127\nCheese(butteryBrie,Gouda,Havarti,Swiss)General(Chocolate,Digestive)Meat(Beef,SmokedMeat,Game,GrilledMeat) : 570\nMeat(Poultry,Fish,Shellfish) : 
272\nCheese(pepperyMontereyPepperJack,pungentGorgonzola,Limburger)General(Salad) : 614\n(Dessert)Meat(Poultry) : 154\n(Curried,Thai)Cheese(earthyCamembert,Fontina,nuttyAsiago,Colby,Parmesan,pungentGorgonzola,Limburger)General(Salad,Aperitif)Meat(Poultry,Fish,Shellfish) : 998\n(Curried,Indian,Thai,Chinese,Japanese,PanAsian)Cheese(sharpBlue,Cheddar) : 198\n(PanAsian)Cheese(earthyCamembert,Fontina,nuttyAsiago,Colby,Parmesan,tangyBrick,Edam,Feta)General(Salad)Meat(Poultry) : 1405\n(Indian,Mediterranean,MiddleEastern)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar)Meat(Fish,Shellfish) : 242\nCheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger)General(Digestive) : 76\nCheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger)General(Dessert,Digestive) : 147\nCheese(sharpBlue,Cheddar)Meat(Beef,Poultry,Fish) : 658\n(Salad) : 454\n(Italian,German)Cheese(nuttyAsiago,Colby,Parmesan)Meat(Fish,Shellfish,Salmon) : 91\nCheese(pepperyMontereyPepperJack,tangyBrick,Edam,Feta)General(Salad)Meat(Poultry,Fish,Shellfish) : 363\n(German) : 442\n(Thai)Cheese(tangyBrick,Edam,Feta)General(Salad,Aperitif)Meat(Fish) : 323\n(Curried,Indian)Cheese(nuttyAsiago,Colby,Parmesan,sharpBlue,Cheddar)Meat(Shellfish) : 161\n(Barbecue)Cheese(butteryBrie,Gouda,Havarti,Swiss,earthyCamembert,Fontina)General(Chocolate,Dessert)Meat(Beef,Shellfish,SmokedMeat,Game,GrilledMeat) : 546\nCheese(pepperyMontereyPepperJack)General(Chocolate)Meat(GrilledMeat) : 79\n(German)Cheese(butteryBrie,Gouda,Havarti,Swiss,earthyCamembert,Fontina)General(Chocolate)Meat(Game) : 109\n(German)Cheese(earthyCamembert,Fontina)Meat(SmokedMeat,Game,GrilledMeat) : 96\n(Dessert,Aperitif,Digestive) : 175\n(Indian,LatinAmerican,PanAsian)General(Aperitif) : 27\nCheese(butteryBrie,Gouda,Havarti,Swiss,pungentGorgonzola,Limburger)General(Chocolate)Meat(Beef) : 142\nCheese(nuttyAsiago,Colby,Parmesan)General(Digestive) : 101\n(German)Cheese(butteryBrie,Gouda,Havarti,Swiss,sharpBlue,Cheddar)General(Salad,Aperitif) : 264\n(German)Cheese(tangyBrick,Edam,Feta)General(Salad)Meat(Poultry,Fish,Shellfish) : 408\n(Indian,MiddleEastern)Cheese(nuttyAsiago,Colby,Parmesan,tangyBrick,Edam,Feta)General(Salad,Aperitif)Meat(Fish,Shellfish) : 70\n(German)Cheese(earthyCamembert,Fontina)General(Chocolate)Meat(Game) : 105\n(Italian,MiddleEastern)Cheese(pepperyMontereyPepperJack)General(Salad)Meat(Fish) : 471\n(German)General(Salad)Meat(Pork,Fish,Shellfish) : 210\n(Barbecue)Cheese(earthyCamembert,Fontina,nuttyAsiago,Colby,Parmesan)General(Chocolate)Meat(Beef) : 440\n(Aperitif,Digestive)Meat(Game,Salmon) : 158\n(Barbecue,Italian)Cheese(earthyCamembert,Fontina)Meat(Pork,Poultry,Fish,Shellfish) : 240\n(Barbecue,Curried,Indian,LatinAmerican,Italian,Thai,Chinese,Japanese,PanAsian,Mediterranean,MiddleEastern) : 81\n(Barbecue)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar,tangyBrick,Edam,Feta)General(Salad)Meat(Pork,Poultry,Fish,Shellfish) : 56\n(German)Meat(SmokedMeat,Game,GrilledMeat) : 93\n(Japanese,German)Cheese(pepperyMontereyPepperJack)General(Aperitif)Meat(Poultry,Fish) : 328\n(Barbecue)Cheese(earthyCamembert,Fontina)Meat(Beef,SmokedMeat,Game,GrilledMeat) : 88\nCheese(butteryBrie,Gouda,Havarti,Swiss)Meat(SmokedMeat,Salmon) : 153\n(Mediterranean)Cheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger)Meat(Pork,Poultry) : 221\n(German)Cheese(pepperyMontereyPepperJack)General(Salad)Meat(Pork) : 160\n(Barbecue,Curried,Indian,LatinAmerican,Chinese)Cheese(sharpBlue,Cheddar)General(Aperitif,Digestive)Meat(Shellfish,Game) : 93\n(Curried,Thai,PanAsian)Cheese(sharpBlue,Cheddar)Meat(Game,GrilledMeat) : 
37\nNone,yet : 602\nCheese(tangyBrick,Edam,Feta) : 241\n(Barbecue,LatinAmerican)Cheese(earthyCamembert,Fontina)General(Chocolate,Dessert)Meat(Beef,Shellfish,SmokedMeat,GrilledMeat) : 152\nCheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger,tangyBrick,Edam,Feta)General(Salad) : 130\n(German)Cheese(butteryBrie,Gouda,Havarti,Swiss)General(Salad)Meat(Pork,Fish,Shellfish) : 123\n(Barbecue,Indian,LatinAmerican,Thai,PanAsian)Cheese(pepperyMontereyPepperJack)Meat(Shellfish) : 118\n(German)General(Salad)Meat(Fish) : 187\nCheese(earthyCamembert,Fontina,nuttyAsiago,Colby,Parmesan)Meat(Pork,GrilledMeat) : 144\n(LatinAmerican)Meat(Beef,Poultry) : 67\n(Curried,German)Cheese(nuttyAsiago,Colby,Parmesan)General(Digestive)Meat(Salmon) : 102\nMeat(Pork,Poultry) : 142\n(Barbecue,LatinAmerican)Cheese(nuttyAsiago,Colby,Parmesan)General(Chocolate)Meat(Salmon) : 20\n(German)General(Chocolate,Dessert)Meat(GrilledMeat) : 57\nCheese(earthyCamembert,Fontina)General(Chocolate)Meat(GrilledMeat) : 29\n(Chocolate,Salad,Dessert,Aperitif) : 44\n(LatinAmerican,German)Meat(Beef,SmokedMeat,GrilledMeat) : 66\n(Barbecue)Cheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger)Meat(Beef,GrilledMeat) : 137\nCheese(butteryBrie,Gouda,Havarti,Swiss,pungentGorgonzola,Limburger)General(Dessert,Digestive)Meat(Game) : 19\nCheese(sharpBlue,Cheddar,tangyBrick,Edam,Feta)General(Aperitif,Digestive) : 7\nCheese(butteryBrie,Gouda,Havarti,Swiss,sharpBlue,Cheddar)General(Digestive)Meat(Beef,SmokedMeat,Game) : 86\n(Barbecue,LatinAmerican)General(Chocolate)Meat(SmokedMeat,GrilledMeat) : 129\n(Salad)Meat(Poultry,Game) : 27\n(German)General(Salad)Meat(Poultry,Fish) : 18\nCheese(pungentGorgonzola,Limburger,tangyBrick,Edam,Feta)General(Digestive)Meat(Shellfish,Game,GrilledMeat) : 24\n(Barbecue)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar)Meat(Beef,SmokedMeat,Game,GrilledMeat,Salmon) : 81\n(German)Cheese(sharpBlue,Cheddar)General(Salad)Meat(Pork) : 22\n(Thai,Chinese,Japanese,PanAsian)Meat(Pork,Poultry,Fish,Shellfish) : 10\n(Barbecue,LatinAmerican)Cheese(pepperyMontereyPepperJack)Meat(Fish,SmokedMeat) : 40\nCheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger)General(Aperitif,Digestive) : 10\n(Curried)Cheese(nuttyAsiago,Colby,Parmesan,pepperyMontereyPepperJack)Meat(Poultry,Fish) : 37\n(German)Cheese(butteryBrie,Gouda,Havarti,Swiss,sharpBlue,Cheddar)General(Dessert,Digestive) : 8\nCheese(earthyCamembert,Fontina,sharpBlue,Cheddar)Meat(GrilledMeat) : 36\nCheese(butteryBrie,Gouda,Havarti,Swiss,sharpBlue,Cheddar,pungentGorgonzola,Limburger) : 12\n(Barbecue,German)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar)Meat(Beef,SmokedMeat,Game,GrilledMeat,Salmon) : 44\n(Dessert,Aperitif) : 2\n(German)Cheese(butteryBrie,Gouda,Havarti,Swiss)General(Aperitif)Meat(Pork,Poultry,Fish,Shellfish) : 48\n(Japanese) : 6\n(Aperitif)Meat(Fish,Shellfish,Salmon) : 6\n" ], [ "test['Food Paring'] = test['Food Paring'].replace(\"(Curried,Thai)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar,pungentGorgonzola,Limburger)Meat(Poultry,Fish,Shellfish,Salmon)\" , \"Thai, Cheese, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Barbecue)Cheese(butteryBrie,Gouda,Havarti,Swiss)General(Chocolate,Dessert)Meat(Beef,SmokedMeat,GrilledMeat)\" , \"Barbecue, Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(earthyCamembert,Fontina)General(Aperitif)\" , \"Cheese, General Food\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(LatinAmerican,German)Meat(Pork,Poultry)\" , \"Meat\")\ntest['Food Paring'] = test['Food 
Paring'].replace(\"(Barbecue)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar,pungentGorgonzola,Limburger)Meat(Game,GrilledMeat,Salmon)\" , \"Barbecue, Cheese, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(butteryBrie,Gouda,Havarti,Swiss)General(Chocolate,Digestive)Meat(Beef,SmokedMeat,Game,GrilledMeat)\" , \"Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Meat(Poultry,Fish,Shellfish)\" , \"Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(pepperyMontereyPepperJack,pungentGorgonzola,Limburger)General(Salad)\" , \"Cheese, General Food\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Dessert)Meat(Poultry)\" , \"Dessert, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Curried,Thai)Cheese(earthyCamembert,Fontina,nuttyAsiago,Colby,Parmesan,pungentGorgonzola,Limburger)General(Salad,Aperitif)Meat(Poultry,Fish,Shellfish)\" , \"Thai, Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Curried,Indian,Thai,Chinese,Japanese,PanAsian)Cheese(sharpBlue,Cheddar)\" , \"Indian, Thai, Chinese, Japanese, PanAsian, Cheese\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(PanAsian)Cheese(earthyCamembert,Fontina,nuttyAsiago,Colby,Parmesan,tangyBrick,Edam,Feta)General(Salad)Meat(Poultry)\" , \"PanAsian, Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Indian,Mediterranean,MiddleEastern)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar)Meat(Fish,Shellfish)\" , \"Indian, Mediterranean, MiddleEastern, Cheese, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger)General(Digestive)\" , \"Cheese, General Food\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger)General(Dessert,Digestive)\" , \"Cheese, General Food\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(sharpBlue,Cheddar)Meat(Beef,Poultry,Fish)\" , \"Cheese, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Salad)\" , \"Salad\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Italian,German)Cheese(nuttyAsiago,Colby,Parmesan)Meat(Fish,Shellfish,Salmon)\" , \"Italian, German, Cheese, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(pepperyMontereyPepperJack,tangyBrick,Edam,Feta)General(Salad)Meat(Poultry,Fish,Shellfish)\" , \"Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(German)\" , \"German\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Thai)Cheese(tangyBrick,Edam,Feta)General(Salad,Aperitif)Meat(Fish)\" , \"Thai, Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Curried,Indian)Cheese(nuttyAsiago,Colby,Parmesan,sharpBlue,Cheddar)Meat(Shellfish)\" , \"Indian, Cheese, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Barbecue)Cheese(butteryBrie,Gouda,Havarti,Swiss,earthyCamembert,Fontina)General(Chocolate,Dessert)Meat(Beef,Shellfish,SmokedMeat,Game,GrilledMeat)\" , \"Barbecue, Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(pepperyMontereyPepperJack)General(Chocolate)Meat(GrilledMeat)\" , \"Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(German)Cheese(butteryBrie,Gouda,Havarti,Swiss,earthyCamembert,Fontina)General(Chocolate)Meat(Game)\" , \"German, Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food 
Paring'].replace(\"(German)Cheese(earthyCamembert,Fontina)Meat(SmokedMeat,Game,GrilledMeat)\" , \"German, Cheese, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Dessert,Aperitif,Digestive)\" , \"Dessert\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Indian,LatinAmerican,PanAsian)General(Aperitif)\" , \"Indian, LatinAmerican, PanAsian, General Food\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(butteryBrie,Gouda,Havarti,Swiss,pungentGorgonzola,Limburger)General(Chocolate)Meat(Beef)\" , \"Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(nuttyAsiago,Colby,Parmesan)General(Digestive)\" , \"Cheese, General Food\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(German)Cheese(butteryBrie,Gouda,Havarti,Swiss,sharpBlue,Cheddar)General(Salad,Aperitif)\" , \"German, Cheese, General Food\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(German)Cheese(tangyBrick,Edam,Feta)General(Salad)Meat(Poultry,Fish,Shellfish)\" , \"German, Cheese, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Indian,MiddleEastern)Cheese(nuttyAsiago,Colby,Parmesan,tangyBrick,Edam,Feta)General(Salad,Aperitif)Meat(Fish,Shellfish)\" , \"Indian, MiddleEastern, Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(German)Cheese(earthyCamembert,Fontina)General(Chocolate)Meat(Game)\" , \"German, Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Italian,MiddleEastern)Cheese(pepperyMontereyPepperJack)General(Salad)Meat(Fish)\" , \"Italian, MiddleEastern, Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(German)General(Salad)Meat(Pork,Fish,Shellfish)\" , \"German, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Barbecue)Cheese(earthyCamembert,Fontina,nuttyAsiago,Colby,Parmesan)General(Chocolate)Meat(Beef)\" , \"Barbecue, Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Aperitif,Digestive)Meat(Game,Salmon)\" , \"Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Barbecue,Italian)Cheese(earthyCamembert,Fontina)Meat(Pork,Poultry,Fish,Shellfish)\" , \"Barbecue, Italian, Cheese, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Barbecue,Curried,Indian,LatinAmerican,Italian,Thai,Chinese,Japanese,PanAsian,Mediterranean,MiddleEastern)\" , \"Barbecue, Curried, Indian, LatinAmerican, Italian, Thai, Chinese, Japanese, PanAsian, Mediterranean, MiddleEastern\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Barbecue)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar,tangyBrick,Edam,Feta)General(Salad)Meat(Pork,Poultry,Fish,Shellfish)\" , \"Barbecue, Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(German)Meat(SmokedMeat,Game,GrilledMeat)\" , \"German, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Japanese,German)Cheese(pepperyMontereyPepperJack)General(Aperitif)Meat(Poultry,Fish)\" , \"Japanese, German, Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Barbecue)Cheese(earthyCamembert,Fontina)Meat(Beef,SmokedMeat,Game,GrilledMeat)\" , \"Barbecue, Cheese, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(butteryBrie,Gouda,Havarti,Swiss)Meat(SmokedMeat,Salmon)\" , \"Cheese, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Mediterranean)Cheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger)Meat(Pork,Poultry)\" , \"Mediterranean, Cheese, 
Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(German)Cheese(pepperyMontereyPepperJack)General(Salad)Meat(Pork)\" , \"German, Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Barbecue,Curried,Indian,LatinAmerican,Chinese)Cheese(sharpBlue,Cheddar)General(Aperitif,Digestive)Meat(Shellfish,Game)\" , \"Barbecue, Curried, Indian, LatinAmerican, Chinese, Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Curried,Thai,PanAsian)Cheese(sharpBlue,Cheddar)Meat(Game,GrilledMeat)\" , \"Curried, Thai, PanAsian, Cheese, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"None,yet\" , \"None\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(tangyBrick,Edam,Feta)\" , \"Cheese\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Barbecue,LatinAmerican)Cheese(earthyCamembert,Fontina)General(Chocolate,Dessert)Meat(Beef,Shellfish,SmokedMeat,GrilledMeat)\" , \"Barbecue, LatinAmerican, Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger,tangyBrick,Edam,Feta)General(Salad)\" , \"Cheese, General Food\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(German)Cheese(butteryBrie,Gouda,Havarti,Swiss)General(Salad)Meat(Pork,Fish,Shellfish)\" , \"German, Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Barbecue,Indian,LatinAmerican,Thai,PanAsian)Cheese(pepperyMontereyPepperJack)Meat(Shellfish)\" , \"Barbecue, Indian, LatinAmerican, Thai, PanAsian, Cheese, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(German)General(Salad)Meat(Fish)\" , \"German, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(earthyCamembert,Fontina,nuttyAsiago,Colby,Parmesan)Meat(Pork,GrilledMeat)\" , \"Cheese, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(LatinAmerican)Meat(Beef,Poultry)\" , \"LatinAmerican, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Curried,German)Cheese(nuttyAsiago,Colby,Parmesan)General(Digestive)Meat(Salmon)\" , \"German, Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Meat(Pork,Poultry)\" , \"Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Barbecue,LatinAmerican)Cheese(nuttyAsiago,Colby,Parmesan)General(Chocolate)Meat(Salmon)\" , \"Barbecue, LatinAmerican, Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(German)General(Chocolate,Dessert)Meat(GrilledMeat)\" , \"German, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(earthyCamembert,Fontina)General(Chocolate)Meat(GrilledMeat)\" , \"Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Chocolate,Salad,Dessert,Aperitif)\" , \"Dessert\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(LatinAmerican,German)Meat(Beef,SmokedMeat,GrilledMeat)\" , \"LatinAmerican, German, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Barbecue)Cheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger)Meat(Beef,GrilledMeat)\" , \"Barbecue, Cheese, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(butteryBrie,Gouda,Havarti,Swiss,pungentGorgonzola,Limburger)General(Dessert,Digestive)Meat(Game)\" , \"Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(sharpBlue,Cheddar,tangyBrick,Edam,Feta)General(Aperitif,Digestive)\" , \"Cheese, General Food\")\ntest['Food Paring'] = test['Food 
Paring'].replace(\"Cheese(butteryBrie,Gouda,Havarti,Swiss,sharpBlue,Cheddar)General(Digestive)Meat(Beef,SmokedMeat,Game)\" , \"Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Barbecue,LatinAmerican)General(Chocolate)Meat(SmokedMeat,GrilledMeat)\" , \"Barbecue, LatinAmerican, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Salad)Meat(Poultry,Game)\" , \"Salad, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(German)General(Salad)Meat(Poultry,Fish)\" , \"German, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(pungentGorgonzola,Limburger,tangyBrick,Edam,Feta)General(Digestive)Meat(Shellfish,Game,GrilledMeat)\" , \"Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Barbecue)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar)Meat(Beef,SmokedMeat,Game,GrilledMeat,Salmon)\" , \"Barbecue, Cheese, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(German)Cheese(sharpBlue,Cheddar)General(Salad)Meat(Pork)\" , \"German, Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Thai,Chinese,Japanese,PanAsian)Meat(Pork,Poultry,Fish,Shellfish)\" , \"Thai, Chinese, Japanese, PanAsian, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Barbecue,LatinAmerican)Cheese(pepperyMontereyPepperJack)Meat(Fish,SmokedMeat)\" , \"Barbecue, LatinAmerican, Cheese, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(sharpBlue,Cheddar,pungentGorgonzola,Limburger)General(Aperitif,Digestive)\" , \"Cheese, General Food\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Curried)Cheese(nuttyAsiago,Colby,Parmesan,pepperyMontereyPepperJack)Meat(Poultry,Fish)\" , \"Curried, Cheese, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(German)Cheese(butteryBrie,Gouda,Havarti,Swiss,sharpBlue,Cheddar)General(Dessert,Digestive)\" , \"German, Cheese, General Food\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(earthyCamembert,Fontina,sharpBlue,Cheddar)Meat(GrilledMeat)\" , \"Cheese, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"Cheese(butteryBrie,Gouda,Havarti,Swiss,sharpBlue,Cheddar,pungentGorgonzola,Limburger)\" , \"Cheese\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Barbecue,German)Cheese(pepperyMontereyPepperJack,sharpBlue,Cheddar)Meat(Beef,SmokedMeat,Game,GrilledMeat,Salmon)\" , \"Barbecue, German, Cheese, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Dessert,Aperitif)\" , \"Dessert\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(German)Cheese(butteryBrie,Gouda,Havarti,Swiss)General(Aperitif)Meat(Pork,Poultry,Fish,Shellfish)\" , \"German, Cheese, General Food, Meat\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Japanese)\" , \"Japanese\")\ntest['Food Paring'] = test['Food Paring'].replace(\"(Aperitif)Meat(Fish,Shellfish,Salmon)\" , \"Meat\")", "_____no_output_____" ], [ "test['Food Paring'].nunique()", "_____no_output_____" ], [ "freq = nltk.FreqDist(test['Glassware Used'])\nfor key,value in freq.items():\n print(str(key)+' : '+str(value))", "PintGlass(orBecker,Nonic,Tumbler),Mug(orSeidel,Stein) : 10141\nSnifter,Tulip,OversizedWineGlass : 1681\nFlute,PilsenerGlass(orPokal),Mug(orSeidel,Stein) : 521\nPintGlass(orBecker,Nonic,Tumbler),Snifter,OversizedWineGlass : 646\nPilsenerGlass(orPokal) : 640\nFlute,Tulip,OversizedWineGlass : 614\nPintGlass(orBecker,Nonic,Tumbler) : 433\nPintGlass(orBecker,Nonic,Tumbler),Tulip,OversizedWineGlass : 
998\nFlute,PilsenerGlass(orPokal),Mug(orSeidel,Stein),Stange(SlenderCylinder) : 303\nPintGlass(orBecker,Nonic,Tumbler),Snifter : 147\nPintGlass(orBecker,Nonic,Tumbler),PilsenerGlass(orPokal),Mug(orSeidel,Stein) : 579\nMug(orSeidel,Stein) : 102\nPilsenerGlass(orPokal),Mug(orSeidel,Stein) : 109\nStange(SlenderCylinder) : 306\nGoblet(orChalice) : 142\nPintGlass(orBecker,Nonic,Tumbler),Snifter,Mug(orSeidel,Stein) : 101\nPintGlass(orBecker,Nonic,Tumbler),Goblet(orChalice) : 264\nWeizenGlass : 528\nTulip,OversizedWineGlass : 70\nFlute,PilsenerGlass(orPokal) : 473\nPintGlass(orBecker,Nonic,Tumbler),Snifter,Tulip : 153\nSnifter,Goblet(orChalice) : 221\nPintGlass(orBecker,Nonic,Tumbler),Stange(SlenderCylinder) : 160\nTulip,Goblet(orChalice),OversizedWineGlass : 130\nPintGlass(orBecker,Nonic,Tumbler),PilsenerGlass(orPokal) : 173\nStange(SlenderCylinder),WeizenGlass : 187\nPintGlass(orBecker,Nonic,Tumbler),Mug(orSeidel,Stein),WeizenGlass : 20\nPintGlass(orBecker,Nonic,Tumbler),Mug(orSeidel,Stein),OversizedWineGlass : 251\nFlute,WeizenGlass : 57\nFlute,Snifter,Tulip : 65\nFlute,Snifter,Tulip,Stange(SlenderCylinder) : 56\nSnifter,Tulip,Goblet(orChalice),OversizedWineGlass : 137\nSnifter,OversizedWineGlass : 43\nSnifter,Tulip,Goblet(orChalice) : 86\nNone,yet : 21\nFlute,Stange(SlenderCylinder) : 12\nFlute,Snifter,OversizedWineGlass : 8\nMug(orSeidel,Stein),Stange(SlenderCylinder) : 44\nFlute : 6\n" ], [ "test['Glassware Used'] = test['Glassware Used'].replace(\"PintGlass(orBecker,Nonic,Tumbler),Mug(orSeidel,Stein)\" , \"PintGlass, Mug\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Snifter,Tulip,OversizedWineGlass\" , \"Snifter, Tulip, OversizedWineGlass\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Flute,PilsenerGlass(orPokal),Mug(orSeidel,Stein)\" , \"Flute, PilsenerGlass, Mug\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"PintGlass(orBecker,Nonic,Tumbler),Snifter,OversizedWineGlass\" , \"PintGlass, Snifter, OversizedWineGlass\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"PilsenerGlass(orPokal\" , \"PilsenerGlass\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Flute,Tulip,OversizedWineGlass\" , \"Flute, Tulip, OversizedWineGlass\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"PintGlass(orBecker,Nonic,Tumbler)\" , \"PintGlass\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"PintGlass(orBecker,Nonic,Tumbler),Tulip,OversizedWineGlass\" , \"PintGlass, Tulip, OversizedWineGlass\") \ntest['Glassware Used'] = test['Glassware Used'].replace(\"Flute,PilsenerGlass(orPokal),Mug(orSeidel,Stein),Stange(SlenderCylinder)\" , \"Flute, PilsenerGlass, Mug, Stange\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"PintGlass(orBecker,Nonic,Tumbler),Snifter\" , \"PintGlass, Snifter\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"PintGlass(orBecker,Nonic,Tumbler),PilsenerGlass(orPokal),Mug(orSeidel,Stein)\" , \"PintGlass, PilsenerGlass, Mug\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Mug(orSeidel,Stein)\" , \"Mug\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"PilsenerGlass(orPokal),Mug(orSeidel,Stein)\" , \"PilsenerGlass, Mug\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Stange(SlenderCylinder)\" , \"Stange\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Goblet(orChalice)\" , \"Goblet\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"PintGlass(orBecker,Nonic,Tumbler),Snifter,Mug(orSeidel,Stein)\",\"PintGlass, Snifter, Mug\") 
\ntest['Glassware Used'] = test['Glassware Used'].replace(\"PintGlass(orBecker,Nonic,Tumbler),Goblet(orChalice)\" , \"PintGlass, Goblet\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"WeizenGlass\" , \"WeizenGlass\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Tulip,OversizedWineGlass\" , \"Tulip, OversizedWineGlass\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Flute,PilsenerGlass(orPokal)\" , \"Flute, PilsenerGlass\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"PintGlass(orBecker,Nonic,Tumbler),Snifter,Tulip\" , \"PintGlass, Snifter, Tulip\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Snifter,Goblet(orChalice)\" , \"Snifter, Goblet\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"PintGlass(orBecker,Nonic,Tumbler),Stange(SlenderCylinder)\" , \"PintGlass, Stange\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Tulip,Goblet(orChalice),OversizedWineGlass\" , \"Tulip, Goblet, OversizedWineGlass\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"PintGlass(orBecker,Nonic,Tumbler),PilsenerGlass(orPokal)\" , \"PintGlass, PilsenerGlass\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Stange(SlenderCylinder),WeizenGlass\" , \"Stange, WeizenGlass\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"PintGlass(orBecker,Nonic,Tumbler),Mug(orSeidel,Stein),WeizenGlass\" , \"PintGlass, Mug, WeizenGlass\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"PintGlass(orBecker,Nonic,Tumbler),Mug(orSeidel,Stein),OversizedWineGlass\" , \"PintGlass, Mug, OversizedWineGlass\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Flute,WeizenGlass\" , \"Flute, WeizenGlass\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Flute,Snifter,Tulip\" , \"Flute, Snifter, Tulip\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Flute,Snifter,Tulip,Stange(SlenderCylinder)\" , \"Flute, Snifter, Tulip, Stange\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Snifter,Tulip,Goblet(orChalice),OversizedWineGlass\" , \"Snifter, Tulip, Goblet, OversizedWineGlass\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Snifter,OversizedWineGlass\" , \"Snifter, OversizedWineGlass\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Snifter,Tulip,Goblet(orChalice)\" , \"Snifter, Tulip, Goblet\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"None,yet\" , \"None\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Flute,Stange(SlenderCylinder)\" , \"Flute,Stange\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Flute,Snifter,OversizedWineGlass\" , \"Flute, Snifter, OversizedWineGlass\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Mug(orSeidel,Stein),Stange(SlenderCylinder)\" , \"Mug, Stange\")\ntest['Glassware Used'] = test['Glassware Used'].replace(\"Flute\" , \"Flute\")\n", "_____no_output_____" ], [ "test['Glassware Used'].nunique()", "_____no_output_____" ], [ "test.dtypes", "_____no_output_____" ], [ "test['Food Paring label']= label_encoder.fit_transform(test['Food Paring']) \ntest['Glassware Used label']= label_encoder.fit_transform(test['Glassware Used']) \ntest['Style Name label']= label_encoder.fit_transform(test['Style Name']) ", "_____no_output_____" ], [ "test['Ratings'] = pd.to_numeric(test['Ratings'],errors='coerce')\ntest['Beer Name'] = test['Beer Name'].astype(float)\ntest['Brewing Company'] = test['Brewing Company'].astype(float)", "_____no_output_____" ], [ 
"test.isnull().sum()", "_____no_output_____" ], [ "test_avg_ratings = test['Ratings'].astype(float).mean()\ntest['Ratings'].replace(np.nan, test_avg_ratings, inplace=True)", "_____no_output_____" ], [ "test.isnull().sum()", "_____no_output_____" ], [ "test.columns", "_____no_output_____" ], [ "test1 = test[['ABV', 'Ratings', 'Minimum Temperature', 'Maximum Temperature','Minimum Serving Temperature', 'Maximum Serving Temperature', 'Food Paring label', 'Glassware Used label', 'Style Name label']]", "_____no_output_____" ], [ "test1.isnull().sum()", "_____no_output_____" ] ], [ [ "## Data Pre-Processing", "_____no_output_____" ] ], [ [ "x = train1.iloc[:,:-1]\ny = train1.iloc[:,-1]\nprint(x.columns)\n", "Index(['ABV', 'Ratings', 'Minimum Temperature', 'Maximum Temperature',\n 'Minimum Serving Temperature', 'Maximum Serving Temperature',\n 'Food Paring label', 'Glassware Used label', 'Style Name label'],\n dtype='object')\n" ], [ "from sklearn.preprocessing import StandardScaler\nx = StandardScaler().fit(x).transform(x)\nx[:3]", "_____no_output_____" ] ], [ [ "### Regression", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, random_state = 0)", "_____no_output_____" ], [ "from sklearn.linear_model import LinearRegression\nreg = LinearRegression()\nreg.fit(x_train, y_train)", "_____no_output_____" ], [ "y_pred = reg.predict(x_test)\nprint(y_pred)", "[3.19830794 3.30957503 3.07962486 ... 3.16934654 3.23573775 3.25829787]\n" ], [ "# Calculating score from Root Mean Log Squared Error\ndef rmlse(y_test, y_pred):\n error = np.square(np.log10(y_pred +1) - np.log10(y_test +1)).mean() ** 0.5\n score = 1 - error\n return score\n", "_____no_output_____" ], [ "print(\"\\n----------------------------\\nRMLSE Score = \", rmlse(y_test, y_pred))", "\n----------------------------\nRMLSE Score = 0.7615472587302806\n" ] ], [ [ "### SVR", "_____no_output_____" ] ], [ [ "from sklearn.svm import SVR\nsvr = SVR(kernel='rbf')", "_____no_output_____" ], [ "# Training the regressor with training data\nsvr.fit(x_train, y_train)", "_____no_output_____" ], [ "y_pred2 = svr.predict(x_test)\nprint(y_pred2)", "[3.63264068 3.79316237 3.64473313 ... 3.70577344 3.65590063 3.68563509]\n" ], [ "print(\"----------------------------\\nRMLSE Score = \", rmlse(y_test, y_pred2))", "----------------------------\nRMLSE Score = 0.7502329428531098\n" ], [ "pd.DataFrame({'Score' : y_pred}).to_excel(\"C:\\\\Users\\\\Moaz\\\\Desktop\\\\moaz\\\\Jupyter Python NB\\\\Machine Hack\\\\beer_score.xlsx\")", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb95264bc4ddfc6217a83abc70906fb96b0df540
21,632
ipynb
Jupyter Notebook
assets/ist718lab9/IST718_WK9_TensorFlow.ipynb
dskw1/dskw1.github.io
ee85aaa7c99c4320cfac95e26063beaac3ae6fcb
[ "MIT" ]
null
null
null
assets/ist718lab9/IST718_WK9_TensorFlow.ipynb
dskw1/dskw1.github.io
ee85aaa7c99c4320cfac95e26063beaac3ae6fcb
[ "MIT" ]
1
2022-03-24T18:28:16.000Z
2022-03-24T18:28:16.000Z
assets/ist718lab9/IST718_WK9_TensorFlow.ipynb
dskw1/dskw1.github.io
ee85aaa7c99c4320cfac95e26063beaac3ae6fcb
[ "MIT" ]
1
2021-09-01T16:54:38.000Z
2021-09-01T16:54:38.000Z
36.664407
308
0.529031
[ [ [ "# SINGLE LAYER AND MULTI LAYER NETWORKS FOR MNIST\n# BASED ON CODE FROM TENSORFLOW TUTORIAL\n\nimport tensorflow as tf", "_____no_output_____" ] ], [ [ "# OBTAIN \n## (& SCRUB -- this data comes scrubbed)", "_____no_output_____" ] ], [ [ "from tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)", "WARNING:tensorflow:From <ipython-input-2-8bf8ae5a5303>:2: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use alternatives such as official/mnist/dataset.py from tensorflow/models.\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease write your own downloading logic.\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/contrib/learn/python/learn/datasets/base.py:252: _internal_retry.<locals>.wrap.<locals>.wrapped_fn (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use urllib or similar directly.\nSuccessfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.data to implement this functionality.\nExtracting MNIST_data/train-images-idx3-ubyte.gz\nSuccessfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.data to implement this functionality.\nExtracting MNIST_data/train-labels-idx1-ubyte.gz\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.one_hot on tensors.\nSuccessfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.\nExtracting MNIST_data/t10k-images-idx3-ubyte.gz\nSuccessfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.\nExtracting MNIST_data/t10k-labels-idx1-ubyte.gz\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/contrib/learn/python/learn/datasets/mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use alternatives such as official/mnist/dataset.py from tensorflow/models.\n" ] ], [ [ "# MODEL", "_____no_output_____" ] ], [ [ "# MODEL\n# CREATE PLACEHOLDER VARIABLES FOR OPERATION MANIPULATION\n# THE 784 MATCHES THE VECTOR SIZE OF THE MNIST IMAGES - 28*28\n\nx = tf.placeholder(tf.float32, [None, 784])\n\n# MODEL\n# CREATE WEIGHTS & BIASES VARIABLES\n# IN TF, OUR MODEL PARAMETERS ARE OFTEN MANAGED AS VARIABLES\n\nW = 
tf.Variable(tf.zeros([784, 10]))\nb = tf.Variable(tf.zeros([10]))\n\n# MODEL\n# CREATE MODEL - USES SOFTMAX AS THE ACTIVATION FUNCTION\n# REMEMBER GOAL FOR ACTIVATION FUNCTION IS TO \"SHAPE\" THE \n# OUTPUT INTO A PROBABILITY DISTRO OVER THE 10 CLASSES\n\ny = tf.nn.softmax(tf.matmul(x, W) + b)\n\n# MODEL\n# CREATE PREDICTED VARIABLE Y-HAT\n# AND USE CROSS ENTROPY TO DETERMINE LOSS\n# CROSS ENTROPY - HOW INEFFICIENT ARE OUR PREDICTIONS?\n\ny_ = tf.placeholder(tf.float32, [None, 10])\ncross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))\n\n# MODEL\n# TRAIN USING GRADIENT DESCENT\n# LEARNING RATE AT MIDPOINT - .5 - MAKE SMALL ADJUSTMENTS TO MINIMIZE COST\n\ntrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)", "_____no_output_____" ] ], [ [ "## MODEL -- RUN MODEL", "_____no_output_____" ] ], [ [ "# MODEL - RUN\n\nsess = tf.InteractiveSession()\ntf.global_variables_initializer().run()\nfor _ in range(10000):\n batch_xs, batch_ys = mnist.train.next_batch(100)\n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})", "_____no_output_____" ] ], [ [ "# EVALUATE", "_____no_output_____" ] ], [ [ "# EVALUATE MODEL\n\ncorrect_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\nprint(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))\n", "0.9255\n" ] ], [ [ "# BLOCK TWO", "_____no_output_____" ], [ "Alternative Approach", "_____no_output_____" ] ], [ [ "# WEIGHT INITIALIZATION\n\ndef weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)", "_____no_output_____" ], [ "# CREATE CONVOLUTION AND POOLING LAYERS\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')", "_____no_output_____" ], [ "# FIRST CONVOLUTION LAYER\n\nW_conv1 = weight_variable([5, 5, 1, 32])\nb_conv1 = bias_variable([32])\n\nx_image = tf.reshape(x, [-1, 28, 28, 1]) # BASE IMAGE SIZE OF 28 * 28\n\nh_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\nh_pool1 = max_pool_2x2(h_conv1) # RESULTING IMAGE SIZE IS 14 * 14\n", "_____no_output_____" ], [ "# SECOND CONOLUTION LAYER \n# MORE THAN ONE LAYER? DEEP LEARNING\n\nW_conv2 = weight_variable([5, 5, 32, 64])\nb_conv2 = bias_variable([64])\n\nh_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\nh_pool2 = max_pool_2x2(h_conv2)", "_____no_output_____" ], [ "# FULLY CONNECTED LAYER - BEFORE OUTPUT\n\nW_fc1 = weight_variable([7 * 7 * 64, 1024])\nb_fc1 = bias_variable([1024])\n\nh_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\nh_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) # ADD THE RECTIFIED LINEAR UNIT\n", "_____no_output_____" ], [ "# DROP LAYER - REDUCE OVERFITTING\n# USE OF rate IS AN UPDATE BASED ON TF\n\nkeep_prob = tf.placeholder(tf.float32)\nrate = 1 - keep_prob\nh_fc1_drop = tf.nn.dropout(h_fc1, rate)", "WARNING:tensorflow:From <ipython-input-12-9276a630c381>:4: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use `rate` instead of `keep_prob`. 
Rate should be set to `rate = 1 - keep_prob`.\n" ], [ "# LAST LAYER - OUTPUT\n\nW_fc2 = weight_variable([1024, 10])\nb_fc2 = bias_variable([10])\n\ny_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2", "_____no_output_____" ] ], [ [ "## MODEL -- RUN MODEL", "_____no_output_____" ] ], [ [ "# RUN THE MODEL\n\ncross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))\ntrain_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\ncorrect_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for i in range(10000):\n batch = mnist.train.next_batch(50)\n if i % 100 == 0:\n train_accuracy = accuracy.eval(feed_dict={\n x: batch[0], y_: batch[1], rate: 1.0})\n print('step %d, training accuracy %g' % (i, train_accuracy))\n train_step.run(feed_dict={x: batch[0], y_: batch[1], rate: 0.5})\n\n print('test accuracy %g' % accuracy.eval(feed_dict={\n x: mnist.test.images, y_: mnist.test.labels, rate: 1.0}))", "WARNING:tensorflow:From <ipython-input-14-665e1460b7b7>:3: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\n\nFuture major versions of TensorFlow will allow gradients to flow\ninto the labels input on backprop by default.\n\nSee `tf.nn.softmax_cross_entropy_with_logits_v2`.\n\nstep 0, training accuracy 0.08\nstep 100, training accuracy 0.84\nstep 200, training accuracy 0.92\nstep 300, training accuracy 0.9\nstep 400, training accuracy 0.94\nstep 500, training accuracy 0.98\nstep 600, training accuracy 0.92\nstep 700, training accuracy 0.96\nstep 800, training accuracy 0.94\nstep 900, training accuracy 0.98\nstep 1000, training accuracy 0.94\nstep 1100, training accuracy 0.96\nstep 1200, training accuracy 1\nstep 1300, training accuracy 0.98\nstep 1400, training accuracy 1\nstep 1500, training accuracy 0.96\nstep 1600, training accuracy 0.98\nstep 1700, training accuracy 0.94\nstep 1800, training accuracy 0.96\nstep 1900, training accuracy 1\nstep 2000, training accuracy 1\nstep 2100, training accuracy 0.98\nstep 2200, training accuracy 1\nstep 2300, training accuracy 1\nstep 2400, training accuracy 0.96\nstep 2500, training accuracy 1\nstep 2600, training accuracy 0.92\nstep 2700, training accuracy 1\nstep 2800, training accuracy 0.98\nstep 2900, training accuracy 1\nstep 3000, training accuracy 0.94\nstep 3100, training accuracy 1\nstep 3200, training accuracy 0.94\nstep 3300, training accuracy 0.96\nstep 3400, training accuracy 1\nstep 3500, training accuracy 1\nstep 3600, training accuracy 0.98\nstep 3700, training accuracy 0.98\nstep 3800, training accuracy 1\nstep 3900, training accuracy 0.98\nstep 4000, training accuracy 1\nstep 4100, training accuracy 0.98\nstep 4200, training accuracy 1\nstep 4300, training accuracy 1\nstep 4400, training accuracy 1\nstep 4500, training accuracy 1\nstep 4600, training accuracy 1\nstep 4700, training accuracy 1\nstep 4800, training accuracy 1\nstep 4900, training accuracy 1\nstep 5000, training accuracy 0.98\nstep 5100, training accuracy 1\nstep 5200, training accuracy 1\nstep 5300, training accuracy 0.98\nstep 5400, training accuracy 0.98\nstep 5500, training accuracy 1\nstep 5600, training accuracy 1\nstep 5700, training accuracy 1\nstep 5800, training accuracy 1\nstep 5900, training accuracy 1\nstep 6000, training accuracy 0.96\nstep 6100, training accuracy 
1\nstep 6200, training accuracy 1\nstep 6300, training accuracy 1\nstep 6400, training accuracy 1\nstep 6500, training accuracy 1\nstep 6600, training accuracy 1\nstep 6700, training accuracy 0.98\nstep 6800, training accuracy 0.98\nstep 6900, training accuracy 1\nstep 7000, training accuracy 1\nstep 7100, training accuracy 0.96\nstep 7200, training accuracy 1\nstep 7300, training accuracy 1\nstep 7400, training accuracy 1\nstep 7500, training accuracy 1\nstep 7600, training accuracy 1\nstep 7700, training accuracy 1\nstep 7800, training accuracy 1\nstep 7900, training accuracy 0.98\nstep 8000, training accuracy 1\nstep 8100, training accuracy 1\nstep 8200, training accuracy 0.98\nstep 8300, training accuracy 1\nstep 8400, training accuracy 0.98\nstep 8500, training accuracy 1\nstep 8600, training accuracy 1\nstep 8700, training accuracy 1\nstep 8800, training accuracy 1\nstep 8900, training accuracy 1\nstep 9000, training accuracy 1\nstep 9100, training accuracy 0.98\nstep 9200, training accuracy 1\nstep 9300, training accuracy 1\nstep 9400, training accuracy 0.98\nstep 9500, training accuracy 1\nstep 9600, training accuracy 1\nstep 9700, training accuracy 1\nstep 9800, training accuracy 1\nstep 9900, training accuracy 1\ntest accuracy 0.9915\n" ], [ "", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb953ec4950513bf3d8b37ad2b85c116316961ff
230,416
ipynb
Jupyter Notebook
notebooks/06-vae-fa-Res-Res.ipynb
elijahc/vae
5cd80518f876d4ca9e97de2ece7c266e3df09cb7
[ "MIT" ]
null
null
null
notebooks/06-vae-fa-Res-Res.ipynb
elijahc/vae
5cd80518f876d4ca9e97de2ece7c266e3df09cb7
[ "MIT" ]
null
null
null
notebooks/06-vae-fa-Res-Res.ipynb
elijahc/vae
5cd80518f876d4ca9e97de2ece7c266e3df09cb7
[ "MIT" ]
null
null
null
152.795756
54,824
0.849251
[ [ [ "# Importance of XCov term in loss function\n- How does the model behave differently without XCov?\n- Does amount of input variation matter? (None,Med,hi)?", "_____no_output_____" ] ], [ [ "import os\nimport json\nimport numpy as np\nimport pandas as pd\nimport scipy\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline\n\nfrom src.data_loader import Shifted_Data_Loader\nfrom src.plot import orig_vs_transformed as plot_ovt\nfrom src.plot import enc_dec_samples\nfrom src.models import GResNet,EResNet,ResBlock,EncResBlock\nfrom src.config import get_config\nfrom src.trainer import Trainer\nfrom src.utils import prepare_dirs_and_logger\nfrom keras.datasets import fashion_mnist,mnist\nfrom keras.layers import Dense\n# from tabulate import tabulate", "Using TensorFlow backend.\n" ], [ "config,_ = get_config()\n\n# Boilerplate\nsetattr(config, 'proj_root', '/home/elijahc/projects/vae')\nsetattr(config, 'log_dir', '/home/elijahc/projects/vae/logs')\nsetattr(config, 'dev_mode',True)\n# setattr(config,'model_dir','/home/elijahc/projects/vae/models/2019-01-17/')\n\n# Architecture Params\nsetattr(config, 'enc_blocks', [32,64,128,256])\nsetattr(config, 'dec_blocks', [4,2,1])\nsetattr(config, 'z_dim', 10)\nsetattr(config, 'y_dim', 10)\n\n# Training Params\nsetattr(config, 'batch_size', 512)\nsetattr(config, 'dataset', 'fashion_mnist')\nsetattr(config, 'epochs', 100)\nsetattr(config, 'monitor', 'val_G_loss')\nsetattr(config, 'min_delta', 0.25)\nsetattr(config, 'optimizer', 'adam')\n\n# Loss Weights\nsetattr(config, 'xcov', 0)\nsetattr(config, 'recon', 25)\nsetattr(config, 'xent', 15)", "_____no_output_____" ], [ "if not config.dev_mode:\n print('setting up...')\n prepare_dirs_and_logger(config)\n \nvars(config)", "_____no_output_____" ], [ "translation_amt = 0.8 # Med\nDL = Shifted_Data_Loader(dataset=config.dataset,flatten=False,\n rotation=None,\n translation=translation_amt,\n )", "input_shape: (56, 56, 1)\ndataset: fashion_mnist\nscale: 2\ntx_max: 0.8\nrot_max: None\nloading fashion_mnist...\nsx_train: (60000, 56, 56, 1)\nmaking training data...\nmaking testing data...\n" ], [ "DL.input_shape", "_____no_output_____" ], [ "G_builder = GResNet(y_dim=config.y_dim,z_dim=config.z_dim,dec_blocks=config.dec_blocks,flatten_out=False)\nE_builder = EResNet(blocks=config.enc_blocks,z_dim=config.z_dim,y_dim=config.y_dim)\ntrainer = Trainer(config,DL,E_builder,G_builder,)\n# setattr(trainer.config,'model_dir','/home/elijahc/projects/vae/models/2019-01-22/')", "building encoder...\nbuilding decoder/generator...\n" ], [ "pt,idx = plot_ovt(DL,cmap='gray')", "_____no_output_____" ], [ "from keras.utils import to_categorical\nRF = to_categorical(np.ones(len(DL.sx_train)),num_classes=2)", "_____no_output_____" ], [ "trainer.compile_model()\ntrainer.E.summary()", "__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_image (InputLayer) (None, 56, 56, 1) 0 \n__________________________________________________________________________________________________\nblock_1_conv_0 (Conv2D) (None, 56, 56, 32) 320 input_image[0][0] \n__________________________________________________________________________________________________\nblock_1_BN_0 (BatchNormalizatio (None, 56, 56, 32) 128 block_1_conv_0[0][0] 
\n__________________________________________________________________________________________________\nblock_1_ReLU_0 (Activation) (None, 56, 56, 32) 0 block_1_BN_0[0][0] \n__________________________________________________________________________________________________\nblock_1_conv_1 (Conv2D) (None, 56, 56, 32) 1056 block_1_ReLU_0[0][0] \n__________________________________________________________________________________________________\nblock_1_BN_2 (BatchNormalizatio (None, 56, 56, 32) 128 block_1_conv_1[0][0] \n__________________________________________________________________________________________________\nblock_1_ReLU_2 (Activation) (None, 56, 56, 32) 0 block_1_BN_2[0][0] \n__________________________________________________________________________________________________\nconv2d_1 (Conv2D) (None, 56, 56, 32) 64 input_image[0][0] \n__________________________________________________________________________________________________\nblock_1_conv_2 (Conv2D) (None, 56, 56, 32) 9248 block_1_ReLU_2[0][0] \n__________________________________________________________________________________________________\nblock_1_Add_1 (Add) (None, 56, 56, 32) 0 conv2d_1[0][0] \n block_1_conv_2[0][0] \n__________________________________________________________________________________________________\nblock_2_BN_1 (BatchNormalizatio (None, 56, 56, 32) 128 block_1_Add_1[0][0] \n__________________________________________________________________________________________________\nblock_2_ReLU_1 (Activation) (None, 56, 56, 32) 0 block_2_BN_1[0][0] \n__________________________________________________________________________________________________\nblock_2_conv_1 (Conv2D) (None, 28, 28, 64) 18496 block_2_ReLU_1[0][0] \n__________________________________________________________________________________________________\nblock_2_BN_2 (BatchNormalizatio (None, 28, 28, 64) 256 block_2_conv_1[0][0] \n__________________________________________________________________________________________________\nblock_2_ReLU_2 (Activation) (None, 28, 28, 64) 0 block_2_BN_2[0][0] \n__________________________________________________________________________________________________\nconv2d_2 (Conv2D) (None, 28, 28, 64) 2112 block_1_Add_1[0][0] \n__________________________________________________________________________________________________\nblock_2_conv_2 (Conv2D) (None, 28, 28, 64) 36928 block_2_ReLU_2[0][0] \n__________________________________________________________________________________________________\nblock_2_Add_1 (Add) (None, 28, 28, 64) 0 conv2d_2[0][0] \n block_2_conv_2[0][0] \n__________________________________________________________________________________________________\nblock_3_BN_1 (BatchNormalizatio (None, 28, 28, 64) 256 block_2_Add_1[0][0] \n__________________________________________________________________________________________________\nblock_3_ReLU_1 (Activation) (None, 28, 28, 64) 0 block_3_BN_1[0][0] \n__________________________________________________________________________________________________\nblock_3_conv_1 (Conv2D) (None, 14, 14, 128) 73856 block_3_ReLU_1[0][0] \n__________________________________________________________________________________________________\nblock_3_BN_2 (BatchNormalizatio (None, 14, 14, 128) 512 block_3_conv_1[0][0] \n__________________________________________________________________________________________________\nblock_3_ReLU_2 (Activation) (None, 14, 14, 128) 0 block_3_BN_2[0][0] 
\n__________________________________________________________________________________________________\nconv2d_3 (Conv2D) (None, 14, 14, 128) 8320 block_2_Add_1[0][0] \n__________________________________________________________________________________________________\nblock_3_conv_2 (Conv2D) (None, 14, 14, 128) 147584 block_3_ReLU_2[0][0] \n__________________________________________________________________________________________________\nblock_3_Add_1 (Add) (None, 14, 14, 128) 0 conv2d_3[0][0] \n block_3_conv_2[0][0] \n__________________________________________________________________________________________________\nblock_4_BN_1 (BatchNormalizatio (None, 14, 14, 128) 512 block_3_Add_1[0][0] \n__________________________________________________________________________________________________\nblock_4_ReLU_1 (Activation) (None, 14, 14, 128) 0 block_4_BN_1[0][0] \n__________________________________________________________________________________________________\nblock_4_conv_1 (Conv2D) (None, 7, 7, 256) 295168 block_4_ReLU_1[0][0] \n__________________________________________________________________________________________________\nblock_4_BN_2 (BatchNormalizatio (None, 7, 7, 256) 1024 block_4_conv_1[0][0] \n__________________________________________________________________________________________________\nblock_4_ReLU_2 (Activation) (None, 7, 7, 256) 0 block_4_BN_2[0][0] \n__________________________________________________________________________________________________\nconv2d_4 (Conv2D) (None, 7, 7, 256) 33024 block_3_Add_1[0][0] \n__________________________________________________________________________________________________\nblock_4_conv_2 (Conv2D) (None, 7, 7, 256) 590080 block_4_ReLU_2[0][0] \n__________________________________________________________________________________________________\nblock_4_Add_1 (Add) (None, 7, 7, 256) 0 conv2d_4[0][0] \n block_4_conv_2[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_1 (BatchNor (None, 7, 7, 256) 1024 block_4_Add_1[0][0] \n__________________________________________________________________________________________________\nactivation_1 (Activation) (None, 7, 7, 256) 0 batch_normalization_1[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d_1 (AveragePoo (None, 1, 1, 256) 0 activation_1[0][0] \n__________________________________________________________________________________________________\nflatten_1 (Flatten) (None, 256) 0 average_pooling2d_1[0][0] \n__________________________________________________________________________________________________\ndense_1 (Dense) (None, 256) 65792 flatten_1[0][0] \n__________________________________________________________________________________________________\nenc_merge (Dense) (None, 21) 5397 dense_1[0][0] \n__________________________________________________________________________________________________\nlambda_1 (Lambda) [(None, 10), (None, 0 enc_merge[0][0] \n__________________________________________________________________________________________________\nclass (Activation) (None, 10) 0 lambda_1[0][0] \n__________________________________________________________________________________________________\nz_lat (Activation) (None, 10) 0 lambda_1[0][1] \n__________________________________________________________________________________________________\nD_real (Activation) (None, 1) 0 lambda_1[0][2] 
\n==================================================================================================\nTotal params: 1,291,413\nTrainable params: 1,289,429\nNon-trainable params: 1,984\n__________________________________________________________________________________________________\n" ], [ "trainer.go(x=DL.sx_train,\n y={\n 'class':DL.y_train_oh,\n# 'D_real':RF,\n 'G':DL.sx_train},\n validation_split=0.05,\n verbose=0)", "Epoch G_loss val_G_loss val_class_acc\n0: 86.1352 52.5537 0.097 \n1: 38.0113 42.2609 0.1523 \n2: 32.1898 37.2275 0.1993 \n3: 29.4017 32.4158 0.248 \n4: 27.3468 31.6093 0.2983 \n5: 26.4317 29.0003 0.3647 \n6: 25.3481 28.0716 0.411 \n7: 24.5 26.1463 0.4547 \n8: 23.7742 26.0564 0.504 \n9: 23.2274 23.5676 0.5277 \n10: 22.1996 23.3823 0.5767 \n11: 21.7846 22.1662 0.5837 \n12: 21.3568 21.9129 0.6203 \n13: 20.8236 22.9447 0.623 \n14: 20.4471 21.5126 0.6267 \n15: 19.9928 21.4372 0.6363 \n16: 19.5777 20.0247 0.6527 \n17: 19.2437 19.8281 0.665 \n18: 18.9462 19.3488 0.661 \n19: 18.7253 20.1944 0.6697 \n20: 18.5359 19.5014 0.6887 \n21: 18.2445 19.8302 0.688 \n22: 18.2356 20.3455 0.698 \n23: 17.9324 41.2121 0.4177 \n24: 17.6012 19.1286 0.6953 \n25: 17.3937 19.2833 0.6953 \n26: 17.2721 19.396 0.7237 \n27: 17.0666 19.39 0.7033 \n28: 16.9319 18.4714 0.7083 \n29: 16.6381 34.7501 0.4743 \n30: 16.7333 20.1384 0.7273 \n31: 16.4846 20.2865 0.7047 \n32: 16.4313 19.4395 0.7387 \n33: 16.2427 16.7755 0.7347 \n34: 16.1384 19.7296 0.733 \n35: 16.1617 16.4348 0.7497 \n36: 15.7645 23.1094 0.65 \n37: 15.7763 27.88 0.5887 \n38: 15.5888 21.9117 0.679 \n39: 15.5829 18.342 0.747 \n40: 15.6182 35.784 0.5323 \n41: 15.4899 42.1078 0.4703 \n42: 15.3338 22.6625 0.7023 \n43: 15.3061 17.3387 0.739 \n44: 15.0634 18.0824 0.7487 \n45: 15.0352 19.6317 0.7543 \n" ], [ "hist_df = pd.DataFrame.from_records(trainer.model.history.history)\nhist_df.head()", "_____no_output_____" ], [ "sns.set_context('paper')\nmetrics = ['loss','G_loss','class_acc']\nfig,axs = plt.subplots(nrows=len(metrics),sharex=True,figsize=(5,10))\nfor metric_name,ax in zip(metrics,axs):\n sns.scatterplot(data=hist_df[[metric_name,'val_'+metric_name]],ax=ax)", "_____no_output_____" ], [ "# if not config.dev_mode:\n# trainer.save_model()", "_____no_output_____" ], [ "from keras.models import Model\nfrom keras.layers import Input", "_____no_output_____" ], [ "generator = trainer.G", "_____no_output_____" ], [ "trainer.E.summary()", "__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_image (InputLayer) (None, 56, 56, 1) 0 \n__________________________________________________________________________________________________\nblock_1_conv_0 (Conv2D) (None, 56, 56, 32) 320 input_image[0][0] \n__________________________________________________________________________________________________\nblock_1_BN_0 (BatchNormalizatio (None, 56, 56, 32) 128 block_1_conv_0[0][0] \n__________________________________________________________________________________________________\nblock_1_ReLU_0 (Activation) (None, 56, 56, 32) 0 block_1_BN_0[0][0] \n__________________________________________________________________________________________________\nblock_1_conv_1 (Conv2D) (None, 56, 56, 32) 1056 block_1_ReLU_0[0][0] \n__________________________________________________________________________________________________\nblock_1_BN_2 (BatchNormalizatio (None, 56, 56, 32) 128 
block_1_conv_1[0][0] \n__________________________________________________________________________________________________\nblock_1_ReLU_2 (Activation) (None, 56, 56, 32) 0 block_1_BN_2[0][0] \n__________________________________________________________________________________________________\nconv2d_1 (Conv2D) (None, 56, 56, 32) 64 input_image[0][0] \n__________________________________________________________________________________________________\nblock_1_conv_2 (Conv2D) (None, 56, 56, 32) 9248 block_1_ReLU_2[0][0] \n__________________________________________________________________________________________________\nblock_1_Add_1 (Add) (None, 56, 56, 32) 0 conv2d_1[0][0] \n block_1_conv_2[0][0] \n__________________________________________________________________________________________________\nblock_2_BN_1 (BatchNormalizatio (None, 56, 56, 32) 128 block_1_Add_1[0][0] \n__________________________________________________________________________________________________\nblock_2_ReLU_1 (Activation) (None, 56, 56, 32) 0 block_2_BN_1[0][0] \n__________________________________________________________________________________________________\nblock_2_conv_1 (Conv2D) (None, 28, 28, 64) 18496 block_2_ReLU_1[0][0] \n__________________________________________________________________________________________________\nblock_2_BN_2 (BatchNormalizatio (None, 28, 28, 64) 256 block_2_conv_1[0][0] \n__________________________________________________________________________________________________\nblock_2_ReLU_2 (Activation) (None, 28, 28, 64) 0 block_2_BN_2[0][0] \n__________________________________________________________________________________________________\nconv2d_2 (Conv2D) (None, 28, 28, 64) 2112 block_1_Add_1[0][0] \n__________________________________________________________________________________________________\nblock_2_conv_2 (Conv2D) (None, 28, 28, 64) 36928 block_2_ReLU_2[0][0] \n__________________________________________________________________________________________________\nblock_2_Add_1 (Add) (None, 28, 28, 64) 0 conv2d_2[0][0] \n block_2_conv_2[0][0] \n__________________________________________________________________________________________________\nblock_3_BN_1 (BatchNormalizatio (None, 28, 28, 64) 256 block_2_Add_1[0][0] \n__________________________________________________________________________________________________\nblock_3_ReLU_1 (Activation) (None, 28, 28, 64) 0 block_3_BN_1[0][0] \n__________________________________________________________________________________________________\nblock_3_conv_1 (Conv2D) (None, 14, 14, 128) 73856 block_3_ReLU_1[0][0] \n__________________________________________________________________________________________________\nblock_3_BN_2 (BatchNormalizatio (None, 14, 14, 128) 512 block_3_conv_1[0][0] \n__________________________________________________________________________________________________\nblock_3_ReLU_2 (Activation) (None, 14, 14, 128) 0 block_3_BN_2[0][0] \n__________________________________________________________________________________________________\nconv2d_3 (Conv2D) (None, 14, 14, 128) 8320 block_2_Add_1[0][0] \n__________________________________________________________________________________________________\nblock_3_conv_2 (Conv2D) (None, 14, 14, 128) 147584 block_3_ReLU_2[0][0] \n__________________________________________________________________________________________________\nblock_3_Add_1 (Add) (None, 14, 14, 128) 0 conv2d_3[0][0] \n block_3_conv_2[0][0] 
\n__________________________________________________________________________________________________\nblock_4_BN_1 (BatchNormalizatio (None, 14, 14, 128) 512 block_3_Add_1[0][0] \n__________________________________________________________________________________________________\nblock_4_ReLU_1 (Activation) (None, 14, 14, 128) 0 block_4_BN_1[0][0] \n__________________________________________________________________________________________________\nblock_4_conv_1 (Conv2D) (None, 7, 7, 256) 295168 block_4_ReLU_1[0][0] \n__________________________________________________________________________________________________\nblock_4_BN_2 (BatchNormalizatio (None, 7, 7, 256) 1024 block_4_conv_1[0][0] \n__________________________________________________________________________________________________\nblock_4_ReLU_2 (Activation) (None, 7, 7, 256) 0 block_4_BN_2[0][0] \n__________________________________________________________________________________________________\nconv2d_4 (Conv2D) (None, 7, 7, 256) 33024 block_3_Add_1[0][0] \n__________________________________________________________________________________________________\nblock_4_conv_2 (Conv2D) (None, 7, 7, 256) 590080 block_4_ReLU_2[0][0] \n__________________________________________________________________________________________________\nblock_4_Add_1 (Add) (None, 7, 7, 256) 0 conv2d_4[0][0] \n block_4_conv_2[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_1 (BatchNor (None, 7, 7, 256) 1024 block_4_Add_1[0][0] \n__________________________________________________________________________________________________\nactivation_1 (Activation) (None, 7, 7, 256) 0 batch_normalization_1[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d_1 (AveragePoo (None, 1, 1, 256) 0 activation_1[0][0] \n__________________________________________________________________________________________________\nflatten_1 (Flatten) (None, 256) 0 average_pooling2d_1[0][0] \n__________________________________________________________________________________________________\ndense_1 (Dense) (None, 256) 65792 flatten_1[0][0] \n__________________________________________________________________________________________________\nenc_merge (Dense) (None, 21) 5397 dense_1[0][0] \n__________________________________________________________________________________________________\nlambda_1 (Lambda) [(None, 10), (None, 0 enc_merge[0][0] \n__________________________________________________________________________________________________\nclass (Activation) (None, 10) 0 lambda_1[0][0] \n__________________________________________________________________________________________________\nz_lat (Activation) (None, 10) 0 lambda_1[0][1] \n__________________________________________________________________________________________________\nD_real (Activation) (None, 1) 0 lambda_1[0][2] \n==================================================================================================\nTotal params: 1,291,413\nTrainable params: 1,289,429\nNon-trainable params: 1,984\n__________________________________________________________________________________________________\n" ], [ "z_encoder = Model(trainer.input,trainer.z_lat)\nclassifier = Model(trainer.input,trainer.y_class)\n# y_lat_encoder = Model(trainer.E.input,trainer.y_lat)\n# decoder_inp = Input(shape=(config.y_dim+config.z_dim,))\n# dec_layers = trainer.model.layers[-(1+(5*2)):]\n# 
print(dec_layers)\n# _gen_x = dec_layers[0](decoder_inp)\n# l = dec_layers[1]\n# isinstance(l,keras.layers.core.Reshape)\n# F = None\n# for l in dec_layers[1:]:\n# print(type(l))\n \n# if isinstance(l,keras.layers.merge.Add):\n# _gen_x = l([F,_gen_x])\n# else:\n# _gen_x = l(_gen_x)\n \n# if isinstance(l,keras.layers.convolutional.Conv2DTranspose):\n# if l.kernel_size==(1,1):\n# F = _gen_x\n \n# # generator = Model(decoder_inp,_gen_x)", "_____no_output_____" ], [ "classifier.summary()", "__________________________________________________________________________________________________\nLayer (type) Output Shape Param # Connected to \n==================================================================================================\ninput_image (InputLayer) (None, 56, 56, 1) 0 \n__________________________________________________________________________________________________\nblock_1_conv_0 (Conv2D) (None, 56, 56, 32) 320 input_image[0][0] \n__________________________________________________________________________________________________\nblock_1_BN_0 (BatchNormalizatio (None, 56, 56, 32) 128 block_1_conv_0[0][0] \n__________________________________________________________________________________________________\nblock_1_ReLU_0 (Activation) (None, 56, 56, 32) 0 block_1_BN_0[0][0] \n__________________________________________________________________________________________________\nblock_1_conv_1 (Conv2D) (None, 56, 56, 32) 1056 block_1_ReLU_0[0][0] \n__________________________________________________________________________________________________\nblock_1_BN_2 (BatchNormalizatio (None, 56, 56, 32) 128 block_1_conv_1[0][0] \n__________________________________________________________________________________________________\nblock_1_ReLU_2 (Activation) (None, 56, 56, 32) 0 block_1_BN_2[0][0] \n__________________________________________________________________________________________________\nconv2d_1 (Conv2D) (None, 56, 56, 32) 64 input_image[0][0] \n__________________________________________________________________________________________________\nblock_1_conv_2 (Conv2D) (None, 56, 56, 32) 9248 block_1_ReLU_2[0][0] \n__________________________________________________________________________________________________\nblock_1_Add_1 (Add) (None, 56, 56, 32) 0 conv2d_1[0][0] \n block_1_conv_2[0][0] \n__________________________________________________________________________________________________\nblock_2_BN_1 (BatchNormalizatio (None, 56, 56, 32) 128 block_1_Add_1[0][0] \n__________________________________________________________________________________________________\nblock_2_ReLU_1 (Activation) (None, 56, 56, 32) 0 block_2_BN_1[0][0] \n__________________________________________________________________________________________________\nblock_2_conv_1 (Conv2D) (None, 28, 28, 64) 18496 block_2_ReLU_1[0][0] \n__________________________________________________________________________________________________\nblock_2_BN_2 (BatchNormalizatio (None, 28, 28, 64) 256 block_2_conv_1[0][0] \n__________________________________________________________________________________________________\nblock_2_ReLU_2 (Activation) (None, 28, 28, 64) 0 block_2_BN_2[0][0] \n__________________________________________________________________________________________________\nconv2d_2 (Conv2D) (None, 28, 28, 64) 2112 block_1_Add_1[0][0] \n__________________________________________________________________________________________________\nblock_2_conv_2 (Conv2D) (None, 28, 28, 64) 36928 block_2_ReLU_2[0][0] 
\n__________________________________________________________________________________________________\nblock_2_Add_1 (Add) (None, 28, 28, 64) 0 conv2d_2[0][0] \n block_2_conv_2[0][0] \n__________________________________________________________________________________________________\nblock_3_BN_1 (BatchNormalizatio (None, 28, 28, 64) 256 block_2_Add_1[0][0] \n__________________________________________________________________________________________________\nblock_3_ReLU_1 (Activation) (None, 28, 28, 64) 0 block_3_BN_1[0][0] \n__________________________________________________________________________________________________\nblock_3_conv_1 (Conv2D) (None, 14, 14, 128) 73856 block_3_ReLU_1[0][0] \n__________________________________________________________________________________________________\nblock_3_BN_2 (BatchNormalizatio (None, 14, 14, 128) 512 block_3_conv_1[0][0] \n__________________________________________________________________________________________________\nblock_3_ReLU_2 (Activation) (None, 14, 14, 128) 0 block_3_BN_2[0][0] \n__________________________________________________________________________________________________\nconv2d_3 (Conv2D) (None, 14, 14, 128) 8320 block_2_Add_1[0][0] \n__________________________________________________________________________________________________\nblock_3_conv_2 (Conv2D) (None, 14, 14, 128) 147584 block_3_ReLU_2[0][0] \n__________________________________________________________________________________________________\nblock_3_Add_1 (Add) (None, 14, 14, 128) 0 conv2d_3[0][0] \n block_3_conv_2[0][0] \n__________________________________________________________________________________________________\nblock_4_BN_1 (BatchNormalizatio (None, 14, 14, 128) 512 block_3_Add_1[0][0] \n__________________________________________________________________________________________________\nblock_4_ReLU_1 (Activation) (None, 14, 14, 128) 0 block_4_BN_1[0][0] \n__________________________________________________________________________________________________\nblock_4_conv_1 (Conv2D) (None, 7, 7, 256) 295168 block_4_ReLU_1[0][0] \n__________________________________________________________________________________________________\nblock_4_BN_2 (BatchNormalizatio (None, 7, 7, 256) 1024 block_4_conv_1[0][0] \n__________________________________________________________________________________________________\nblock_4_ReLU_2 (Activation) (None, 7, 7, 256) 0 block_4_BN_2[0][0] \n__________________________________________________________________________________________________\nconv2d_4 (Conv2D) (None, 7, 7, 256) 33024 block_3_Add_1[0][0] \n__________________________________________________________________________________________________\nblock_4_conv_2 (Conv2D) (None, 7, 7, 256) 590080 block_4_ReLU_2[0][0] \n__________________________________________________________________________________________________\nblock_4_Add_1 (Add) (None, 7, 7, 256) 0 conv2d_4[0][0] \n block_4_conv_2[0][0] \n__________________________________________________________________________________________________\nbatch_normalization_1 (BatchNor (None, 7, 7, 256) 1024 block_4_Add_1[0][0] \n__________________________________________________________________________________________________\nactivation_1 (Activation) (None, 7, 7, 256) 0 batch_normalization_1[0][0] \n__________________________________________________________________________________________________\naverage_pooling2d_1 (AveragePoo (None, 1, 1, 256) 0 activation_1[0][0] 
\n__________________________________________________________________________________________________\nflatten_1 (Flatten) (None, 256) 0 average_pooling2d_1[0][0] \n__________________________________________________________________________________________________\ndense_1 (Dense) (None, 256) 65792 flatten_1[0][0] \n__________________________________________________________________________________________________\nenc_merge (Dense) (None, 21) 5397 dense_1[0][0] \n__________________________________________________________________________________________________\nlambda_1 (Lambda) [(None, 10), (None, 0 enc_merge[0][0] \n__________________________________________________________________________________________________\nclass (Activation) (None, 10) 0 lambda_1[0][0] \n==================================================================================================\nTotal params: 1,291,413\nTrainable params: 1,289,429\nNon-trainable params: 1,984\n__________________________________________________________________________________________________\n" ], [ "DL.y_test_oh.shape", "_____no_output_____" ], [ "classifier.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['acc'])\nclassifier.evaluate(DL.sx_test,DL.y_test_oh,batch_size=config.batch_size)", "10000/10000 [==============================] - 2s 197us/step\n" ], [ "z_enc = z_encoder.predict(DL.sx_test,batch_size=config.batch_size)\n# y_lat = y_lat_encoder.predict(DL.sx_test,batch_size=config.batch_size)\ny_lat = classifier.predict(DL.sx_test,batch_size=config.batch_size)", "_____no_output_____" ], [ "_lat_vec = np.concatenate([y_lat,z_enc],axis=1)\n_lat_vec.shape", "_____no_output_____" ], [ "z_enc_mu = np.mean(z_enc,axis=0)\nz_enc_cov = np.cov(z_enc,rowvar=False)", "_____no_output_____" ], [ "np.random.multivariate_normal(z_enc_mu,z_enc_cov,size=50).shape", "_____no_output_____" ], [ "regen = generator.predict(_lat_vec,batch_size=config.batch_size)", "_____no_output_____" ], [ "rand_im = np.random.randint(0,10000)\nplt.imshow(regen[rand_im].reshape(56,56),cmap='gray')", "_____no_output_____" ], [ "_lat_vec[rand_im]", "_____no_output_____" ], [ "DL2 = Shifted_Data_Loader(dataset=config.dataset,flatten=False,\n rotation=None,\n translation=translation_amt,\n )", "input_shape: (56, 56, 1)\ndataset: fashion_mnist\nscale: 2\ntx_max: 0.8\nrot_max: None\nloading fashion_mnist...\nsx_train: (60000, 56, 56, 1)\nmaking training data...\nmaking testing data...\n" ], [ "enc_dec_samples(DL.x_test,DL.sx_test,z_enc,y_lat,generator)", "_____no_output_____" ], [ "z_enc2 = z_encoder.predict(DL2.sx_test,batch_size=config.batch_size)\ny_lat2 = classifier.predict(DL2.sx_test,batch_size=config.batch_size)\n_lat_vec2 = np.concatenate([y_lat2,z_enc2],axis=1)\nregen2 = generator.predict(_lat_vec2,batch_size=config.batch_size)", "_____no_output_____" ], [ "from src.plot import remove_axes,remove_labels\nfrom src.utils import gen_trajectory", "_____no_output_____" ], [ "examples = 5\nrand_im = np.random.randint(0,10000,size=examples)\nfix,axs = plt.subplots(examples,11,figsize=(8,4))\n_lat_s = []\nregen_s = []\nout = gen_trajectory(z_enc[rand_im],z_enc2[rand_im],delta=.25)\nout_y = gen_trajectory(y_lat[rand_im],y_lat2[rand_im],delta=.25)\n\nfor z,y in zip(out,out_y):\n _lat = np.concatenate([y,z],axis=1)\n _lat_s.append(_lat)\n regen_s.append(generator.predict(_lat,batch_size=config.batch_size))\n\ni=0\nfor axr,idx in zip(axs,rand_im):\n axr[0].imshow(DL.x_test[idx].reshape(28,28),cmap='gray')\n axr[1].imshow(DL.sx_test[idx].reshape(56,56),cmap='gray')\n 
axr[2].imshow(regen[idx].reshape(56,56),cmap='gray')\n for j,a in enumerate(axr[3:-3]):\n a.imshow(regen_s[j][i,:].reshape(56,56),cmap='gray')\n# a.imshow(s.reshape(56,56),cmap='gray')\n axr[-3].imshow(regen2[idx].reshape(56,56),cmap='gray')\n axr[-2].imshow(DL2.sx_test[idx].reshape(56,56),cmap='gray')\n axr[-1].imshow(DL2.x_test[idx].reshape(28,28),cmap='gray')\n for a in axr:\n remove_axes(a)\n remove_labels(a)\n i+=1\n# plt.imshow(regen[rand_im].reshape(56,56),cmap='gray')", "_____no_output_____" ], [ "fix.savefig('../../updates/2019-02-05/assets/img/translocate_{}.png'.format(translation_amt))", "_____no_output_____" ], [ "dxs = DL.dx[1]-14\ndys = DL.dy[1]-14", "_____no_output_____" ], [ "from sklearn.preprocessing import MinMaxScaler\n\nfeat_range = (0,50)\nz_enc_scaled = [MinMaxScaler(feat_range).fit_transform(z_enc[:,i].reshape(-1,1)).tolist() for i in np.arange(config.z_dim+config.y_dim)]\nz_enc_scaled = np.squeeze(np.array(z_enc_scaled,dtype=int))", "_____no_output_____" ], [ "from collections import Counter\nimport dit\nfrom dit import Distribution\n\ndef mutual_information(X,Y):\n XY_c = Counter(zip(X,Y))\n XY_pmf = {k:v/float(sum(XY_c.values())) for k,v in XY_c.items()}\n XY_jdist = Distribution(XY_pmf)\n \n return dit.shannon.mutual_information(XY_jdist,[0],[1])", "_____no_output_____" ], [ "z_dx_I = [mutual_information(z_enc_scaled[i],dxs.astype(int)+14) for i in np.arange(25)]", "_____no_output_____" ], [ "z_dy_I = [mutual_information(z_enc_scaled[i],dys.astype(int)+14) for i in np.arange(25)]", "_____no_output_____" ], [ "z_class_I = [mutual_information(z_enc_scaled[i],DL.y_test) for i in np.arange(25)]", "_____no_output_____" ], [ "z_I_df = pd.DataFrame.from_records({'class':z_class_I,'dy':z_dy_I,'dx':z_dx_I})\nz_I_df['class'] = z_I_df['class'].values.round(decimals=1)", "_____no_output_____" ], [ "config.translation_amt = translation_amt\nconfig.translation_amt", "_____no_output_____" ], [ "dir_path = '../data/xcov_importance/dist_{}/'.format(translation_amt)\n\nz_I_df.to_pickle('../data/xcov_importance/dist_{}/z_mutual_info.pk'.format(translation_amt))\nnp.save('../data/xcov_importance/dist_{}/dxs'.format(translation_amt), DL.dx[1]-14)\nnp.save('../data/xcov_importance/dist_{}/dys'.format(translation_amt), DL.dy[1]-14)\nnp.save('../data/xcov_importance/dist_{}/z_enc'.format(translation_amt), z_enc)\n\nhist_df.to_pickle(os.path.join(dir_path,'training_hist.df'))\n\nwith open(os.path.join(dir_path,'config.json'), 'w') as fp:\n json.dump(vars(config), fp)", "_____no_output_____" ], [ "sns.set_context('talk')\nfig,ax = plt.subplots(1,1,figsize=(6,5))\nax.set_ylim(0,0.9)\nax.set_xlim(0,0.9)\npoints = plt.scatter(x=z_I_df['dx'],y=z_I_df['dy'],c=z_I_df['class'],cmap='plasma')\nplt.colorbar(points)", "_____no_output_____" ], [ "fig,ax = plt.subplots(1,1,figsize=(5,5))\nax.scatter(z_dx_I,z_dy_I)\n# ax.set_ylim(0,0.8)\n# ax.set_xlim(0,0.8)", "_____no_output_____" ], [ "plt.scatter(np.arange(25),sorted(z_class_I,reverse=True))\n# plt.scatter(np.arange(25),z_dx_I)\n# plt.scatter(np.arange(25),z_dy_I)", "_____no_output_____" ], [ "from src.metrics import var_expl,norm_var_expl\nfrom collections import Counter\n\ndtheta = DL.dtheta[1]\nfve_dx = norm_var_expl(features=z_enc,cond=dxs,bins=21)\nfve_dy = norm_var_expl(features=z_enc,cond=dys,bins=21)\nfve_class = norm_var_expl(features=z_enc, cond=DL.y_test, bins=21)\n# fve_dt = norm_var_expl(features=z_enc,cond=dtheta,bins=21)", "_____no_output_____" ], [ "# fve_dx_norm = (dxs.var()-fve_dx)/dxs.var()\n# fve_dy_norm = 
(dys.var()-fve_dy)/dys.var()\n# fve_dth_norm = (dtheta.var()-fve_dt)/dtheta.var()\nfve_dx_norm = fve_dx\nfve_dy_norm = fve_dy", "_____no_output_____" ], [ "import seaborn as sns\nsns.set_context('talk')", "_____no_output_____" ], [ "fve_dx_norm.shape\n# np.save(os.path.join(config.model_dir,'fve_dx_norm'),fve_dx_norm)", "_____no_output_____" ], [ "fig,ax = plt.subplots(1,1,figsize=(5,5))\nplt.scatter(fve_dx_norm.mean(axis=0),fve_dy_norm.mean(axis=0))\nplt.xlabel('fve_dx')\nplt.ylabel('fve_dy')\nplt.tight_layout()\n# plt.savefig(os.path.join(config.model_dir,'fve_dx.png'))\n# plt.ylim(-0.125,0.25)\nxdim = np.argmax(fve_dx_norm.mean(axis=0))", "_____no_output_____" ], [ "fve_dy_norm.mean(axis=0)\n# np.save(os.path.join(config.model_dir,'fve_dy_norm'),fve_dy_norm)", "_____no_output_____" ], [ "plt.scatter(np.arange(config.z_dim),fve_dy_norm.mean(axis=0))\nplt.xlabel('Z_n')\nplt.ylabel('fve_dy')\nplt.tight_layout()\n# plt.savefig(os.path.join(config.model_dir,'fve_dy.png'))\n# plt.ylim(-0.125,0.25)\nydim = np.argmax(fve_dy_norm.mean(axis=0))", "_____no_output_____" ], [ "plt.scatter(np.arange(config.z_dim),fve_class.mean(axis=0))\nplt.xlabel('Z_n')\nplt.ylabel('fve_class')\n# plt.ylim(0.0,0.5)\nnp.argmax(fve_class.mean(axis=0))", "_____no_output_____" ], [ "from src.plot import Z_color_scatter\nZ_color_scatter(z_enc,[xdim,ydim],dxs)", "_____no_output_____" ], [ "Z_color_scatter(z_enc,[xdim,ydim],dys)", "_____no_output_____" ], [ "Z_color_scatter(z_enc,[7,18],dtheta)", "_____no_output_____" ], [ "import matplotlib.pyplot as plt", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9564912c2961d25201e2b380dcc2c6f279bdd3
17,006
ipynb
Jupyter Notebook
Lab3-diffusion-MRI/freesurfer-to-native-space.ipynb
computational-medicine/BMED360-2021
2c6052b9affedf1fee23c89d23941bf08eb2614c
[ "MIT" ]
2
2021-04-19T23:22:17.000Z
2021-04-20T14:04:58.000Z
Lab3-diffusion-MRI/freesurfer-to-native-space.ipynb
computational-medicine/BMED360-2021
2c6052b9affedf1fee23c89d23941bf08eb2614c
[ "MIT" ]
null
null
null
Lab3-diffusion-MRI/freesurfer-to-native-space.ipynb
computational-medicine/BMED360-2021
2c6052b9affedf1fee23c89d23941bf08eb2614c
[ "MIT" ]
null
null
null
33.214844
166
0.557509
[ [ [ "# Freesurfer space to native space using `mri_vol2vol`\n\nBMED360-2021: `freesurfer-to-native-space.ipynb`", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport os\nimport pathlib\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport nibabel as nib\nfrom os.path import expanduser, join, basename, split\nimport sys\nsys.path.append('.') # path to utils.py\nimport utils\nimport time\nimport shutil # copy files\ncwd = os.getcwd()", "_____no_output_____" ] ], [ [ "### We will use the `fs711_subjects` Freesurfer tree previously run for the `bids_bg_bmed360` sample", "_____no_output_____" ] ], [ [ "fs711_home = '/usr/local/freesurfer'\nworking_dir = join(cwd, 'data')\nbids_dir = '%s/bids_bg_bmed360' % (working_dir)\nfs711_subj = '%s/fs711_subjects' % (working_dir)\ndmri_res = '%s/dmri_results' % (working_dir)", "_____no_output_____" ], [ "if not os.path.exists(dmri_res):\n os.makedirs(dmri_res)\nelse:\n print('subdirectory dmri_results already exists')", "subdirectory dmri_results already exists\n" ] ], [ [ "The Freesurfer environment:\n```\n%%bash -s '/usr/local/freesurfer' './data/fs711_subjects'\n\necho $1\necho $2\n\nFREESURFER_HOME=${1}; export FREESURFER_HOME\nPATH=${FREESURFER_HOME}/bin:${PATH}; export PATH\nSUBJECTS_DIR=${2}; export SUBJECTS_DIR\nFSLDIR=/usr/local/fsl; export FSLDIR\nPATH=${FSLDIR}/bin:${PATH}; export PATH\n. ${FSLDIR}/etc/fslconf/fsl.sh\nsource ${FREESURFER_HOME}/SetUpFreeSurfer.sh\n```", "_____no_output_____" ], [ "## How to Convert from FreeSurfer Space Back to Native Anatomical Space\nSee: https://surfer.nmr.mgh.harvard.edu/fswiki/FsAnat-to-NativeAnat\n\nQuestion: I have successfully run a subject's data through [FreeSurfer](https://surfer.nmr.mgh.harvard.edu/fswiki/FreeSurfer). \nFreeSurfer creates volumes in 1 mm$^3$, 256$^3$ space, but I want the FreeSurfer results in the space of my original anatomical. 
How do I do this?<br>\n\nThe exact command you use depends on what you want to convert, an image (like brain.mgz) or a segmentation (like aseg.mgz).", "_____no_output_____" ], [ "For an image:", "_____no_output_____" ] ], [ [ "# cd $SUBJECTS_DIR/<subjid>/mri\n# mri_vol2vol --mov brain.mgz --targ rawavg.mgz --regheader --o brain-in-rawavg.mgz --no-save-reg", "_____no_output_____" ] ], [ [ "For a segmentation (aseg.mgz, aparc+aseg.mgz, wmparc.mgz, etc):", "_____no_output_____" ] ], [ [ "# cd $SUBJECTS_DIR/<subjid>/mri\n# mri_label2vol --seg aseg.mgz --temp rawavg.mgz --o aseg-in-rawavg.mgz --regheader aseg.mgz", "_____no_output_____" ] ], [ [ "Map the surface to the native space:", "_____no_output_____" ] ], [ [ "# mri_surf2surf --sval-xyz pial --reg register.native.dat rawavg.mgz --tval lh.pial.native --tval-xyz rawavg.mgz --hemi lh --s subjectname", "_____no_output_____" ] ], [ [ "The output will be stored in $SUBJECTS_DIR/subjectname/surf/lh.pial.native and can be viewed with freeview rawavg.mgz -f ../surf/lh.pial.native<br>\nTo verify that this worked, run", "_____no_output_____" ] ], [ [ "# freeview -v rawavg.mgz -f lh.pial.native", "_____no_output_____" ], [ "MRI_VOL2VOL = '%s/bin/mri_vol2vol' % (fs711_home)\nprint(os.popen(MRI_VOL2VOL).read())", "\nmri_vol2vol\n\n --mov movvol : input (or output template with --inv)\n --targ targvol : output template (or input with --inv)\n --o outvol : output volume\n --disp dispvol : displacement volume\n --downsample N1 N2 N3 : downsample input volume (do not include a targ or regsitration)\n sets --fill-average, --fill-upsample 2, and --regheader\n\n --reg register.dat : tkRAS-to-tkRAS matrix (tkregister2 format)\n --lta register.lta : Linear Transform Array (usually only 1 transform)\n --lta-inv register.lta : LTA, invert (may not be the same as --lta --inv with --fstal)\n --fsl register.fsl : fslRAS-to-fslRAS matrix (FSL format)\n --xfm register.xfm : ScannerRAS-to-ScannerRAS matrix (MNI format)\n --regheader : ScannerRAS-to-ScannerRAS matrix = identity\n --mni152reg : target MNI152 space (need FSL installed)\n --s subject : set matrix = identity and use subject for any templates\n\n --inv : sample from targ to mov\n\n --tal : map to a sub FOV of MNI305 (with --reg only)\n --talres resolution : set voxel size 1mm or 2mm (def is 1)\n --talxfm xfmfile : default is talairach.xfm (looks in mri/transforms)\n\n --m3z morph : non-linear morph encoded in the m3z format\n --noDefM3zPath : flag indicating that the code should not be looking for \n the non-linear m3z morph in the default location (subj/mri/transforms), but should use \n the morph name as is\n --inv-morph : compute and use the inverse of the m3z morph\n\n --fstarg <vol> : optionally use vol from subject in --reg as target. 
default is orig.mgz \n --crop scale : crop and change voxel size\n --slice-crop sS sE : crop output slices to be within sS and sE\n --slice-reverse : reverse order of slices, update vox2ras\n --slice-bias alpha : apply half-cosine bias field\n\n --trilin : trilinear interpolation (default)\n --nearest : nearest neighbor interpolation\n --cubic : cubic B-Spline interpolation\n --interp interptype : interpolation cubic, trilin, nearest (def is trilin)\n --fill-average : compute mean of all source voxels in a given target voxel\n --fill-conserve : compute sum of all source voxels in a given target voxel\n --fill-upsample USF : source upsampling factor for --fill-xxx (default is 2)\n\n --mul mulval : multiply output by mulval\n\n --precision precisionid : output precision (def is float)\n --keep-precision : set output precision to that of input\n --kernel : save the trilinear interpolation kernel instead\n\n --gcam mov srclta gcam dstlta vsm interp out\n srclta, gcam, or vsm can be set to 0 to indicate identity\n direction is automatically determined from srclta and dstlta\n interp 0=nearest, 1=trilin, 5=cubicbspline\n\n --spm-warp mov movlta warp interp output\n mov is the input to be mapped \n movlta maps mov to the vbm input space (use 0 to ignore)\n if movlta=0, then input is anything that shares a RAS space with the VBM input\n warp is typically y_rinput.nii\n interp 0=nearest, 1=trilin\n\n --no-resample : do not resample, just change vox2ras matrix\n\n --rot Ax Ay Az : rotation angles (deg) to apply to reg matrix\n --trans Tx Ty Tz : translation (mm) to apply to reg matrix\n --shear Sxy Sxz Syz : xz is in-plane\n --reg-final regfinal.dat : final reg after rot and trans (but not inv)\n\n --synth : replace input with white gaussian noise\n --seed seed : seed for synth (def is to set from time of day)\n\n --save-reg : write out output volume registration matrix\n\n --help : go ahead, make my day\n --debug\n --version\n\n\n" ], [ "def my_mri_vol2vol(subj_dir, sub, ses, inp_image, targ_image, targ_name, out_dir, interptype):\n \"\"\"\n Ex. 
\n cd $SUBJECTS_DIR/<subjid>/mri\n mri_vol2vol --mov brain.mgz --targ rawavg.mgz --regheader --o brain-in-rawavg.mgz --no-save-reg\n --interp interptype : interpolation cubic, trilin, nearest (def is trilin)\n \"\"\"\n \n fs_mri = join('%s' % (subj_dir), 'sub_%d_tp%d/mri' % (sub, ses)) \n cmd = [\n MRI_VOL2VOL,\n '--mov', '%s/%s.mgz' % (fs_mri, inp_image),\n '--targ', '%s' % (targ_image),\n '--regheader', \n '--interp', '%s' % (interptype),\n '--o', '%s/sub_%d_tp%d_%s_in_%s.nii.gz' % (out_dir, sub, ses, inp_image, targ_name),\n '--no-save-reg']\n # ' 2>', error_output_log,'>', output_log] \n cmd_str = \" \".join(cmd)\n #print('cmd_str = \\n%s\\n' % cmd_str)\n \n # EXECUTE\n os.system(cmd_str)", "_____no_output_____" ] ], [ [ "### Testing the native space conversion on one subject (sub_102_tp1) \n**using the `sub-102_ses-1_T1w.nii.gz` in the `bids_bg_bmed360` tree as target image**", "_____no_output_____" ] ], [ [ "subj_dir = fs711_subj\nout_dir = dmri_res\n\nsub = 102\nses = 1\n\ntarg_image = '%s/sub-%d/ses-%d/anat/sub-%d_ses-%d_T1w.nii.gz' % (bids_dir, sub, ses, sub, ses)\ntarg_name = 'native_space'", "_____no_output_____" ] ], [ [ "**Use the `my_mri_vol2vol()`function on different source images and masks using approriate interpolation ('trilinear' and 'nearest neighbour')**", "_____no_output_____" ] ], [ [ "%%time\n\nshutil.copy2(targ_image,out_dir) # copy the original anatomy file in bids tree to out_dir\n\ninterptype = 'trilin'\ninp_image = 'orig'\nmy_mri_vol2vol(subj_dir, sub, ses, inp_image, targ_image, targ_name, out_dir, interptype)\n\ninterptype = 'trilin'\ninp_image = 'brain'\nmy_mri_vol2vol(subj_dir, sub, ses, inp_image, targ_image, targ_name, out_dir, interptype)\n\ninterptype = 'nearest'\ninp_image = 'ribbon'\nmy_mri_vol2vol(subj_dir, sub, ses, inp_image, targ_image, targ_name, out_dir, interptype)\n\ninterptype = 'nearest'\ninp_image = 'aseg'\nmy_mri_vol2vol(subj_dir, sub, ses, inp_image, targ_image, targ_name, out_dir, interptype)\n\ninterptype = 'nearest'\ninp_image = 'wmparc'\nmy_mri_vol2vol(subj_dir, sub, ses, inp_image, targ_image, targ_name, out_dir, interptype)\n\ninterptype = 'nearest'\ninp_image = 'aparc+aseg'\nmy_mri_vol2vol(subj_dir, sub, ses, inp_image, targ_image, targ_name, out_dir, interptype)", "CPU times: user 4.59 ms, sys: 8.94 ms, total: 13.5 ms\nWall time: 8.6 s\n" ] ], [ [ "#### Run the native space conversion on all subjects and sessions using the `_T1_biascorr_brain.nii.gz` image obtained from `03-fsl-anat.ipynb` as target image.", "_____no_output_____" ] ], [ [ "%%time\nsubj_dir = fs711_subj\nbids_dir = bids_dir\nout_dir = dmri_res\ntarg_name = 'native_space'\n\nfor sub in [102, 103, 111, 123]:\n for ses in [1, 2]:\n \n print(f'Computing sub:{sub} ses:{ses}')\n \n targ_image = join(bids_dir,'sub-%d/ses-%d/anat/sub-%d_ses-%d_T1w.nii.gz' % (sub, ses, sub, ses))\n shutil.copy2(targ_image,out_dir) # copy the original anatomy file in bids tree to out_dir\n\n inp_image = 'orig'\n interptype = 'trilin'\n my_mri_vol2vol(subj_dir, sub, ses, inp_image, targ_image, targ_name, out_dir, interptype)\n \n inp_image = 'brain'\n interptype = 'trilin'\n my_mri_vol2vol(subj_dir, sub, ses, inp_image, targ_image, targ_name, out_dir, interptype)\n\n inp_image = 'brainmask'\n interptype = 'nearest'\n my_mri_vol2vol(subj_dir, sub, ses, inp_image, targ_image, targ_name, out_dir, interptype)\n \n inp_image = 'ribbon'\n interptype = 'nearest'\n my_mri_vol2vol(subj_dir, sub, ses, inp_image, targ_image, targ_name, out_dir, interptype)\n\n inp_image = 'aseg'\n interptype = 
'nearest'\n        my_mri_vol2vol(subj_dir, sub, ses, inp_image, targ_image, targ_name, out_dir, interptype)\n\n        # Segmentations and parcellations use nearest-neighbour interpolation so label values stay intact\n        inp_image = 'wmparc'\n        interptype = 'nearest'\n        my_mri_vol2vol(subj_dir, sub, ses, inp_image, targ_image, targ_name, out_dir, interptype)\n\n        inp_image = 'aparc+aseg'\n        interptype = 'nearest'\n        my_mri_vol2vol(subj_dir, sub, ses, inp_image, targ_image, targ_name, out_dir, interptype)", "Computing sub:102 ses:1\nComputing sub:102 ses:2\nComputing sub:103 ses:1\nComputing sub:103 ses:2\nComputing sub:111 ses:1\nComputing sub:111 ses:2\nComputing sub:123 ses:1\nComputing sub:123 ses:2\nCPU times: user 10 ms, sys: 51.9 ms, total: 61.9 ms\nWall time: 1min 15s\n" ] ]
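, [ [ "As a quick sanity check (a sketch reusing the loop variables left over from the cell above, so it inspects the last subject/session converted), the resampled volumes should now share the native T1's shape and affine:", "_____no_output_____" ] ], [ [ "# Compare the native-space target with one of the resampled segmentations\nref = nib.load(targ_image)\nres = nib.load('%s/sub_%d_tp%d_aseg_in_%s.nii.gz' % (out_dir, sub, ses, targ_name))\nprint(ref.shape, res.shape)\nprint(np.allclose(ref.affine, res.affine))", "_____no_output_____" ] ] ]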
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb95880c1e9e2fe2d470c44ad00dbcf2b14ac357
216,901
ipynb
Jupyter Notebook
.ipynb_checkpoints/tellurium-checkpoint.ipynb
willisdc/tellurium-nanohub-base
31cbd72c7f47d4c5112562a7c4f02f85af354d7f
[ "BSD-3-Clause" ]
null
null
null
.ipynb_checkpoints/tellurium-checkpoint.ipynb
willisdc/tellurium-nanohub-base
31cbd72c7f47d4c5112562a7c4f02f85af354d7f
[ "BSD-3-Clause" ]
null
null
null
.ipynb_checkpoints/tellurium-checkpoint.ipynb
willisdc/tellurium-nanohub-base
31cbd72c7f47d4c5112562a7c4f02f85af354d7f
[ "BSD-3-Clause" ]
null
null
null
194.530045
78,280
0.89077
[ [ [ "# Cdc2 Cyclin Model\nDrew Willis\n\nSource:</br>\nTyson, J. J. “Modeling the Cell Division Cycle: Cdc2 and Cyclin Interactions.” \nProceedings of the National Academy of Sciences, vol. 88, no. 16, 1991, pp. 7328–7332., doi:10.1073/pnas.88.16.7328. https://www.pnas.org/content/pnas/88/16/7328.full.pdf\n\n\nThis is a recreation of Tyson's Cdc2 and Cyclin Interactions model, and includes extra tools to explore other aspects of the model.", "_____no_output_____" ] ], [ [ "import tellurium as te\nimport numpy as np\nimport ipywidgets as widgets\nimport matplotlib.pyplot as plt\nfrom ipywidgets import *\nfrom IPython.display import display, update_display", "_____no_output_____" ] ], [ [ "## Background\n(still need description here, recommend reading the abstract (bold text) here: https://www.pnas.org/content/pnas/88/16/7328.full.pdf)\n\nThe cell cycle is composed of cell growth and mitosis. These processes have been shown to operate differently in relation to each other. We look at the cell cycle as two processes:\n* Cell Growth cycle\n* Cell Division cycle\n\nThe division cycle is determined by an enzyme, the maturation promoting factor (MPF). It is formed by cdc2 and cyclin in the cell and is autocatalytic. The MPF enzyme is necessary for mitotic processes to occur. Once the cell reaches anaphase, MPF is degraded and the cycle repeats.\n\nMPF activation can depend on cell growth, cyclin levels in the cell, or other enzymes, depending on the cell type and environment.\n\n\n![cyclediagram-2.png](attachment:cyclediagram-2.png)\n(Tyson)\n\nIn the model, this process is represented by 9 steps:\n1. cyclin is created\n2. cyclin can be unstable and be destroyed\n3. cyclin is phosphorylated and forms heterodimer with \n4. cdc2-P to form preMPF\n5. preMPF is dephosphorylated to form active MPF\n6. MPF activation can be opposed\n7. Nuclear is division triggered, and active MPF is destroyed, releasing phosphorylated cyclin\n8. Phosphorylated cyclin is destroyed\n9. cdc2 is phosphorylated\n10. cdc2 can be reversed\n\nThe original paper uses a series of differential equations, but I have chosen to represent the model in arrow equation format that is functionally the same.\n#### Model equations\n1. C2 -> CP ; k8*P*C2\n2. CP -> C2 ; k9*CP\n3. CP -> pM ; k3*CP*Y\n4. pM -> M ; pM * (k4p + k4*(M/CT)^2)\n5. M -> pM ; k5*P*M\n6. M -> C2 ; k6*M\n7. -> Y ; k1*aa\n8. Y -> ; k2*Y + k3*CP*Y\n9. -> YP ; k6*M\n10. YP -> ; k7*YP\n\n(will clean up these equations with better formatting for readability)\n##### Variable descriptions\n* aa : amino acids \n* C2 : cdc2\n* CP : cdc2-P\n* pM : P-cyclin-cdc2-P / preMPF\n* M : P-cyclin-cdc2 / active MPF\n* Y : cyclin\n* CP : cyclin-P\n* CT : total cdc2\n* k4 > k4p\n\n#### Expected Result\nThe conclusion of the original paper found three states in which the system would operate:\n\n1. Steady state high MPF activity\n2. Autonomous oscillations\n3. 
Excitable steady state\n\n## Cycle Model", "_____no_output_____" ] ], [ [ "# ----< DEFINE MODEL >----\nmodel = '''\n // Equations\n E1: C2 -> CP ; k8*P*C2\n E2: CP -> C2 ; k9*CP\n E3: CP -> pM ; k3*CP*Y\n E4: pM -> M ; pM * (k4p + k4*(M/CT)^2)\n E5: M -> pM ; k5*P*M\n E6: M -> C2 ; k6*M\n E7: -> Y ; k1*aa\n E8: Y -> ; k2*Y + k3*CP*Y\n E9: -> YP ; k6*M\n E10: YP -> ; k7*YP\n \n \n CT := C2+CP+pM+M\n \n // Inputs\n k1 := 0.015*CT/aa\n k2 = 0\n k3 := 200/CT\n k4 = 100\n k4p = 0.018\n k5 = 0\n k6 = 0.5\n k7 = 0.6\n k8 = 100\n k9 = 10\n P = 1\n aa = 1\n \n C2 = 0.1\n CP = 1\n pM = 0.1\n M = 0.1\n Y = 0.1\n YP = 0.1\n \n\n'''\n\n \n# ----< WIDGETS >----\n\n# model parameters\nstyle = {'description_width': 'initial'}\nk4_widget = widgets.FloatSlider(\n description='k4 rate constant',\n value=100.0,\n min=10.0,\n max=1000.0,\n continuous_update=False,\n style=style\n)\nk6_widget = widgets.FloatSlider(\n description='k6 rate constant',\n value=0.5,\n min=0.1,\n max=10.0,\n continuous_update=False,\n style=style\n)\n\nk8_widget = widgets.FloatSlider(\n description='k8 rate constant',\n value=0.5,\n min=10.0,\n max=200.0,\n continuous_update=False,\n style=style\n)\n\nk9_widget = widgets.FloatSlider(\n description='k9 rate constant',\n value=10.0,\n min=10.0,\n max=200.0,\n continuous_update=False,\n style=style\n)\n\n# simulation settings\nsim_length_widget = widgets.IntSlider(\n description='simulation length',\n value=100,\n min=2,\n max=1000,\n continuous_update=False,\n style=style\n)\nsim_points_widget = widgets.IntSlider(\n description='simulated points',\n value=1000,\n min=2,\n max=20000,\n continuous_update=False,\n style=style\n)\n\n\n# display toggles\nC2_widget = widgets.ToggleButton(\n description='C2 toggle',\n value=True,\n)\nCP_widget = widgets.ToggleButton(\n description='CP toggle',\n value=True,\n)\npM_widget = widgets.ToggleButton(\n description='pM toggle',\n value=True,\n)\nY_widget = widgets.ToggleButton(\n description='Y toggle',\n value=True,\n)\nM_widget = widgets.ToggleButton(\n description='M toggle',\n value=True,\n)\nYP_widget = widgets.ToggleButton(\n description='YP toggle',\n value=True,\n)\nyscale_widget = widgets.ToggleButton(\n description='yscale: linear',\n value=False\n)\n\ncenter_align = widgets.Layout(display='justify-content',\n flex_flow='column',\n align_items='stretch',\n width='100%')\nright_align = widgets.Layout(display='flex',\n flex_flow='column',\n align_items='flex-end',\n width='100%')\nleft_vbox = widgets.VBox([k4_widget,k6_widget,k8_widget,k9_widget])\nright_vbox = widgets.VBox([sim_length_widget,sim_points_widget])\nsimulation_hbox = widgets.HBox([left_vbox,right_vbox],layout=center_align)\nyscale_hbox = widgets.HBox([yscale_widget], layout=right_align)\ndisplay_toggles_hbox = widgets.HBox([C2_widget,CP_widget,pM_widget,Y_widget,M_widget,YP_widget])\n\n\n# ----< PLOT SETUP >----\n\nfig, axs = plt.subplots(1,figsize=(15,7))\nplt.close()\nfig2, axs2 = plt.subplots(1, figsize=(7,7))\naxs.set(title='cdc2 cyclin model',xlabel='time',ylabel='variable')\naxs2.set(title='pM vs YP',xlabel='pM',ylabel='YP')\nplt.close()\n\n\n# ----< INTERACT AND RUN >----\n\ndef RunModel(*args):\n # reload model in case user interacts with other cells and touches these widgets\n m = te.loada(model) \n \n\n m.k4 = k4_widget.value\n m.k6 = k6_widget.value\n m.k8 = k8_widget.value\n m.k9 = k9_widget.value\n s = m.simulate(0,sim_length_widget.value,sim_points_widget.value,['time','C2','CP','pM','Y','M','YP'])\n \n axs.set(xlim=[0,sim_length_widget.value])\n linewidth = 2\n 
if C2_widget.value:\n        axs.plot(s['time'],s['C2'],linewidth=linewidth,label='C2')\n    if CP_widget.value:\n        axs.plot(s['time'],s['CP'],linewidth=linewidth,label='CP')\n    if pM_widget.value:\n        axs.plot(s['time'],s['pM'],linewidth=linewidth,label='pM')\n    if Y_widget.value:\n        axs.plot(s['time'],s['Y'],linewidth=linewidth,label='Y')\n    if M_widget.value:\n        axs.plot(s['time'],s['M'],linewidth=linewidth,label='M')\n    if YP_widget.value:\n        axs.plot(s['time'],s['YP'],linewidth=linewidth,label='YP')\n    if axs.lines:\n        axs.legend(bbox_to_anchor=(0.1, -0.175, 0.8, .102), loc=2, ncol=3, mode=\"expand\",fontsize='large')\n    axs2.plot(s['pM'],s['YP'])\n    update_display(display_id=\"0\", obj=fig)\n    update_display(display_id=\"1\", obj=fig2)\n    axs.cla()\n    axs2.cla()\n    axs.set(title='cdc2 cyclin model',xlabel='time (minutes)',ylabel='variable')\n    axs2.set(title='pM vs YP',xlabel='pM',ylabel='YP')\n\nfor i in range(len(left_vbox.children)):\n    left_vbox.children[i].observe(RunModel,names='value')\nfor i in range(len(right_vbox.children)):\n    right_vbox.children[i].observe(RunModel,names='value')\nfor i in range(len(display_toggles_hbox.children)):\n    display_toggles_hbox.children[i].observe(RunModel,names='value')\n\nRunModel()", "_____no_output_____" ] ], [ [ "## Model\n\nUse the toggles to show and hide each component. The included rate constants have boundaries defined by the original paper. (Although k6 values should be rather low on the slider.)", "_____no_output_____" ] ], [ [ "display(fig, display_id=\"0\")\ndisplay(display_toggles_hbox)\ndisplay(simulation_hbox)\ndisplay(yscale_hbox)", "_____no_output_____" ] ], [ [ "## Simulation Energy\nReading this graph tells you about the energy of the system and whether or not it will continue to oscillate.\n\nI have chosen to graph YP against pM since their oscillations are always out of sync.\n* Large loops in this graph indicate continuous oscillations.\n* Loops decaying inwards indicate the steady state with high MPF activity.\n* Loops that break outwards indicate the excitable switch with low MPF activity.\n\nIn other words, this is a phase-plane view of the oscillator: a closed orbit corresponds to sustained oscillations (a limit cycle), while orbits that spiral inward or escape correspond to the two steady-state behaviors.", "_____no_output_____" ] ], [ [ "display(fig2, display_id=\"1\")\ndisplay(display_toggles_hbox)\ndisplay(simulation_hbox)", "_____no_output_____" ] ], [ [ "### Parameter Scan", "_____no_output_____" ] ], [ [ "# ----< DEFINE MODEL >----\nmodel = '''\n    // Equations\n    E1: C2 -> CP ; k8*P*C2\n    E2: CP -> C2 ; k9*CP\n    E3: CP -> pM ; k3*CP*Y\n    E4: pM -> M ; pM * (k4p + k4*(M/CT)^2)\n    E5: M -> pM ; k5*P*M\n    E6: M -> C2 ; k6*M\n    E7: -> Y ; k1*aa\n    E8: Y -> ; k2*Y\n    E9: Y -> ; k3*CP*Y\n    E10: -> YP ; k6*M\n    E11: YP -> ; k7*YP\n    \n    \n    CT := C2+CP+pM+M\n    \n    // Inputs\n    k1 := 0.015*CT/aa\n    k2 = 0\n    k3 := 200/CT\n    k4 = 100\n    k4p = 0.018\n    k5 = 0\n    k6 = 0.5\n    k7 = 0.6\n    k8 = 100\n    k9 = 10\n    P = 1\n    aa = 1\n    \n    C2 = 0.1\n    CP = 1\n    pM = 0.1\n    M = 0.1\n    Y = 0.1\n    YP = 0.1\n    \n\n'''\n\n \n# ----< WIDGETS >----\n\n# model parameters\nstyle = {'description_width': 'initial'}\n\nk8_widget = widgets.FloatSlider(\n    description='k8 rate constant',\n    value=100.0,\n    min=100.0,\n    max=200.0,\n    continuous_update=False,\n    style=style\n)\n\nk9_widget = widgets.FloatSlider(\n    description='k9 rate constant',\n    value=10.0,\n    min=10.0,\n    max=200.0,\n    continuous_update=False,\n    style=style\n)\n\n# simulation settings\nsim_length_widget = widgets.IntSlider(\n    description='simulation length',\n    value=250,\n    min=2,\n    max=1000,\n    continuous_update=False,\n    style=style\n)\nsim_points_widget = widgets.IntSlider(\n    description='simulated points',\n    
value=500,\n min=2,\n max=20000,\n continuous_update=False,\n style=style\n)\ncycleTimesW = widgets.Textarea(\n value='',\n placeholder='',\n description='min/max cycle times:',\n disabled=False\n)\n\n\n\nleft_vbox = widgets.VBox([k8_widget,k9_widget])\nright_vbox = widgets.VBox([sim_length_widget,sim_points_widget])\nsimulation_hbox = widgets.HBox([left_vbox,right_vbox,cycleTimesW])\n\n\n# ----< PLOT SETUP >----\n\nfig, axs = plt.subplots(1,figsize=(7,7))\naxs.set(title='k4 vs k6 oscillation occurences',xlabel='k6',ylabel='k4')\nplt.close()\n\n\n# ----< INTERACT AND RUN >----\ndef GetState(s):\n oscillations=0\n flip=False\n highYP=0\n highpM=0\n for i in range(int(len(s)/2),len(s['pM'])):\n if flip:\n highYP+=1\n if s['pM'][i]>s['YP'][i]+0.075:\n oscillations += 1\n flip=False\n else:\n highpM+=1\n if s['pM'][i]<s['YP'][i]+0.075:\n oscillations += 1\n flip=True\n if oscillations>7:\n return \"green\"\n else:\n if highYP>highpM:\n return \"red\"\n else:\n return \"orange\"\ndef GetCycleTime(s):\n flip=False\n times = []\n for i in range(int(len(s)/2),len(s['pM'])):\n if flip:\n if s['pM'][i]>s['YP'][i]+0.075:\n times.append(s['time'][i])\n flip=False\n else:\n if s['pM'][i]<s['YP'][i]+0.075:\n flip=True\n if times:\n cycleTime = np.mean(np.diff(np.asarray(times)))\n growthRate = 1/cycleTime\n if ( growthRate > 1.0):\n print(\"Error: Growth rate too large.\")\n return cycleTime, np.clip(growthRate, 0.0, 1.0)\n else:\n return 0.0, 0.0\n \n \n \ndef RunModel(*args):\n # reload model in case user interacts with other cells and touches these widgets\n m = te.loada(model) \n x=[]\n y=[]\n color=[]\n maxCycleTime=0\n maxCT_k4=0\n maxCT_k6=0\n minCycleTime=99999\n minCT_k4=0\n minCT_k6=0\n \n for lk4 in np.arange(1.0,3.0,0.1):\n for lk6 in np.arange(-1.0,1.0,0.1):\n m.resetAll()\n m.k4 = 10**lk4\n m.k6 = 10**lk6\n m.k8 = k8_widget.value\n m.k9 = k9_widget.value\n s = m.simulate(0,sim_length_widget.value,sim_points_widget.value,['time','C2','CP','pM','Y','M','YP'])\n state = GetState(s)\n cycleTime, _ = GetCycleTime(s)\n if (cycleTime > maxCycleTime):\n maxCycleTime = cycleTime\n maxCT_k4 = m.k4\n maxCT_k6 = m.k6\n elif ( (cycleTime < minCycleTime) and (cycleTime > 0.0) ):\n minCycleTime = cycleTime\n minCT_k4 = m.k4\n minCT_k6 = m.k6\n x.append(10**lk6)\n y.append(10**lk4)\n color.append(state)\n cycleTimesW.value = \"maxCT=\"+str(round(maxCycleTime,2))+\" k4=\"+str(maxCT_k4)+\" k6=\"+str(maxCT_k6)+\"\\n\"+\"minCT=\"+str(round(minCycleTime,2))+\" k4=\"+str(minCT_k4)+\" k6=\"+str(minCT_k6)\n axs.scatter(x=x,y=y,color=color,label=color)\n axs.set(yscale='log',xscale='log')\n \n axs.legend(bbox_to_anchor=(0.1, -0.175, 0.8, .102), loc=2, ncol=3, mode=\"expand\",fontsize='large')\n #axs.set(xlim=[0,10],ylim=[10,20])\n update_display(display_id=\"2\", obj=fig)\n \n #axs.lines=[]\n axs.cla()\n axs.set(title='k4 vs k6 oscillation occurences',xlabel='k6',ylabel='k4')\n\nfor i in range(len(left_vbox.children)):\n left_vbox.children[i].observe(RunModel,names='value')\nfor i in range(len(right_vbox.children)):\n right_vbox.children[i].observe(RunModel,names='value')\n", "_____no_output_____" ] ], [ [ "## Parameter Scan\nFrom the model and the paper we see that k4 and k6 are the most important factors that determine the fate of the simulation. 
We can do a parameter scan of k4 and k6 to see a 2-dimensional plot of the results\n\nSliders for k8 and k9 are included to observe whether or not they have a significant impact on the state of the simulation.\n\n(The legend is currently broken)\n* red : steady state, high MPF activity\n* green : continuous oscillations\n* yellow : excitable switch, low MPF activity\n\nThis parameter scan also scans for the maximum and minimum growth rates within the oscillating simulations. MaxCT is the maximum division time in minutes, followed by the k4 and k6 that caused this result. MinCT is the minimum division time (not including results with 0).", "_____no_output_____" ] ], [ [ "display(fig, display_id=\"2\")\ndisplay(simulation_hbox)", "_____no_output_____" ] ], [ [ "We clearly see how low k6 and high k4 tends to the steady state and the reverse tends to the excitable switch. This result looks very similar to the results of fig. 2 in the original paper.\n\nI believe this graph also definitively shows k8 and k9 have no effect on the simulation.", "_____no_output_____" ] ], [ [ "from scipy.signal import argrelextrema\n# ----< DEFINE MODEL >----\ncycle_model = '''\n // Equations\n E1: C2 -> CP ; k8*P*C2\n E2: CP -> C2 ; k9*CP\n E3: CP -> pM ; k3*CP*Y\n E4: pM -> M ; pM * (k4p + k4*(M/CT)^2)\n E5: M -> pM ; k5*P*M\n E6: M -> C2 ; k6*M\n E7: -> Y ; k1*aa\n E8: Y -> ; k2*Y + k3*CP*Y\n E9: -> YP ; k6*M\n E10: YP -> ; k7*YP\n \n \n CT := C2+CP+pM+M\n \n // Inputs\n k1 := 0.015*CT/aa\n k2 = 0\n k3 := 200/CT\n k4 = 100\n k4p = 0.018\n k5 = 0\n k6 = 0.5\n k7 = 0.6\n k8 = 100\n k9 = 10\n P = 1\n aa = 1\n \n C2 = 0.1\n CP = 1\n pM = 0.1\n M = 0.1\n Y = 0.1\n YP = 0.1\n \n\n'''\n\ngrowth_model = '''\n // Equations\n E1: -> P ; k*P\n \n // Inputs\n P = 100.0\n k = 0.01\n\n'''\n\n \n# ----< WIDGETS >----\n\n# model parameters\nstyle = {'description_width': 'initial'}\nk4_widget = widgets.FloatSlider(\n description='k4 rate constant',\n value=100.0,\n min=10.0,\n max=1000.0,\n continuous_update=False,\n style=style\n)\nk6_widget = widgets.FloatSlider(\n description='k6 rate constant',\n value=0.5,\n min=0.1,\n max=10.0,\n continuous_update=False,\n style=style\n)\n\nk8_widget = widgets.FloatSlider(\n description='k8 rate constant',\n value=0.5,\n min=10.0,\n max=200.0,\n continuous_update=False,\n style=style\n)\n\nk9_widget = widgets.FloatSlider(\n description='k9 rate constant',\n value=10.0,\n min=10.0,\n max=200.0,\n continuous_update=False,\n style=style\n)\n\n# simulation settings\nsim_length_widget = widgets.IntSlider(\n description='simulation length',\n value=200,\n min=2,\n max=1000,\n continuous_update=False,\n style=style\n)\nsim_points_widget = widgets.IntSlider(\n description='simulated points',\n value=2000,\n min=2,\n max=20000,\n continuous_update=False,\n style=style\n)\n\n\n# display toggles\nC2_widget = widgets.ToggleButton(\n description='C2 toggle',\n value=True,\n)\nCP_widget = widgets.ToggleButton(\n description='CP toggle',\n value=True,\n)\npM_widget = widgets.ToggleButton(\n description='pM toggle',\n value=True,\n)\nY_widget = widgets.ToggleButton(\n description='Y toggle',\n value=True,\n)\nM_widget = widgets.ToggleButton(\n description='M toggle',\n value=True,\n)\nYP_widget = widgets.ToggleButton(\n description='YP toggle',\n value=True,\n)\nyscale_widget = widgets.ToggleButton(\n description='yscale: linear',\n value=False\n)\n\ncenter_align = widgets.Layout(display='justify-content',\n flex_flow='column',\n align_items='stretch',\n width='100%')\nright_align = 
widgets.Layout(display='flex',\n flex_flow='column',\n align_items='flex-end',\n width='100%')\nleft_vbox = widgets.VBox([k4_widget,k6_widget,k8_widget,k9_widget])\nright_vbox = widgets.VBox([sim_length_widget,sim_points_widget])\nsimulation_hbox = widgets.HBox([left_vbox,right_vbox],layout=center_align)\nyscale_hbox = widgets.HBox([yscale_widget], layout=right_align)\ndisplay_toggles_hbox = widgets.HBox([C2_widget,CP_widget,pM_widget,Y_widget,M_widget,YP_widget])\n\n\n# ----< PLOT SETUP >----\n\nfig, axs = plt.subplots(2,figsize=(15,9))\nfig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=10, hspace=0.4)\naxs[0].set(title='cdc2 cyclin model',xlabel='time (minutes)',ylabel='variable')\naxs[1].set(title='Growth model',xlabel='time (minutes)',ylabel='Population (cells)')\nplt.close()\n\ndef GetGrowthRate(s):\n flip=False\n times = []\n for i in range(int(len(s)/2),len(s['pM'])):\n if flip:\n if s['pM'][i]>s['YP'][i]+0.075:\n times.append(s['time'][i])\n flip=False\n else:\n if s['pM'][i]<s['YP'][i]+0.075:\n flip=True\n if times:\n cycleTime = np.mean(np.diff(np.asarray(times)))\n growthRate = 1/cycleTime\n if ( growthRate > 1.0):\n print(\"Error: Growth rate too large.\")\n return cycleTime, np.clip(growthRate, 0.0, 1.0)\n else:\n return 0.0, 0.0\n \n\n# ----< INTERACT AND RUN >----\n\ndef RunModel(*args):\n # reload model in case user interacts with other cells and touches these widgets\n m = te.loada(cycle_model) \n\n m.k4 = k4_widget.value\n m.k6 = k6_widget.value\n m.k8 = k8_widget.value\n m.k9 = k9_widget.value\n \n \n s = m.simulate(0,sim_length_widget.value,sim_points_widget.value,['time','C2','CP','pM','Y','M','YP'])\n \n # times of each peak in minutes\n #peaks = np.asarray(argrelextrema(s['M'], np.greater)) / (sim_points_widget.value/sim_length_widget.value)\n #avgCycle = np.mean(np.diff(peaks)) # in minutes\n \n cycleTime, growthRate = GetGrowthRate(s)\n \n g_m = te.loada(growth_model) \n # simulate growth model\n g_m.k = growthRate\n g_s = g_m.simulate(0,sim_length_widget.value,sim_points_widget.value,['time','P'])\n\n \n axs[0].set(xlim=[0,sim_length_widget.value])\n axs[1].set(xlim=[0,sim_length_widget.value]) \n axs[1].cla()\n axs[1].set(title='Growth model ('+str(round(cycleTime,2))+' minute rate)',xlabel='time (minutes)',ylabel='Population (cells)')\n\n linewidth = 2\n if C2_widget.value:\n axs[0].plot(s['time'],s['C2'],linewidth=linewidth,label='C2')\n if CP_widget.value:\n axs[0].plot(s['time'],s['CP'],linewidth=linewidth,label='CP')\n if pM_widget.value:\n axs[0].plot(s['time'],s['pM'],linewidth=linewidth,label='pM')\n if Y_widget.value:\n axs[0].plot(s['time'],s['Y'],linewidth=linewidth,label='Y')\n if M_widget.value:\n axs[0].plot(s['time'],s['M'],linewidth=linewidth,label='M')\n if YP_widget.value:\n axs[0].plot(s['time'],s['YP'],linewidth=linewidth,label='YP')\n if axs[0].lines:\n axs[0].legend(bbox_to_anchor=(0.1, -0.175, 0.8, .102), loc=2, ncol=3, mode=\"expand\",fontsize='large')\n if yscale_widget.value:\n yscale_widget.description = 'yscale: log'\n axs[1].set_yscale('log')\n else:\n yscale_widget.description = 'yscale: linear'\n axs[1].set_yscale('linear')\n \n axs[1].plot(g_s['time'],g_s['P'],linewidth=linewidth,label='P')\n \n \n \n update_display(display_id=\"0\", obj=fig)\n axs[0].cla()\n axs[0].set(title='cdc2 cyclin model',xlabel='time (minutes)',ylabel='variable')\n \nfor i in range(len(left_vbox.children)):\n left_vbox.children[i].observe(RunModel,names='value')\nfor i in range(len(right_vbox.children)):\n 
right_vbox.children[i].observe(RunModel,names='value')\nfor i in range(len(yscale_hbox.children)):\n    yscale_hbox.children[i].observe(RunModel,names='value')\nfor i in range(len(display_toggles_hbox.children)):\n    display_toggles_hbox.children[i].observe(RunModel,names='value')\n\nRunModel()", "_____no_output_____" ] ] ], [ [ "# Changes in the model vs cell proliferation\nHow do the k4 and k6 parameters affect the exponential growth rate?\n\nUsing the numbers from the parameter scan, we can get the maximum and minimum growth rates within the oscillating simulations. The faster the oscillations, the more cells we should see at the end of the second graph. A quick sanity check of the growth arithmetic follows the source section below.", "_____no_output_____" ] ], [ [ "display(fig, display_id=\"0\")\ndisplay(display_toggles_hbox)\ndisplay(simulation_hbox)\ndisplay(yscale_hbox)", "_____no_output_____" ] ], [ [ "# Source\nTyson, J. J. “Modeling the Cell Division Cycle: Cdc2 and Cyclin Interactions.” Proceedings of the National Academy of Sciences, vol. 88, no. 16, 1991, pp. 7328–7332, doi:10.1073/pnas.88.16.7328.\nhttps://www.pnas.org/content/pnas/88/16/7328.full.pdf", "_____no_output_____" ] ]
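, [ [ "A quick sanity check of the growth arithmetic (a sketch of the convention used in GetGrowthRate above): the growth model is $\\frac{dP}{dt} = kP$, so $P(t) = P_0 e^{kt}$. With the choice $k = 1/\\mathrm{cycleTime}$ made in the code, the population multiplies by $e \\approx 2.72$ per cycle time; if each division were meant to exactly double the population, $k = \\ln(2)/\\mathrm{cycleTime} \\approx 0.69/\\mathrm{cycleTime}$ would be used instead.", "_____no_output_____" ] ] ]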
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb958849a7e734d082fc95c301b8a1a06ebe78b5
5,339
ipynb
Jupyter Notebook
Day_25.ipynb
jessepisel/30daymapchallenge_21
1c18ee3bbe5ee622b00d71551b36ecc97b4f98c4
[ "MIT" ]
1
2021-11-08T17:23:28.000Z
2021-11-08T17:23:28.000Z
Day_25.ipynb
jessepisel/30daymapchallenge_21
1c18ee3bbe5ee622b00d71551b36ecc97b4f98c4
[ "MIT" ]
null
null
null
Day_25.ipynb
jessepisel/30daymapchallenge_21
1c18ee3bbe5ee622b00d71551b36ecc97b4f98c4
[ "MIT" ]
null
null
null
36.82069
936
0.525379
[ [ [ "import pydeck as pdk", "_____no_output_____" ], [ "terrain = \"https://s3.amazonaws.com/elevation-tiles-prod/terrarium/{z}/{x}/{y}.png\"\ndecoder = {\"rScaler\": 256, \"gScaler\": 1, \"bScaler\": 1 / 256, \"offset\": -32768}\ngeology = 'https://tiles.macrostrat.org/carto/{z}/{x}/{y}.png'\n\nterrain_layer = pdk.Layer(\"TerrainLayer\", elevation_decoder=decoder, texture=geology, elevation_data=terrain, opacity=0.8)\nview_state = pdk.ViewState(latitude=37.991, longitude=-107.5, zoom=11.5, bearing=0, pitch=75)\nr = pdk.Deck(layers= terrain_layer, initial_view_state=view_state, map_style='light')\nr.to_html(\"terrain_layer.html\", iframe_height=800)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
cb958c3b3ceebe66f6e6de0fc1ed11fc2bcf5abd
34,868
ipynb
Jupyter Notebook
notebooks/lecture16.ipynb
cegme/cs5293sp22
13eeb2d33a420959dbda26d4330b9aa99d261098
[ "MIT" ]
4
2022-03-04T17:20:20.000Z
2022-03-25T16:41:55.000Z
notebooks/lecture16.ipynb
cegme/cs5293sp22
13eeb2d33a420959dbda26d4330b9aa99d261098
[ "MIT" ]
null
null
null
notebooks/lecture16.ipynb
cegme/cs5293sp22
13eeb2d33a420959dbda26d4330b9aa99d261098
[ "MIT" ]
7
2022-02-16T15:47:25.000Z
2022-03-25T16:27:14.000Z
37.532831
1,404
0.549415
[ [ [ "# Tokenizers ", "_____no_output_____" ] ], [ [ "! pipenv install nltk", "\u001b[39m\u001b[1mInstalling \u001b[32m\u001b[1mnltk\u001b[39m\u001b[22m...\u001b[39m\u001b[22m\n\u001b[K\u001b[39m\u001b[1mAdding\u001b[39m\u001b[22m \u001b[32m\u001b[1mnltk\u001b[39m\u001b[22m \u001b[39m\u001b[1mto Pipfile's\u001b[39m\u001b[22m \u001b[33m\u001b[1m[packages]\u001b[39m\u001b[22m\u001b[39m\u001b[1m...\u001b[39m\u001b[22m\n\u001b[K\u001b[?25h✔ Installation Succeeded\u001b[0m \n\u001b[33m\u001b[1mPipfile.lock (110078) out of date, updating to (fb166b)...\u001b[39m\u001b[22m\n\u001b[39m\u001b[22mLocking\u001b[39m\u001b[22m \u001b[33m\u001b[22m[dev-packages]\u001b[39m\u001b[22m \u001b[39m\u001b[22mdependencies...\u001b[39m\u001b[22m\n\u001b[KBuilding requirements...\n\u001b[KResolving dependencies...\n\u001b[K\u001b[?25h\u001b[32m\u001b[22m✔ Success!\u001b[39m\u001b[22m\u001b[0m \n\u001b[39m\u001b[22mLocking\u001b[39m\u001b[22m \u001b[33m\u001b[22m[packages]\u001b[39m\u001b[22m \u001b[39m\u001b[22mdependencies...\u001b[39m\u001b[22m\n\u001b[KBuilding requirements...\n⠙\u001b[0m Locking...\u001b[K/home/christangrant/.local/lib/python3.9/site-packages/pipenv/patched/notpip/_internal/operations/prepare.py:218: PipDeprecationWarning: DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.\n pip 21.3 will remove support for this functionality. You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.\n deprecated(\n\u001b[KResolving dependencies...\n\u001b[K\u001b[?25h\u001b[32m\u001b[22m✔ Success!\u001b[39m\u001b[22m\u001b[0m \n\u001b[33m\u001b[22mWarning: /home/christangrant/.local/lib/python3.9/site-packages/pipenv/patched/notpip/_internal/operations/prepare.py:218: PipDeprecationWarning: DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.\n pip 21.3 will remove support for this functionality. You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.\n deprecated(\n/home/christangrant/.local/lib/python3.9/site-packages/pipenv/patched/notpip/_internal/operations/prepare.py:218: PipDeprecationWarning: DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.\n pip 21.3 will remove support for this functionality. You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.\n deprecated(\n/home/christangrant/.local/lib/python3.9/site-packages/pipenv/patched/notpip/_internal/operations/prepare.py:218: PipDeprecationWarning: DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.\n pip 21.3 will remove support for this functionality. 
You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.\n deprecated(\n/home/christangrant/.local/lib/python3.9/site-packages/pipenv/patched/notpip/_internal/operations/prepare.py:218: PipDeprecationWarning: DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.\n pip 21.3 will remove support for this functionality. You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.\n deprecated(\n/home/christangrant/.local/lib/python3.9/site-packages/pipenv/patched/notpip/_internal/operations/prepare.py:218: PipDeprecationWarning: DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.\n pip 21.3 will remove support for this functionality. You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.\n deprecated(\u001b[39m\u001b[22m\n\u001b[39m\u001b[1mUpdated Pipfile.lock (fb166b)!\u001b[39m\u001b[22m\n\u001b[39m\u001b[1mInstalling dependencies from Pipfile.lock (fb166b)...\u001b[39m\u001b[22m\n/home/christangrant/.local/lib/python3.9/site-packages/pipenv/patched/notpip/_internal/operations/prepare.py:218: PipDeprecationWarning: DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.\n pip 21.3 will remove support for this functionality. 
You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.\n deprecated(\n 🐍 \u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m 15/15 — \u001b[30m\u001b[22m00:00:15\u001b[39m\u001b[22m22m2m2m2m2mm2m22m2m2m2m2m2m2m2m\n\u001b[0m" ], [ "import nltk\nfrom nltk import tokenize", "_____no_output_____" ], [ "s1 = \"\"\"Why wase time say lot word when few word do trick?\"\"\"", "_____no_output_____" ], [ "s2 = \"\"\"Hickory dickory dock, the mouse ran up the clock.\"\"\"", "_____no_output_____" ], [ "from nltk.tokenize import word_tokenize", "_____no_output_____" ], [ "! df -h /home/christangrant/nltk_data", "Filesystem Size Used Avail Use% Mounted on\n/dev/root 100G 22G 79G 22% /\n" ], [ "# nltk.download('punkt') # Download the model", "[nltk_data] Downloading package punkt to\n[nltk_data] /home/christangrant/nltk_data...\n[nltk_data] Unzipping tokenizers/punkt.zip.\n" ], [ "word_tokenize(s1)", "_____no_output_____" ], [ "word_tokenize(s2)", "_____no_output_____" ], [ "paragraph = [s1,s2]\nparagraph", "_____no_output_____" ], [ "' '.join(paragraph)", "_____no_output_____" ], [ "from nltk.tokenize import sent_tokenize", "_____no_output_____" ], [ "sent_tokenize(' '.join(paragraph))", "_____no_output_____" ], [ "[ word_tokenize(sent) for sent in sent_tokenize(' '.join(paragraph))]", "_____no_output_____" ] ], [ [ "# Vectorization", "_____no_output_____" ], [ "Given a corpus C, normalize C, then vectorize C.\nVectorizing C gives us a **vocabulary** and it gives us **weights**.", "_____no_output_____" ], [ "Get vocabulary => vocabulary is a list of terms that appear in the corpus.\n\n\nCreate a vector where each entry represents a voculary item.\nIf our vocabulary size is 10K, our vector size if 10K. 
(Good Normalization shrinks this vector size.)\n\nIf V is the vocabulary, V + 5 => vocabulary size.\n\n- OOV (Out of Vocabulary) /Unknown terms\n- Redacted terms\n- grouped terms\n- \n\nCorpus = \n\n>Old macdonald had a farm, on his farm he had a cow.\n>Old macdonald had a farm, on his farm he had a pig.\n>Old macdonald had a farm, on his farm he had a goat.\n\nNormalize(Corpus) \n\n>Old, macdonald, farm, pig, cow, goat\n\n\n\n```pre\nOld, macdonald, farm, pig, cow, goat\n[1, 1, 1, 0, 1 ,0]\n[1, 1, 1, 1 ,0, 0]\n[1, 1, 1, 0, 0, 1]\n```\n\n**One-hot encoding** of text.\n\nHow can we get some positional information in this one-hot encoded format?\n\n> Use weights to represent positions?\n> Use nggrams to group terms together \n\n### N-gram encoding\nVocabulary size grows with the size of the ngram\n\n```pre\nOld, macdonald, farm, pig, cow, goat\n<s> Old, Old macdonald, macdonald farm, farm pig, pig cow, cow goat, goat </s>\n[1,1,1,0,0,0,0]\n[1,1,1,0,0,0,1]\n[1,1,1,0,0,0,0]\n```\nUseful to have a range of n-grams when vectorizing.\n\n## Bag of words model\n\nUnordered bag representation of the vocabulary.\n\n>Old macdonald had a farm, on his farm he had a cow.\n>Old macdonald had a farm, on his farm he had a pig.\n>Old macdonald had a farm, on his farm he had a goat.\n\n```pre\nbow =\nOld, macdonald, farm, pig, cow, goat\n[1, 1, 2, 0, 1 ,0]\n[1, 1, 2, 1 ,0, 0]\n[1, 1, 2, 0, 0, 1]\n```\n\nUnique words may be important!\n\n## Term frequency\n\nThe raw frequency value of a term in a particular document.\n\n$$\ntf(word,document) = \\sum_{v \\in D} 1 (if v == w)\n$$\n\n\n## Document frequency \n\nThe number of documents that contain a word w.\n\n\n## TF*IDF\n\nidf = 1/df = log(N/(df+epslilon)) + epsilon\n\n\nTerm frequency * Inverse document freqency\n\n\n## Smoothing.\n\nAdding a small term to help handle out of vocabulary errors floating point issues.\n", "_____no_output_____" ] ], [ [ "! pipenv install sklearn", "\u001b[39m\u001b[1mInstalling \u001b[32m\u001b[1msklearn\u001b[39m\u001b[22m...\u001b[39m\u001b[22m\n\u001b[K\u001b[39m\u001b[1mAdding\u001b[39m\u001b[22m \u001b[32m\u001b[1msklearn\u001b[39m\u001b[22m \u001b[39m\u001b[1mto Pipfile's\u001b[39m\u001b[22m \u001b[33m\u001b[1m[packages]\u001b[39m\u001b[22m\u001b[39m\u001b[1m...\u001b[39m\u001b[22m\n\u001b[K\u001b[?25h✔ Installation Succeeded\u001b[0m \n\u001b[33m\u001b[1mPipfile.lock (fb166b) out of date, updating to (dcd7af)...\u001b[39m\u001b[22m\n\u001b[39m\u001b[22mLocking\u001b[39m\u001b[22m \u001b[33m\u001b[22m[dev-packages]\u001b[39m\u001b[22m \u001b[39m\u001b[22mdependencies...\u001b[39m\u001b[22m\n\u001b[KBuilding requirements...\n\u001b[KResolving dependencies...\n\u001b[K\u001b[?25h\u001b[32m\u001b[22m✔ Success!\u001b[39m\u001b[22m\u001b[0m \n\u001b[39m\u001b[22mLocking\u001b[39m\u001b[22m \u001b[33m\u001b[22m[packages]\u001b[39m\u001b[22m \u001b[39m\u001b[22mdependencies...\u001b[39m\u001b[22m\n\u001b[KBuilding requirements...\n⠙\u001b[0m Locking...\u001b[K/home/christangrant/.local/lib/python3.9/site-packages/pipenv/patched/notpip/_internal/operations/prepare.py:218: PipDeprecationWarning: DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.\n pip 21.3 will remove support for this functionality. 
You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.\n deprecated(\n\u001b[KResolving dependencies...\n\u001b[K\u001b[?25h\u001b[32m\u001b[22m✔ Success!\u001b[39m\u001b[22m\u001b[0m \n\u001b[33m\u001b[22mWarning: /home/christangrant/.local/lib/python3.9/site-packages/pipenv/patched/notpip/_internal/operations/prepare.py:218: PipDeprecationWarning: DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.\n pip 21.3 will remove support for this functionality. You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.\n deprecated(\n/home/christangrant/.local/lib/python3.9/site-packages/pipenv/patched/notpip/_internal/operations/prepare.py:218: PipDeprecationWarning: DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.\n pip 21.3 will remove support for this functionality. You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.\n deprecated(\n/home/christangrant/.local/lib/python3.9/site-packages/pipenv/patched/notpip/_internal/operations/prepare.py:218: PipDeprecationWarning: DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.\n pip 21.3 will remove support for this functionality. You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.\n deprecated(\n/home/christangrant/.local/lib/python3.9/site-packages/pipenv/patched/notpip/_internal/operations/prepare.py:218: PipDeprecationWarning: DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.\n pip 21.3 will remove support for this functionality. You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.\n deprecated(\n/home/christangrant/.local/lib/python3.9/site-packages/pipenv/patched/notpip/_internal/operations/prepare.py:218: PipDeprecationWarning: DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.\n pip 21.3 will remove support for this functionality. You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.\n deprecated(\u001b[39m\u001b[22m\n\u001b[39m\u001b[1mUpdated Pipfile.lock (dcd7af)!\u001b[39m\u001b[22m\n\u001b[39m\u001b[1mInstalling dependencies from Pipfile.lock (dcd7af)...\u001b[39m\u001b[22m\n/home/christangrant/.local/lib/python3.9/site-packages/pipenv/patched/notpip/_internal/operations/prepare.py:218: PipDeprecationWarning: DEPRECATION: A future pip version will change local packages to be built in-place without first copying to a temporary directory. 
We recommend you use --use-feature=in-tree-build to test your packages with this new behavior before it becomes the default.\n pip 21.3 will remove support for this functionality. You can find discussion regarding this at https://github.com/pypa/pip/issues/7555.\n deprecated(\n 🐍 \u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m\u001b[32m\u001b[1m▉\u001b[39m\u001b[22m 2/2 — \u001b[30m\u001b[22m00:00:00\u001b[39m\u001b[22m:00:00\u001b[39m\u001b[22m:00:00\u001b[39m\u001b[22m\n\u001b[0m" ] ], [ [ "Sklearn has an api to documents.\n\n>Transformers take a document and processes it using the function .fit_transform\n>Vectorizers take a document and process it using .fit() \n>fit create an internal model using the document\n\n ", "_____no_output_____" ] ], [ [ "corpus = [ word_tokenize(sent) for sent in sent_tokenize(' '.join(paragraph))]\ncorpus", "_____no_output_____" ], [ "import sklearn\nfrom sklearn.feature_extraction.text import CountVectorizer", "_____no_output_____" ], [ "cv = CountVectorizer(min_df=0., max_df=1.)\ncv_matrix = cv.fit_transform([s1, s2])\nprint(cv_matrix)", " (0, 16)\t1\n (0, 14)\t1\n (0, 11)\t1\n (0, 9)\t1\n (0, 6)\t1\n (0, 17)\t2\n (0, 15)\t1\n (0, 4)\t1\n (0, 2)\t1\n (0, 12)\t1\n (1, 5)\t1\n (1, 1)\t1\n (1, 3)\t1\n (1, 10)\t2\n (1, 7)\t1\n (1, 8)\t1\n (1, 13)\t1\n (1, 0)\t1\n" ], [ "cv_matrix.toarray()", "_____no_output_____" ], [ "vocab = cv.get_feature_names_out()\nvocab", "_____no_output_____" ], [ "import pandas as pd\ndf = pd.DataFrame(cv_matrix.toarray(), columns=vocab)\ndf", "_____no_output_____" ], [ "bv = CountVectorizer(ngram_range=(2,2))\nbv_matrix = bv.fit_transform([s1, s2])\n\nvocab = bv.get_feature_names_out()\ndf1 = pd.DataFrame(bv_matrix.toarray(), columns=vocab)\ndf1", "_____no_output_____" ] ] ]
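, [ [ "Putting the TF*IDF idea above into practice: a minimal sketch using scikit-learn's TfidfVectorizer on the same two sentences. (By default it uses a smoothed idf, log((1+N)/(1+df)) + 1, and L2-normalizes each row, so the exact weights differ slightly from the plain formula above.)", "_____no_output_____" ] ], [ [ "from sklearn.feature_extraction.text import TfidfVectorizer\n\n# Learn the vocabulary and idf weights, then weight each term count by its idf\ntfidf = TfidfVectorizer()\ntfidf_matrix = tfidf.fit_transform([s1, s2])\npd.DataFrame(tfidf_matrix.toarray().round(2), columns=tfidf.get_feature_names_out())", "_____no_output_____" ] ] ]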
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb958cdfbe3982c5e1cdd283de10830509ed10b5
37,288
ipynb
Jupyter Notebook
subtask_2/src_scripts/statistics/data_statistics.ipynb
MFajcik/SemEval_2020_Task-5
c20b2aa34d7e2613cd3c8b3c1e3881e320ebdd9e
[ "MIT" ]
1
2021-11-08T05:49:42.000Z
2021-11-08T05:49:42.000Z
subtask_2/src_scripts/statistics/data_statistics.ipynb
MFajcik/SemEval_2020_Task-5
c20b2aa34d7e2613cd3c8b3c1e3881e320ebdd9e
[ "MIT" ]
1
2021-06-02T03:59:46.000Z
2021-06-02T03:59:46.000Z
subtask_2/src_scripts/statistics/data_statistics.ipynb
MFajcik/SemEval_2020_Task-5
c20b2aa34d7e2613cd3c8b3c1e3881e320ebdd9e
[ "MIT" ]
null
null
null
23.017284
744
0.320291
[ [ [ "%load_ext autoreload\n%autoreload 2\nfrom IPython.display import Markdown, display\ndef printmd(string):\n    display(Markdown(string))\n    \ndef colorize(string,color=\"red\"):\n    return f\"<span style=\\\"color:{color}\\\">{string}</span>\"", "_____no_output_____" ] ], [ [ "# Problem description", "_____no_output_____" ], [ "### Subtask 2: Detecting the antecedent and consequent\n\nIndicating causal insight is an inherent characteristic of counterfactuals. To further detect the causal knowledge conveyed in counterfactual statements, subtask 2 aims to locate the antecedent and consequent in counterfactuals.\n \nAccording to (Nelson Goodman, 1947. The problem of counterfactual conditionals), a counterfactual statement can be converted to a contrapositive with a true antecedent and consequent. Consider the example “Her post-traumatic stress could have been avoided if a combination of paroxetine and exposure therapy had been prescribed two months earlier”; it can be transposed into “because her post-traumatic stress was not avoided, (we know) a combination of paroxetine and exposure therapy was not prescribed”. Such knowledge can not only be used for analyzing the specific statement but can also be accumulated across corpora to develop domain causal knowledge (e.g., a combination of paroxetine and exposure may help cure post-traumatic stress).\n \nPlease note that __in some cases a counterfactual statement contains only an antecedent part, without a consequent part__. For example, in the sentence \"Frankly, I wish he had issued this order two years ago instead of this year\" we can only extract the antecedent part. In subtask 2, when locating the antecedent and consequent parts, set '-1' as the consequent starting index (character index) and ending index (character index) to indicate that there is no consequent part in the sentence. 
For details, please refer to the 'Evaluation' page on this website.\n\n", "_____no_output_____" ] ], [ [ "!ls", "analyze_roberta_large.ipynb  data_statistics.ipynb\r\n" ], [ "import pandas as pd\n!pwd\ndf = pd.read_csv('../../.data/semeval2020_5/train_task2.csv')", "/home/ifajcik/PycharmProjects/semeval2020_task5/scripts/statistics\r\n" ] ], [ [ "The dataset contains this many examples:", "_____no_output_____" ] ], [ [ "len(df)", "_____no_output_____" ], [ "import random\ni = random.randint(0,len(df))\nprint(df.iloc[i])\nprint(\"-\"*50)\nprint(df[\"sentence\"].iloc[i])\nprint(\"-\"*50)\nprint(df[\"antecedent\"].iloc[i])\nprint(\"-\"*50)\nprint(df[\"consequent\"].iloc[i])", "sentenceID                                                        202696\nsentence               I wish there were 10 Daily Kos-style sites to ...\nantecedent             I wish there were 10 Daily Kos-style sites to ...\nconsequent                                                            {}\nantecedent_startid                                                     0\nantecedent_endid                                                      82\nconsequent_startid                                                    -1\nconsequent_endid                                                      -1\nName: 2696, dtype: object\n--------------------------------------------------\nI wish there were 10 Daily Kos-style sites to cover the universe of great Democrats.\n--------------------------------------------------\nI wish there were 10 Daily Kos-style sites to cover the universe of great Democrats\n--------------------------------------------------\n{}\n" ], [ "import random\ni = random.randint(0,len(df))\ns = df.loc[df[\"sentenceID\"]==203483]\n#print(s)\nprint(\"-\"*50)\nprint(s[\"sentence\"].iloc[0])\nprint(\"-\"*50)\nprint(s[\"antecedent\"].iloc[0])\nprint(\"-\"*50)\nprint(s[\"consequent\"].iloc[0])", "--------------------------------------------------\n\"Only last year Pfizer tried a tax inversion, an unsuccessful merger with AstraZeneca that would have shifted Pfizer's tax home to Britain.\"\n--------------------------------------------------\nan unsuccessful merger with AstraZeneca that would have shifted Pfizer's tax home to Britain\n--------------------------------------------------\nwould have shifted Pfizer's tax home to Britain\n" ], [ "df[\"antecedent\"].iloc[0]", "_____no_output_____" ], [ "df[\"consequent\"].iloc[0]", "_____no_output_____" ], [ "df[\"sentence\"].iloc[0][df[\"consequent_startid\"].iloc[0]:df[\"consequent_endid\"].iloc[0]]", "_____no_output_____" ] ], [ [ "Check whether all indices match the annotated text.\n_Note: annotation indices are inclusive!_", "_____no_output_____" ] ], [ [ "for i in range(len(df)):\n    assert df[\"sentence\"].iloc[i][df[\"antecedent_startid\"].iloc[i]:df[\"antecedent_endid\"].iloc[i]+1] \\\n    == df[\"antecedent\"].iloc[i]\n    if df[\"consequent_startid\"].iloc[i]>0:\n        assert df[\"sentence\"].iloc[i][df[\"consequent_startid\"].iloc[i]:df[\"consequent_endid\"].iloc[i]+1] \\\n        == df[\"consequent\"].iloc[i]", "_____no_output_____" ] ], [ [ "__The consequent part might not always exist!__", "_____no_output_____" ] ], [ [ "df.loc[df['consequent_startid'] == -1]", "_____no_output_____" ] ], [ [ "The consequent is missing in this many cases:", "_____no_output_____" ] ], [ [ "df_without_conseq = df.loc[df['consequent_startid'] == -1]\nprint(f\"{len(df_without_conseq)} / {len(df)}\")", "520 / 3551\n" ] ], [ [ "Let's check the distribution of sentence lengths, and whether sentences without a consequent differ in length.", "_____no_output_____" ] ], [ [ "all_lens = [len(s.split()) for s in df[\"sentence\"].values.tolist()]\nno_conseq_lens = [len(s.split()) for s in df_without_conseq[\"sentence\"].values.tolist()]", "_____no_output_____" ], [ "all_lens", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n\nvalues1 = all_lens\nvalues2= 
no_conseq_lens\nbins=100\n_range=(0,max(all_lens))\n\nfig = plt.figure(figsize=(8,6))\nax = fig.add_subplot(111) \nax.hist(values1, alpha=0.5, bins=bins, range=_range, color= 'b', label='All sentences')\nax.hist(values2, alpha=0.5, bins=bins, range=_range, color= 'r', label='Sentences without consequent')\nax.legend(loc='upper right', prop={'size':14})\nplt.show()", "_____no_output_____" ] ], [ [ "The distribution is skewed slightly toward smaller values, but there does not seem to be any strong correlation here.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
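A hedged sketch of consuming the character-span annotations described in the notebook above. The column names follow the `train_task2.csv` schema shown there; the example row is invented, and the `+ 1` reflects that the annotated end indices are inclusive:

```python
def extract_spans(row):
    """Return (antecedent, consequent) text; consequent is None when absent."""
    sentence = row["sentence"]
    antecedent = sentence[row["antecedent_startid"]: row["antecedent_endid"] + 1]
    if row["consequent_startid"] == -1:  # '-1' marks a missing consequent part
        return antecedent, None
    consequent = sentence[row["consequent_startid"]: row["consequent_endid"] + 1]
    return antecedent, consequent

# Invented example row with no consequent part:
row = {"sentence": "I wish he had issued this order two years ago.",
       "antecedent_startid": 0, "antecedent_endid": 45,
       "consequent_startid": -1, "consequent_endid": -1}
print(extract_spans(row))  # -> ('I wish he had issued this order two years ago.', None)
```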
cb95904800773f40029c465d291e41fbd34327f6
251,255
ipynb
Jupyter Notebook
src/PPI_WLKernel.ipynb
Cinofix/graph-kernel-manifold-learning
803b453d5d5e81e68c32b085df48aa6648f2f694
[ "MIT" ]
3
2021-11-07T06:09:31.000Z
2022-03-01T08:03:28.000Z
src/PPI_WLKernel.ipynb
Cinofix/graph-kernel-manifold-learning
803b453d5d5e81e68c32b085df48aa6648f2f694
[ "MIT" ]
null
null
null
src/PPI_WLKernel.ipynb
Cinofix/graph-kernel-manifold-learning
803b453d5d5e81e68c32b085df48aa6648f2f694
[ "MIT" ]
null
null
null
294.554513
63,980
0.928419
[ [ [ "import numpy as np\nimport scipy.io as sio\nfrom sklearn import svm\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom sklearn import manifold\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom sklearn.svm import SVC", "_____no_output_____" ], [ "from graph_kernels_lib import WeisfeilerLehmanKernel, fit_n_components", "_____no_output_____" ], [ "ppi = sio.loadmat(\"PPI.mat\")\nppi_graphs = ppi['G'][0]\nppi_labels = ppi['labels'].ravel()", "_____no_output_____" ], [ "n = ppi_labels.shape[0]", "_____no_output_____" ], [ "wl_kernel = WeisfeilerLehmanKernel()", "_____no_output_____" ], [ "K = wl_kernel.eval_similarities(ppi_graphs[:]['am'], 2)", "_____no_output_____" ], [ "D = pairwise_distances(K, metric='euclidean')", "_____no_output_____" ], [ "plt.imshow(D, zorder=2, cmap='Blues', interpolation='nearest')\nplt.colorbar();\nplt.style.use(\"ggplot\")\nplt.show()", "_____no_output_____" ] ], [ [ "# SVM Linear Classifier", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import StratifiedKFold\nstrat_k_fold = StratifiedKFold(n_splits = 10, shuffle = True) #10", "_____no_output_____" ], [ "clf = svm.SVC(kernel=\"linear\", C = 1.0)\nscores_ln = cross_val_score(clf, D, ppi_labels, cv = strat_k_fold)\nprint(str(np.min(scores_ln)) +\" - \"+str(np.mean(scores_ln))+ \" - \" + str(np.max(scores_ln)) + \" - \"+ str(np.std(scores_ln)))", "0.5555555555555556 - 0.763888888888889 - 1.0 - 0.15023130314433286\n" ], [ "PCA_D = PCA(n_components = 2).fit_transform(D)\nplt.plot(np.cumsum(PCA().fit(D).explained_variance_ratio_))\nplt.show()\nnp.cumsum(PCA().fit(D).explained_variance_ratio_)[:3]", "_____no_output_____" ], [ "acidovorax = PCA_D[ppi_labels == 1]\nacidobacteria = PCA_D[ppi_labels == 2]\n\nclf = clf.fit(PCA_D, ppi_labels)\nw = clf.coef_[0]\na = -w[0] / w[1]\nxx = np.linspace(np.min(PCA_D), np.max(PCA_D))\nyy = a * xx - (clf.intercept_[0]) / w[1]\n\nplt.figure(figsize=(10,5))\n\n\nax_av = plt.scatter(acidovorax[:, 0], acidovorax[:, 1], color = \"xkcd:red\", marker = \"^\",label = \"Acidovorax\", s = 455, alpha = 0.65) \nax_ab = plt.scatter(acidobacteria[:, 0], acidobacteria[:, 1], color = \"green\", label = \"Acidobacteria\", s = 250, alpha = 0.75)\nsvm_line = plt.plot(xx, yy, color = \"xkcd:sky blue\", linestyle = \"--\", linewidth = 3.0)\n\nplt.axis('tight');\n#plt.grid(True)\nplt.legend(prop={'size': 15})\n\nax_av.set_facecolor('xkcd:salmon')\nax_ab.set_facecolor('xkcd:pale green')\n\nplt.show()\n", "_____no_output_____" ], [ "from mpl_toolkits.mplot3d import Axes3D\nfig = plt.figure(figsize=(10,8))\nax = fig.add_subplot(111, projection='3d')\n\nPCA_D = PCA(n_components = 3).fit_transform(D)\n\nacidovorax = PCA_D[ppi_labels == 1]\nacidobacteria = PCA_D[ppi_labels == 2]\n\nclf = clf.fit(PCA_D, ppi_labels)\nw = clf.coef_[0]\na = -w[0] / w[1]\nxx = np.linspace(np.min(PCA_D), np.max(PCA_D))\nyy = a * xx - (clf.intercept_[0]) / w[1]\n\n#plt.figure(figsize=(10,5))\n\n\nax_av = ax.scatter(acidovorax[:, 0], acidovorax[:, 1], acidovorax[:, 2],c = \"xkcd:red\", marker = \"^\",label = \"Acidovorax\", s = 455, alpha = 0.65) \nax_ab = ax.scatter(acidobacteria[:, 0], acidobacteria[:, 1], acidobacteria[:, 2], c = \"green\", label = \"Acidobacteria\", s = 250, alpha = 0.75)\n#svm_line = plt.plot(xx, yy, color = \"xkcd:sky blue\", linestyle = \"--\", linewidth = 3.0)\n\nplt.axis('tight');\n#plt.grid(True)\nplt.legend(prop={'size': 15})\n\nax_av.set_facecolor('xkcd:salmon')\nax_ab.set_facecolor('xkcd:pale 
green')\nax.view_init(azim = 30, elev = 25)\nplt.show()\n", "_____no_output_____" ] ], [ [ "# Manifold Learning Isomap", "_____no_output_____" ] ], [ [ "n_neighbors = 14#15\nn_components = 2\niso_prj_D = manifold.Isomap(n_neighbors, n_components).fit_transform(D)", "_____no_output_____" ], [ "scores_ln = cross_val_score(clf, iso_prj_D, ppi_labels, cv = strat_k_fold, n_jobs= 8)\nnp.mean(scores_ln)", "_____no_output_____" ] ], [ [ "It seems that manifold learning with Isomap does not improve the performance of our linear svm classifier", "_____no_output_____" ], [ "### Plots for Isomap", "_____no_output_____" ] ], [ [ "acidovorax = iso_prj_D[ppi_labels == 1]\nacidobacteria = iso_prj_D[ppi_labels == 2]\n\nclf = clf.fit(iso_prj_D, ppi_labels)\nw = clf.coef_[0]\na = -w[0] / w[1]\nxx = np.linspace(np.min(iso_prj_D), np.max(iso_prj_D))\nyy = a * xx - (clf.intercept_[0]) / w[1]\n\nplt.figure(figsize=(10,5))\n\n\nax_av = plt.scatter(acidovorax[:, 0], acidovorax[:, 1], color = \"xkcd:red\", marker = \"^\",label = \"Acidovorax\", s = 455, alpha = 0.65) \nax_ab = plt.scatter(acidobacteria[:, 0], acidobacteria[:, 1], color = \"green\", label = \"Acidobacteria\", s = 250, alpha = 0.75)\nsvm_line = plt.plot(xx, yy, color = \"xkcd:sky blue\", linestyle = \"--\", linewidth = 3.0)\n\nplt.axis('tight');\n#plt.grid(True)\nplt.legend(prop={'size': 15})\n\nax_av.set_facecolor('xkcd:salmon')\nax_ab.set_facecolor('xkcd:pale green')\n\nplt.show()\n", "_____no_output_____" ] ], [ [ "#### Fit with best n of components", "_____no_output_____" ] ], [ [ "opt_n_components = fit_n_components(D, ppi_labels, manifold.Isomap, n_iteration= 10)", "_____no_output_____" ], [ "opt_iso_prj_D = manifold.Isomap(n_neighbors, opt_n_components).fit_transform(D)", "_____no_output_____" ], [ "scores_ln = cross_val_score(clf, opt_iso_prj_D, ppi_labels, cv = strat_k_fold, n_jobs= 8)\nnp.mean(scores_ln)", "_____no_output_____" ] ], [ [ "# Manifold Learning LocalLinearEmbedding", "_____no_output_____" ] ], [ [ "n_neighbors = 13#15\nn_components = 15\nlle_prj_D = manifold.LocallyLinearEmbedding(n_neighbors, n_components).fit_transform(D)", "_____no_output_____" ], [ "scores_ln = cross_val_score(clf, lle_prj_D, ppi_labels, cv = strat_k_fold, n_jobs= 8)\nnp.mean(scores_ln)", "_____no_output_____" ] ], [ [ "It seems that also manifold learning with LocalLinearEmbedding does not improve the performance of our linear svm classifier", "_____no_output_____" ], [ "### Plots for LLE", "_____no_output_____" ] ], [ [ "acidovorax = lle_prj_D[ppi_labels == 1]\nacidobacteria = lle_prj_D[ppi_labels == 2]\n\nclf = clf.fit(lle_prj_D, ppi_labels)\nw = clf.coef_[0]\na = -w[0] / w[1]\nxx = np.linspace(-0.2,0.25)\nyy = a * xx - (clf.intercept_[0]) / w[1]\nplt.figure(figsize=(10,5))\n\n\nax_av = plt.scatter(acidovorax[:, 0], acidovorax[:, 1], color = \"xkcd:red\", marker = \"^\",label = \"Acidovorax\", s = 455, alpha = 0.65) \nax_ab = plt.scatter(acidobacteria[:, 0], acidobacteria[:, 1], color = \"green\", label = \"Acidobacteria\", s = 250, alpha = 0.75)\nsvm_line = plt.plot(xx, yy, color = \"xkcd:sky blue\", linestyle = \"--\", linewidth = 3.0)\n\nplt.axis('tight');\n#plt.grid(True)\nplt.legend(prop={'size': 15})\n\nax_av.set_facecolor('xkcd:salmon')\nax_ab.set_facecolor('xkcd:pale green')\n\nplt.show()\n", "_____no_output_____" ] ], [ [ "#### Fit with best n of components", "_____no_output_____" ] ], [ [ "opt_n_components = fit_n_components(D, ppi_labels, manifold.LocallyLinearEmbedding, n_neighbors=13, n_iteration= 10)\nopt_n_components", 
"_____no_output_____" ], [ "opt_lle_prj_D = manifold.LocallyLinearEmbedding(13, opt_n_components).fit_transform(D)", "_____no_output_____" ], [ "scores_ln = cross_val_score(clf, opt_lle_prj_D, ppi_labels, cv = strat_k_fold, n_jobs= 8)\nnp.mean(scores_ln)", "_____no_output_____" ] ], [ [ "# Graphs plots", "_____no_output_____" ] ], [ [ "import networkx as nx\n\nG = nx.from_numpy_matrix(ppi_graphs[10]['am'])\n#pos=nx.spring_layout(G) # positions for all nodes\npos = nx.spring_layout(G, k = 0.9, iterations = 1000)\nnx.draw_networkx_nodes(G, pos, with_labels= False, node_color = \"green\", node_size = 300, alpha = 0.8)\nnx.draw_networkx_edges(G, pos, width = 2, alpha=0.5,edge_color='r')\nplt.axis('off')\n#plt.savefig(\"acidovorax_graph_10.png\") # save as png\nplt.show() # display", "_____no_output_____" ], [ "G = nx.from_numpy_matrix(ppi_graphs[59]['am'])\n#pos=nx.spring_layout(G) # positions for all nodes\npos = nx.spring_layout(G, k = 0.9, iterations = 1000)\nnx.draw_networkx_nodes(G, pos, with_labels= False, node_color = \"green\", node_size = 300, alpha = 0.8)\nnx.draw_networkx_edges(G, pos, width = 2, alpha=0.5,edge_color='r')\nplt.axis('off')\n#plt.savefig(\"Acidobacteria_graph_59.png\") # save as png\nplt.show() # display", "_____no_output_____" ], [ "G = nx.from_numpy_matrix(ppi_graphs[6]['am'])\n#pos=nx.spring_layout(G) # positions for all nodes\npos = nx.spring_layout(G, k = 0.9, iterations = 1000)\nnx.draw_networkx_nodes(G, pos, with_labels= False, node_color = \"green\", node_size = 300, alpha = 0.8)\nnx.draw_networkx_edges(G, pos, width = 2, alpha=0.5,edge_color='r')\nplt.axis('off')\n#plt.savefig(\"acidovorax_graph_2.png\") # save as png\nplt.show() # display", "_____no_output_____" ], [ "G = nx.from_numpy_matrix(ppi_graphs[48]['am'])\n#pos=nx.spring_layout(G) # positions for all nodes\npos = nx.spring_layout(G, k = 0.9, iterations = 1000)\nnx.draw_networkx_nodes(G, pos, with_labels= False, node_color = \"green\", node_size = 300, alpha = 0.8)\nnx.draw_networkx_edges(G, pos, width = 2, alpha=0.5,edge_color='r')\nplt.axis('off')\n#plt.savefig(\"Acidobacteria_graph_48.png\") # save as png\nplt.show() # display", "_____no_output_____" ], [ "node_labels = wl_kernel.extract_graphs_labels(ppi_graphs[:]['am'])\nsize = int(np.max(np.concatenate(node_labels)))\ndegree_component = np.zeros((n, size))\nfor i in range(len(node_labels)):\n for j in node_labels[i]:\n degree_component[i,int(j)-1] += 1\ndegree_component[0]\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
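`WeisfeilerLehmanKernel` in the notebook above is imported from the project's own `graph_kernels_lib`, which is not shown. As a rough sketch of the idea behind its iteration parameter (here, `2`), this is one Weisfeiler-Lehman label-refinement step on an adjacency matrix; it is an assumed toy implementation, not the library's actual code:

```python
import numpy as np

def wl_iteration(adj, labels):
    """One WL step: a node's new label encodes its old label plus the sorted
    multiset of its neighbours' labels, compressed back to small integers."""
    signatures = []
    for i in range(adj.shape[0]):
        neigh = sorted(labels[j] for j in np.nonzero(adj[i])[0])
        signatures.append(f"{labels[i]}|{','.join(map(str, neigh))}")
    lookup = {sig: k for k, sig in enumerate(sorted(set(signatures)))}
    return [lookup[sig] for sig in signatures]

adj = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 0]])  # tiny 3-node star graph
print(wl_iteration(adj, [0, 0, 0]))                # -> [1, 0, 0]
```

Repeating this step and comparing how often each compressed label occurs in two graphs is what yields the WL subtree kernel similarity that the notebook feeds into the SVM.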
cb959107ab532c8054b613499b0a3cf4717264d6
464,429
ipynb
Jupyter Notebook
Homework/Homework 2/numpy-exercise.ipynb
selvadevan/nimblebox-week1
72a963d0c1dbac9d61de483351090ba0af3b28f0
[ "MIT" ]
null
null
null
Homework/Homework 2/numpy-exercise.ipynb
selvadevan/nimblebox-week1
72a963d0c1dbac9d61de483351090ba0af3b28f0
[ "MIT" ]
null
null
null
Homework/Homework 2/numpy-exercise.ipynb
selvadevan/nimblebox-week1
72a963d0c1dbac9d61de483351090ba0af3b28f0
[ "MIT" ]
1
2021-02-28T11:58:06.000Z
2021-02-28T11:58:06.000Z
1,623.877622
450,680
0.959425
[ [ [ "# NumPy Practice Exercise", "_____no_output_____" ], [ "In this notebook, you have to use the knowledge that you have gathered from the `numpy-intro` notebook and solve all four Questions to pass the second module.", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom grader import grader_1\nnp.random.seed(10) #do not edit", "_____no_output_____" ] ], [ [ "#### Question 1", "_____no_output_____" ], [ "Create two numpy arrays: `a` should be all integers between 25-35 (inclusive) and `b` should be ten evenly spaced numbers between 1-6 (inclusive). Perform the following operations on these NumPy arrays:\n\n1. Cube (i.e. raise to the power of 3) all the elements in both arrays (element-wise)\n2. Add both the cubed arrays (e.g., [1,2] + [3,4] = [4,6])\n3. Sum the elements with even indices of the added array.\n4. Take the square root of the added array (element-wise square root)\n5. Append `b` to `a`, reshape the appended array so that it is a 4x5, 2d array and store the results in a variable `m`\n6. Shuffle `m` and extract the third and the fourth column of the m matrix. Store the resulting 4x2 matrix in a new variable `m2`.\n", "_____no_output_____" ] ], [ [ "# Your code goes here", "_____no_output_____" ], [ "# Answer", "_____no_output_____" ] ], [ [ "#### Question 2", "_____no_output_____" ], [ "Create two numpy arrays: `A` should be a 4x3, 2d array with integers randomly chosen from 1 to 11 and `b` should be a 4x1, 2d array with integers randomly chosen from 1 to 11. Using numpy functions and routines solve for x in `Ax = b`. Note, you should use numpy's pseudoinverse function while aligning A and b's dimensions.\n\n```\nExpected answer: x = [[ 0.64303194]\n [ 0.57685743]\n [-0.28759588]]\n```", "_____no_output_____" ] ], [ [ "# Your code goes here", "_____no_output_____" ], [ "# Answer", "_____no_output_____" ] ], [ [ "#### Question 3", "_____no_output_____" ], [ "Create a 1d numpy array `original_data` with 1000 elements and divide that array into two arrays: `train` with 2/3rd of the elements of `original_data` and `test` with the remaining 1/3rd of the elements of `original_data`.", "_____no_output_____" ] ], [ [ "# Your code goes here", "_____no_output_____" ], [ "# Answer", "_____no_output_____" ] ], [ [ "#### Question 4 (Graded)", "_____no_output_____" ], [ "Let `x` be the number of miles a person drives per day and `y` be the dollars spent on buying car fuel (per day). Create two numpy arrays, each of size 100, such that `x` (number of miles) ranges from 1 to 10 (hint: use np.linspace()) with uniform noise in (0, 1/2), and `y` (money spent in dollars) ranges from 1 to 20 (hint: use np.linspace()) with uniform noise in (0, 1). Once these arrays are created, find the:\n\n1. Expected value of x and the expected value of y\n2. Variance and co-variance of the distributions of x and y\n3. Assuming that the number of dollars spent on car fuel depends only on the miles driven through a linear relationship, write code that uses a linear predictor to calculate a predicted value of y for each x, i.e. `y_estimated = mx + b`. Refer to the image below for the formulae.\n4. Compute y_pred for each value in x and put the errors into an array called y_error\n5. Root mean square error (RMSE)\n\n![Screen%20Shot%202020-08-06%20at%205.07.59%20PM.png](attachment:Screen%20Shot%202020-08-06%20at%205.07.59%20PM.png)\n", "_____no_output_____" ] ], [ [ "# Your code goes here\n# Hint : use np.linspace to generate 100 numbers within the range as specified in the question. 
\nx= #generate x\nnp.random.seed(0) # do not edit\nx= x + # Add uniform noise using np.random.uniform\ny= #generate y\nnp.random.seed(0) # do not edit\ny=y + # Add uniform noise using np.random.uniform", "_____no_output_____" ], [ "# Answer\ne_x=\ne_y=\nv_x=\nv_y=\n# To calculate covariance of x and y, use the following formula\n# cov(x,y)= Expectation of (x*y) - (expectation of (x) * expectation of (y))\ncov_xy=", "_____no_output_____" ], [ "b=\nm=\ny_estimated=", "_____no_output_____" ] ], [ [ "# To calculate RMSE \n\n![1_lqDsPkfXPGen32Uem1PTNg.png](attachment:1_lqDsPkfXPGen32Uem1PTNg.png)\n", "_____no_output_____" ] ], [ [ "# write code\nrmse = \nprint(rmse)", "_____no_output_____" ] ], [ [ "### Submit assignment\nRun the cell below to grade and submit your assignment", "_____no_output_____" ] ], [ [ "grader_1(rmse)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
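The answer cells in the exercise above are intentionally left blank. For reference, here is one possible solution sketch for Question 4's linear predictor and RMSE, using the covariance identity cov(x, y) = E[xy] - E[x]E[y] referenced in the embedded formula images; the noise parameters follow the question's wording:

```python
import numpy as np

np.random.seed(0)
x = np.linspace(1, 10, 100) + np.random.uniform(0, 0.5, 100)
np.random.seed(0)
y = np.linspace(1, 20, 100) + np.random.uniform(0, 1.0, 100)

e_x, e_y = x.mean(), y.mean()          # expected values
cov_xy = (x * y).mean() - e_x * e_y    # cov(x, y) = E[xy] - E[x]E[y]
m = cov_xy / x.var()                   # least-squares slope
b = e_y - m * e_x                      # intercept
y_estimated = m * x + b

rmse = np.sqrt(np.mean((y - y_estimated) ** 2))
print(m, b, rmse)
```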
cb959bdb292e23b62f7bc5afe180a50bd2fb3f09
51,226
ipynb
Jupyter Notebook
tutorials/test_handling_GCTX_original.ipynb
Cellular-Longevity/cmapPy
abd4349f28af6d035f69fe8c399fde7bef8dd635
[ "BSD-3-Clause" ]
null
null
null
tutorials/test_handling_GCTX_original.ipynb
Cellular-Longevity/cmapPy
abd4349f28af6d035f69fe8c399fde7bef8dd635
[ "BSD-3-Clause" ]
10
2022-03-14T18:40:45.000Z
2022-03-22T12:45:02.000Z
tutorials/test_handling_GCTX_original.ipynb
Cellular-Longevity/cmapPy
abd4349f28af6d035f69fe8c399fde7bef8dd635
[ "BSD-3-Clause" ]
null
null
null
44.895706
238
0.361613
[ [ [ "\n%load_ext autoreload\n%autoreload 2\nimport cmapPy", "The autoreload extension is already loaded. To reload it, use:\n  %reload_ext autoreload\n" ], [ "import os  # needed for os.chdir below\n\nDATAFOLDER = '/home/ubuntu'\nos.chdir(DATAFOLDER)\n!aws s3 cp s3://bioinformatics-loyal/nf-core_processing/HEALTHSPAN/ADMERA_100_ALL/kraken2_classification/standard_custom_bisulfite/taxonomy_gctx_classified_only/kraken_species_percentage.gctx tmp/\nfile = DATAFOLDER + '/tmp/kraken_species_percentage.gctx'", "download: s3://bioinformatics-loyal/nf-core_processing/HEALTHSPAN/ADMERA_100_ALL/kraken2_classification/standard_custom_bisulfite/taxonomy_gctx_classified_only/kraken_species_percentage.gctx to tmp/kraken_species_percentage.gctx\n" ], [ "\n", "_____no_output_____" ], [ "\nfrom cmapPy.pandasGEXpress.view import view\nnodeNames = view(file)\n# !h5ls -r tmp/kraken_species_percentage.gctx\n", "0/DATA/0/matrix (99, 7086)\n0/META/COL/group (99,)\n0/META/COL/id (99,)\n0/META/ROW/class (7086,)\n0/META/ROW/family (7086,)\n0/META/ROW/genus (7086,)\n0/META/ROW/id (7086,)\n0/META/ROW/kingdom (7086,)\n0/META/ROW/order (7086,)\n0/META/ROW/phylum (7086,)\n0/META/ROW/root (7086,)\n0/META/ROW/species (7086,)\n0/META/ROW/subspecies (7086,)\n0/META/ROW/taxid (7086,)\n" ], [ "from cmapPy.pandasGEXpress.parse import parse\ngctx_df = parse(file)", "_____no_output_____" ], [ "my_col_metadata = parse(file, col_meta_only=True)\nmy_row_metadata = parse(file, row_meta_only=True)", "_____no_output_____" ], [ "gctx_df.meth_df.head()", "_____no_output_____" ], [ "gctx_df.cov_df.head()", "_____no_output_____" ], [ "gctx_df.row_metadata_df.head()", "_____no_output_____" ], [ "gctx_df.col_metadata_df.head()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
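One cmapPy feature worth noting alongside the notebook above: `parse` can slice a GCTX file by row and column ids without loading the whole matrix. The ids below are hypothetical placeholders, and note that upstream cmapPy exposes the matrix as `data_df` while this fork appears to use `meth_df`/`cov_df` as shown above:

```python
from cmapPy.pandasGEXpress.parse import parse

# Hypothetical ids; real values would come from 0/META/ROW/id and 0/META/COL/id.
subset = parse(file, rid=["some_taxon_id"], cid=["some_sample_id"])
print(subset.row_metadata_df.shape, subset.col_metadata_df.shape)
```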
cb95b62c87ed3a8c0b885356511ff4ce5708b489
57,814
ipynb
Jupyter Notebook
Phase_2/review/Phase_2_Review-ANSWERS.ipynb
PipSaysHoot/ds-east-042621-lectures
c228a808067fba83316da949c18022d0077c3021
[ "MIT" ]
1
2021-08-12T21:48:21.000Z
2021-08-12T21:48:21.000Z
Phase_2/review/Phase_2_Review-ANSWERS.ipynb
ismizu/ds-east-042621-lectures
3d962df4d3cb19a4d0c92c8246ec251a5969f644
[ "MIT" ]
null
null
null
Phase_2/review/Phase_2_Review-ANSWERS.ipynb
ismizu/ds-east-042621-lectures
3d962df4d3cb19a4d0c92c8246ec251a5969f644
[ "MIT" ]
null
null
null
36.614313
570
0.436348
[ [ [ "# Phase 2 Review", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nfrom statsmodels.formula.api import ols\n\npd.set_option('display.max_columns', 100)", "_____no_output_____" ] ], [ [ "### Check Your Data … Quickly\nThe first thing you want to do when you get a new dataset, is to quickly to verify the contents with the .head() method.", "_____no_output_____" ] ], [ [ "df = pd.read_csv('movie_metadata.csv')\nprint(df.shape)\ndf.head()", "(5043, 28)\n" ] ], [ [ "## Question 1\n\nA Hollywood executive wants to know how much an R-rated movie released after 2000 will earn. The data above is a sample of some of the movies with that rating during that timeframe, as well as other movies. How would you go about answering her question? Talk through it theoretically and then do it in code.\n\nWhat is the 95% confidence interval for a post-2000 R-rated movie's box office gross?", "_____no_output_____" ] ], [ [ "df.isna().sum()", "_____no_output_____" ], [ "# talk through your answer here\n\n'''\nDrop null values.\n\nFilter dataframe for movies after 2000, content rating is 'R'.\n\nCalculate mean, standard deviation, sample size, and plug those into the confidence interval formula to \nfind the lower and upper bounds of what the executive can expect such a movie to make.\n'''", "_____no_output_____" ], [ "# do it in code here\n\ndf.dropna(subset=['gross'], inplace=True)\n\ndf_2000R = df[(df['title_year'] > 2000) & (df['content_rating'] == 'R')]\n\nmean = df_2000R.gross.mean()\n\nsd = df_2000R.gross.std()\n\nn = df_2000R.gross.count()\n\nmean, sd, n", "_____no_output_____" ], [ "se = sd/n**.5", "_____no_output_____" ], [ "# 95% confidence interval\nmean - 1.96 * (sd / n**.5), mean + 1.96 * (sd / n**.5)", "_____no_output_____" ] ], [ [ "## Question 2a\n\nYour ability to answer the first question has the executive excited and now she has many other questions about the types of movies being made and the differences in those movies budgets and gross amounts.\n\nRead through the questions below and **determine what type of statistical test you should use** for each question and **write down the null and alternative hypothesis for those tests**.\n\n- Is there a relationship between the number of Facebook likes for a cast and the box office gross of the movie?\n- Do foreign films perform differently at the box office than non-foreign films?\n- Of all movies created are 40% rated R?\n- Is there a relationship between the language of a film and the content rating (G, PG, PG-13, R) of that film?\n- Is there a relationship between the content rating of a film and its budget? ", "_____no_output_____" ], [ "# your answer here\n\n### Facebook Likes (cast) and Box Office Gross\n'''\nCorrelation/simple linear regression\n'''\nHo: Beta =0\nHa: Beta != 0\n\n\n### Domestic vs. 
Foreign and Box Office Gross\n'''\nTwo-sample T-Test\n'''\nHo: mu_domestic = mu_foreign\nHa: mu_domestic != mu_foreign\n\n\n### Rated R\n'''\nOne-sample Z-Test of proportion\n'''\n\nHo: P = 0.4\nHa: P != 0.4\n\n### Language and Content rating\n'''\nChi-square\n'''\nHo: distributions are equal\nHa: distributions are not equal\n\n\n### Content rating and budget\n'''\nANOVA\n'''\n\nHo: mu_r = mu_PG13 = mu_PG = mu_G\n \nHa: They are not all equal", "_____no_output_____" ], [ "## Question 2b\n\nCalculate the answer for the second question:\n\n- Do foreign films perform differently at the box office than non-foreign films?", "_____no_output_____" ] ], [ [ "df.head()", "_____no_output_____" ], [ "import scipy\nimport numpy as np", "_____no_output_____" ], [ "USA_array = np.array(df[df.country == \"USA\"].gross)\n\nForeign_array = np.array(df[df.country != \"USA\"].gross)\n\n\n\nscipy.stats.ttest_ind(USA_array,Foreign_array, nan_policy = 'omit')", "_____no_output_____" ], [ "# your answer here\ndf_foreign = df[df.country != 'USA'].dropna(subset=['country'])\ndf_domestic = df[df.country == 'USA']\n\ndf_foreign.shape, df_domestic.shape", "_____no_output_____" ], [ "from scipy.stats import ttest_ind", "_____no_output_____" ], [ "ttest_ind(df_foreign.gross, df_domestic.gross)", "_____no_output_____" ], [ "'''\nYes! There is a statistically significant difference between the box office gross of foreign and domestic films.\n'''", "_____no_output_____" ] ], [ [ "## Question 3\n\nNow that you have answered all of those questions, the executive wants you to create a model that predicts the money a movie will make if it is released next year in the US. She wants to use this to evaluate different scripts and then decide which one has the largest revenue potential. \n\nBelow is a list of potential features you could use in the model. 
Create a new frame containing only those variables.\n\nWould you use all of these features in the model?\n\nIdentify which features you might drop and why.\n\n*Remember you want to be able to use this model to predict the box office gross of a film **before** anyone has seen it.*", "_____no_output_____" ], [ "- **budget**: The amount of money spent to make the movie\n- **title_year**: The year the movie first came out at the box office\n- **years_old**: How long it has been since the movie was released\n- **genres**: Each movie is assigned one genre category like action, horror, comedy\n- **imdb_score**: This rating is taken from Rotten tomatoes, and is the average rating given to the movie by the audience\n- **actor_1_facebook_likes**: The number of likes that the most popular actor in the movie has\n- **cast_total_facebook_likes**: The sum of likes for the three most popular actors in the movie\n- **language**: the original spoken language of the film\n", "_____no_output_____" ] ], [ [ "df.loc[0, 'genres'].split('|')", "_____no_output_____" ], [ "df['genres'] = df.genres.apply(lambda x: x.split('|')[0])\ndf.genres.head()", "_____no_output_____" ], [ "df.columns", "_____no_output_____" ], [ "# your answer here\n\nmodel_data = df[[\n    'gross', 'budget', 'actor_1_facebook_likes', 'cast_total_facebook_likes', \n    'title_year', 'content_rating', 'genres'\n]]\n\nmodel_data.corr()\n\n# '''\n# drop either `cast_total_facebook_likes` or `actor_1_facebook_likes` due to multicollinearity\n# '''", "_____no_output_____" ], [ "'''\n`num_critic_for_reviews` and `imdb_score` can't be known before the movie is released,\nso we'll drop them from the model.\n\nWe also drop either `cast_total_facebook_likes` or `actor_1_facebook_likes` due to multicollinearity.\n'''", "_____no_output_____" ] ], [ [ "## Question 4a\n\nCreate the following variables:\n\n- `years_old`: The number of years since the film was released.\n- Dummy categories for each of the following ratings:\n    - `G`\n    - `PG`\n    - `R`\n    \nOnce you have those variables, create a summary output for the following OLS model:\n\n`gross~cast_total_facebook_likes+budget+years_old+G+PG+R`", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "model_data['years_old'] = 2020 - model_data.title_year\n\nmodel_data = pd.get_dummies(model_data, columns=['content_rating']).drop(columns='content_rating_PG-13')\n\n# Shorten the dummy column names so they can be used directly in the OLS formula below.\nmodel_data = model_data.rename(columns=lambda c: c.replace('content_rating_', ''))", "/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n  \"\"\"Entry point for launching an IPython kernel.\n" ], [ "model_data.columns", "_____no_output_____" ], [ "from statsmodels.formula.api import ols  # for writing out the model as a formula\nfrom statsmodels.api import OLS          # for passing X and y directly", "_____no_output_____" ], [ "# your answer here\nlr_model = ols(formula='gross~cast_total_facebook_likes+budget+years_old+G+PG+R', data=model_data).fit()\n\nlr_model.summary()", "_____no_output_____" ] ], [ [ "## Question 4b\n\nBelow is the summary output you should have gotten above. Identify any key takeaways from it.\n- How ‘good’ is this model?\n- Which features help to explain the variance in the target variable? \n    - Which do not? \n", "_____no_output_____" ], [ "<img src=\"ols_summary.png\" style=\"width:300px;\">", "_____no_output_____" ] ], [ [ "'''\n    The model is not very good in that it only explains about 7.9% (13.9% in mine) of the variation \n    in the data around the mean. (based on R-squared value)\n    \n    In the photo, Total Facebook likes, budget, age, PG rating, and R rating help to explain the variance, \n    whereas G rating does not. (based on p-values)\n    \n    In mine, everything other than years old helps to explain the variance.\n\n'''", "_____no_output_____" ] ], [ [ "## Question 5\n\n**Bayes Theorem**\n\nAn advertising executive is studying television viewing habits of married men and women during prime time hours. Based on the past viewing records he has determined that during prime time wives are watching television 60% of the time. It has also been determined that when the wife is watching television, 40% of the time the husband is also watching. When the wife is not watching the television, 30% of the time the husband is watching the television. Find the probability that if the husband is watching the television, the wife is also watching the television.", "_____no_output_____" ] ], [ [ "# your answer here\n\n''' \nP(A) = Probability wife is watching tv\nP(B) = Probability husband is watching tv\nP(A|B) = Probability wife is watching tv given husband is\nP(B|A) = Probability husband is watching tv given wife is\n'''\n\np_A = 0.6\np_notA = 1 - p_A\np_B_given_A = 0.4\np_B_given_notA = 0.3\n\np_A_given_B = (p_B_given_A * p_A) / (p_B_given_A * p_A + p_B_given_notA * p_notA)\n\np_A_given_B", "_____no_output_____" ] ], [ [ "## Question 6\n\nExplain what a Type I error is and how it relates to the significance level when doing a statistical test. ", "_____no_output_____" ] ], [ [ "# your answer here\n'''\nA Type I error occurs when you reject the null hypothesis even though the null hypothesis is True.\n\nThe likelihood of a Type I error is directly related to the significance level. If you\nincrease the significance level, the likelihood of a Type I error also increases and vice versa.\n\nIf our confidence level is 95% (i.e., a 5% significance level), we have a 5% chance of making a Type I error.\n'''", "_____no_output_____" ] ], [ [ "## Question 7\n\nHow is the confidence interval for a sample related to a one sample t-test?", "_____no_output_____" ], [ "The range of a confidence interval sets the limits of the values for which you would reject a null hypothesis. For example, if a confidence interval for a population mean was 100 to 105, we would reject any null hypothesis where the proposed population mean is outside of that range. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
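A short sketch of Question 1's confidence interval computed with scipy rather than a hard-coded 1.96 multiplier; this assumes the `mean`, `sd`, and `n` sample statistics computed in the notebook above:

```python
import scipy.stats as stats

se = sd / n ** 0.5                                    # standard error of the mean
lower, upper = stats.norm.interval(0.95, loc=mean, scale=se)
print(lower, upper)                                   # matches the manual 1.96 * se bounds
```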
cb95b9c712dd4be04a05768af75c1cdf501caa25
59,051
ipynb
Jupyter Notebook
math/Stirling/01_Stirling_log_formula.ipynb
starpentagon/python_scripts
37a484f2657712fe0ea885f58f7f49ee28d077bf
[ "MIT" ]
2
2019-02-22T09:39:58.000Z
2020-11-19T22:46:02.000Z
math/Stirling/01_Stirling_log_formula.ipynb
starpentagon/python_scripts
37a484f2657712fe0ea885f58f7f49ee28d077bf
[ "MIT" ]
null
null
null
math/Stirling/01_Stirling_log_formula.ipynb
starpentagon/python_scripts
37a484f2657712fe0ea885f58f7f49ee28d077bf
[ "MIT" ]
7
2018-09-10T06:11:16.000Z
2021-08-16T07:50:02.000Z
125.908316
16,788
0.855582
[ [ [ "# Stirling's formula (logarithmic approximation)\n* $\\log n! \\sim n\\log n - n$\n* $n!$ is approximately $\\left(\\frac{n}{e}\\right)^n$\n* Reference: [Derivation of Stirling's formula (logarithmic approximation)](https://starpentagon.net/analytics/stirling_log_formula/)", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "## Bounding $\\log n!$ from above", "_____no_output_____" ] ], [ [ "MIN_X = 0.5\nMAX_X = 10\n\nx = np.linspace(MIN_X, MAX_X, 100)\ny = np.log(x)\n\np = plt.plot(x, y, label='$\\log x$')\np = plt.hlines([0], MIN_X, MAX_X)\n\np = plt.xlim(MIN_X, MAX_X-0.5)\np = plt.xticks(range(1, MAX_X+1))\np = plt.ylim([-0.2, 2.3])\n\n# Draw rectangles of area log k\nfor k in range(2, MAX_X):\n    p = plt.vlines(k, 0, np.log(k), linestyles='dashed')\n    p = plt.hlines(np.log(k), k, k+1, linestyles='dashed')\n\np = plt.legend()\nplt.show(p)", "_____no_output_____" ] ], [ [ "## Bounding $\\log n!$ from below", "_____no_output_____" ] ], [ [ "MIN_X = 0.5\nMAX_X = 10\n\nx = np.linspace(MIN_X, MAX_X, 100)\ny = np.log(x)\n\np = plt.plot(x, y, label='$\\log x$')\np = plt.hlines([0], MIN_X, MAX_X)\n\np = plt.xlim(MIN_X, MAX_X-0.5)\np = plt.xticks(range(1, MAX_X+1))\np = plt.ylim([-0.2, 2.3])\n\n# Draw rectangles of area log k\nfor k in range(2, MAX_X):\n    p = plt.vlines(k-1, 0, np.log(k), linestyles='dashed')\n    p = plt.hlines(np.log(k), k-1, k, linestyles='dashed')\n\np = plt.vlines(MAX_X-1, 0, np.log(MAX_X), linestyles='dashed')\n\np = plt.legend()\nplt.show(p)", "_____no_output_____" ] ], [ [ "## Approximation accuracy of $n \\log n - n$", "_____no_output_____" ] ], [ [ "def log_factorial(n):\n    '''Return log n!'''\n    val = 0.0\n    \n    for i in range(1, n+1):\n        val += np.log(i)\n    \n    return val", "_____no_output_____" ], [ "# test of log_factorial\neps = 10**-5\n\nassert abs(log_factorial(1) - 0.0) < eps\nassert abs(log_factorial(2) - np.log(2)) < eps\nassert abs(log_factorial(5) - np.log(120)) < eps", "_____no_output_____" ], [ "def log_factorial_approx(n):\n    '''Return the approximation of log n!: n log n - n'''\n    return n * np.log(n) - n", "_____no_output_____" ], [ "# test of log_factorial_approx\nassert abs(log_factorial_approx(1) - (-1)) < eps\nassert abs(log_factorial_approx(2) - (2 * np.log(2) - 2)) < eps", "_____no_output_____" ], [ "# Plot log_factorial and log_factorial_approx\nn_list = range(1, 50+1)\ny_fact = [log_factorial(n) for n in n_list]\ny_approx = [log_factorial_approx(n) for n in n_list]\n\np = plt.plot(n_list, y_fact, label='$\\log n!$')\np = plt.plot(n_list, y_approx, label='$n \\log n - n$')\n\np = plt.legend()\n\nplt.show(p)", "_____no_output_____" ], [ "# Evaluate the approximation accuracy\nn_list = [5, 10, 20, 50, 100, 1000]\n\napprox_df = pd.DataFrame()\n\napprox_df['n'] = n_list\napprox_df['log n!'] = [log_factorial(n) for n in n_list]\napprox_df['n log(n)-n'] = [log_factorial_approx(n) for n in n_list]\napprox_df['error(%)'] = 100 * (approx_df['log n!'] - approx_df['n log(n)-n']) / approx_df['log n!']", "_____no_output_____" ], [ "pd.options.display.float_format = '{:.1f}'.format\napprox_df", "_____no_output_____" ] ], [ [ "## Comparing $n!$ and $\\left(\\frac{n}{e}\\right)^n$", "_____no_output_____" ] ], [ [ "n_list = [5, 10, 20, 50, 100]\n\napprox_df = pd.DataFrame()\n\napprox_df['n'] = n_list\napprox_df['n!'] = [np.exp(log_factorial(n)) for n in n_list]\napprox_df['(n/e)^n'] = [np.exp(log_factorial_approx(n)) for n in n_list]\napprox_df['error(%)'] = 100 * (approx_df['n!'] - approx_df['(n/e)^n']) / approx_df['n!']", "_____no_output_____" ], [ "pd.options.display.float_format = None\npd.options.display.precision = 2\n\napprox_df", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
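The hand-rolled `log_factorial` in the notebook above can also be cross-checked against scipy, since log n! = log Gamma(n+1):

```python
import numpy as np
from scipy.special import gammaln

n = 100
print(gammaln(n + 1))      # exact log(100!)
print(n * np.log(n) - n)   # Stirling's logarithmic approximation
```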
cb95c9746ba5444708acfeb01f0efb8feb6d3aa1
196,673
ipynb
Jupyter Notebook
exploration-notebooks/planned_dams_africa.ipynb
lherwehe/global-dam-impacts
57de90a13c3def3b60f2f63af9609bc269c8f955
[ "BSD-3-Clause" ]
null
null
null
exploration-notebooks/planned_dams_africa.ipynb
lherwehe/global-dam-impacts
57de90a13c3def3b60f2f63af9609bc269c8f955
[ "BSD-3-Clause" ]
null
null
null
exploration-notebooks/planned_dams_africa.ipynb
lherwehe/global-dam-impacts
57de90a13c3def3b60f2f63af9609bc269c8f955
[ "BSD-3-Clause" ]
null
null
null
99.986274
53,468
0.76795
[ [ [ "# Final Project: Earth Analytics Python Course, Spring 2020\nSteph Shepherd & Lauren Herwehe\n\nA Big Dam Problem:\nGlobal Dam Watch (http://globaldamwatch.org/) maintains a database of existing (GRandD)and future (FHReD) dams across the globe. In this project we explore the future dams database by continent and country, identifying any proposed dams that will potentially impact Ramsar sites - wetlands designated of critical importance under the Ramsar Convention (1971). ", "_____no_output_____" ], [ "## Import packages, define functions, and acquire data", "_____no_output_____" ] ], [ [ "# Imports\nimport warnings\nimport os\nimport sys\n\nimport numpy as np\nimport numpy.ma as ma\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport matplotlib.lines as mlines\n\nimport geopandas as gpd\nfrom geopandas import GeoDataFrame as gdf\nfrom geopandas import GeoSeries as gs\nfrom shapely.geometry import Point, Polygon\n\nimport contextily as ctx\nimport earthpy as et\nimport earthpy.plot as ep", "_____no_output_____" ], [ "# Check path and set working directory.\nwd_path = os.path.join(et.io.HOME, 'earth-analytics', 'data')\nif os.path.exists(wd_path):\n os.chdir(wd_path)\nelse:\n print(\"Path does not exist\")", "_____no_output_____" ], [ "# Download Data stored on figshare\n# Ramsar Sites\net.data.get_data(url=\"https://ndownloader.figshare.com/files/22507082\")\n\n# Future dams\net.data.get_data(url=\"https://ndownloader.figshare.com/files/22486157\")\n\n# Country boundaries\net.data.get_data(url=\"https://ndownloader.figshare.com/files/22507058\")", "_____no_output_____" ], [ "# Open the ramsar shapefile with geopandas\nramsar_all = gpd.read_file(os.path.join(\n \"earthpy-downloads\", \"ramsar-site-data\", \"ramsar-boundaries\",\n \"features_publishedPolygon.shp\"))\n\n# Check the crs of the ramsar sites\nprint(ramsar_all.crs)\n\n# Open the dams csv files with pandas\nfname = os.path.join(\"earthpy-downloads\", \"future_dams_2015.csv\")\ndf = pd.read_csv(fname)\n\n# Covert the pandas dataframe to a shapefile for plotting\n# Set output path for shp\ndams_path = os.path.join('earthpy-downloads', 'fhred-proposed-dams')\nif not os.path.exists(dams_path):\n os.mkdir(dams_path)\n\n# Define the geometry for the points\ngeometry = [Point(xy) for xy in zip(df.Lon_Cleaned, df.LAT_cleaned)]\ncrs = {'init': 'epsg:4326'}\ngeo_df = gdf(df, crs=crs, geometry=geometry)\ngeo_df.to_file(driver='ESRI Shapefile', filename=os.path.join(\n dams_path, 'proposed_dams.shp'))\n\n# Open the proposed dams shapefile with geopandas\ndams_all = gpd.read_file(os.path.join(dams_path, \"proposed_dams.shp\"))\n\n# Pull only the columns that we need from each gdf to save processing time\nproposed_dams = dams_all[['Country',\n 'Continent', 'Major Basi', 'Stage', 'geometry']]\nramsar_areas = ramsar_all[['country_en', 'geometry']]\n\n# Open country borders shapefile for adding boundary of study area\ncountry_borders_path = os.path.join(\"earthpy-downloads\", \"country-borders\",\n \"99bfd9e7-bb42-4728-87b5-07f8c8ac631c2020328-1-1vef4ev.lu5nk.shp\")\ncountry_borders = gpd.read_file(country_borders_path)", "{'init': 'epsg:4326'}\n" ] ], [ [ "# Figures 2-3: Plots of Future Dams by Continent and Selected Countries", "_____no_output_____" ] ], [ [ "# Getting Number of Dams by Continent and Development State\n# Extact the columns needed for analysis\ndams_continent = dams_all[['Continent', 'Country', 'Stage']]\n\n# Group and count data by stage.\ndams_stage = 
dams_continent.groupby(['Continent', 'Country'])[\n    ['Stage']].count().reset_index()\n\ndams_stage", "_____no_output_____" ], [ "# Group and count data by stage.\ndams_stage_alt = dams_continent.groupby(['Continent', 'Country', 'Stage'])[\n    ['Stage']].count().reset_index()\n\ndams_stage_alt", "_____no_output_____" ], [ "# PLOT - NUMBER OF DAMS PROPOSED VS UNDER CONSTRUCTION BY CONTINENT\n# Create a bar plot of the dams by continent and stage of process.\nlabels = ['Africa', 'Asia', 'Europe', 'N. America', 'Oceania', 'S. America']\nproposed = [179, 937, 611, 143, 7, 1188]\nunder_const = [21, 424, 41, 34, 1, 114]\n\nx = np.arange(len(labels))  # the label locations\nwidth = 0.35  # the width of the bars\n\nfig, ax = plt.subplots(figsize=(10, 10))\nrects1 = ax.bar(x - width/2, proposed, width, label='Proposed')\nrects2 = ax.bar(x + width/2, under_const, width, label='Under Construction')\n\n# Add some text for labels, title and custom x-axis tick labels, etc.\nax.set_ylabel('Count', size=15)\nax.set_title('Figure 2: Future Dams by Continent, 2015', size=20)\nax.set_xticks(x)\nax.set_xticklabels(labels, size=15, rotation=45)\nax.legend()\n\n\n# Defined here because this cell runs before the later cell that also defines it.\ndef autolabel(rects):\n    \"\"\"Attach a text label above each bar in *rects*, displaying its value.\"\"\"\n    for rect in rects:\n        height = rect.get_height()\n        ax.annotate('{}'.format(height),\n                    xy=(rect.get_x() + rect.get_width() / 2, height),\n                    xytext=(0, 3),  # 3 points vertical offset\n                    textcoords=\"offset points\",\n                    ha='center', va='bottom')\n\n\nautolabel(rects1)\nautolabel(rects2)\n\nax.text(0.5, -0.2, \"Data Source: Global Dam Watch Future Hydropower \"\n        \"Reservoirs and Dams Database (http://globaldamwatch.org/fhred/)\",\n        size=12, ha=\"center\", transform=ax.transAxes)\nfig.tight_layout()\n\nplt.show()", "_____no_output_____" ], [ "# Extract data by continent.\nafrica = dams_continent[dams_continent[\"Continent\"] == \"Africa\"]\n\n# Group and count country data by stage.\nafrica_stage = africa.groupby(['Country', 'Stage'])[['Stage']].count()\n\nafrica_stage", "_____no_output_____" ], [ "#Extract data by continent.\nasia = dams_continent[dams_continent[\"Continent\"] == \"Asia\"]\n\n#Group and count country data by stage.\nasia_stage = asia.groupby(['Country','Stage'])[['Stage']].count()\n\nasia_stage", "_____no_output_____" ], [ "#Extract data by continent.\neurope = dams_continent[dams_continent[\"Continent\"] == \"Europe\"]\n\n#Group and count country data by stage.\neurope_stage = europe.groupby(['Country','Stage'])[['Stage']].count()\n\neurope_stage", "_____no_output_____" ], [ "#Extract data by continent.\nn_america = dams_continent[dams_continent[\"Continent\"] == \"North America\"]\n\n#Group and count country data by stage.\nn_america_stage = n_america.groupby(['Country','Stage'])[['Stage']].count()\n\nn_america_stage", "_____no_output_____" ], [ "#Extract data by continent.\noceania = dams_continent[dams_continent[\"Continent\"] == \"Oceania\"]\n\n#Group and count country data by stage.\noceania_stage = oceania.groupby(['Country','Stage'])[['Stage']].count()\n\noceania_stage", "_____no_output_____" ], [ "#Extract data by continent.\ns_america = dams_continent[dams_continent[\"Continent\"] == \"South America\"]\n\n#Group and count country data by stage.\ns_america_stage = s_america.groupby(['Country','Stage'])[['Stage']].count()\n\ns_america_stage", "_____no_output_____" ], [ "# Create a bar plot of the dams by countries in Africa comparing stage of process.\nafrica_labels = ['Benin', 'Burkina Faso', 'Gabon', 'Guinea', 'Malawi', 'Mali', 'Morocco',\n                 'Mozambique', 'Namibia', 'Niger', 'Nigeria', 'Rwanda', 'South Africa', 'Zimbabwe']\nafrica_proposed = [6, 2, 1, 23, 2, 12, 0, 3, 3, 1, 1, 0, 3, 2]\nafrica_under_const = [0, 0, 1, 0, 0, 1, 2, 1, 0, 1, 2, 1, 0, 1]\n\nx = np.arange(len(africa_labels))  # the label locations\nwidth = 0.35  # the width of the bars\n\nfig, ax = 
plt.subplots(figsize=(10, 10))\nrects1 = ax.bar(x - width/2, africa_proposed, width, label='Proposed')\nrects2 = ax.bar(x + width/2, africa_under_const,\n width, label='Under Construction')\n\n# Add some text for labels, title and custom x-axis tick labels, etc.\nax.set_ylabel('Count', size=15)\nax.set_title(\n 'Figure 3A: Future Dam Construction by Selected Countries in Africa', size=20)\nax.set_xticks(x)\nax.set_xticklabels(africa_labels, size=15, rotation=45)\nax.legend()\n\n\ndef autolabel(rects):\n \"\"\"Attach a text label above each bar in *rects*, displaying its value.\"\"\"\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n\n\nautolabel(rects1)\nautolabel(rects2)\n\nax.text(0.5, -0.2, \"Data Source: Global Dam Watch Future Hydropower Reservoirs \"\n \"and Dams Database (http://globaldamwatch.org/fhred/)\",\n size=12, ha=\"center\", transform=ax.transAxes)\n\nfig.tight_layout()\n\nplt.show()", "_____no_output_____" ], [ "# Create a bar plot of the dams by country in N. America comparing stage of process.\nna_labels = ['Belize', 'Canada', 'Costa Rica', 'El Salvador', 'Guatemala', 'Haiti', 'Honduras', 'Mexico', 'Nicaragua', 'Panama', 'United States']\nna_proposed = [1, 26, 40, 1, 0, 1, 0, 4, 17, 43, 10]\nna_under_const = [0, 8, 3, 0, 2, 0, 2, 3, 1, 15, 0]\n\nx = np.arange(len(na_labels)) # the label locations\nwidth = 0.35 # the width of the bars\n\nfig, ax = plt.subplots(figsize=(10, 10))\nrects1 = ax.bar(x - width/2, na_proposed, width, label='Proposed')\nrects2 = ax.bar(x + width/2, na_under_const, width, label='Under Construction')\n\n# Add some text for labels, title and custom x-axis tick labels, etc.\nax.set_ylabel('Count', size=15)\nax.set_title('Figure 3B: Future Dam Construction by Countries in N. America', size=20)\nax.set_xticks(x)\nax.set_xticklabels(na_labels, size=15, rotation=45)\nax.legend()\n\nautolabel(rects1)\nautolabel(rects2)\n\nfig.tight_layout()\n\nplt.show()", "_____no_output_____" ] ], [ [ "# Overlay future dams and Ramsar sites datasets for Africa.", "_____no_output_____" ] ], [ [ "# Change the datas CRS to projected for Africa (WGS 84 World Mercator)\n# To make this data more accurate, for the next course we can create \n#list of EPSG for each country in Africa to include in function\nproposed_dams = proposed_dams.to_crs('epsg:3765')\nramsar_areas = ramsar_areas.to_crs('epsg:3765')", "C:\\Users\\lherwehe\\Miniconda3\\envs\\earth-analytics-python\\lib\\site-packages\\pyproj\\crs.py:77: FutureWarning: '+init=<authority>:<code>' syntax is deprecated. '<authority>:<code>' is the preferred initialization method.\n return _prepare_from_string(\" \".join(pjargs))\nC:\\Users\\lherwehe\\Miniconda3\\envs\\earth-analytics-python\\lib\\site-packages\\pyproj\\crs.py:77: FutureWarning: '+init=<authority>:<code>' syntax is deprecated. 
'<authority>:<code>' is the preferred initialization method.\n return _prepare_from_string(\" \".join(pjargs))\n" ], [ "# Get dam impact by African country\n# List of African country names\n# Data cleaning issues: Removed 'Côte d'Ivoire' bc in Ramsar dataset it's called Cite D'ivore and don't know how to deal with additional ' in a string; also removed Congo bc in Ramsar it's called Congo & Democratic Republic of Congo and in FhRED it's called Congo, Rep.\nafrica_cntry = ['Algeria', 'Angola', 'Benin', 'Botswana', 'Burkina Faso',\n 'Burundi', 'Cabo Verde', 'Cameroon', 'Central African Republic',\n 'Chad', 'Comoros', 'Djibouti', 'Equatorial Guinea', 'Eritrea',\n 'Ethiopia', 'Gabon', 'Gambia', 'Ghana', 'Guinea', 'Guinea-Bissau',\n 'Kenya', 'Lesotho', 'Liberia', 'Libya', 'Madagascar', 'Malawi', \n 'Mali', 'Mauritania', 'Mauritius', 'Morocco', 'Mozambique', 'Namibia', \n 'Niger', 'Nigeria', 'Rwanda', 'Sao Tome and Principe', 'Senegal', \n 'Seychelles', 'Sierra Leone', 'Somalia', 'South Africa', 'South Sudan',\n 'Sudan', 'Tanzania', 'Togo', 'Tunisia', 'Uganda', 'Zambia', 'Zimbabwe']\n\n# Empty Africa dict\nafrica_dams = {}\n\n# Append dam_impact function data to africa_dams\nfor i in africa_cntry:\n try:\n africa_dams[i] = {\"5km Buffer Area\": dam_impact(\n 5, i), \"10km Buffer Area\": dam_impact(10, i)}\n except:\n dams = None", "_____no_output_____" ], [ "# Turn it into a pandas dataframe for plotting\nafrica_df = pd.DataFrame.from_dict(africa_dams)\n\n# Some values in the dataframe are zero and some are NaN, make it the same\nafrica_df = africa_df.fillna('None')\nafrica_df.replace({0: 'None'})", "_____no_output_____" ], [ "#Plot data to illustrate which countires have potential imapcts from proposed dams.\n# Create legend so only countries with potential impacts are listed.\nblue_patch = mpatches.Patch(color='dodgerblue', label='Beinin')\ngreen_patch = mpatches.Patch(color='green', label='Gabon')\nred_patch = mpatches.Patch(color='red', label='Guinea')\nteal_patch = mpatches.Patch(color='c', label='Niger')\n\n# Create the figure\nfig, ax = plt.subplots(figsize=(8, 8))\nafrica_df.plot(ax=ax, kind='barh', stacked=True, legend=True)\n\n# Add some text for labels, title and custom x-axis tick labels, etc.\nax.set_xlabel('Affected Area (km)', size=15)\nax.set_title('Figure 4: Dam Impact on Ramsar Areas in Africa', size=20)\nax.text(0.5, -0.2, \"Data Sources: Global Dam Watch Future Hydropower Reservoirs \"\n \"and Dams Database (http://globaldamwatch.org/fhred/),\\nRamsar Sites \"\n \"Information Service (https://rsis.ramsar.org/)\",\n size=12, ha=\"center\", transform=ax.transAxes)\nax.legend(handles=[blue_patch, green_patch, red_patch, teal_patch],\n fontsize=15,\n frameon=True,\n loc=('lower right'),\n title=\"Country\")", "_____no_output_____" ] ], [ [ "# Map buffer results for Guinea", "_____no_output_____" ] ], [ [ "# Analyze Guinea\n# Pull only the data for Guinea\nproposed_dams_guin = proposed_dams[proposed_dams['Country'] == \"Guinea\"]\nramsar_areas_guin = ramsar_areas[ramsar_areas['country_en'] == \"Guinea\"]\nguinea_border = country_borders[country_borders['CNTRY_NAME'] == \"Guinea\"]\n\n# Get the CRS right for plotting\nproposed_dams_guin = proposed_dams_guin.to_crs('epsg:3462')\nramsar_areas_guin = ramsar_areas_guin.to_crs('epsg:3462')\nguinea_border = guinea_border.to_crs('epsg:3462')\n\n# Buffer the dams to 5km & 10km for plotting\nproposed_dams_guin_5k_buff = proposed_dams_guin.buffer(5000)\nproposed_dams_guin_10k_buff = proposed_dams_guin.buffer(10000)", "_____no_output_____" ], 
[ "# Create a map of the dams and the ramsar sites for Guinea\nblack_line = mlines.Line2D([], [], color='black', label='Country Border')\nyellow_patch = mpatches.Patch(color='yellow', label='Ramsar Area')\ngreen_circle = mlines.Line2D([], [], color='white', marker='o',\n markerfacecolor='forestgreen', markersize=18, \n label='10km Buffer')\nlime_circle = mlines.Line2D([], [], color='white', marker='o',\n markerfacecolor='lime', markersize=12, \n label='5km Buffer')\nred_dot = mlines.Line2D([], [], color='white', marker='o',\n markerfacecolor='red', label='Proposed Dam Site')\n\nfig, ax = plt.subplots(figsize=(15, 15))\nramsar_areas_guin.plot(ax=ax, facecolor='yellow')\nproposed_dams_guin_10k_buff.plot(facecolor='forestgreen',\n ax=ax)\nproposed_dams_guin_5k_buff.plot(facecolor='lime',\n ax=ax)\nproposed_dams_guin.plot(ax=ax,\n markersize=5,\n color='red')\nguinea_border.plot(ax=ax, color=\"none\", edgecolor=\"black\", linewidth=2)\nax.legend(handles=[black_line, yellow_patch, green_circle, lime_circle, red_dot],\n fontsize=15,\n frameon=True,\n loc=('upper right'),\n title=\"LEGEND\")\nctx.add_basemap(ax, url=ctx.providers.Stamen.Terrain, zoom=0)\nax.set_axis_off()\nax.set_title(\n 'Figure 5: Guinea Ramsar Areas, Proposed Dams, and Dam Buffer Areas', size=20)\nax.text(0.5, -0.1, \"Data Sources: Global Dam Watch Future Hydropower Reservoirs \"\n \"and Dams Database (http://globaldamwatch.org/fhred/), \\n Ramsar Sites \"\n \"Information Service (https://rsis.ramsar.org/)\",\n size=12, ha=\"center\", transform=ax.transAxes)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb95cd6a29b522fd7f92e06db661b0bb7573981f
10,873
ipynb
Jupyter Notebook
Coba.ipynb
arthurholong/mywind
8517bd0535d40519434f0ab575221cef7d95b185
[ "BSD-2-Clause" ]
null
null
null
Coba.ipynb
arthurholong/mywind
8517bd0535d40519434f0ab575221cef7d95b185
[ "BSD-2-Clause" ]
null
null
null
Coba.ipynb
arthurholong/mywind
8517bd0535d40519434f0ab575221cef7d95b185
[ "BSD-2-Clause" ]
null
null
null
82.371212
7,349
0.815322
[ [ [ "<a href=\"https://colab.research.google.com/github/arthurholong/mywind/blob/master/Coba.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "x = 1 + 20\nprint(x)", "21\n" ], [ "import pandas as pd\nimport matplotlib.pyplot as plt\nfrom google.colab import files\nimport io\n\nuploaded = files.upload()\ncoba = pd.read_csv(io.StringIO(uploaded['bookings.csv'].decode('utf-8')))", "_____no_output_____" ], [ "booking", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ] ]
cb95e04a37a9cf656176829c3b7d8fa6e73e90ee
14,003
ipynb
Jupyter Notebook
20200125/.ipynb_checkpoints/20190125_Slide-checkpoint.ipynb
Miura55/MLNagoya_LT
1d323443a3593a32ef30057b94c79266dbd801d4
[ "MIT" ]
null
null
null
20200125/.ipynb_checkpoints/20190125_Slide-checkpoint.ipynb
Miura55/MLNagoya_LT
1d323443a3593a32ef30057b94c79266dbd801d4
[ "MIT" ]
null
null
null
20200125/.ipynb_checkpoints/20190125_Slide-checkpoint.ipynb
Miura55/MLNagoya_LT
1d323443a3593a32ef30057b94c79266dbd801d4
[ "MIT" ]
null
null
null
24.101549
99
0.479254
[ [ [ "%%HTML\n<link rel=\"stylesheet\" type=\"text/css\" href=\"style/template.css\">", "_____no_output_____" ] ], [ [ "# 今さらOpen MVを触ってみた ーその2ー\n\n## K.Miura", "_____no_output_____" ], [ "# 自己紹介\n- 三浦 耕生(こうき)\n- 大学院生、ロボット工学専攻\n- TechAcademyのジュニアメンター\n- Twitter: @k_miura_io\n- Facebook: koki.miura05", "_____no_output_____" ], [ "# スライドのリンク\n", "_____no_output_____" ], [ " ※このスライドはjupyter-notebookを使用しています", "_____no_output_____" ] ], [ [ "print(\"Hello World\")", "Hello World\n" ] ], [ [ "# さて、本題です", "_____no_output_____" ], [ "# 前回までのあらすじ\n- kickstarterでOpen MVを買ったので遊んでみた\n- 小さいのに顔認識がサクサク動いて面白かった\n<div align=\"center\">\n<img src=\"./imgs/IMG_7078.jpeg\" width=30%>\n</div>", "_____no_output_____" ], [ "# でその翌日", "_____no_output_____" ], [ "# 東京でイベントにてブース出展\n- 朝一の新幹線で東京で1日IBMのブースでお手伝い(詳しくは懇親会で)\n- その帰りに久々に秋葉原を散策してたらM5Stickを発見\n- 前から気になってたしつい買ってしまった\n<div align=\"center\">\n<img src=\"./imgs/img001.jpeg\" width=15%>\n</div>", "_____no_output_____" ], [ "# というわけで", "_____no_output_____" ], [ "# ~今さらOpen MVを触ってみた ーその2ー~\n# 今さらM5Stick-Vを触ってみた\n\n## K.Miura", "_____no_output_____" ], [ "# Maker界隈でアツイM5シリーズ\n- 様々な機種があり、Twitterで遊んでみた投稿が多く存在する\n- M5Stick-Vでも画像認識を使った作品が豊富にある\n\n<div align=\"center\">\n <img src=\"https://media.giphy.com/media/Tfp1lxLUrStcYv2Cud/giphy.gif\" width=\"15%\">\n</div>", "_____no_output_____" ], [ "# どうやらM5Stick-V用のモデルを作成するサイトがあるらしい\n- その名も**V-Training**\n- トレーニング用のファームウェアで画像を撮影して、サイトにアップロードすると学習モデルをダウンロードして自作の画像分類機を簡単にできるらしい\n<div align=\"center\">\n <img src=\"./imgs/V-traning.png\" width=\"50%\">\n</div>", "_____no_output_____" ], [ "# 使ってみた", "_____no_output_____" ], [ "# その前に今回のネタ\n- 最近カルロス・ゴーンが何かと話題になっている\n- カルロス・ゴーンってMr.Beanと顔がよく似ている\n- この2人の顔を識別してみよう\n<div align=\"center\">\n<img src=\"./imgs/0035.jpg\">\n</div>", "_____no_output_____" ], [ "# データ作成\n- スクレイピングでgoogleの画像検索で出てきた画像を取得\n- V-Traningを使う場合は1クラスあたり最低35枚の画像が必要\n- 最大10クラスを学習させることが可能", "_____no_output_____" ] ], [ [ "import argparse\nimport json\nimport os\nimport urllib\n\nfrom bs4 import BeautifulSoup\nimport requests\nfrom termcolor import cprint\n\n__version__ = \"1.0.0\"\n", "_____no_output_____" ], [ "class GoogleImageSerch(object):\n def __init__(self):\n self.GOOGLE_IMAGE_SEARCH_URL = \"https://www.google.co.jp/search\"\n self.session = requests.session()\n self.session.headers.update(\n {\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64; rv:10.0) \\\n Gecko/20100101 Firefox/10.0\"\n }\n )\n\n def search(self, keyword, maximum):\n print(f\"Searching {keyword}.\")\n query = self.generate_query(keyword)\n return self.serch_images(query, maximum)\n\n def generate_query(self, keyword):\n # search query generator\n page = 0\n while True:\n params = urllib.parse.urlencode(\n {\"q\": keyword, \"tbm\": \"isch\", \"ijn\": str(page)}\n )\n\n yield self.GOOGLE_IMAGE_SEARCH_URL + \"?\" + params\n page += 1\n\n def serch_images(self, generate_query, maximum):\n results = []\n total = 0\n while True:\n # search\n html = self.session.get(next(generate_query)).text\n soup = BeautifulSoup(html, \"lxml\")\n elements = soup.select(\".rg_meta.notranslate\")\n jsons = [json.loads(e.get_text()) for e in elements]\n image_url_list = [js[\"ou\"] for js in jsons]\n\n # add search results\n if not image_url_list:\n cprint(\"No more images.\", \"yellow\")\n break\n elif len(image_url_list) > maximum - total:\n results += image_url_list[: maximum - total]\n break\n else:\n results += image_url_list\n total += len(image_url_list)\n\n cprint(f\"Found {len(results)} images.\", \"green\")\n return results\n", 
"_____no_output_____" ], [ "def main(args):\n os.makedirs(args.download_dir, exist_ok=True)\n os.makedirs(os.path.join(args.download_dir, args.target_name), exist_ok=True)\n\n google_image_serch = GoogleImageSerch()\n\n # search images\n results = google_image_serch.search(args.target_name, maximum=args.num_images)\n\n # download\n download_errors = []\n for i, url in enumerate(results):\n download_name = f\"{(i + 1):>0{max(4, len(str(args.num_images)))}}.jpg\"\n download_path = os.path.join(args.download_dir, args.target_name, download_name)\n\n if os.path.exists(download_path) and not args.is_overwrite:\n print(f\"{download_path} is already exists.\")\n download_errors.append(i + 1)\n continue\n\n print(f\"Downloading image {download_name}.\", end=\" \")\n try:\n urllib.request.urlretrieve(url, download_path)\n cprint(\"Successful.\", \"green\")\n except urllib.error.HTTPError:\n cprint(\"Failed. (HTTP Error)\", \"yellow\")\n download_errors.append(i + 1)\n continue\n except urllib.error.URLError:\n cprint(\"Failed. (SSL Error)\", \"yellow\")\n download_errors.append(i + 1)\n continue\n except UnicodeEncodeError:\n cprint(\"Failed. (Encoding Error)\", \"yellow\")\n download_errors.append(i + 1)\n continue\n\n cprint(\"Download complete.\", \"blue\")\n\n cprint(f\"Successful: {len(results) - len(download_errors)} images.\", \"blue\")\n if download_errors:\n cprint(f\"Failed: {len(download_errors)} images.\", \"yellow\")\n", "_____no_output_____" ], [ "if __name__ == \"__main__\":\n cprint(\"-\" * 50, \"magenta\")\n cprint((f\"Image Collector v{__version__}\").center(50), \"magenta\")\n cprint(\"-\" * 50, \"magenta\")\n\n parser = argparse.ArgumentParser(description=f\"Image Collector v{__version__}\")\n parser.add_argument(\n \"-t\",\n \"--target\",\n dest=\"target_name\",\n help=\"Target name\",\n type=str,\n required=True,\n )\n parser.add_argument(\n \"-n\",\n \"--number\",\n dest=\"num_images\",\n help=\"Number of images\",\n type=int,\n required=True,\n )\n parser.add_argument(\n \"-d\",\n \"--directory\",\n dest=\"download_dir\",\n help=\"Download location\",\n type=str,\n default=\"./data\",\n )\n parser.add_argument(\n \"-f\",\n \"--force\",\n dest=\"is_overwrite\",\n action=\"store_true\",\n help=\"Whether to overwrite existing files\",\n )\n args = parser.parse_args()\n\n main(args)\n", "_____no_output_____" ] ], [ [ "# Let's Take Photo\n- そのまま学習させてもいいけど、M5目線の画像を学習したいので、M5Stick-Vを撮影して学習\n- データ作成をするためのプログラムはV-TraningのサイトからダウンロードしてSDカードに保存して動かす\n- とりあえず最低量の35枚の画像を収集\n<div align=\"center\">\n<img src=\"./imgs/IMG_7358.JPG\" width=\"20%\">\n</div>", "_____no_output_____" ], [ "# 画像データをアップロード\n- 専用のサイトへSDに保存されたデータセットを圧縮してアップロード\n- アップロードするとステータスIDを付与されて裏側で学習される\n- 学習が完了するとメールアドレスに学習したモデルと実行用のコードをセットで送られる\n<div align=\"center\">\n<img src=\"./imgs/0002.png\" width=\"50%\">\n</div>", "_____no_output_____" ], [ "# 1回目の学習結果\n- データの質が悪い&量が少ないせいで精度が悪かった\n- 「もう1回送ったほうがいいよ」と一言アドバイス ~(余計なお世話)~\n- 35枚の画像では精度がでるような仕様ではないようだ\n<div align=\"center\">\n<img src=\"./imgs/first_results.png\" width=\"50%\">\n</div>", "_____no_output_____" ], [ "# もう一度データを用意\n- 今度は画像を100枚用意して撮影\n- もちろんキーワードと関係ない画像も含まれているので実質学習に使えたのは60枚程度\n- あとは単純に疲れた(笑)", "_____no_output_____" ], [ "# 2回目の学習結果\n<div align=\"center\">\n<img src=\"./imgs/new_results.png\" width=\"50%\">\n</div>\n", "_____no_output_____" ] ], [ [ "from PIL import Image\nim1 = Image.open(\"./imgs/0006.png\")\nim2 = Image.open(\"./imgs/0005.png\")\n\ndef get_concat_h(im1, im2):\n dst = Image.new('RGB', (im1.width + im2.width, im1.height))\n dst.paste(im1, 
(0, 0))\n dst.paste(im2, (im1.width, 0))\n return dst\n\nget_concat_h(im1, im2).save('./imgs/new_results.png')", "_____no_output_____" ] ], [ [ "# END", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
cb95e8e3762e0cd384e652bd41e2012ebd8688fc
232,096
ipynb
Jupyter Notebook
Figure_1/figure_1bd.ipynb
pachterlab/pachterlab-MBLGLMBHGP_2021
3141e78f649a6e0384073ba13cf343277773101c
[ "BSD-2-Clause" ]
null
null
null
Figure_1/figure_1bd.ipynb
pachterlab/pachterlab-MBLGLMBHGP_2021
3141e78f649a6e0384073ba13cf343277773101c
[ "BSD-2-Clause" ]
null
null
null
Figure_1/figure_1bd.ipynb
pachterlab/pachterlab-MBLGLMBHGP_2021
3141e78f649a6e0384073ba13cf343277773101c
[ "BSD-2-Clause" ]
null
null
null
244.56902
205,172
0.913032
[ [ [ "!date", "Wed Jun 12 17:13:33 PDT 2019\n" ] ], [ [ "# Figure 1b and 1d", "_____no_output_____" ] ], [ [ "import glob\nimport pandas as pd\nimport numpy as np\nimport pandas as pd\nimport scipy as scp\nimport sklearn\nimport itertools\nfrom scipy.optimize import fsolve\nfrom upsetplot import generate_data, plot, from_memberships\nfrom collections import Counter\nfrom matplotlib.ticker import FormatStrFormatter\nfrom matplotlib.ticker import StrMethodFormatter\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\nimport matplotlib.patches as mpatches\nimport matplotlib.ticker as ticker\n\nimport matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.rcParams.update({'font.size': 22})\n%config InlineBackend.figure_format = 'retina'", "_____no_output_____" ], [ "v2_names = np.array(['SRR8599150_v2',\n 'heart1k_v2', 'SRR8611943_v2',\n 'SRR8257100_v2', 'EMTAB7320_v2',\n 'SRR7299563_v2', 'SRR8513910_v2',\n 'SRR8639063_v2', 'SRR8524760_v2',\n 'SRR6956073_v2', 'hgmm1k_v2',\n 'SRR8206317_v2', 'SRR8327928_v2',\n 'SRR6998058_v2'], dtype=object)\n\nv3_names = np.array(['pbmc_1k_v3', 'hgmm10k_v3',\n 'neuron_10k_v3', 'pbmc_10k_v3',\n 'heart1k_v3', 'hgmm1k_v3'], dtype=object)", "_____no_output_____" ], [ "v2_data = {}\nv2_data[\"EMTAB7320_v2\"] = {\"n_reads\": 335147976}\nv2_data[\"heart1k_v2\"] = {\"n_reads\": 88872840}\nv2_data[\"hgmm1k_v2\"] = {\"n_reads\": 75225120}\nv2_data[\"SRR6956073_v2\"] = {\"n_reads\": 161274652}\nv2_data[\"SRR6998058_v2\"] = {\"n_reads\": 37227612}\nv2_data[\"SRR7299563_v2\"] = {\"n_reads\": 112176350}\nv2_data[\"SRR8206317_v2\"] = {\"n_reads\": 85992089}\nv2_data[\"SRR8257100_v2\"] = {\"n_reads\": 189337914}\nv2_data[\"SRR8327928_v2\"] = {\"n_reads\": 190094560}\nv2_data[\"SRR8513910_v2\"] = {\"n_reads\": 146617182}\nv2_data[\"SRR8524760_v2\"] = {\"n_reads\": 97106426}\nv2_data[\"SRR8599150_v2\"] = {\"n_reads\": 8860361}\nv2_data[\"SRR8611943_v2\"] = {\"n_reads\": 21574502}\nv2_data[\"SRR8639063_v2\"] = {\"n_reads\": 416437344}\n\nv2_data[\"EMTAB7320_v2\"][\"n_cells\"] = 4510\nv2_data[\"heart1k_v2\"][\"n_cells\"] = 712\nv2_data[\"hgmm1k_v2\"][\"n_cells\"] = 1079\nv2_data[\"SRR6956073_v2\"][\"n_cells\"] = 4168\nv2_data[\"SRR6998058_v2\"][\"n_cells\"] = 575\nv2_data[\"SRR7299563_v2\"][\"n_cells\"] = 1660\nv2_data[\"SRR8206317_v2\"][\"n_cells\"] = 4418\nv2_data[\"SRR8257100_v2\"][\"n_cells\"] = 11685\nv2_data[\"SRR8327928_v2\"][\"n_cells\"] = 10396\nv2_data[\"SRR8513910_v2\"][\"n_cells\"] = 726\nv2_data[\"SRR8524760_v2\"][\"n_cells\"] = 3064\nv2_data[\"SRR8599150_v2\"][\"n_cells\"] = 3949\nv2_data[\"SRR8611943_v2\"][\"n_cells\"] = 5194\nv2_data[\"SRR8639063_v2\"][\"n_cells\"] = 6614", "_____no_output_____" ], [ "v3_data = {}\nv3_data[\"hgmm1k_v3\"] = {\"n_reads\": 63105786}\nv3_data[\"neuron_10k_v3\"] = {\"n_reads\": 357111595}\nv3_data[\"pbmc_10k_v3\"] = {\"n_reads\": 638901019}\nv3_data[\"pbmc_1k_v3\"] = {\"n_reads\": 66601887}\nv3_data[\"heart1k_v3\"] = {\"n_reads\": 84512390}\nv3_data[\"hgmm10k_v3\"] = {\"n_reads\": 721180737}\n\nv3_data[\"hgmm1k_v3\"][\"n_cells\"] = 1011\nv3_data[\"neuron_10k_v3\"][\"n_cells\"] = 11477\nv3_data[\"pbmc_10k_v3\"][\"n_cells\"] = 1045\nv3_data[\"pbmc_1k_v3\"][\"n_cells\"] = 11790\nv3_data[\"heart1k_v3\"][\"n_cells\"] = 11692\nv3_data[\"hgmm10k_v3\"][\"n_cells\"] = 1227", "_____no_output_____" ], [ "w = 67365891\nc = 345420\nu = 2013414\n\nv2_data[\"heart1k_v2\"][\"barcode_error_correction\"] = (w, c, u)\n\n\nw = 57345535\nc = 176786\nu = 1849405\nv3_data[\"heart1k_v3\"][\"barcode_error_correction\"] = (w, c, u)\n\n\nw = 
58523823\nc = 358110\nu = 2035210\nv2_data[\"hgmm1k_v2\"][\"barcode_error_correction\"] = (w, c, u)\n\n\n\nw = 46243317\nc = 132278\nu = 1394347\nv3_data[\"hgmm1k_v3\"][\"barcode_error_correction\"] = (w, c, u)\n\n\n\n\nw = 499346666\nc = 2613284\nu = 20298095\nv3_data[\"hgmm10k_v3\"][\"barcode_error_correction\"] = (w, c, u)\n\n\n\nw = 227709973\nc = 659929\nu = 7299697\nv3_data[\"neuron_10k_v3\"][\"barcode_error_correction\"] = (w, c, u)\n\n\n\n\nw = 353379492\nc = 1912254\nu = 14819352\nv3_data[\"pbmc_10k_v3\"][\"barcode_error_correction\"] = (w, c, u)\n\n\nw = 39178903\nc = 190366\nu = 1538993\nv3_data[\"pbmc_1k_v3\"][\"barcode_error_correction\"] = (w, c, u)\n\n\n\nw = 28344188\nc = 231718\nu = 625557\nv2_data[\"SRR6998058_v2\"][\"barcode_error_correction\"] = (w, c, u)\n\n\n\nw = 66294966\nc = 782287\nu = 1728840\nv2_data[\"SRR8206317_v2\"][\"barcode_error_correction\"] = (w, c, u)\n\n\n\nw = 111254198\nc = 1567548\nu = 4904318\nv2_data[\"SRR8327928_v2\"][\"barcode_error_correction\"] = (w, c, u)\n\n\n\nw = 348557155\nc = 1857224\nu = 1836077\nv2_data[\"SRR8639063_v2\"][\"barcode_error_correction\"] = (w, c, u)\n\n\n\nw = 258864227\nc = 4111830\nu = 9256167\nv2_data[\"EMTAB7320_v2\"][\"barcode_error_correction\"] = (w, c, u)\n\n\n\n\nw = 107572180\nc = 1082195\nu = 2639035\nv2_data[\"SRR6956073_v2\"][\"barcode_error_correction\"] = (w, c, u)\n\n\n\nw = 64690144\nc = 477618\nu = 1520183\nv2_data[\"SRR7299563_v2\"][\"barcode_error_correction\"] = (w, c, u)\n\n\n\n\nw = 173540630\nc = 1094514\nu = 4191648\nv2_data[\"SRR8257100_v2\"][\"barcode_error_correction\"] = (w, c, u)\n\n\n\nw = 131004911\nc = 910116\nu = 3772762\nv2_data[\"SRR8513910_v2\"][\"barcode_error_correction\"] = (w, c, u)\n\n\n\n\nw = 3420063\nc = 38493\nu = 117197\nv2_data[\"SRR8599150_v2\"][\"barcode_error_correction\"] = (w, c, u)\n\n\n\n\nw = 16021922\nc = 206410\nu = 518515\nv2_data[\"SRR8611943_v2\"][\"barcode_error_correction\"] = (w, c, u)\n\n\n\n\nw = 68514365\nc = 615351\nu = 1748491\nv2_data[\"SRR8524760_v2\"][\"barcode_error_correction\"] = (w, c, u)\n", "_____no_output_____" ], [ "%%time\nfor f in glob.glob(\"/home/sina/projects/bus/validate/all_bus/k*\"):\n name = \"_\".join(f.split(\"/\")[7].split(\"_\")[1:])\n print(name)\n if \"v2\" in name:\n # loss counts\n v2_data[name][\"loss_ratio\"] = pd.read_csv(f + \"/lossratio.txt\", header=None).values.flatten()\n \n elif \"v3\" in name:\n v3_data[name][\"loss_ratio\"] = pd.read_csv(f + \"/lossratio.txt\", header=None).values.flatten()\n", "pbmc_1k_v3\nSRR8599150_v2\nheart1k_v2\nSRR8611943_v2\nhgmm10k_v3\nneuron_10k_v3\nSRR8257100_v2\nEMTAB7320_v2\nSRR7299563_v2\nSRR8513910_v2\nSRR8639063_v2\n" ] ], [ [ "# Figure 1b", "_____no_output_____" ] ], [ [ "# (inwhitelist, correct, uncorrected)\nw = [v2_data[i][\"barcode_error_correction\"][0]/(v2_data[i][\"barcode_error_correction\"][0] + v2_data[i][\"barcode_error_correction\"][1] + v2_data[i][\"barcode_error_correction\"][2]) for i in v2_names]\n[w.append(v3_data[i][\"barcode_error_correction\"][0]/(v3_data[i][\"barcode_error_correction\"][0] + v3_data[i][\"barcode_error_correction\"][1] + v3_data[i][\"barcode_error_correction\"][2])) for i in v3_names]\n\nc = [v2_data[i][\"barcode_error_correction\"][1]/(v2_data[i][\"barcode_error_correction\"][0] + v2_data[i][\"barcode_error_correction\"][1] + v2_data[i][\"barcode_error_correction\"][2]) for i in v2_names]\n[c.append(v3_data[i][\"barcode_error_correction\"][1]/(v3_data[i][\"barcode_error_correction\"][0] + v3_data[i][\"barcode_error_correction\"][1] + 
v3_data[i][\"barcode_error_correction\"][2])) for i in v3_names]\n\nu = [v2_data[i][\"barcode_error_correction\"][2]/(v2_data[i][\"barcode_error_correction\"][0] + v2_data[i][\"barcode_error_correction\"][1] + v2_data[i][\"barcode_error_correction\"][2]) for i in v2_names]\n[u.append(v3_data[i][\"barcode_error_correction\"][2]/(v3_data[i][\"barcode_error_correction\"][0] + v3_data[i][\"barcode_error_correction\"][1] + v3_data[i][\"barcode_error_correction\"][2])) for i in v3_names]", "_____no_output_____" ], [ "nreads = [v2_data[i][\"n_reads\"] for i in v2_names]\n[nreads.append(v3_data[i][\"n_reads\"]) for i in v3_names]\n\nidx_sorted = np.argsort(nreads)\nnames = v2_names \nn3 = v3_names\nnames = np.append(names, n3)\nnames = names[idx_sorted]\n\n\nsorted_nreads = np.sort(nreads)\n\nw = np.array(w)[idx_sorted]\nc = np.array(c)[idx_sorted]\nu = np.array(u)[idx_sorted]\n\ndata = [w, c, u]\np = data[1]/(16*data[0] + data[1])", "_____no_output_____" ], [ "fig, ax=plt.subplots(figsize=(15, 7))\nwidth=0.8\n\np1 = ax.bar(names, data[0], width, color='white', label=\"Retained\", edgecolor=\"black\")\np2 = ax.bar(names, data[1], width, bottom=data[0], color='black', label=\"Corrected\", edgecolor=\"black\")\np3 = ax.bar(names, data[2], width, \n bottom=np.array(data[0])+np.array(data[1]), color='#cccccc', label=\"Uncorrected\", edgecolor=\"black\")\nplt.xticks(rotation=45, ha=\"right\")\n\nxlocs, xlabs = plt.xticks()\nfor i, v in enumerate([\"{:.3f}\".format(i) for i in 16*p*(1-p)**15]):\n plt.text(xlocs[i]-0.4, 1 + 0.01, str(v), size=12)\n\nax.set_ylim(bottom=0)\nplt.ylabel(\"Fraction of total barcodes\")\nplt.legend(loc=\"lower right\")\nplt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.1f}'))\nplt.subplots_adjust(bottom=0.15, wspace=0.05)\nplt.tight_layout()\n#plt.savefig(\"barcodecorrection.pdf\")\nplt.show()", "_____no_output_____" ] ], [ [ "### Data from Figure 1", "_____no_output_____" ] ], [ [ "for i in range(len(names)):\n print(p[i], names[i])", "0.0007029465078580609 SRR8599150_v2\n0.0005106859668839463 SRR8611943_v2\n0.0003940297471424041 SRR6998058_v2\n0.00032698094377725875 hgmm1k_v3\n0.0008798369193879135 pbmc_1k_v3\n0.0004612349698473498 hgmm1k_v2\n0.0003380939474722988 heart1k_v3\n0.0004340109240798901 SRR8206317_v2\n0.00080453805456423 heart1k_v2\n0.00032036737506866027 SRR8524760_v2\n0.0003822942075884642 SRR7299563_v2\n0.00019263917418652586 SRR8513910_v2\n0.0005610190071025255 SRR6956073_v2\n0.00017874793072967027 SRR8257100_v2\n0.0006283658711736884 SRR8327928_v2\n0.0003035884629647799 EMTAB7320_v2\n0.0001810991956940125 neuron_10k_v3\n0.0007369625204656871 SRR8639063_v2\n0.0003329091238252972 pbmc_10k_v3\n0.0009917728026873924 hgmm10k_v3\n" ], [ "p.mean()", "_____no_output_____" ], [ "(16*p*(1-p)**15).mean()", "_____no_output_____" ], [ "(120*p**2*(1-p)**14).mean()*100", "_____no_output_____" ], [ "w = [v2_data[i][\"barcode_error_correction\"][0] for i in v2_names]\n[w.append(i) for i in [v3_data[i][\"barcode_error_correction\"][0] for i in v3_names]]\nw = np.array(w)\n\nb = [v2_data[i][\"barcode_error_correction\"][1] for i in v2_names]\n[b.append(i) for i in [v3_data[i][\"barcode_error_correction\"][1] for i in v3_names]]\nb = np.array(b)\n\ng = [v2_data[i][\"barcode_error_correction\"][2] for i in v2_names]\n[g.append(i) for i in [v3_data[i][\"barcode_error_correction\"][2] for i in v3_names]]\ng = np.array(g)\n\nt = w+b+g", "_____no_output_____" ], [ "w/(1-(p)**16)", "_____no_output_____" ], [ "t", "_____no_output_____" ], [ "b/w/(1-(p)**16)", 
"_____no_output_____" ], [ "16*p*(1-p)**15", "_____no_output_____" ], [ "10*p*(1-p)**9*100", "_____no_output_____" ], [ "(c/(w+c+u)*100).mean()", "_____no_output_____" ] ], [ [ "# Figure 1d", "_____no_output_____" ] ], [ [ "data = [v2_data[i][\"loss_ratio\"] for i in v2_names]\n[data.append(v3_data[i][\"loss_ratio\"]) for i in v3_names]\n\nnames = v2_names \nn3 = v3_names\nnames = np.append(names, n3)\n\nnreads = [v2_data[i][\"n_reads\"] for i in v2_names]\n[nreads.append(v3_data[i][\"n_reads\"]) for i in v3_names]\n\nidx_sorted = np.argsort(nreads)\nsorted_nreads = np.sort(nreads)\n\ndata = np.array(data)[idx_sorted]\n\nnames = names[idx_sorted]", "_____no_output_____" ], [ "kallisto_color_v2 = '#cccccc'\nkallisto_color_v3 = 'black'\n\n\ndef adjacent_values(vals, q1, q3):\n upper_adjacent_value = q3 + (q3 - q1) * 1.5\n upper_adjacent_value = np.clip(upper_adjacent_value, q3, vals[-1])\n\n lower_adjacent_value = q1 - (q3 - q1) * 1.5\n lower_adjacent_value = np.clip(lower_adjacent_value, vals[0], q1)\n return lower_adjacent_value, upper_adjacent_value\n\n\ndef set_axis_style(ax, labels):\n ax.get_xaxis().set_tick_params(direction='out')\n ax.xaxis.set_ticks_position('bottom')\n ax.set_xticks(np.arange(1, len(labels) + 1))\n ax.set_xticklabels(labels)\n ax.set_xlim(0.25, len(labels) + 0.75)\n ax.set_ylim(0, 1)\n #ax.set_xlabel('Sample name')\n\nfig, ax = plt.subplots(figsize=(20, 10), nrows=1, ncols=1)\n\n\n################\nparts = ax.violinplot(data, showmeans=False, showmedians=False,showextrema=False)\n\n\nfor pc_i in range(len(parts['bodies'])):\n if \"v2\" in names[pc_i]:\n parts['bodies'][pc_i].set_facecolor(kallisto_color_v2)\n elif \"v3\" in names[pc_i]:\n parts['bodies'][pc_i].set_facecolor(kallisto_color_v2)\n parts['bodies'][pc_i].set_edgecolor('black')\n parts['bodies'][pc_i].set_alpha(1)\n\n# set style for the axes\nset_axis_style(ax, names)\n\ninset = inset_axes(ax, width=\"50%\", height=\"50%\", loc=1)\n\nparts = inset.violinplot(data, showmeans=False, showmedians=False,showextrema=False)\n\nfor pc_i in range(len(parts['bodies'])):\n if \"v2\" in names[pc_i]:\n parts['bodies'][pc_i].set_facecolor(kallisto_color_v2)\n elif \"v3\" in names[pc_i]:\n parts['bodies'][pc_i].set_facecolor(kallisto_color_v3)\n parts['bodies'][pc_i].set_edgecolor('black')\n parts['bodies'][pc_i].set_alpha(1)\n\n \nset_axis_style(inset, names)\n\ninset.set_xticklabels([])\ninset.set_ylim(0, 0.001)\n\n\nticks = ax.get_yticks().tolist()\nticks = [\"{:.1f}\".format(i) for i in ticks]\nticks[0] = \"No counts lost 0.0\"\nticks[-1] = \"All counts lost 1.0\"\n\n#for tick in ax.get_xticklabels():\n# tick.set_rotation(45, ha=\"right\")\n\nax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=45, ha=\"right\")\n\n\ngrey_patch = mpatches.Patch(color=kallisto_color_v2, label='v2 Chemistry')\nblack_patch = mpatches.Patch(color=kallisto_color_v3, label='v3 Chemistry')\n\nax.figure.legend(handles=[grey_patch, black_patch], loc=1, bbox_to_anchor=(0.34,0.94))\n\nax.yaxis.set_ticklabels(ticks)\nplt.subplots_adjust(bottom=0.15, wspace=0.05)\nplt.tight_layout()\nplt.savefig(\"lossratio.pdf\")\nplt.show()", "_____no_output_____" ] ], [ [ "### Data from Figure 1", "_____no_output_____" ] ], [ [ "loss2 = [v2_data[i][\"loss_ratio\"] for i in v2_names]\nloss3 = [v3_data[i][\"loss_ratio\"] for i in v3_names]", "_____no_output_____" ], [ "for i in range(len(v2_names)):\n print(\"{:,.4f}% \\t {}\".format(loss2[i].mean()*100, v2_names[i]))", "_____no_output_____" ], [ "for i in range(len(v3_names)):\n print(\"{:,.4f}% \\t 
{}\".format(loss3[i].mean(), v3_names[i]))", "_____no_output_____" ], [ "(np.array([loss2[i].mean() for i in range(len(loss2))])).mean()*100", "_____no_output_____" ], [ "(np.array([loss3[i].mean() for i in range(len(loss3))])).mean()*100", "_____no_output_____" ], [ "print(\"Number of Reads per Cell\")\nprint(\"------------------------\")\nfor i in v2_names:\n print(\"{:,.0f} \\t {}\".format(v2_data[i][\"n_reads\"]/v2_data[i][\"n_cells\"], i))", "Number of Reads per Cell\n------------------------\n2,244 \t SRR8599150_v2\n12,395 \t SRR7692543_v2\n124,821 \t heart1k_v2\n4,154 \t SRR8611943_v2\n16,204 \t SRR8257100_v2\n74,312 \t EMTAB7320_v2\n67,576 \t SRR7299563_v2\n201,952 \t SRR8513910_v2\n62,963 \t SRR8639063_v2\n31,693 \t SRR8524760_v2\n38,694 \t SRR6956073_v2\n69,717 \t hgmm1k_v2\n19,464 \t SRR8206317_v2\n18,285 \t SRR8327928_v2\n64,744 \t SRR6998058_v2\n" ], [ "print(\"Number of Reads per Cell\")\nprint(\"------------------------\")\nfor i in v3_names:\n print(\"{:,.0f} \\t {}\".format(v3_data[i][\"n_reads\"]/v3_data[i][\"n_cells\"], i))", "Number of Reads per Cell\n------------------------\n54,280 \t pbmc_1k_v3\n62,837 \t hgmm10k_v3\n30,289 \t neuron_10k_v3\n54,644 \t pbmc_10k_v3\n83,593 \t heart1k_v3\n60,388 \t hgmm1k_v3\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb95ef00bd044a24e5fd71220dab4067b0a619ae
11,807
ipynb
Jupyter Notebook
examples/Talk/Untitled2.ipynb
xmnlab/ibis-vega-transform
75b4c5e4a8452dbeb5a81e19eae817b4d9c64999
[ "Apache-2.0" ]
null
null
null
examples/Talk/Untitled2.ipynb
xmnlab/ibis-vega-transform
75b4c5e4a8452dbeb5a81e19eae817b4d9c64999
[ "Apache-2.0" ]
null
null
null
examples/Talk/Untitled2.ipynb
xmnlab/ibis-vega-transform
75b4c5e4a8452dbeb5a81e19eae817b4d9c64999
[ "Apache-2.0" ]
null
null
null
24.597917
98
0.467943
[ [ [ "import altair as alt\nimport ibis_vega_transform\nimport ibis.omniscidb\n\n\nconn = ibis.omniscidb.connect(\n host='bewdy.mapd.com', user='mapd', password='HyperInteractive',\n port=6274, database='mapd', protocol= 'binary'\n)", "_____no_output_____" ], [ "t = conn.table('github')", "_____no_output_____" ], [ "time_chart = alt.Chart(t).mark_line().encode(\n y='count()',\n x='yearmonth(created_at):T'\n)\ntime_chart", "_____no_output_____" ], [ "type_chart = alt.Chart(t).mark_bar().encode(\n x='count()',\n y=alt.Y('type:O', sort=alt.Sort(encoding='x', order='descending'))\n)\ntype_chart", "_____no_output_____" ], [ "repo_dropdown = alt.binding_select(options=[\n \"jupyterlab/jupyterlab\",\n \"vega/vega\",\n \"vega/vega-lite\",\n \"mapd/mapd-core\"\n])\n\nrepo_selection = alt.selection_single(\n fields=['repo_name'],\n bind=repo_dropdown\n)", "_____no_output_____" ], [ "time_with_filter = time_chart.add_selection(repo_selection).transform_filter(repo_selection)", "_____no_output_____" ], [ "time_with_filter", "_____no_output_____" ], [ "time_selection = alt.selection_interval(\n fields=['create_at'],\n encodings=['x']\n)", "_____no_output_____" ], [ "(\n time_with_filter.add_selection(time_selection) &\n type_chart.transform_filter(\n repo_selection\n ).transform_filter(\n time_selection\n ).properties(height=600)\n).properties(padding=60)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb95fbd2990a8da050001ff8e161bac1498fcfb6
9,343
ipynb
Jupyter Notebook
colab.ipynb
msteknoadam/tf_polynomial_regression
dc278749a17b8ed7a5a2b3742f26d68c4ee3749b
[ "MIT" ]
null
null
null
colab.ipynb
msteknoadam/tf_polynomial_regression
dc278749a17b8ed7a5a2b3742f26d68c4ee3749b
[ "MIT" ]
null
null
null
colab.ipynb
msteknoadam/tf_polynomial_regression
dc278749a17b8ed7a5a2b3742f26d68c4ee3749b
[ "MIT" ]
null
null
null
32.106529
663
0.593278
[ [ [ "try:\n # %tensorflow_version only exists in Colab.\n %tensorflow_version 2.x\nexcept Exception:\n pass", "_____no_output_____" ] ], [ [ "We need to tell Google Colab that we want the TF 2.0 version so the code can work properly.", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\ntf.executing_eagerly()", "_____no_output_____" ] ], [ [ "We also need to import our required libraries so we can use them in the next parts.", "_____no_output_____" ] ], [ [ "a1 = tf.random.uniform(shape=[], minval=-5, maxval=5)\nb1 = tf.random.uniform(shape=[], minval=-5, maxval=5)\nc1 = tf.random.uniform(shape=[], minval=-5, maxval=5)", "_____no_output_____" ] ], [ [ "At this part we get some random values for a, b and c variables since our polynomial equation looks something like this: a*x^2 + b*x + c", "_____no_output_____" ] ], [ [ "a2 = tf.random.uniform(shape=[], minval=-5, maxval=5)\nb2 = tf.random.uniform(shape=[], minval=-5, maxval=5)\nc2 = tf.random.uniform(shape=[], minval=-5, maxval=5)\n\nxs = tf.constant(range(0, 20), dtype=tf.float32)\nys = tf.constant(tf.add(tf.add(tf.multiply(tf.pow(xs, 2), a2), tf.multiply(xs, b2)), c2), dtype=tf.float32)\nprint(f\"Start values: \\nModel: ({a1})*x^2 + ({b1})*x + ({c1})\\nRandom Values: ({a2})*x^2 + ({b2})*x + ({c2})\")", "_____no_output_____" ], [ "# xs = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0], dtype=tf.float32)\n# ys = tf.constant([45000.0, 50000.0, 60000.0, 80000.0, 110000.0, 150000.0, 200000.0, 300000.0, 500000.0, 1000000.0], dtype=tf.float32)", "_____no_output_____" ] ], [ [ "You can either use the upper one or the bottom one while trying this out. The top one generates another random polynomial equation which we then try to find using our model. The bottom one on the other hand, is kind of well known dataset for trying out the polynomial regression. You may come accross that dataset whenever you are looking for polynomial regression tutorials.", "_____no_output_____" ] ], [ [ "plt.plot(xs, ys, 'bo')", "_____no_output_____" ] ], [ [ "Plotting the X and Y values before training our model so we can get an idea of how our data looks like.", "_____no_output_____" ] ], [ [ "def mean_squared_error(predictions, labels):\n return tf.reduce_mean(tf.square(predictions - labels))\n\ndef stochastic_gradient_descent_optimizer(indexes, labels , predictions):\n result = tf.reduce_mean(2 * indexes * (predictions - labels)).numpy()\n print(f\"SGD --> Indexes: {indexes.numpy()} | Labels: {labels.numpy()} | Predictions: {predictions.numpy()} | Result: {result}\")\n return result\n \ndef predict(indexes):\n prediction = tf.add(tf.add(tf.multiply(tf.pow(indexes, 2), a1), tf.multiply(indexes, b1)), c1)\n print(f\"Incoming: {indexes.numpy()} | Prediction: {prediction.numpy()}\")\n return prediction", "_____no_output_____" ] ], [ [ "Here, we declare our 3 main functions we need for our \"thing\" to become a bit of \"Machine Learning Model\". First one is Mean Squared Error. This function will tell a number based on how off our predictions are from the actual values. Next one is Stochastic Gradient Descent. This function will be acting as our optimizer in our model so we will be changing our a1, b1 and c1 values based on this value. And the final and the most important one(yes, all of them are very important but they won't make any sense without this one :D), our connection to model! 
With the prediction function, we can communicate with our model and ask it for predictions.", "_____no_output_____" ] ], [ [ "EPOCHS = 25\nSAMPLES = xs.shape[0]\nBATCH_SIZE = 1\nLEARNING_RATE = 0.0001\n\ndataset = tf.data.Dataset.from_tensor_slices((xs , ys))\ndataset = dataset.repeat(EPOCHS).batch(BATCH_SIZE)\niterator = dataset.__iter__()", "_____no_output_____" ] ], [ [ "At this step, we are preparing our dataset to become iterable so we can train our model with the batches of data we just made here.", "_____no_output_____" ] ], [ [ "num_features = len(xs)\nepochs_plot = list()\nloss_plot = list()\n\nfor i in range(EPOCHS):\n epoch_loss = list()\n for Q in range(int(SAMPLES/BATCH_SIZE)):\n x_batch, y_batch = iterator.get_next()\n output = predict(x_batch)\n # track the per-batch loss; the epoch average is computed below\n epoch_loss.append(mean_squared_error(y_batch , output).numpy())\n deriv_val = stochastic_gradient_descent_optimizer(x_batch, y_batch , output)\n # print(f\"deriv_val: {deriv_val}\")\n a1 -= (LEARNING_RATE * deriv_val)\n b1 -= (LEARNING_RATE * deriv_val)\n c1 -= (LEARNING_RATE * deriv_val)\n loss_val = np.array(epoch_loss).mean()\n epochs_plot.append(i + 1)\n loss_plot.append(loss_val)\n print('Loss is {}'.format(loss_val)) ", "_____no_output_____" ] ], [ [ "And yet another very important step, training! At this step, we train our model using the functions we defined a few steps earlier.", "_____no_output_____" ] ], [ [ "plt.plot(epochs_plot, loss_plot) \nplt.show()", "_____no_output_____" ] ], [ [ "Here, we can see how our loss value lowered as we trained our model at each epoch.", "_____no_output_____" ] ], [ [ "polynomial_points = list()\nfor i in range(len(xs)):\n polynomial_points.append(predict(xs[i]).numpy())\nplt.plot(xs, ys, 'bo', xs, polynomial_points, 'r')", "_____no_output_____" ] ], [ [ "And the final step! First, we just predict our values on the same X values as our dataset so the two series line up in the plot. After making the predictions, we just plot them both together and voila! We have just created and trained our own model for polynomial regression! You can get more information about this project (such as the math behind MSE and SGD) at my blog post, which you can reach via [this](https://blog.tekno.icu/2020/01/22/polynomial-regression-in-python-using-tensorflow-2-0/) link.", "_____no_output_____" ], [ "We can also check how close our model became to the randomly generated quadratic equation, if you chose to generate a random quadratic equation at the first step.", "_____no_output_____" ] ], [ [ "print(f\"End values: \\nModel: ({a1})*x^2 + ({b1})*x + ({c1})\\nRandom Values: ({a2})*x^2 + ({b2})*x + ({c2})\")", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cb960098990c6af7a316db8a051f3c7a865defd3
239,970
ipynb
Jupyter Notebook
#09. Cluster Analysis con k-Means/09practice.ipynb
apoboldon/machine-learning-program
4758e3b8061e0f963161eb57a45ff382a5b15da3
[ "MIT" ]
null
null
null
#09. Cluster Analysis con k-Means/09practice.ipynb
apoboldon/machine-learning-program
4758e3b8061e0f963161eb57a45ff382a5b15da3
[ "MIT" ]
null
null
null
#09. Cluster Analysis con k-Means/09practice.ipynb
apoboldon/machine-learning-program
4758e3b8061e0f963161eb57a45ff382a5b15da3
[ "MIT" ]
null
null
null
90.554717
46,016
0.778989
[ [ [ "<font size=\"+5\">#09. Cluster Analysis con k-Means</font>", "_____no_output_____" ], [ "- Book + Private Lessons [Here ↗](https://sotastica.com/reservar)\n- Subscribe to my [Blog ↗](https://blog.pythonassembly.com/)\n- Let's keep in touch on [LinkedIn ↗](www.linkedin.com/in/jsulopz) 😄", "_____no_output_____" ], [ "# Load the Data", "_____no_output_____" ], [ "> - Simply execute the following lines of code to load the data\n> - This dataset contains **statistics** (columns)\n> - About **Car Models** (rows)", "_____no_output_____" ] ], [ [ "import seaborn as sns\n\ndf = sns.load_dataset(name='mpg', index_col='name')\ndf.sample(10)", "_____no_output_____" ], [ "dfclean=df.dropna()\ndfclean", "_____no_output_____" ], [ "X=dfclean[['mpg','horsepower']]", "_____no_output_____" ] ], [ [ "# Data `preprocessing`\n\n> - Do you need to *transform* the data\n> - To get a **truthful insight** of the model?", "_____no_output_____" ] ], [ [ "from sklearn.preprocessing import MinMaxScaler", "_____no_output_____" ], [ "scaler=MinMaxScaler()", "_____no_output_____" ], [ "import pandas as pd", "_____no_output_____" ], [ "vnorm = scaler.fit_transform(X)", "_____no_output_____" ], [ "X.columns", "_____no_output_____" ], [ "X.index", "_____no_output_____" ], [ "dfnorm = pd.DataFrame(vnorm, columns=X.columns, index=X.index)\ndfnorm.head()", "_____no_output_____" ] ], [ [ "# `KMeans()` Model in Python", "_____no_output_____" ], [ "## Build the Model", "_____no_output_____" ], [ "> 1. **Necesity**: Build Model\n> 2. **Google**: How do you search for the solution?\n> 3. **Solution**: Find the `function()` that makes it happen", "_____no_output_____" ] ], [ [ "from sklearn.cluster import KMeans", "_____no_output_____" ], [ "error=[]", "_____no_output_____" ], [ "error", "_____no_output_____" ], [ "model=KMeans(n_clusters=1)", "_____no_output_____" ], [ "model.fit(X=dfnorm)", "_____no_output_____" ], [ "inercia=model.score(X=dfnorm)", "_____no_output_____" ], [ "inercia", "_____no_output_____" ], [ "error.append(inercia)", "_____no_output_____" ], [ "error", "_____no_output_____" ], [ "model=KMeans(n_clusters=2)", "_____no_output_____" ], [ "model.fit(X=dfnorm)", "_____no_output_____" ], [ "inercia = model.score(X=dfnorm)", "_____no_output_____" ], [ "inercia", "_____no_output_____" ], [ "error.append(inercia)", "_____no_output_____" ], [ "error", "_____no_output_____" ], [ "error = []", "_____no_output_____" ], [ "for pepa in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:\n\n model=KMeans(n_clusters=pepa)\n\n model.fit(X=dfnorm)\n\n inercia = model.score(X=dfnorm)\n\n error.append(inercia)\n\n error", "_____no_output_____" ], [ "sns.scatterplot(y=error, x=list(range(1, 11)))\nsns.lineplot(y=error, x=list(range(1, 11)));", "_____no_output_____" ], [ "import matplotlib.pyplot as plt", "_____no_output_____" ], [ "dfgroup = dfsel.groupby('cluster')", "_____no_output_____" ], [ "dfgroup.get_group(0)", "_____no_output_____" ], [ "dfgroup.get_group(1)", "_____no_output_____" ], [ "plt.scatter(x='mpg', y='horsepower', data=dfgroup.get_group(0))\nplt.scatter(x='mpg', y='horsepower', data=dfgroup.get_group(1))\nplt.scatter(x='mpg', y='horsepower', data=dfgroup.get_group(2))", "_____no_output_____" ], [ "for pepa in [0, 1, 2]:\n plt.scatter(x='mpg', y='horsepower', data=dfgroup.get_group(pepa))", "_____no_output_____" ], [ "error", "_____no_output_____" ], [ "df.mpg", "_____no_output_____" ], [ "df.mpg.hist();", "_____no_output_____" ], [ "df.mpg.sort_values()", "_____no_output_____" ], [ "df.mpg.quantile(q=[0.1, 0.9])", 
"_____no_output_____" ], [ "model=KMeans(n_clusters=3)", "_____no_output_____" ] ], [ [ "## Code Thinking\n\n> Which function computes the Model?\n> - `fit()`\n>\n> How could can you **import the function in Python**?", "_____no_output_____" ] ], [ [ "X", "_____no_output_____" ], [ "model.fit(X)", "_____no_output_____" ] ], [ [ "### Separate Variables for the Model\n\n> Regarding their role:\n> 1. **Target Variable `y`**\n>\n> - [ ] What would you like **to predict**?\n>\n> Total number of accients? Or Alcohol?\n>\n> 2. **Explanatory Variable `X`**\n>\n> - [ ] Which variable will you use **to explain** the target?", "_____no_output_____" ], [ "### Data Visualization to Analyize Patterns", "_____no_output_____" ], [ "> - Visualize the 2 variables with a `scatterplot()`\n> - And decide *how many `clusters`* you'd like to calculate", "_____no_output_____" ] ], [ [ "sns.scatterplot(x='mpg', y='horsepower', data=dfnorm)", "_____no_output_____" ] ], [ [ "### Finally `fit()` the Model", "_____no_output_____" ], [ "## `predict()` the *cluster* for every row", "_____no_output_____" ], [ "> - `model.` + `↹`", "_____no_output_____" ] ], [ [ "pred=model.predict(X=dfnorm)", "_____no_output_____" ], [ "dfnorm", "_____no_output_____" ], [ "pred", "_____no_output_____" ] ], [ [ "> - Create a `dfsel` DataFrame\n> - That contains the **columns you used for the model**", "_____no_output_____" ] ], [ [ "dfsel=dfnorm.copy()", "_____no_output_____" ] ], [ [ "> - Add a **new column**\n> - That **contains the `cluster` prediction** for every USA State", "_____no_output_____" ] ], [ [ "dfsel['cluster'] = pred", "_____no_output_____" ], [ "pred", "_____no_output_____" ], [ "dfsel.sample(20)", "_____no_output_____" ] ], [ [ "## Model Visualization", "_____no_output_____" ], [ "> - You may `hue=` the points with the `cluster` column", "_____no_output_____" ] ], [ [ "dfsel.cluster", "_____no_output_____" ], [ "sns.scatterplot(x='mpg', y='horsepower', data=dfsel,hue=dfsel.cluster, palette='Set1');", "_____no_output_____" ] ], [ [ "## Model Interpretation", "_____no_output_____" ] ], [ [ "%%HTML\n\n<iframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/4b5d3muPQmA\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen></iframe>", "_____no_output_____" ] ], [ [ "> - Can you put a **`nickname` to each group**?\n> - Observe the `centroids` within `model.` + `↹`", "_____no_output_____" ] ], [ [ "model.__dict__", "_____no_output_____" ], [ "dfcentroides = pd.DataFrame(model.cluster_centers_, columns=['mpg', 'horsepower'])\ndfcentroides\n", "_____no_output_____" ] ], [ [ "## Model Visualization with Centroids\n\n> - I want to see the `centroid`\n> - with a **big `markers=\"X\"`** in the plot", "_____no_output_____" ] ], [ [ "sns.scatterplot(x='mpg', y='horsepower', data=dfsel,hue=dfsel.cluster, palette='Set1');\nsns.scatterplot(x='mpg', y='horsepower', data=dfcentroides,hue=dfcentroides.index, palette='Set1', s=500);", "_____no_output_____" ], [ "a = dfnorm.sample()", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "model.predict(a)", "_____no_output_____" ], [ "model.__dict__", "_____no_output_____" ] ], [ [ "# Achieved Goals", "_____no_output_____" ], [ "_Double click on **this cell** and place an `X` inside the square brackets (i.e., [X]) if you think you understand the goal:_\n\n- [X] Understand how the **machine optimizes a model**\n - No more than to find the best numbers for a 
[ [ "# Achieved Goals", "_____no_output_____" ], [ "_Double click on **this cell** and place an `X` inside the square brackets (i.e., [X]) if you think you understand the goal:_\n\n- [X] Understand how the **machine optimizes a model**\n - Nothing more than finding the best numbers for a mathematical equation\n- [X] **Residual Sum of Squares (RSS)** as a fundamental measure for the **error**. We see it in ↓\n - Neural Networks\n - Linear Regression\n - Variance\n- [X] Understand the necessity to **Scale** the Data\n - For all algorithms that involve **distance calculation**.\n- [X] Understand that programming is not an end in itself, but a tool to achieve the end\n - We need to understand the problem and design the solution before coding\n - But we won't be able to build the solution if we don't know how to code first\n - Solution? Apply the discipline\n- [X] There is **not a unique way to group data**. In the same way, there is not a unique way ↓\n - To predict a number **Regression Mathematical Equations**\n - To predict a category **Classification Mathematical Equations**", "_____no_output_____" ], [ "## Machine Learning\n\n- Supervised Models\n - Target: numeric type → Regression\n - Target: categorical type → Classification\n- Unsupervised Models\n - No target: we want to group the data", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
cb96099973073ae8f05011b91d2b40b2ef825ab9
132,489
ipynb
Jupyter Notebook
archive/2020-04-Publishers-Usecases/1-Gathering-data-for-a-journal.ipynb
eschares/dimensions-api-lab
cfe6bc72631c08aabb392044abef020180dffe90
[ "MIT" ]
57
2019-06-24T19:35:34.000Z
2022-02-27T14:45:10.000Z
archive/2020-04-Publishers-Usecases/1-Gathering-data-for-a-journal.ipynb
eschares/dimensions-api-lab
cfe6bc72631c08aabb392044abef020180dffe90
[ "MIT" ]
6
2019-09-04T19:14:40.000Z
2021-12-09T15:54:41.000Z
archive/2020-04-Publishers-Usecases/1-Gathering-data-for-a-journal.ipynb
eschares/dimensions-api-lab
cfe6bc72631c08aabb392044abef020180dffe90
[ "MIT" ]
16
2019-08-13T04:24:01.000Z
2022-03-04T07:49:11.000Z
52.97441
46,362
0.592721
[ [ [ "# Part 1: Extracting a Journal's Publications+Researchers Datasets\n\nIn this notebook we are going to \n\n* extract all publications data for a given journal\n* have a quick look at the publications' authors and affiliations \n* review how many authors have been disambiguated with a Dimensions Researcher ID\n* produce a dataset of non-disambiguated authors that can be used for manual disambiguation ", "_____no_output_____" ], [ "## Prerequisites: Installing the Dimensions Library and Logging in", "_____no_output_____" ] ], [ [ "\n# @markdown # Get the API library and login\n# @markdown Click the 'play' button on the left (or shift+enter) after entering your API credentials\n\nusername = \"\" #@param {type: \"string\"}\npassword = \"\" #@param {type: \"string\"}\nendpoint = \"https://app.dimensions.ai\" #@param {type: \"string\"}\n\n\n!pip install dimcli plotly tqdm -U --quiet\nimport dimcli\nfrom dimcli.shortcuts import *\ndimcli.login(username, password, endpoint)\ndsl = dimcli.Dsl()\n\n#\n# load common libraries\nimport time\nimport sys\nimport json\nimport os\nimport pandas as pd\nfrom pandas.io.json import json_normalize\nfrom tqdm.notebook import tqdm as progress\n\n#\n# charts libs\n# import plotly_express as px\nimport plotly.express as px\nif not 'google.colab' in sys.modules:\n # make js dependecies local / needed by html exports\n from plotly.offline import init_notebook_mode\n init_notebook_mode(connected=True)\n#\n# create output data folder\nif not(os.path.exists(\"data\")):\n os.mkdir(\"data\")", "DimCli v0.6.7 - Succesfully connected to <https://app.dimensions.ai> (method: dsl.ini file)\n" ] ], [ [ "## Selecting a Journal and Extracting All Publications Metadata", "_____no_output_____" ] ], [ [ "#@title Select a journal from the dropdown\n#@markdown If the journal isn't there, you can try type in the exact name instead.\n\njournal_title = \"Nature Genetics\" #@param ['Nature', 'The Science of Nature', 'Nature Communications', 'Nature Biotechnology', 'Nature Medicine', 'Nature Genetics', 'Nature Neuroscience', 'Nature Structural & Molecular Biology', 'Nature Methods', 'Nature Cell Biology', 'Nature Immunology', 'Nature Reviews Drug Discovery', 'Nature Materials', 'Nature Physics', 'Nature Reviews Neuroscience', 'Nature Nanotechnology', 'Nature Reviews Genetics', 'Nature Reviews Urology', 'Nature Reviews Molecular Cell Biology', 'Nature Precedings', 'Nature Reviews Cancer', 'Nature Photonics', 'Nature Reviews Immunology', 'Nature Reviews Cardiology', 'Nature Reviews Gastroenterology & Hepatology', 'Nature Reviews Clinical Oncology', 'Nature Reviews Endocrinology', 'Nature Reviews Neurology', 'Nature Chemical Biology', 'Nature Reviews Microbiology', 'Nature Geoscience', 'Nature Reviews Rheumatology', 'Nature Climate Change', 'Nature Reviews Nephrology', 'Nature Chemistry', 'Nature Digest', 'Nature Protocols', 'Nature Middle East', 'Nature India', 'Nature China', 'Nature Plants', 'Nature Microbiology', 'Nature Ecology & Evolution', 'Nature Astronomy', 'Nature Energy', 'Nature Human Behaviour', 'AfCS-Nature Molecule Pages', 'Human Nature', 'Nature Reviews Disease Primers', 'Nature Biomedical Engineering', 'Nature Reports Stem Cells', 'Nature Reviews Materials', 'Nature Sustainability', 'Nature Catalysis', 'Nature Electronics', 'Nature Reviews Chemistry', 'Nature Metabolism', 'Nature Reviews Physics', 'Nature Machine Intelligence', 'NCI Nature Pathway Interaction Database', 'Nature Reports: Climate Change'] {allow-input: true}\nstart_year = 2015 #@param {type: 
\"number\"}\n#@markdown ---\n\n# PS \n# To get titles from the API one can do this:\n# > %dsldf search publications where journal.title~\"Nature\" and publisher=\"Springer Nature\" return journal limit 100\n# > \", \".join([f\"'{x}'\" for x in list(dsl_last_results.title)]) \n#\n\nq_template = \"\"\"search publications where \n journal.title=\"{}\" and \n year>={} \n return publications[basics+altmetric+times_cited]\"\"\"\nq = q_template.format(journal_title, start_year)\nprint(\"DSL Query:\\n----\\n\", q, \"\\n----\")\npubs = dsl.query_iterative(q.format(journal_title, start_year), limit=500)\n", "DSL Query:\n----\n search publications where \n journal.title=\"Nature Genetics\" and \n year>=2015 \n return publications[basics+altmetric+times_cited] \n----\n500 / 1472\n1000 / 1472\n1472 / 1472\n" ] ], [ [ "Save the data as a CSV file in case we want to reuse it later", "_____no_output_____" ] ], [ [ "dfpubs = pubs.as_dataframe()\ndfpubs.to_csv(\"data/1.pubs_metadata_with_metrics.csv\")\n# preview the publications \ndfpubs.head(10)", "_____no_output_____" ] ], [ [ "Extract the authors data ", "_____no_output_____" ] ], [ [ "# preview the authors data \nauthors = pubs.as_dataframe_authors()\nauthors.to_csv(\"data/1.publications_authors.csv\", index=False)\nauthors.head(10)", "_____no_output_____" ] ], [ [ "Extract the affiliations data ", "_____no_output_____" ] ], [ [ "affiliations = pubs.as_dataframe_authors_affiliations()\naffiliations.to_csv(\"data/1.publications_authors_affiliations.csv\", index=False)\naffiliations.head(10)", "_____no_output_____" ] ], [ [ "## Some stats about authors\n\n* count how many authors in total \n* count how many authors have a researcher ID\n* count how many unique researchers IDs we have in total", "_____no_output_____" ] ], [ [ "researchers = authors.query(\"researcher_id!=''\")\n#\ndf = pd.DataFrame({\n 'measure' : ['Authors in total (non unique)', 'Authors with a researcher ID', 'Authors with a researcher ID (unique)'],\n 'count' : [len(authors), len(researchers), researchers['researcher_id'].nunique()],\n})\npx.bar(df, x=\"measure\", y=\"count\", title=f\"Author stats for {journal_title} (from {start_year})\")", "_____no_output_____" ], [ "# save the researchers data to a file\nresearchers.to_csv(\"data/1.authors_with_researchers_id.csv\")", "_____no_output_____" ] ], [ [ "## Apprendix: A quick look at authors *without a Researcher ID*\n\nWe're not going to try to disambiguate them here, but still it's good to have a quick look at them... \n\nLooks like the most common surname is `Wang`, while the most common first name is an empty value", "_____no_output_____" ] ], [ [ "authors_without_id = authors.query(\"researcher_id==''\")\nauthors_without_id[['first_name', 'last_name']].describe()\n", "_____no_output_____" ] ], [ [ "Top Ten surnames seem all Chinese.. ", "_____no_output_____" ] ], [ [ "authors_without_id['last_name'].value_counts()[:10]", "_____no_output_____" ] ], [ [ "### Any common patterns? 
[ [ "## Appendix: A quick look at authors *without a Researcher ID*\n\nWe're not going to try to disambiguate them here, but it's still good to have a quick look at them... \n\nIt looks like the most common surname is `Wang`, while the most common first name is an empty value", "_____no_output_____" ] ], [ [ "authors_without_id = authors.query(\"researcher_id==''\")\nauthors_without_id[['first_name', 'last_name']].describe()\n", "_____no_output_____" ] ], [ [ "The top ten surnames all seem to be Chinese. ", "_____no_output_____" ] ], [ [ "authors_without_id['last_name'].value_counts()[:10]", "_____no_output_____" ] ], [ [ "### Any common patterns? \n\nIf we try to group the data by name+surname we can see some interesting patterns: \n\n* some entries are things which are not persons (presumably the result of bad source data in Dimensions, e.g. from the publisher) \n* there are some apparently meaningful name+surname combinations with a lot of hits\n* not many Chinese names among the top ones \n\n", "_____no_output_____" ] ], [ [ "test = authors_without_id.groupby([\"first_name\", \"last_name\"]).size()\ntest.sort_values(ascending=False, inplace=True)\ntest.head(50)", "_____no_output_____" ] ], [ [ "## Conclusion and next steps\n\nFor the next tasks, we will focus on the disambiguated authors, as the Researcher ID links will let us carry out useful analyses.\n\nStill, we can **save the authors with missing IDs** and try to do some manual disambiguation later. To this end, adding a simple Google-search URL can help in making sense of these data quickly.", "_____no_output_____" ] ], [ [ "from urllib.parse import quote\n\nout = []\nfor index, value in test.items():\n # compose a simple URL of the form 'https://www.google.com/search?q=tonu+esko'\n if index[0] or index[1]:\n n, s = quote(index[0]), quote(index[1])\n url = f\"https://www.google.com/search?q={n}+{s}\"\n else:\n url = \"\"\n d = {'name': index[0] , 'surname' : index[1] , 'frequency' : value , 'search_url' : url }\n out.append(d)\n\ndftest = pd.DataFrame.from_dict(out)\n# set order of columns\ndftest = dftest[['name', 'surname', 'frequency', 'search_url']]\n\ndftest.head(20)", "_____no_output_____" ], [ "# save the data\n#\ndftest.to_csv(\"data/1.authors_not_disambiguated_frequency.csv\", header=True)", "_____no_output_____" ], [ "# Download the exported files when running in Google Colab; guard on sys.modules so\n# this cell is also safe to run outside Colab.\nif 'google.colab' in sys.modules:\n from google.colab import files\n files.download(\"data/1.authors_not_disambiguated_frequency.csv\")\n files.download(\"data/1.authors_with_researchers_id.csv\")\n files.download(\"data/1.publications_authors.csv\")\n files.download(\"data/1.publications_authors_affiliations.csv\")\n files.download(\"data/1.pubs_metadata_with_metrics.csv\")", "_____no_output_____" ] ], [ [ "That's it! \n\nNow let's go and open this in [Google Sheets](https://docs.google.com/spreadsheets/)...", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
cb960b4d0c17979206340c2c6f57eb8ab8c8aee8
258,315
ipynb
Jupyter Notebook
notebooks/Submission 03 - Model and Prediction.ipynb
nberliner/Random-Walk-of-the-Penguins
bcb3c6139b6a600e9b9179c80329a1814de42ffe
[ "MIT" ]
null
null
null
notebooks/Submission 03 - Model and Prediction.ipynb
nberliner/Random-Walk-of-the-Penguins
bcb3c6139b6a600e9b9179c80329a1814de42ffe
[ "MIT" ]
null
null
null
notebooks/Submission 03 - Model and Prediction.ipynb
nberliner/Random-Walk-of-the-Penguins
bcb3c6139b6a600e9b9179c80329a1814de42ffe
[ "MIT" ]
null
null
null
194.660889
98,844
0.850555
[ [ [ "%pylab inline", "Populating the interactive namespace from numpy and matplotlib\n" ], [ "import os\nimport sys\n\nsrc_dir = os.path.join(os.getcwd(), os.pardir, 'src')\nsys.path.append(src_dir)", "_____no_output_____" ], [ "import pickle\n\nimport numpy as np\nimport pandas as pd\n\n# Set the seed for Keras\nnp.random.seed(4)\n\nfrom model.model import get_model, split_train_test\n\nfrom model.predictions import fit_model, predict, assemble_submission, convert_predictions, AMAPE\n\nfrom utils.visualisations import PenguinVisualisation", "Using Theano backend.\n" ], [ "# Use the same features without noise added computed for Submission 2\nfname = '../data/interim/Submission_02/features.p'\ndf_train, df_test, df_features, _, features, scaler = pickle.load(open(fname, 'rb'))", "_____no_output_____" ], [ "print(df_train.shape, df_test.shape)\ndf_train.head()", "(17496, 38) (1296, 38)\n" ], [ "df_train.columns", "_____no_output_____" ], [ "# Get the model\nts_steps = 4\naux_input_size = 6\nmodel = get_model(ts_steps, aux_input_size=aux_input_size)", "_____no_output_____" ], [ "# Run the fitting\nbatch_size = 256\nmodel, history = fit_model(df_train, model, df_test, batch_size=batch_size, epochs=300, verbose=0)", "_____no_output_____" ], [ "# Show the history of the validation loss and select the \"best\" number of epochs to run\nval_loss = history.history['val_loss']\nbest_epoch_auto = np.argmin(val_loss) + 1\nbest_epoch = best_epoch_auto\n\nprint(\"Epoch with lowest validation loss is epoch %i with a loss of %.2f\" %(best_epoch_auto, val_loss[best_epoch_auto]))\nprint(\"Manually chosen epoch is epoch %i with a loss of %.2f\" %(best_epoch, val_loss[best_epoch]))\n\n\nfig = plt.figure(figsize=(13,5))\nax = fig.add_subplot(111)\nax.plot(val_loss)\nax.axvline(best_epoch-1, color='green', linestyle='dashed');\nax.axvline(best_epoch_auto-1, color='red');", "Epoch with lowest validation loss is epoch 218 with a loss of 0.27\nManually chosen epoch is epoch 218 with a loss of 0.27\n" ], [ "# Get the model\nnp.random.seed(4)\nts_steps = 4\naux_input_size = 6\nmodel = get_model(ts_steps, aux_input_size=aux_input_size)\nmodel, history = fit_model(df_train, model, df_test, batch_size=batch_size, epochs=best_epoch, verbose=0)", "_____no_output_____" ], [ "val_loss_re = history.history['val_loss']\nbest_epoch_re = np.argmin(val_loss_re) + 1\n\nprint(\"Epoch with lowest validation loss is epoch %i with a loss of %.2f\" %(best_epoch_re, val_loss_re[best_epoch_re-1]))\n\nfig = plt.figure(figsize=(13,5))\nax = fig.add_subplot(111)\nax.plot(val_loss_re)\nax.axvline(best_epoch_re-1, color='red');", "Epoch with lowest validation loss is epoch 218 with a loss of 0.25\n" ], [ "# Predict the steps ahead\ndf_predictions = predict(df_features, 4, model, features)\ndf_predictions.head()", "_____no_output_____" ], [ "# Rescale the predictions\ndf_predictions = convert_predictions(df_predictions, scaler)\ndf_predictions = df_predictions.round()\ndf_predictions.head()", "_____no_output_____" ], [ "# Show some estimate of what the final score for the test set would be.\n# First, show the score for all data (i.e. 
including the interpolated counts)\namape = AMAPE(interpolated=True)\namapeScore = amape.amape(df_predictions)\n\nprint(\"AMAPE 2011: %.2f\" %amapeScore.loc['2011'])\nprint(\"AMAPE 2012: %.2f\" %amapeScore.loc['2012'])\nprint(\"AMAPE 2013: %.2f\" %amapeScore.loc['2013'])\nprint(\"Projected AMAPE score: %.2f\" %(amapeScore.loc['2011']+amapeScore.loc['2012']+amapeScore.loc['2013']))\n\nplt.figure(figsize=(10,5))\nplt.plot(amapeScore);", "AMAPE 2011: 0.26\nAMAPE 2012: 0.35\nAMAPE 2013: 0.37\nProjected AMAPE score: 0.98\n" ], [ "# Show the score only for the truly observed predictions. This will be a harder target, since the interpolated\n# data is smooth and thus much easier to predict.\namape = AMAPE(interpolated=False)\namapeScore = amape.amape(df_predictions)\n\nprint(\"AMAPE 2011: %.2f\" %amapeScore.loc['2011'])\nprint(\"AMAPE 2012: %.2f\" %amapeScore.loc['2012'])\nprint(\"AMAPE 2013: %.2f\" %amapeScore.loc['2013'])\nprint(\"Projected AMAPE score: %.2f\" %(amapeScore.loc['2011']+amapeScore.loc['2012']+amapeScore.loc['2013']))\n\nplt.figure(figsize=(10,5))\nplt.plot(amapeScore);", "AMAPE 2011: 1.67\nAMAPE 2012: 1.68\nAMAPE 2013: 2.43\nProjected AMAPE score: 5.77\n" ], [ "# Have a look at some random sites and species and check if the model seems to make sense\nvis = PenguinVisualisation(df_predictions)\nvis.plot_random()", "_____no_output_____" ], [ "# Finally, retrain the model with the full data and make the predictions for the submission\nnp.random.seed(4)\nmodel = get_model(ts_steps, aux_input_size=aux_input_size)\nmodel, _ = fit_model(df_features, model, None, batch_size=batch_size, epochs=best_epoch, verbose=0)\n\n# Make the predictions\ndf_predictions = predict(df_features, 4, model, features)\ndf_predictions = convert_predictions(df_predictions, scaler)\ndf_predictions = df_predictions.round()\n\n# Prepare the submission\ndf_submission = assemble_submission(df_predictions)\ndf_submission.head()", "_____no_output_____" ], [ "# Check if the score changes much compared to the run without the test data.\namape = AMAPE(interpolated=False)\namapeScore = amape.amape(df_predictions)\n\nprint(\"AMAPE 2011: %.2f\" %amapeScore.loc['2011'])\nprint(\"AMAPE 2012: %.2f\" %amapeScore.loc['2012'])\nprint(\"AMAPE 2013: %.2f\" %amapeScore.loc['2013'])\nprint(\"Projected AMAPE score: %.2f\" %(amapeScore.loc['2011']+amapeScore.loc['2012']+amapeScore.loc['2013']))\n\nplt.figure(figsize=(10,5))\nplt.plot(amapeScore);", "AMAPE 2011: 1.60\nAMAPE 2012: 1.72\nAMAPE 2013: 2.65\nProjected AMAPE score: 5.97\n" ], [ "fname_submission = '../data/submission/submission_03.csv'\ndf_submission.to_csv(fname_submission, index=True)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9612731739a55f86a002162df9812c6688e719
6,873
ipynb
Jupyter Notebook
gym/CartPole-v0/Hill_Climbing.ipynb
lukedottec/ai-gym
1a94777c7d6d4c53871855758ea0c0b8d325f297
[ "MIT" ]
null
null
null
gym/CartPole-v0/Hill_Climbing.ipynb
lukedottec/ai-gym
1a94777c7d6d4c53871855758ea0c0b8d325f297
[ "MIT" ]
15
2021-03-18T22:14:35.000Z
2022-03-11T23:40:51.000Z
gym/CartPole-v0/Hill_Climbing.ipynb
lukedottec/ai-gym
1a94777c7d6d4c53871855758ea0c0b8d325f297
[ "MIT" ]
1
2019-05-03T14:40:48.000Z
2019-05-03T14:40:48.000Z
29.625
135
0.519424
[ [ [ "# Hill Climbing\n\n---\n\nIn this notebook, we will train hill climbing with adaptive noise scaling with OpenAI Gym's Cartpole environment.", "_____no_output_____" ], [ "### 1. Import the Necessary Packages", "_____no_output_____" ] ], [ [ "import gym\nimport numpy as np\nfrom collections import deque\nimport matplotlib.pyplot as plt\n%matplotlib inline\n\n!python -m pip install pyvirtualdisplay\nfrom pyvirtualdisplay import Display\ndisplay = Display(visible=0, size=(1400, 900))\ndisplay.start()\n\nis_ipython = 'inline' in plt.get_backend()\nif is_ipython:\n from IPython import display\n\nplt.ion()", "Requirement already satisfied: pyvirtualdisplay in /opt/conda/lib/python3.6/site-packages\nRequirement already satisfied: EasyProcess in /opt/conda/lib/python3.6/site-packages (from pyvirtualdisplay)\n\u001b[33mYou are using pip version 9.0.1, however version 19.0.1 is available.\nYou should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n" ] ], [ [ "### 2. Define the Policy", "_____no_output_____" ] ], [ [ "env = gym.make('CartPole-v0')\nprint('observation space:', env.observation_space)\nprint('action space:', env.action_space)\n\nclass Policy():\n def __init__(self, s_size=4, a_size=2):\n self.w = 1e-4*np.random.rand(s_size, a_size) # weights for simple linear policy: state_space x action_space\n \n def forward(self, state):\n x = np.dot(state, self.w)\n return np.exp(x)/sum(np.exp(x))\n \n def act(self, state):\n probs = self.forward(state)\n #action = np.random.choice(2, p=probs) # option 1: stochastic policy\n action = np.argmax(probs) # option 2: deterministic policy\n return action", "_____no_output_____" ] ], [ [ "### 3. Train the Agent with Stochastic Policy Search", "_____no_output_____" ] ], [ [ "env = gym.make('CartPole-v0')\nenv.seed(0)\nnp.random.seed(0)\n\npolicy = Policy()\n\ndef hill_climbing(n_episodes=1000, max_t=1000, gamma=1.0, print_every=100, noise_scale=1e-2):\n \"\"\"Implementation of hill climbing with adaptive noise scaling.\n \n Params\n ======\n n_episodes (int): maximum number of training episodes\n max_t (int): maximum number of timesteps per episode\n gamma (float): discount rate\n print_every (int): how often to print average score (over last 100 episodes)\n noise_scale (float): standard deviation of additive noise\n \"\"\"\n scores_deque = deque(maxlen=100)\n scores = []\n best_R = -np.Inf\n best_w = policy.w\n for i_episode in range(1, n_episodes+1):\n rewards = []\n state = env.reset()\n for t in range(max_t):\n action = policy.act(state)\n state, reward, done, _ = env.step(action)\n rewards.append(reward)\n if done:\n break \n scores_deque.append(sum(rewards))\n scores.append(sum(rewards))\n\n discounts = [gamma**i for i in range(len(rewards)+1)]\n R = sum([a*b for a,b in zip(discounts, rewards)])\n\n if R >= best_R: # found better weights\n best_R = R\n best_w = policy.w\n noise_scale = max(1e-3, noise_scale / 2)\n policy.w += noise_scale * np.random.rand(*policy.w.shape) \n else: # did not find better weights\n noise_scale = min(2, noise_scale * 2)\n policy.w = best_w + noise_scale * np.random.rand(*policy.w.shape)\n\n if i_episode % print_every == 0:\n print('Episode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))\n if np.mean(scores_deque)>=195.0:\n print('Environment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_deque)))\n policy.w = best_w\n break\n \n return scores\n \nscores = hill_climbing()", "_____no_output_____" ] ], [ [ "### 4. 
Plot the Scores", "_____no_output_____" ] ], [ [ "fig = plt.figure()\nax = fig.add_subplot(111)\nplt.plot(np.arange(1, len(scores)+1), scores)\nplt.ylabel('Score')\nplt.xlabel('Episode #')\nplt.show()", "_____no_output_____" ] ], [ [ "### 5. Watch a Smart Agent!", "_____no_output_____" ] ], [ [ "env = gym.make('CartPole-v0')\nstate = env.reset()\nimg = plt.imshow(env.render(mode='rgb_array'))\nfor t in range(200):\n action = policy.act(state)\n img.set_data(env.render(mode='rgb_array')) \n plt.axis('off')\n display.display(plt.gcf())\n display.clear_output(wait=True)\n state, reward, done, _ = env.step(action)\n if done:\n break \n\nenv.close()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb961775d7a32edb24832dad221b9569ce6cfaa0
8,729
ipynb
Jupyter Notebook
Solutions/challenge.ipynb
raminamini1985/Fraudulent-Transactions
b7f8d89115c4ffb1a8e7bb06c1e62279524fd7e9
[ "PostgreSQL", "CC-BY-2.0" ]
null
null
null
Solutions/challenge.ipynb
raminamini1985/Fraudulent-Transactions
b7f8d89115c4ffb1a8e7bb06c1e62279524fd7e9
[ "PostgreSQL", "CC-BY-2.0" ]
null
null
null
Solutions/challenge.ipynb
raminamini1985/Fraudulent-Transactions
b7f8d89115c4ffb1a8e7bb06c1e62279524fd7e9
[ "PostgreSQL", "CC-BY-2.0" ]
null
null
null
35.197581
224
0.503952
[ [ [ "# Challenge\n\nAnother approach to identifying fraudulent transactions is to look for outliers in the data. Standard deviation or quartiles are often used to detect outliers. Using this starter notebook, code two Python functions:\n\n* One that uses standard deviation to identify anomalies for any cardholder.\n\n* Another that uses interquartile range to identify anomalies for any cardholder.\n\n## Identifying Outliers using Standard Deviation", "_____no_output_____" ] ], [ [ "# Initial imports\nimport pandas as pd\nimport numpy as np\nimport random\nfrom sqlalchemy import create_engine\n\n", "_____no_output_____" ], [ "# Create a connection to the database\nengine = create_engine(\"postgresql://postgres:postgres@localhost:5432/fraud_detection\")\n\n", "_____no_output_____" ], [ "# Write function that locates outliers using standard deviation\ndef find_outliers_sd(card_holder=1):\n query = (\n \"SELECT t.date, t.amount, t.card \"\n + \"FROM transaction AS t \"\n + \"JOIN credit_card AS cc ON cc.card = t.card \"\n + \"JOIN card_holder AS ch ON ch.id = cc.id_card_holder \"\n + \"WHERE ch.id = \"\n + str(card_holder)\n + \" ORDER BY date\"\n )\n data = pd.read_sql(query, engine)\n elements = data[\"amount\"]\n mean = np.mean(elements, axis=0)\n sd = np.std(elements, axis=0)\n # 2 standard deviations are taken for analysis purposes\n low_transactions = [x for x in elements if (x < mean - 2 * sd)]\n high_transaction = [x for x in elements if (x > mean + 2 * sd)]\n final_list = low_transactions + high_transaction\n if len(final_list) > 0:\n query = (\n \"SELECT t.date, t.amount, t.card \"\n + \"FROM transaction AS t \"\n + \"JOIN credit_card AS cc ON cc.card = t.card \"\n + \"JOIN card_holder AS ch ON ch.id = cc.id_card_holder \"\n + \"WHERE ch.id = \"\n + str(card_holder)\n + \" AND t.amount IN (\"\n + str(final_list)[1:-1]\n + \") \"\n + \"ORDER BY date\"\n )\n data = pd.read_sql(query, engine)\n return data\n else:\n return \"There are no fraudulent transactions identified for this card holder\"\n\n", "_____no_output_____" ], [ "# Find anomalous transactions for 3 random card holders\nfor i in range(1, 4):\n card_holder = random.randint(1, 25)\n print(\"*\" * 60)\n print(f\"Looking for fraudulent transactions for card holder id {card_holder}\")\n print(find_outliers_sd(card_holder))\n", "************************************************************\nLooking for fraudulent transactions for card holder id 23\n date amount card\n0 2018-06-21 22:11:26 20.65 4150721559116778\n************************************************************\nLooking for fraudulent transactions for card holder id 20\n date amount card\n0 2018-01-14 06:19:11 21.11 3535651398328201\n1 2018-05-11 12:43:50 20.56 4586962917519654607\n2 2018-08-26 07:15:18 23.13 4506405265172173\n3 2018-10-07 08:16:54 20.44 4586962917519654607\n4 2018-11-09 19:38:36 20.27 3535651398328201\n************************************************************\nLooking for fraudulent transactions for card holder id 24\n date amount card\n0 2018-03-20 13:05:54 1011.0 30142966699187\n1 2018-04-21 18:40:47 525.0 30142966699187\n2 2018-05-08 13:21:01 1901.0 30142966699187\n3 2018-12-21 09:56:32 1301.0 30142966699187\n4 2018-12-25 19:10:42 1035.0 30142966699187\n" ] ], [ [ "## Identifying Outliers Using Interquartile Range", "_____no_output_____" ] ], [ [ "# Write a function that locates outliers using interquartile range\ndef find_outliers_iqr(card_holder=1):\n query = (\n \"SELECT t.date, t.amount, t.card \"\n + \"FROM transaction AS t \"\n + \"JOIN 
credit_card AS cc ON cc.card = t.card \"\n + \"JOIN card_holder AS ch ON ch.id = cc.id_card_holder \"\n + \"WHERE ch.id = \"\n + str(card_holder)\n + \" ORDER BY date\"\n )\n data = pd.read_sql(query, engine)\n # calculate interquartile range\n q25, q75 = np.percentile(data[\"amount\"], 25), np.percentile(data[\"amount\"], 75)\n iqr = q75 - q25\n # calculate the outlier cutoff\n cut_off = iqr * 1.5\n lower, upper = q25 - cut_off, q75 + cut_off\n # identify outliers\n outliers = [x for x in data[\"amount\"] if x < lower or x > upper]\n if len(outliers) > 0:\n query = (\n \"SELECT t.date, t.amount, t.card \"\n + \"FROM transaction AS t \"\n + \"JOIN credit_card AS cc ON cc.card = t.card \"\n + \"JOIN card_holder AS ch ON ch.id = cc.id_card_holder \"\n + \"WHERE ch.id = \"\n + str(card_holder)\n + \" AND t.amount IN (\"\n + str(outliers)[1:-1]\n + \") \"\n + \"ORDER BY date\"\n )\n data = pd.read_sql(query, engine)\n return data\n else:\n return \"There are no fraudulent transactions identified for this card holder\"\n\n", "_____no_output_____" ], [ "# Find anomalous transactions for 3 random card holders\nfor i in range(1, 4):\n card_holder = random.randint(1, 25)\n print(\"*\" * 60)\n print(f\"Looking for fraudulent transactions for card holder id {card_holder}\")\n print(find_outliers_iqr(card_holder))\n", "************************************************************\nLooking for fraudulent transactions for card holder id 21\nThere are no fraudulent transactions identified for this card holder\n************************************************************\nLooking for fraudulent transactions for card holder id 11\nThere are no fraudulent transactions identified for this card holder\n************************************************************\nLooking for fraudulent transactions for card holder id 8\nThere are no fraudulent transactions identified for this card holder\n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb9619255567cbf56b4d331eb16775289e43ed0f
112,564
ipynb
Jupyter Notebook
src/demo.ipynb
lychengr3x/Bird-Species-Classification-Using-Transfer-Learning
d3477f67c8a3281517073285cd47c5a9fe9409d2
[ "MIT" ]
4
2020-11-04T00:41:35.000Z
2021-11-03T02:31:26.000Z
src/demo.ipynb
lychengr3x/Bird-Species-Classification-Using-Transfer-Learning
d3477f67c8a3281517073285cd47c5a9fe9409d2
[ "MIT" ]
null
null
null
src/demo.ipynb
lychengr3x/Bird-Species-Classification-Using-Transfer-Learning
d3477f67c8a3281517073285cd47c5a9fe9409d2
[ "MIT" ]
null
null
null
119.115344
72,679
0.795334
[ [ [ "%matplotlib notebook\nimport torch\nimport matplotlib.pyplot as plt\nfrom data import BirdsDataset\nfrom argument import Args\nfrom model import VGG16Transfer, ClassificationStatsManager, Resnet18Transfer\nimport nntools as nt\nfrom utils import imshow, plot", "_____no_output_____" ], [ "args = Args()", "_____no_output_____" ], [ "vars(args)", "_____no_output_____" ], [ "args.plot=True", "_____no_output_____" ], [ "device = 'cuda' if torch.cuda.is_available() else 'cpu'\ntrain_set = BirdsDataset(args.root_dir, image_size=args.image_size)\nval_set = BirdsDataset(args.root_dir, mode='val',\n image_size=args.image_size)\nnum_classes = train_set.number_of_classes()\n\nif args.model == 'vgg':\n net = VGG16Transfer(num_classes)\nelse:\n net = Resnet18Transfer(num_classes)\nnet = net.to(device)\nadam = torch.optim.Adam(net.parameters(), lr=args.lr)\nstats_manager = ClassificationStatsManager()\nexp = nt.Experiment(net, train_set, val_set, adam, stats_manager, batch_size=args.batch_size,\n output_dir=args.output_dir, perform_validation_during_training=True)\n\nif args.plot:\n fig, axes = plt.subplots(ncols=2, figsize=(7, 3))\n exp.run(num_epochs=args.num_epochs,\n plot=lambda exp: plot(exp, fig=fig, axes=axes))\nelse:\n exp.run(num_epochs=args.num_epochs)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
cb961e79dc823e01fd23923127239107b09db2dd
14,247
ipynb
Jupyter Notebook
tutorial/source/jit.ipynb
gchanan/pyro
ff67eef68b359146f44cc171f5b2a47569a32e1a
[ "Apache-2.0" ]
null
null
null
tutorial/source/jit.ipynb
gchanan/pyro
ff67eef68b359146f44cc171f5b2a47569a32e1a
[ "Apache-2.0" ]
null
null
null
tutorial/source/jit.ipynb
gchanan/pyro
ff67eef68b359146f44cc171f5b2a47569a32e1a
[ "Apache-2.0" ]
null
null
null
35.440299
886
0.593178
[ [ [ "# Using the PyTorch JIT Compiler with Pyro\n\nThis tutorial shows how to use the PyTorch [jit compiler](https://pytorch.org/docs/master/jit.html) in Pyro models.\n\n#### Summary:\n- You can use compiled functions in Pyro models.\n- You cannot use pyro primitives inside compiled functions.\n- If your model has static structure, you can use a `Jit*` version of an `ELBO` algorithm, e.g.\n ```diff\n - Trace_ELBO()\n + JitTrace_ELBO()\n ```\n- The [HMC](http://docs.pyro.ai/en/dev/mcmc.html#pyro.infer.mcmc.HMC) and [NUTS](http://docs.pyro.ai/en/dev/mcmc.html#pyro.infer.mcmc.NUTS) classes accept `jit_compile=True` kwarg.\n- Models should input all tensors as `*args` and all non-tensors as `**kwargs`.\n- Each different value of `**kwargs` triggers a separate compilation.\n- Use `**kwargs` to specify all variation in structure (e.g. time series length).\n- To ignore jit warnings in safe code blocks, use `with pyro.util.ignore_jit_warnings():`.\n- To ignore all jit warnings in `HMC` or `NUTS`, pass `ignore_jit_warnings=True`.\n\n#### Table of contents\n- [Introduction](#Introduction)\n- [A simple model](#A-simple-model)\n- [Varying structure](#Varying-structure)", "_____no_output_____" ] ], [ [ "import os\nimport torch\nimport pyro\nimport pyro.distributions as dist\nfrom torch.distributions import constraints\nfrom pyro import poutine\nfrom pyro.distributions.util import broadcast_shape\nfrom pyro.infer import Trace_ELBO, JitTrace_ELBO, TraceEnum_ELBO, JitTraceEnum_ELBO, SVI\nfrom pyro.infer.mcmc import MCMC, NUTS\nfrom pyro.infer.autoguide import AutoDiagonalNormal\nfrom pyro.optim import Adam\n\nsmoke_test = ('CI' in os.environ)\nassert pyro.__version__.startswith('1.3.1')\npyro.enable_validation(True) # <---- This is always a good idea!", "_____no_output_____" ] ], [ [ "\n## Introduction\n\nPyTorch 1.0 includes a [jit compiler](https://pytorch.org/docs/master/jit.html) to speed up models. You can think of compilation as a \"static mode\", whereas PyTorch usually operates in \"eager mode\".\n\nPyro supports the jit compiler in two ways. First you can use compiled functions inside Pyro models (but those functions cannot contain Pyro primitives). Second, you can use Pyro's jit inference algorithms to compile entire inference steps; in static models this can reduce the Python overhead of Pyro models and speed up inference.\n\nThe rest of this tutorial focuses on Pyro's jitted inference algorithms: [JitTrace_ELBO](http://docs.pyro.ai/en/dev/inference_algos.html#pyro.infer.trace_elbo.JitTrace_ELBO), [JitTraceGraph_ELBO](http://docs.pyro.ai/en/dev/inference_algos.html#pyro.infer.tracegraph_elbo.JitTraceGraph_ELBO), [JitTraceEnum_ELBO](http://docs.pyro.ai/en/dev/inference_algos.html#pyro.infer.traceenum_elbo.JitTraceEnum_ELBO), [JitMeanField_ELBO](http://docs.pyro.ai/en/dev/inference_algos.html#pyro.infer.trace_mean_field_elbo.JitTraceMeanField_ELBO), [HMC(jit_compile=True)](http://docs.pyro.ai/en/dev/mcmc.html#pyro.infer.mcmc.HMC), and [NUTS(jit_compile=True)](http://docs.pyro.ai/en/dev/mcmc.html#pyro.infer.mcmc.NUTS). 
For further reading, see the [examples/](https://github.com/pyro-ppl/pyro/tree/dev/examples) directory, where most examples include a `--jit` option to run in compiled mode.\n\n## A simple model\n\nLet's start with a simple Gaussian model and an [autoguide](http://docs.pyro.ai/en/dev/infer.autoguide.html).", "_____no_output_____" ] ], [ [ "def model(data):\n loc = pyro.sample(\"loc\", dist.Normal(0., 10.))\n scale = pyro.sample(\"scale\", dist.LogNormal(0., 3.))\n with pyro.plate(\"data\", data.size(0)):\n pyro.sample(\"obs\", dist.Normal(loc, scale), obs=data)\n\nguide = AutoDiagonalNormal(model)\n\ndata = dist.Normal(0.5, 2.).sample((100,))", "_____no_output_____" ] ], [ [ "First let's run as usual with an SVI object and `Trace_ELBO`.", "_____no_output_____" ] ], [ [ "%%time\npyro.clear_param_store()\nelbo = Trace_ELBO()\nsvi = SVI(model, guide, Adam({'lr': 0.01}), elbo)\nfor i in range(2 if smoke_test else 1000):\n svi.step(data)", "CPU times: user 2.71 s, sys: 31.4 ms, total: 2.74 s\nWall time: 2.76 s\n" ] ], [ [ "Next, to run with jit-compiled inference, we simply replace\n```diff\n- elbo = Trace_ELBO()\n+ elbo = JitTrace_ELBO()\n```\nAlso note that the `AutoDiagonalNormal` guide behaves a little differently on its first invocation (it runs the model to produce a prototype trace), and we don't want to record this warmup behavior when compiling. Thus we call `guide(data)` once to initialize, then run the compiled SVI:", "_____no_output_____" ] ], [ [ "%%time\npyro.clear_param_store()\n\nguide(data) # Do any lazy initialization before compiling.\n\nelbo = JitTrace_ELBO()\nsvi = SVI(model, guide, Adam({'lr': 0.01}), elbo)\nfor i in range(2 if smoke_test else 1000):\n svi.step(data)", "CPU times: user 1.1 s, sys: 30.4 ms, total: 1.13 s\nWall time: 1.16 s\n" ] ], [ [ "Notice that we have a more than 2x speedup for this small model.\n\nLet us now use the same model, but we will instead use MCMC to generate samples from the model's posterior. We will use the No-U-Turn (NUTS) sampler.", "_____no_output_____" ] ], [ [ "%%time\nnuts_kernel = NUTS(model)\npyro.set_rng_seed(1)\nmcmc_run = MCMC(nuts_kernel, num_samples=100).run(data)", "_____no_output_____" ] ], [ [ "We can compile the potential energy computation in NUTS using the `jit_compile=True` argument to the NUTS kernel. We also silence JIT warnings due to the presence of tensor constants in the model by using `ignore_jit_warnings=True`.", "_____no_output_____" ] ], [ [ "%%time\nnuts_kernel = NUTS(model, jit_compile=True, ignore_jit_warnings=True)\npyro.set_rng_seed(1)\nmcmc_run = MCMC(nuts_kernel, num_samples=100).run(data)", "_____no_output_____" ] ], [ [ "We notice a significant increase in sampling throughput when JIT compilation is enabled.", "_____no_output_____", "## Varying structure\n\nTime series models often run on datasets of multiple time series with different lengths. To accommodate varying structure like this, Pyro requires models to separate all model inputs into tensors and non-tensors.$^\\dagger$\n\n- Non-tensor inputs should be passed as `**kwargs` to the model and guide. These can determine model structure, so that a model is compiled for each value of the passed `**kwargs`.\n- Tensor inputs should be passed as `*args`. These must not determine model structure. However `len(args)` may determine model structure (as is used e.g. 
in semisupervised models).\n\nTo illustrate this with a time series model, we will pass in a sequence of observations as a tensor `arg` and the sequence length as a non-tensor `kwarg`:", "_____no_output_____" ] ], [ [ "def model(sequence, num_sequences, length, state_dim=16):\n # This is a Gaussian HMM model.\n with pyro.plate(\"states\", state_dim):\n trans = pyro.sample(\"trans\", dist.Dirichlet(0.5 * torch.ones(state_dim)))\n emit_loc = pyro.sample(\"emit_loc\", dist.Normal(0., 10.))\n emit_scale = pyro.sample(\"emit_scale\", dist.LogNormal(0., 3.))\n\n # We're doing manual data subsampling, so we need to scale to actual data size.\n with poutine.scale(scale=num_sequences):\n # We'll use enumeration inference over the hidden x.\n x = 0\n for t in pyro.markov(range(length)):\n x = pyro.sample(\"x_{}\".format(t), dist.Categorical(trans[x]),\n infer={\"enumerate\": \"parallel\"})\n pyro.sample(\"y_{}\".format(t), dist.Normal(emit_loc[x], emit_scale),\n obs=sequence[t])\n\nguide = AutoDiagonalNormal(poutine.block(model, expose=[\"trans\", \"emit_scale\", \"emit_loc\"]))\n\n# This is fake data of different lengths.\nlengths = [24] * 50 + [48] * 20 + [72] * 5\nsequences = [torch.randn(length) for length in lengths]", "_____no_output_____" ] ], [ [ "Now lets' run SVI as usual.", "_____no_output_____" ] ], [ [ "%%time\npyro.clear_param_store()\nelbo = TraceEnum_ELBO(max_plate_nesting=1)\nsvi = SVI(model, guide, Adam({'lr': 0.01}), elbo)\nfor i in range(1 if smoke_test else 10):\n for sequence in sequences:\n svi.step(sequence, # tensor args\n num_sequences=len(sequences), length=len(sequence)) # non-tensor args", "CPU times: user 52.4 s, sys: 270 ms, total: 52.7 s\nWall time: 52.8 s\n" ] ], [ [ "Again we'll simply swap in a `Jit*` implementation\n```diff\n- elbo = TraceEnum_ELBO(max_plate_nesting=1)\n+ elbo = JitTraceEnum_ELBO(max_plate_nesting=1)\n```\nNote that we are manually specifying the `max_plate_nesting` arg. Usually Pyro can figure this out automatically by running the model once on the first invocation; however to avoid this extra work when we run the compiler on the first step, we pass this in manually.", "_____no_output_____" ] ], [ [ "%%time\npyro.clear_param_store()\n\n# Do any lazy initialization before compiling.\nguide(sequences[0], num_sequences=len(sequences), length=len(sequences[0]))\n\nelbo = JitTraceEnum_ELBO(max_plate_nesting=1)\nsvi = SVI(model, guide, Adam({'lr': 0.01}), elbo)\nfor i in range(1 if smoke_test else 10):\n for sequence in sequences:\n svi.step(sequence, # tensor args\n num_sequences=len(sequences), length=len(sequence)) # non-tensor args", "CPU times: user 21.9 s, sys: 201 ms, total: 22.1 s\nWall time: 22.2 s\n" ] ], [ [ "Again we see more than 2x speedup. Note that since there were three different sequence lengths, compilation was triggered three times.\n\n$^\\dagger$ Note this section is only valid for SVI, and HMC/NUTS assume fixed model arguments.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb9625667cf941ad01af2a0d144f1d8b5e238ca0
32,583
ipynb
Jupyter Notebook
Week01-02/Workshop01.ipynb
ds-connectors/Physics-88-Fa21
147ea6ea06798fc6e7d7eac9f06076365c291fc9
[ "BSD-3-Clause" ]
1
2021-08-30T17:52:58.000Z
2021-08-30T17:52:58.000Z
Week01-02/Workshop01.ipynb
ds-connectors/Physics-88-Fa21
147ea6ea06798fc6e7d7eac9f06076365c291fc9
[ "BSD-3-Clause" ]
null
null
null
Week01-02/Workshop01.ipynb
ds-connectors/Physics-88-Fa21
147ea6ea06798fc6e7d7eac9f06076365c291fc9
[ "BSD-3-Clause" ]
null
null
null
29.94761
758
0.586471
[ [ [ "Your name here. \nYour section number here. ", "_____no_output_____" ], [ "# Workshop 1: Python basics, and a little plotting", "_____no_output_____" ], [ "**Submit this notebook to bCourses to receive a grade for this Workshop.**\n\nPlease complete workshop activities in code cells in this iPython notebook. The activities titled **Practice** are purely for you to explore Python, and no particular output is expected. Some of them have some code written, and you should try to modify it in different ways to understand how it works. Although no particular output is expected at submission time, it is _highly_ recommended that you read and work through the practice activities before or alongside the exercises. However, the activities titled **Exercise** have specific tasks and specific outputs expected. Include comments in your code when necessary. Enter your name in the cell at the top of the notebook. The workshop should be submitted on bCourses under the Assignments tab.\n\nTo submit the assignment, click File->Download As->Notebook (.ipynb). Then upload the completed (.ipynb) file to the corresponding bCourses assignment.", "_____no_output_____" ], [ "## Practice: Writing Python code\n\n### The iPython Interpreter\n\nTime to write your first python code! In Jupyter, the code is written in \"Cells\". Click on the \"+\" button above to create a new cell and type in \"2+2\" (without the quotes ... or with them!) and see what happens! To execute, click \"Run\" button or press \"Shift-Enter\". Also try switching the type of the cell from \"Code\" to \"Markdown\" and see what happens", "_____no_output_____" ] ], [ [ "2+2", "_____no_output_____" ] ], [ [ "## Practice: Peforming arithmetic in Python\n\nIf you get bored of using WolframAlpha to help with physics homework, Python can also be used as a \"glorified calculator\". Python code must follow certain syntax in order to run properly--it tends to be a bit more picky than Wolfram. However, once you get used to the Python language, you have the freedom to calculate pretty much anything you can think of.\n\nTo start, let's see how to perform the basic arithmetic operations. The syntax is\n\n<h3><center><i>number</i> operator <i>number</i></center></h3>\n\nRun the cells below and take a look at several of the different operators that you can use in Python (text after \"#\" are non-executable comments).", "_____no_output_____" ] ], [ [ "3+2 #addition", "_____no_output_____" ], [ "3-2 #subtraction", "_____no_output_____" ], [ "3*2 #multiplication", "_____no_output_____" ], [ "3/2 #division", "_____no_output_____" ], [ "3%2 #modulus (remainder after division) see https://en.wikipedia.org/wiki/Modulo_operation", "_____no_output_____" ], [ "3**2 #exponentiation, note: 3^2 means something different in Python", "_____no_output_____" ] ], [ [ "Python cares __*a lot*__ about the spaces, tabs, and enters you type (this is known as whitespace in programming). Many of your errors this semester will involve improper indentation. 
However, in this case, you are free to put a lot of space between numbers and operators as long as you keep everything in one line.", "_____no_output_____" ] ], [ [ " 5 * 3 #This is valid code", "_____no_output_____" ] ], [ [ "You are not limited to just 2 numbers and a single operator; you can put a whole bunch of operations on one line.", "_____no_output_____" ] ], [ [ "5 * 4 + 3 / 2", "_____no_output_____" ] ], [ [ "Python follows the standard order of operations (PEMDAS) : Parentheses -> Exponentiation -> Multiplication/Division -> Addition/Subtraction. If you use parentheses, make sure every ```(``` has a corresponding ```)```", "_____no_output_____" ] ], [ [ "5 * (4 + 3) / 2", "_____no_output_____" ] ], [ [ "## Practice: Strings vs numbers\n\nIf you're familiar with programming in other languages, you are probably aware that different [_types_](https://realpython.com/python-data-types/) of things exist--you can do more than work with numbers (and not all numbers are the same type). If you'd like to work with letters, words, or sentences in Python, then you'll be using something called a string. To input a string, simply put single `' '` or double `\" \"` quotes around your desired phrase.", "_____no_output_____" ] ], [ [ "\"Hello world\"", "_____no_output_____" ] ], [ [ "Some (but not all) of the arithmetic operations also work with strings; you can add two of them together.", "_____no_output_____" ] ], [ [ "\"Phys\" + \"ics\"", "_____no_output_____" ] ], [ [ "You can multiply a string by a number.", "_____no_output_____" ] ], [ [ "\"ha\"*3", "_____no_output_____" ] ], [ [ "This one doesn't work; try reading the error message and see if you understand what it's saying (this is a useful skill to develop). ", "_____no_output_____" ] ], [ [ "\"error\"/3", "_____no_output_____" ] ], [ [ "## Practice: Printing\n\nUp until this point, we've just been typing a single line of code in each Jupyter cell and running it. Most Python interpreters will display the result of the final thing you typed, but occassionally you want to display the results of many things in a single Python script.", "_____no_output_____" ] ], [ [ "\"These are some numbers:\"\n3*2\n3*3\n3*4", "_____no_output_____" ] ], [ [ "In the cell above, there are several multiplications happening but only the final result is displayed. To display everything, we simply use a \"print statement\" on each line.", "_____no_output_____" ] ], [ [ "print(\"These are some numbers:\")\nprint(3*2)\nprint(3*3)\nprint(3*4)", "_____no_output_____" ] ], [ [ "If you'd like to print multiple things on one line, you can separate them by commas within the print statement.", "_____no_output_____" ] ], [ [ "print(\"These are some numbers:\", 3*2, 3*3, 3*4)", "_____no_output_____" ] ], [ [ "## Exercise 1: Four Fours\n\n[Inspired by Harvey Mudd College's CS5 course] Here's an arithmetic game to try your hand at. Your task is to compute each of the numbers, from 1 through 11, using exactly four 4's and simple math operations. You're allowed to use `+` (addition), `-` (subtraction), `*` (multiplication), `/` (division), `sqrt()` (square root), `factorial()` (factorial), and `%` (modulus). You're also allowed to use `.4` (that's one 4) or `44` (that's two 4's) if you'd like. Just remember, you must use exactly four 4 digits total!\n\nAs a reminder, four factorial (denoted by $!$ in mathematics) is $4! = 4 \\cdot 3 \\cdot 2 \\cdot 1$, and the modulus operator (usually denoted by $\\text{mod}$ in mathematics) is the remainder after division. 
For instance, $\\ 5\\ \\text{mod}\\ 2 = 1$, $\\ 13\\ \\text{mod}\\ 7 = 6$, and $\\ 14\\ \\text{mod}\\ 7 = 0$.\n\nWe've given you `zero` for free, as `4 - 4 + 4 - 4`. Of course, we could have also done `44 * (.4 - .4)` or `factorial(4) - 4 * (4 + sqrt(4))`, since both of those also yield `0` (or rather, `0.0`. Why is that?) and use exactly four 4's.", "_____no_output_____" ] ], [ [ "### Exercise 1\n\nfrom math import factorial, sqrt\n \nprint('Zero:', 4 - 4 + 4 - 4)\nprint('One:')\nprint('Two:')\nprint('Three:')\nprint('Four:')\nprint('Five:')\nprint('Six:')\nprint('Seven:')\nprint('Eight:')\nprint('Nine:')\nprint('Ten:')\nprint('Eleven:')", "_____no_output_____" ] ], [ [ "Your final source code will be full of four fours formulas, but your final output should look like this:\n\n Zero: 0\n One: 1\n Two: 2\n Three: 3\n Four: 4\n Five: 5\n Six: 6\n Seven: 7\n Eight: 8\n Nine: 9\n Ten: 10\n Eleven: 11\n\nIt's ok if some of these have a trailing `.0` (`0.0`, for instance), but make sure you understand why they do!", "_____no_output_____" ], [ "## Practice: Variables, functions, namespaces\n\n### Variables\n\nSuppose you calculate something in Python and would like to use the result later in your program (instead of just printing it and immediately throwing it away). One big difference between a calculator and a computer language is an ability to store the values in memory, give that memory block a name, and use the value in later calculations. Such named memory block is called a _variable_. To create a variable, use an _assignment_ opperator = . Once you have created the variable, you can use it in the calculations. ", "_____no_output_____" ] ], [ [ "x = \"Phys\"\ny = \"ics!\"\nz = x + y # Put 'em together\nz # See what we got!", "_____no_output_____" ], [ "y + x # Backwards!", "_____no_output_____" ], [ "len(z) # 8 characters in total ...", "_____no_output_____" ], [ "len(z)**2 # Computing the area?", "_____no_output_____" ], [ "z[0] # Grab the first character", "_____no_output_____" ], [ "z[1:3] # Grab the next two characters", "_____no_output_____" ], [ "z[:4]", "_____no_output_____" ], [ "z[:4] == x # Test a match!", "_____no_output_____" ], [ "z[4:] == y", "_____no_output_____" ], [ "z[:] # The whole string", "_____no_output_____" ], [ "z[::-1] # The whole string, right to left", "_____no_output_____" ], [ "z[1::3] # Start at the second character and take every third character from there", "_____no_output_____" ], [ "z*3 + 5*z[-1] # Woo!", "_____no_output_____" ] ], [ [ "### Namespaces", "_____no_output_____" ], [ "This notebook and interpreter are a great place to test things out and mess around. Some interpreters (like Canopy) comes preloaded with a couple libraries (like numpy and matplotlib) that we will use a lot in this course. In Jupyter, you have to pre-load each package before using it. This is a good python practice anyway ! Here is an example. ", "_____no_output_____" ] ], [ [ "log(e)", "_____no_output_____" ] ], [ [ "Both the function `log` and the number `e` are from the `numpy` library, which needs to be loaded into Jupyter. \"pylab\" adds `matplotlib` (the standard plotting tool) to `numpy`, so we will use that. ", "_____no_output_____" ] ], [ [ "from pylab import *\nlog(e)", "_____no_output_____" ] ], [ [ "Or type `pie([1,2,3])`, since `pie` is defined by matplotlib! 
", "_____no_output_____" ] ], [ [ "pie([1,2,3]) \nmatplotlib.pyplot.show() #This line is needed so matplotlib actually displays the plot", "_____no_output_____" ] ], [ [ "Note that we imported all library definitions from `pylab` into the default <i>namespace</i>, and can use the functions directly instead of having to add the name or alias of the package:", "_____no_output_____" ] ], [ [ "import numpy as np\nnp.log(np.e)", "_____no_output_____" ] ], [ [ "Loading into the default namespace can be convenient, but also confusing since many names and variables are already used in ways you might not expect. When writing scripts you'll have to manually import any library you want to use. This little inconvenience is greatly worth the confusion it can save.", "_____no_output_____" ], [ "### Functions (looking a bit ahead)\n\nYou'll often find yourself performing the same operations on several different variables. For example, we might want to convert heights from feet to meters.", "_____no_output_____" ] ], [ [ "burj_khalifa = 2717 #height in feet\nshanghai_tower = 2073 #height in feet", "_____no_output_____" ], [ "print(burj_khalifa / 3.281) #height in meters\nprint(shanghai_tower / 3.281) #height in meters", "_____no_output_____" ] ], [ [ "You could just type the same thing over and over (or copy and paste), but this becomes tedious as your operations become more complex. To simplify things, you can define a function in Python (above, you were able to use the `log()` function from the `numpy` library).", "_____no_output_____" ] ], [ [ "'''A function definition starts with the 'def' keyword, \nfollowed by the function name. The input variables are then\nplaced in parentheses after the function name. The first line\nends with a colon'''\n\ndef feet_to_meters(height): \n \n #The operations to be performed by the function are now written out at the first indentation level\n #You can indent with tabs or a constant number of spaces; just be consistent\n converted_height = height / 3.281\n print(\"Your height is being converted to meters.\")\n \n return converted_height #To return a value from a function, use the 'return' keyword", "_____no_output_____" ] ], [ [ "To use a function, simply type its name with the appropriate input variables in parentheses.", "_____no_output_____" ] ], [ [ "feet_to_meters(burj_khalifa)", "_____no_output_____" ] ], [ [ "If you'd like a function with multiple input variables, simply separate them with commas in the function declaration.", "_____no_output_____" ] ], [ [ "def difference_in_meters(height1, height2): \n \n converted_height1 = height1 / 3.281\n converted_height2 = height2 / 3.281\n \n return converted_height1 - converted_height2", "_____no_output_____" ], [ "difference_in_meters(burj_khalifa, shanghai_tower)", "_____no_output_____" ] ], [ [ "## Practice: Formatted output\n\nUsually the data you manipulate has finate precision. You do not know it absolutely precisely, and therefore you should not report it with an arbitrary number of digits. One of the cardinal rules of a good science paper: round off all your numbers to the precision you know them (or care about) -- and no more ! \n\n#### Examples:", "_____no_output_____" ] ], [ [ "x = 20.0 # I only know 3 digits\nprint(x) # OK, let Python handle it", "_____no_output_____" ] ], [ [ "That's actually pretty good -- Python remembered stored precision !\nWhat happens if you now use x in a calculation ? 
", "_____no_output_____" ] ], [ [ "print(sqrt(x))", "_____no_output_____" ] ], [ [ "Do we really know the output to 10 significant digits ? No ! So let's truncate it", "_____no_output_____" ] ], [ [ "print('sqrt(x) = {0:5.3f}'.format(sqrt(x)))", "_____no_output_____" ] ], [ [ "There are several formatting options available to you, but the basic idea is this:\nplace `{:.#f}` wherever you'd like to insert a variable into your string (where `#` is\nthe number of digits you'd like after the decimal point). Then type `.format()` after \nthe string and place the variable names within the parentheses. ", "_____no_output_____" ] ], [ [ "from math import e\n\nprint(\"Euler's number with 5 decimal places is {:.5f} and with 3 decimal places is {:.3f}\".format(e,e))", "_____no_output_____" ] ], [ [ "For more formatting options, see https://pyformat.info/", "_____no_output_____" ], [ "### Practice\n\nUsing what you just learned, try writing program to print only 4 decimal places of $\\pi$ and $\\log\\pi$. The result should look like:\n\n Hello world! Have some pie! 3.1416\n And some pie from a log! 1.1447", "_____no_output_____" ] ], [ [ "from math import pi \n\n#Your print statement here", "_____no_output_____" ] ], [ [ "## Exercise 2: Coulomb force\n\nWrite a function that calculates the magnitude of the force between two charged particles. The function should take the charge of each particle ($q_1$ and $q_2$) and the distance between them, $r$, as input (three input variables total). The electrostatic force between two particles is given by:\n\n$ F = k\\frac{q_1 q_2}{r^2}$", "_____no_output_____" ] ], [ [ "k = 8.99e9 #Coulomb constant, units: N * m**2 / C**2\n\ndef calculate_force(q1, q2, r):\n \n #calculate (and return) the force between the two particles\n ", "_____no_output_____" ] ], [ [ "Now call the function with random input values (of your choosing) and print the result with 3 decimal places. What happens if you call the function with the value $r=0$ ? ", "_____no_output_____" ], [ "## Practice: Simple plotting\n\nIn order to do some plotting, we'll need the tools from two commonly used Python libraries: `matplotlib` (similar to Matlab plotting) and `numpy` (NUMerical PYthon). You've seen importing at work before with `from math import sqrt`; we can also import an entire library (or a large part of it) with the following syntax:", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "You could have also typed `import numpy`, but programmers are lazy when it comes to typing. By including `as np`, you now only have to type the two-letter word `np` when you'd like to use functions from the library. The `np` and `plt` part of the import statements can be whatever you like--these are just the standard names.\n\nNumpy has a lot of the same functions as the `math` library; for example we have `sqrt`, `log`, and `exp`:", "_____no_output_____" ] ], [ [ "np.sqrt(4)", "_____no_output_____" ], [ "np.log(4)", "_____no_output_____" ], [ "np.exp(3)", "_____no_output_____" ], [ "np.log(np.exp(5))", "_____no_output_____" ] ], [ [ "We could have just gotten these functions from the `math` library, so why bother with `numpy`? There's another variable type in Python known as a *__list__*, which is exactly like it sounds--just a list of some things (numbers, strings, more lists, etc.). We'll talk about these more at some point, but the important thing is that `numpy` has a way better alternative: the `numpy` array. 
Usually anything you'd want to do with a list can also be done with a `numpy` array, but faster. \n\nLet's just demonstrate by example. Suppose we want to plot the function `x**2`. To do this, we'll plot a collection of (x,y) points and connect them with lines. If the points are spaced closely enough, the plot will look nice and smooth on the screen.", "_____no_output_____" ] ], [ [ "x_values = np.linspace(-5, 5, 11)\nprint(x_values)", "_____no_output_____" ] ], [ [ "The `linspace` function from `numpy` gave us an array of 11 numbers, evenly spaced between -5 and 5. We'll want our points a bit closer, so let's change 11 to something larger.", "_____no_output_____" ] ], [ [ "x_values = np.linspace(-5, 5 , 1000)\ny_values = x_values**2", "_____no_output_____" ] ], [ [ "To get the corresponding y values, we can just perform operations on the entire array of x values. Now, we can plot these using the `matplotlib` library.", "_____no_output_____" ] ], [ [ "plt.plot(x_values, y_values)", "_____no_output_____" ] ], [ [ "There's a ton of stuff you can do with `matplotlib.pyplot` or the `matplotlib` library as a whole, but here are a few basics to get you started.", "_____no_output_____" ] ], [ [ "plt.plot(x_values, x_values**3) #As before, this plots the (x,y) points and connects them with lines\n\nplt.show() #This forces matplotlib to display the current figure\n\nplt.figure() #This creates a new, empty figure\n\nplt.plot(x_values, np.exp(x_values), 'g--') #There are lots of optional arguments that do cool things\n\nplt.title(r'$e^x$') #Creates a title; you can use LaTeX formatting in matplotlib as shown here\n\nplt.xlabel('y values') #Label for x-axis\n\nplt.ylabel('exp(x)') #Label for y-axis\n\nplt.show()", "_____no_output_____" ] ], [ [ "## Exercise 3: Plotting Radioactivity Data\n\n[Adapted from Ayars, Problem 0-2]", "_____no_output_____" ], [ "The file Ba137.txt contains two columns. The first is counts from a Geiger counter, the second is time in seconds. If you opened this Workshop notebook using the Interact Link (from the bCourses page), then you should already have Ba137.txt in your datahub directory. \n\nIf not, it's available [here](https://raw.githubusercontent.com/celegante/code_chapter_0-_github/master/Ba137.txt). Open the link, right-click and save as a .txt file. Then upload to datahub.berkeley.edu or move it to whichever folder you're keeping this notebook.", "_____no_output_____" ], [ "1. Make a useful graph of this data, with axes labels and a title.\n2. If this data follows an exponential curve, then plotting the natural log of the data (or plotting the raw data on a logarithmic scale) will result in a straight line. Determine whether this is the case, and explain your conclusion with---you guessed it---an appropriate graph.", "_____no_output_____" ], [ "Be sure to add comments throughout your code so it's clear what each section of the code is doing! It may help to refer to the lecture notes or Ayars Chapter 0.\n\nTry using `'x'` or `'^'` as the marker type in your `plt.plot()` functions (instead of `'g-'`, for instance), to get a single x or triangle for each data point instead of a connected line. Google if you'd like to learn more options!\n\nOnce you're through, your code should produce two graphs, one with the data, another with the natural log of the data, both labelled appropriately. 
It should also print out a clear answer to the question in part 2 (e.g., `Yes, the data follows an exponential curve`, or `No, the data does not follow an exponential curve`).", "_____no_output_____" ] ], [ [ "### Exercise 3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n### Load the data here\ncounts, times = np.loadtxt('Ba137.txt', unpack = True)\n\nplt.figure() # Start a clean figure for your first plot\n\n### Your code for the first plot here!\n\nplt.figure() # Start a clean figure for your second plot\n\n### Your code for the second plot here!\n\nplt.show() # This tells python to display the plots you've made", "_____no_output_____" ] ], [ [ "#### Hints\n\nPut the file in the same directory as your python file, and use numpy's `loadtxt` or `genfromtxt` function to load each column into an array for use in your plots. \n\nIf your file isn't loading correctly, it might be because your IPython working directory isn't the same as the directory your script and Ba137.txt are in.\n\nIf you'd like to learn more about what `loadtxt` does (or why the `unpack = True` option is important), type `loadtxt?` or `help(loadtxt)` into the python interpreter for documentation. Press `q` to get out of the documentation.", "_____no_output_____" ], [ "## Practice: Debugging\n\n[Adapted from Langtangen, Exercise 1.16] Working with a partner, type these statements into your python interpreter. Figure out why some statements fail and correct the errors.\n\n*Hint: Try testing the left- and right-hand sides seperately before you put them together in statements. It's ok if you don't understand yet why some of the expressions evaluate to the results they do (like the last one).*\n\n 1a = 2\n a1 = b\n x = 2\n y = X + 4 # is it 6?\n 5 = 5 # is it True?\n 4/5 == 4.0/5.0 # is it True? (this depends on which version of Python you're using)\n type(10/2) == type(10/2.) # is it True? (again, this depends on the Python version)\n from Math import factorial\n print factorial(pi) \n discount = 12%", "_____no_output_____" ], [ "## You're done!\n\nCongratulations, you've finished this week's workshop! You're welcome to leave early or get started on this week's homework.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
cb9637f10cc37596fd2b9ffe217f88249501802b
885,159
ipynb
Jupyter Notebook
src/2UnzippingHtm/unzipping_htm.ipynb
Densuke-fitness/MDandAAnalysisFlow
520af7747bd0d07008eb83f56acad586f15e7fa7
[ "MIT" ]
null
null
null
src/2UnzippingHtm/unzipping_htm.ipynb
Densuke-fitness/MDandAAnalysisFlow
520af7747bd0d07008eb83f56acad586f15e7fa7
[ "MIT" ]
null
null
null
src/2UnzippingHtm/unzipping_htm.ipynb
Densuke-fitness/MDandAAnalysisFlow
520af7747bd0d07008eb83f56acad586f15e7fa7
[ "MIT" ]
null
null
null
67.932387
129
0.705812
[ [ [ "# 4.3.2 有価証券報告書内のMD&Aが記載されているhtmファイルを抽出 \n\n Zipファイルで送られてきた大量の有価証券書のデータをPythonにより自動的に解凍した。その後、有価証券報告書内のMD&A情報のみが記載されたhtmファイルのみを抽出をした。", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport glob\nimport os\nimport zipfile\nfrom datetime import datetime as dt\nimport bs4 \nimport re", "_____no_output_____" ], [ "def make_industry_zip_list_hash(data_frame, data_frame_name):\n \n industry_zip_list_hash = {}\n industry_li = make_type_of_industry_list(data_frame)\n zip_files = call_zip_files(data_frame_name)\n for industry in industry_li :\n industry_zip_list_hash[industry]= list(filter(lambda x: industry in x , zip_files))\n \n return industry_zip_list_hash\n \n\ndef make_type_of_industry_list(data_frame : pd.DataFrame, industry_col=\"[業種(東証)]\"):\n return list(set(data_frame[industry_col]))\n\n\ndef call_zip_files(data_frame_name):\n zip_files = glob.glob(f\"../**/SampleData/{data_frame_name}/**/**.zip\", recursive=True)\n return zip_files\n\ndef call_unziped_htm_files_dir(data_frame_name: str):\n\n unziped_htm_files_dir = os.getcwd()+ \"/UnzipedHtmFiles\"\n if not os.path.exists(unziped_htm_files_dir) :\n os.mkdir(unziped_htm_files_dir)\n\n unziped_htm_files_dir_with_df_name = unziped_htm_files_dir + f\"/{data_frame_name}\"\n if not os.path.exists(unziped_htm_files_dir_with_df_name) :\n os.mkdir(unziped_htm_files_dir_with_df_name)\n \n return unziped_htm_files_dir_with_df_name\n\n\n#--------------------------------------------------------------------\ndef unzip_html_to_unziped_htm_files_dir(industry_zip_list_hash, filepath_unziped):\n \n sum_files_len = sum(map(len, industry_zip_list_hash.values()))\n zip_files_len = 1\n \n for industy_name, zip_files in industry_zip_list_hash.items() :\n zip_files_len += len(zip_files)\n \n industry_dir = call_industry_dir(filepath_unziped, industy_name)\n \n for idx ,zip_file in enumerate(zip_files):\n try:\n with zipfile.ZipFile(zip_file) as existing_zip:\n\n candidate_files = existing_zip.namelist()\n\n for c_f in candidate_files:\n basename = os.path.basename(c_f)\n #第2部のもの(MD&Aが記載されている見出しの部分)を判定\n if re.match(r'01020.*htm', basename) != None :\n print(c_f)\n date_str = c_f[-20:-10]\n date_dt = dt.strptime(date_str, '%Y-%m-%d')\n\n existing_zip.extract(c_f, industry_dir)\n\n print(f\"{idx + 1} / {len(zip_files)} || {zip_files_len - 1}/{sum_files_len}\")\n except Exception :\n print(zip_file)\n with open(\"unzipping_html_error.txt\", \"a\") as f:\n f.write(zip_file + \"\\n\")\n \n \ndef call_industry_dir(filepath_unziped, industry_name: str):\n industry_dir = filepath_unziped+ f\"/{industry_name}\"\n if not os.path.exists(industry_dir) :\n os.mkdir(industry_dir)\n return industry_dir", "_____no_output_____" ], [ "f = call_zip_files(\"renketsu\") + call_zip_files(\"hirenketsu\")\nlen_f = len(f)\nprint(len_f, len_f/3)", "6171 2057.0\n" ], [ "#renkestsuとhirenketsuを設定\ndata_frame_name =\"renketsu\" \ndata_frame = pd.read_csv(\"/home/jovyan/1CalliingEdinetApi\"+f\"/EdinetIdxFiles/edinet_{data_frame_name}.csv\", skiprows=4)", "_____no_output_____" ], [ "industry_zip_list_hash = make_industry_zip_list_hash(data_frame, data_frame_name)\nfilepath_unziped = call_unziped_htm_files_dir(data_frame_name)\nunzip_html_to_unziped_htm_files_dir(industry_zip_list_hash, filepath_unziped)", "XBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E27633-000_2018-03-31_01_2018-06-27_ixbrl.htm\n1 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03867-000_2018-03-31_01_2018-06-22_ixbrl.htm\n2 / 138 || 
138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E00165-000_2018-03-31_01_2018-06-22_ixbrl.htm\n3 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E01202-000_2018-03-31_01_2018-06-20_ixbrl.htm\n4 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E04047-000_2018-03-31_01_2018-06-25_ixbrl.htm\n5 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03953-000_2018-03-31_01_2018-06-21_ixbrl.htm\n6 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E27759-000_2018-03-31_01_2018-06-27_ixbrl.htm\n7 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03861-000_2018-03-31_01_2018-06-28_ixbrl.htm\n8 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E25621-000_2018-03-31_01_2018-06-22_ixbrl.htm\n9 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E04233-000_2018-03-31_01_2018-06-21_ixbrl.htm\n10 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03938-000_2018-03-31_01_2018-06-22_ixbrl.htm\n11 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E05651-000_2018-03-31_01_2018-06-22_ixbrl.htm\n12 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E27281-000_2018-03-31_01_2018-06-25_ixbrl.htm\n13 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E04032-000_2018-03-31_01_2018-06-22_ixbrl.htm\n14 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03945-000_2018-03-31_01_2018-06-22_ixbrl.htm\n15 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E04031-000_2018-03-31_01_2018-06-25_ixbrl.htm\n16 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E31970-000_2018-03-31_01_2018-06-26_ixbrl.htm\n17 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E05179-000_2018-03-31_01_2018-06-25_ixbrl.htm\n18 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03991-000_2018-03-31_01_2018-06-26_ixbrl.htm\n19 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E04060-000_2018-03-31_01_2018-06-26_ixbrl.htm\n20 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E00926-000_2018-03-31_01_2018-06-28_ixbrl.htm\n21 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E04065-000_2018-03-31_01_2018-06-28_ixbrl.htm\n22 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E05325-000_2018-03-31_01_2018-06-29_ixbrl.htm\n23 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E33622-000_2018-03-31_01_2018-06-26_ixbrl.htm\n24 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03977-000_2018-03-31_01_2018-06-27_ixbrl.htm\n25 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E31979-000_2018-03-31_01_2018-06-27_ixbrl.htm\n26 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03907-000_2018-03-31_01_2018-06-29_ixbrl.htm\n27 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E04034-000_2018-03-31_01_2018-06-27_ixbrl.htm\n28 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03863-000_2018-03-31_01_2018-06-27_ixbrl.htm\n29 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E04020-000_2018-03-31_01_2018-06-28_ixbrl.htm\n30 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03858-000_2018-03-31_01_2018-06-27_ixbrl.htm\n31 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03856-000_2018-03-31_01_2018-06-28_ixbrl.htm\n32 
/ 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E04044-000_2018-03-31_01_2018-06-28_ixbrl.htm\n33 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03997-000_2018-03-31_01_2018-06-27_ixbrl.htm\n34 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E04049-000_2018-03-31_01_2018-06-28_ixbrl.htm\n35 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03984-000_2018-03-31_01_2018-06-28_ixbrl.htm\n36 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03855-000_2018-03-31_01_2018-06-28_ixbrl.htm\n37 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03943-000_2018-03-31_01_2018-06-28_ixbrl.htm\n38 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03971-000_2018-03-31_01_2018-06-28_ixbrl.htm\n39 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03801-000_2018-03-31_01_2018-06-28_ixbrl.htm\n40 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E00568-000_2018-03-31_01_2018-06-28_ixbrl.htm\n41 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E25686-000_2018-03-31_01_2018-06-28_ixbrl.htm\n42 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03958-000_2018-03-31_01_2018-06-28_ixbrl.htm\n43 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03967-000_2018-03-31_01_2018-06-28_ixbrl.htm\n44 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E30066-000_2018-03-31_01_2018-06-28_ixbrl.htm\n45 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E02740-000_2018-03-31_01_2018-06-29_ixbrl.htm\n46 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E04047-000_2019-03-31_01_2019-06-24_ixbrl.htm\n47 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E04233-000_2019-03-31_01_2019-06-14_ixbrl.htm\n48 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E01202-000_2019-03-31_01_2019-06-19_ixbrl.htm\n49 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03953-000_2019-03-31_01_2019-06-20_ixbrl.htm\n50 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E00165-000_2019-03-31_01_2019-06-21_ixbrl.htm\n51 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03867-000_2019-03-31_01_2019-06-21_ixbrl.htm\n52 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E05325-000_2019-03-31_01_2019-06-28_ixbrl.htm\n53 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E25621-000_2019-03-31_01_2019-06-21_ixbrl.htm\n54 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E05651-000_2019-03-31_01_2019-06-21_ixbrl.htm\n55 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03861-000_2019-03-31_01_2019-06-27_ixbrl.htm\n56 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03938-000_2019-03-31_01_2019-06-21_ixbrl.htm\n57 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E04032-000_2019-03-31_01_2019-06-21_ixbrl.htm\n58 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E33622-000_2019-03-31_01_2019-06-25_ixbrl.htm\n59 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E02740-000_2019-03-31_01_2019-06-26_ixbrl.htm\n60 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E25686-000_2019-03-31_01_2019-06-24_ixbrl.htm\n61 / 138 || 
138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E05179-000_2019-03-31_01_2019-06-24_ixbrl.htm\n62 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E30066-000_2019-03-31_01_2019-06-28_ixbrl.htm\n63 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E04031-000_2019-03-31_01_2019-06-24_ixbrl.htm\n64 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E31979-000_2019-03-31_01_2019-06-25_ixbrl.htm\n65 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E04060-000_2019-03-31_01_2019-06-25_ixbrl.htm\n66 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03991-000_2019-03-31_01_2019-06-25_ixbrl.htm\n67 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03863-000_2019-03-31_01_2019-06-26_ixbrl.htm\n68 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E04065-000_2019-03-31_01_2019-06-27_ixbrl.htm\n69 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E04020-000_2019-03-31_01_2019-06-27_ixbrl.htm\n70 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E27633-000_2019-03-31_01_2019-06-26_ixbrl.htm\n71 / 138 || 138/6171\nXBRL/PublicDoc/0102010_honbun_jpcrp030000-asr-001_E03977-000_2019-03-31_01_2019-06-26_ixbrl.htm\n72 / 138 || 138/6171\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb963949033bbb04b3e69521e05219d1d2681b61
2,517
ipynb
Jupyter Notebook
tomef/tools/tools.py.ipynb
unlikelymaths/tomef
57b629a3ee932486c55afcf62ef9d8224488ae65
[ "MIT" ]
null
null
null
tomef/tools/tools.py.ipynb
unlikelymaths/tomef
57b629a3ee932486c55afcf62ef9d8224488ae65
[ "MIT" ]
14
2020-01-28T22:36:41.000Z
2022-03-11T23:44:22.000Z
tomef/tools/tools.py.ipynb
unlikelymaths/tomef
57b629a3ee932486c55afcf62ef9d8224488ae65
[ "MIT" ]
null
null
null
29.611765
161
0.508542
[ [ [ "# Tools\n<div style=\"position: absolute; right:0;top:0\"><a href=\"../evaluation.py.ipynb\" style=\"text-decoration: none\"> <font size=\"5\">↑</font></a></div>\n\nVarious Scripts. Command line use:\n\n`python evaluation.py --script scriptname`\n\n## Overview\n`scriptname` in brackets\n- [Export Results](./export_results.ipynb) (export)\n- [Clear Data](./clear_data.ipynb) (cleardata)", "_____no_output_____" ] ], [ [ "import argparse\nfrom os.path import join, dirname\n\ndef run_tools_():\n parser = argparse.ArgumentParser(description=\"Topic Modeling Evaluation Framework\")\n parser.add_argument('-s','--script',\n action='store',\n choices=['letter', 'wiki', 'tweetsodp', 'export', 'cleardata'],\n help='Runs a script from the tools folder')\n parser.add_argument('-p','--printconfig',\n action='store_true',\n help='Prints the configuration')\n args = parser.parse_args()\n \n if args.script:\n if args.script == \"letter\":\n pass\n elif args.script == \"wiki\":\n pass\n elif args.script == \"tweetsodp\":\n pass\n elif args.script == \"export\":\n from tools.export_results import main as export_results_main\n export_results_main(join(dirname(__file__),'../'))\n elif args.script == 'cleardata':\n from tools.clear_data import main as clear_data_main\n clear_data_main()\n exit()\n \ndef run_tools():\n try:\n get_ipython\n except:\n run_tools_()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code" ] ]
cb963c2af01501866aafb04696f76f6e55e02288
30,030
ipynb
Jupyter Notebook
2_Training.ipynb
jenchen/image-captioning
3af340d7eb4ef4c6d2cb0a58af0e0c28486fb2d6
[ "MIT" ]
null
null
null
2_Training.ipynb
jenchen/image-captioning
3af340d7eb4ef4c6d2cb0a58af0e0c28486fb2d6
[ "MIT" ]
null
null
null
2_Training.ipynb
jenchen/image-captioning
3af340d7eb4ef4c6d2cb0a58af0e0c28486fb2d6
[ "MIT" ]
null
null
null
59.701789
734
0.62674
[ [ [ "# Computer Vision Nanodegree\n\n## Project: Image Captioning\n\n---\n\nIn this notebook, you will train your CNN-RNN model. \n\nYou are welcome and encouraged to try out many different architectures and hyperparameters when searching for a good model.\n\nThis does have the potential to make the project quite messy! Before submitting your project, make sure that you clean up:\n- the code you write in this notebook. The notebook should describe how to train a single CNN-RNN architecture, corresponding to your final choice of hyperparameters. You should structure the notebook so that the reviewer can replicate your results by running the code in this notebook. \n- the output of the code cell in **Step 2**. The output should show the output obtained when training the model from scratch.\n\nThis notebook **will be graded**. \n\nFeel free to use the links below to navigate the notebook:\n- [Step 1](#step1): Training Setup\n- [Step 2](#step2): Train your Model\n- [Step 3](#step3): (Optional) Validate your Model", "_____no_output_____" ], [ "<a id='step1'></a>\n## Step 1: Training Setup\n\nIn this step of the notebook, you will customize the training of your CNN-RNN model by specifying hyperparameters and setting other options that are important to the training procedure. The values you set now will be used when training your model in **Step 2** below.\n\nYou should only amend blocks of code that are preceded by a `TODO` statement. **Any code blocks that are not preceded by a `TODO` statement should not be modified**.\n\n### Task #1\n\nBegin by setting the following variables:\n- `batch_size` - the batch size of each training batch. It is the number of image-caption pairs used to amend the model weights in each training step. \n- `vocab_threshold` - the minimum word count threshold. Note that a larger threshold will result in a smaller vocabulary, whereas a smaller threshold will include rarer words and result in a larger vocabulary. \n- `vocab_from_file` - a Boolean that decides whether to load the vocabulary from file. \n- `embed_size` - the dimensionality of the image and word embeddings. \n- `hidden_size` - the number of features in the hidden state of the RNN decoder. \n- `num_epochs` - the number of epochs to train the model. We recommend that you set `num_epochs=3`, but feel free to increase or decrease this number as you wish. [This paper](https://arxiv.org/pdf/1502.03044.pdf) trained a captioning model on a single state-of-the-art GPU for 3 days, but you'll soon see that you can get reasonable results in a matter of a few hours! (_But of course, if you want your model to compete with current research, you will have to train for much longer._)\n- `save_every` - determines how often to save the model weights. We recommend that you set `save_every=1`, to save the model weights after each epoch. This way, after the `i`th epoch, the encoder and decoder weights will be saved in the `models/` folder as `encoder-i.pkl` and `decoder-i.pkl`, respectively.\n- `print_every` - determines how often to print the batch loss to the Jupyter notebook while training. Note that you **will not** observe a monotonic decrease in the loss function while training - this is perfectly fine and completely expected! 
You are encouraged to keep this at its default value of `100` to avoid clogging the notebook, but feel free to change it.\n- `log_file` - the name of the text file containing - for every step - how the loss and perplexity evolved during training.\n\nIf you're not sure where to begin to set some of the values above, you can peruse [this paper](https://arxiv.org/pdf/1502.03044.pdf) and [this paper](https://arxiv.org/pdf/1411.4555.pdf) for useful guidance! **To avoid spending too long on this notebook**, you are encouraged to consult these suggested research papers to obtain a strong initial guess for which hyperparameters are likely to work best. Then, train a single model, and proceed to the next notebook (**3_Inference.ipynb**). If you are unhappy with your performance, you can return to this notebook to tweak the hyperparameters (and/or the architecture in **model.py**) and re-train your model.\n\n### Question 1\n\n**Question:** Describe your CNN-RNN architecture in detail. With this architecture in mind, how did you select the values of the variables in Task 1? If you consulted a research paper detailing a successful implementation of an image captioning model, please provide the reference.\n\n**Answer:** I referenced the two papers suggested above to come up with an initial design of my CNN-RNN architecture. The CNN architecture was provided in the initial project code and is a pre-trained ResNet-50 model. My RNN architecture is based on the second paper, \"Show and Tell: A Neural Image Caption Generator\". Thus, I chose `vocab_threshold` of 5, `embed_size` of 512, and `hidden_size` of 512. I think 512 is a good choice because a large word embedding increases the chance of learning useful information. Additionally, I selected a `batch_size` of 128, since it is a power of 2 (taking advantage of vector optimizations) and batch sizes of 128 and 256 are commonly used.\n\n\n### (Optional) Task #2\n\nNote that we have provided a recommended image transform `transform_train` for pre-processing the training images, but you are welcome (and encouraged!) to modify it as you wish. When modifying this transform, keep in mind that:\n- the images in the dataset have varying heights and widths, and \n- if using a pre-trained model, you must perform the corresponding appropriate normalization.\n\n### Question 2\n\n**Question:** How did you select the transform in `transform_train`? If you left the transform at its provided value, why do you think that it is a good choice for your CNN architecture?\n\n**Answer:** I left `transform_train` at its provided value. Since I used the CNN architecture as provided, I kept the transform function unchanged. By applying random cropping, the image transform extends the amount of data for training and makes the neural net more robust. Additionally, horizontal flipping makes sense because images are more likely to be mirrored across the vertical axis. A dog facing left and a dog facing right should be interpreted as dogs in a similar position. Normalization is also an important step. The data augmentation introduced by the image transformation function makes it a good choice for the CNN architecture.\n\n\n### Task #3\n\nNext, you will specify a Python list containing the learnable parameters of the model. 
For instance, if you decide to make all weights in the decoder trainable, but only want to train the weights in the embedding layer of the encoder, then you should set `params` to something like:\n```\nparams = list(decoder.parameters()) + list(encoder.embed.parameters()) \n```\n\n### Question 3\n\n**Question:** How did you select the trainable parameters of your architecture? Why do you think this is a good choice?\n\n**Answer:** I selected the trainable parameters of my architecture based on the recommended values. All the weights in the decoder and only the weights in the embedding layer of the encoder are trained, while the other parameters of the encoder won't be trained since we're using a pre-trained model.\n\n### Task #4\n\nFinally, you will select an [optimizer](http://pytorch.org/docs/master/optim.html#torch.optim.Optimizer).\n\n### Question 4\n\n**Question:** How did you select the optimizer used to train your model?\n\n**Answer:** I initially used SGD since the paper recommends it. After experimentation, I decided to go with the Adam optimizer to train my final model. SGD was very slow and Adam was faster and produced significantly better perplexity scores (with perplexity <30). Models that are better at predicting a sample have low perplexity.", "_____no_output_____" ] ], [ [ "import nltk\nnltk.download('punkt')", "[nltk_data] Downloading package punkt to /root/nltk_data...\n[nltk_data] Unzipping tokenizers/punkt.zip.\n" ], [ "% load_ext autoreload\n\nimport torch\nimport torch.nn as nn\nfrom torchvision import transforms\nimport sys\nsys.path.append('/opt/cocoapi/PythonAPI')\nfrom pycocotools.coco import COCO\nfrom data_loader import get_loader\nfrom model import EncoderCNN, DecoderRNN\nimport math\n\n\n## TODO #1: Select appropriate values for the Python variables below.\nbatch_size = 128          # batch size\nvocab_threshold = 5        # minimum word count threshold\nvocab_from_file = True    # if True, load existing vocab file\nembed_size = 512           # dimensionality of image and word embeddings\nhidden_size = 512          # number of features in hidden state of the RNN decoder\nnum_epochs = 3             # number of training epochs\nsave_every = 1             # determines frequency of saving model weights\nprint_every = 100          # determines window for printing average loss\nlog_file = 'training_log.txt'       # name of file with saved training loss and perplexity\n\n# (Optional) TODO #2: Amend the image transform below.\ntransform_train = transforms.Compose([ \n    transforms.Resize(256),                          # smaller edge of image resized to 256\n    transforms.RandomCrop(224),                      # get 224x224 crop from random location\n    transforms.RandomHorizontalFlip(),               # horizontally flip image with probability=0.5\n    transforms.ToTensor(),                           # convert the PIL Image to a tensor\n    transforms.Normalize((0.485, 0.456, 0.406),      # normalize image for pre-trained model\n                         (0.229, 0.224, 0.225))])\n\n# Build data loader.\ndata_loader = get_loader(transform=transform_train,\n                         mode='train',\n                         batch_size=batch_size,\n                         vocab_threshold=vocab_threshold,\n                         vocab_from_file=vocab_from_file)\n\n# The size of the vocabulary.\nvocab_size = len(data_loader.dataset.vocab)\n\n# Initialize the encoder and decoder. \nencoder = EncoderCNN(embed_size)\ndecoder = DecoderRNN(embed_size, hidden_size, vocab_size)\n\n# Move models to GPU if CUDA is available. \ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nencoder.to(device)\ndecoder.to(device)\n\n# Define the loss function. 
\ncriterion = nn.CrossEntropyLoss().cuda() if torch.cuda.is_available() else nn.CrossEntropyLoss()\n\n# TODO #3: Specify the learnable parameters of the model.\nparams = list(decoder.parameters()) + list(encoder.embed.parameters()) \n\n# TODO #4: Define the optimizer.\noptimizer = torch.optim.Adam(params)\n\n# Set the total number of training steps per epoch.\ntotal_step = math.ceil(len(data_loader.dataset.caption_lengths) / data_loader.batch_sampler.batch_size)", "Vocabulary successfully loaded from vocab.pkl file!\nloading annotations into memory...\nDone (t=1.11s)\ncreating index...\n" ] ], [ [ "<a id='step2'></a>\n## Step 2: Train your Model\n\nOnce you have executed the code cell in **Step 1**, the training procedure below should run without issue. \n\nIt is completely fine to leave the code cell below as-is without modifications to train your model. However, if you would like to modify the code used to train the model below, you must ensure that your changes are easily parsed by your reviewer. In other words, make sure to provide appropriate comments to describe how your code works! \n\nYou may find it useful to load saved weights to resume training. In that case, note the names of the files containing the encoder and decoder weights that you'd like to load (`encoder_file` and `decoder_file`). Then you can load the weights by using the lines below:\n\n```python\n# Load pre-trained weights before resuming training.\nencoder.load_state_dict(torch.load(os.path.join('./models', encoder_file)))\ndecoder.load_state_dict(torch.load(os.path.join('./models', decoder_file)))\n```\n\nWhile trying out parameters, make sure to take extensive notes and record the settings that you used in your various training runs. In particular, you don't want to encounter a situation where you've trained a model for several hours but can't remember what settings you used :).\n\n### A Note on Tuning Hyperparameters\n\nTo figure out how well your model is doing, you can look at how the training loss and perplexity evolve during training - and for the purposes of this project, you are encouraged to amend the hyperparameters based on this information. \n\nHowever, this will not tell you if your model is overfitting to the training data, and, unfortunately, overfitting is a problem that is commonly encountered when training image captioning models. \n\nFor this project, you need not worry about overfitting. **This project does not have strict requirements regarding the performance of your model**, and you just need to demonstrate that your model has learned **_something_** when you generate captions on the test data. For now, we strongly encourage you to train your model for the suggested 3 epochs without worrying about performance; then, you should immediately transition to the next notebook in the sequence (**3_Inference.ipynb**) to see how your model performs on the test data. If your model needs to be changed, you can come back to this notebook, amend hyperparameters (if necessary), and re-train the model.\n\nThat said, if you would like to go above and beyond in this project, you can read about some approaches to minimizing overfitting in section 4.3.1 of [this paper](http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=7505636). 
In the next (optional) step of this notebook, we provide some guidance for assessing the performance on the validation dataset.", "_____no_output_____" ] ], [ [ "import torch.utils.data as data\nimport numpy as np\nimport os\nimport requests\nimport time\n\n# Open the training log file.\nf = open(log_file, 'w')\n\nold_time = time.time()\nresponse = requests.request(\"GET\", \n \"http://metadata.google.internal/computeMetadata/v1/instance/attributes/keep_alive_token\", \n headers={\"Metadata-Flavor\":\"Google\"})\n\nfor epoch in range(1, num_epochs+1):\n \n for i_step in range(1, total_step+1):\n \n if time.time() - old_time > 60:\n old_time = time.time()\n requests.request(\"POST\", \n \"https://nebula.udacity.com/api/v1/remote/keep-alive\", \n headers={'Authorization': \"STAR \" + response.text})\n \n # Randomly sample a caption length, and sample indices with that length.\n indices = data_loader.dataset.get_train_indices()\n # Create and assign a batch sampler to retrieve a batch with the sampled indices.\n new_sampler = data.sampler.SubsetRandomSampler(indices=indices)\n data_loader.batch_sampler.sampler = new_sampler\n \n # Obtain the batch.\n images, captions = next(iter(data_loader))\n\n # Move batch of images and captions to GPU if CUDA is available.\n images = images.to(device)\n captions = captions.to(device)\n \n # Zero the gradients.\n decoder.zero_grad()\n encoder.zero_grad()\n \n # Pass the inputs through the CNN-RNN model.\n features = encoder(images)\n outputs = decoder(features, captions)\n \n # Calculate the batch loss.\n loss = criterion(outputs.view(-1, vocab_size), captions.view(-1))\n \n # Backward pass.\n loss.backward()\n \n # Update the parameters in the optimizer.\n optimizer.step()\n \n # Get training statistics.\n stats = 'Epoch [%d/%d], Step [%d/%d], Loss: %.4f, Perplexity: %5.4f' % (epoch, num_epochs, i_step, total_step, loss.item(), np.exp(loss.item()))\n \n # Print training statistics (on same line).\n print('\\r' + stats, end=\"\")\n sys.stdout.flush()\n \n # Print training statistics to file.\n f.write(stats + '\\n')\n f.flush()\n \n # Print training statistics (on different line).\n if i_step % print_every == 0:\n print('\\r' + stats)\n \n # Save the weights.\n if epoch % save_every == 0:\n torch.save(decoder.state_dict(), os.path.join('./models', 'decoder-%d.pkl' % epoch))\n torch.save(encoder.state_dict(), os.path.join('./models', 'encoder-%d.pkl' % epoch))\n\n# Close the training log file.\nf.close()", "Epoch [1/3], Step [100/3236], Loss: 3.6239, Perplexity: 37.48471\nEpoch [1/3], Step [200/3236], Loss: 3.1404, Perplexity: 23.11358\nEpoch [1/3], Step [300/3236], Loss: 3.0967, Perplexity: 22.1238\nEpoch [1/3], Step [400/3236], Loss: 3.0676, Perplexity: 21.49042\nEpoch [1/3], Step [500/3236], Loss: 2.9404, Perplexity: 18.9239\nEpoch [1/3], Step [600/3236], Loss: 2.8282, Perplexity: 16.9155\nEpoch [1/3], Step [700/3236], Loss: 2.7990, Perplexity: 16.4287\nEpoch [1/3], Step [800/3236], Loss: 2.6820, Perplexity: 14.6139\nEpoch [1/3], Step [900/3236], Loss: 2.4851, Perplexity: 12.0017\nEpoch [1/3], Step [1000/3236], Loss: 2.6709, Perplexity: 14.4536\nEpoch [1/3], Step [1100/3236], Loss: 2.3593, Perplexity: 10.5840\nEpoch [1/3], Step [1200/3236], Loss: 2.3194, Perplexity: 10.1696\nEpoch [1/3], Step [1300/3236], Loss: 2.3720, Perplexity: 10.7186\nEpoch [1/3], Step [1400/3236], Loss: 2.4119, Perplexity: 11.15474\nEpoch [1/3], Step [1500/3236], Loss: 2.3795, Perplexity: 10.7997\nEpoch [1/3], Step [1600/3236], Loss: 2.3247, Perplexity: 10.2233\nEpoch [1/3], Step 
[1700/3236], Loss: 2.5268, Perplexity: 12.5134\nEpoch [1/3], Step [1800/3236], Loss: 2.1708, Perplexity: 8.76544\nEpoch [1/3], Step [1900/3236], Loss: 2.2815, Perplexity: 9.79152\nEpoch [1/3], Step [2000/3236], Loss: 2.1799, Perplexity: 8.84551\nEpoch [1/3], Step [2100/3236], Loss: 2.2065, Perplexity: 9.08429\nEpoch [1/3], Step [2200/3236], Loss: 2.2165, Perplexity: 9.17510\nEpoch [1/3], Step [2300/3236], Loss: 2.1406, Perplexity: 8.50452\nEpoch [1/3], Step [2400/3236], Loss: 2.4853, Perplexity: 12.0042\nEpoch [1/3], Step [2500/3236], Loss: 2.1120, Perplexity: 8.26453\nEpoch [1/3], Step [2600/3236], Loss: 2.1271, Perplexity: 8.39043\nEpoch [1/3], Step [2700/3236], Loss: 2.1332, Perplexity: 8.44184\nEpoch [1/3], Step [2800/3236], Loss: 2.0957, Perplexity: 8.13101\nEpoch [1/3], Step [2900/3236], Loss: 2.2598, Perplexity: 9.58117\nEpoch [1/3], Step [3000/3236], Loss: 2.1951, Perplexity: 8.98091\nEpoch [1/3], Step [3100/3236], Loss: 2.0428, Perplexity: 7.71224\nEpoch [1/3], Step [3200/3236], Loss: 2.2843, Perplexity: 9.81868\nEpoch [2/3], Step [100/3236], Loss: 2.1602, Perplexity: 8.672755\nEpoch [2/3], Step [200/3236], Loss: 2.1486, Perplexity: 8.57271\nEpoch [2/3], Step [300/3236], Loss: 2.4173, Perplexity: 11.2155\nEpoch [2/3], Step [400/3236], Loss: 2.3438, Perplexity: 10.4204\nEpoch [2/3], Step [500/3236], Loss: 2.0606, Perplexity: 7.85104\nEpoch [2/3], Step [600/3236], Loss: 2.1495, Perplexity: 8.58036\nEpoch [2/3], Step [700/3236], Loss: 2.1013, Perplexity: 8.17695\nEpoch [2/3], Step [800/3236], Loss: 2.1093, Perplexity: 8.24217\nEpoch [2/3], Step [900/3236], Loss: 2.0459, Perplexity: 7.73593\nEpoch [2/3], Step [1000/3236], Loss: 2.0698, Perplexity: 7.9231\nEpoch [2/3], Step [1100/3236], Loss: 2.1618, Perplexity: 8.68655\nEpoch [2/3], Step [1200/3236], Loss: 2.3400, Perplexity: 10.3816\nEpoch [2/3], Step [1300/3236], Loss: 2.0491, Perplexity: 7.76075\nEpoch [2/3], Step [1400/3236], Loss: 2.0541, Perplexity: 7.79959\nEpoch [2/3], Step [1500/3236], Loss: 2.0187, Perplexity: 7.52873\nEpoch [2/3], Step [1600/3236], Loss: 2.1680, Perplexity: 8.74058\nEpoch [2/3], Step [1700/3236], Loss: 1.9661, Perplexity: 7.14275\nEpoch [2/3], Step [1800/3236], Loss: 1.9652, Perplexity: 7.13656\nEpoch [2/3], Step [1900/3236], Loss: 2.1052, Perplexity: 8.20876\nEpoch [2/3], Step [2000/3236], Loss: 1.9908, Perplexity: 7.32115\nEpoch [2/3], Step [2100/3236], Loss: 2.1415, Perplexity: 8.51187\nEpoch [2/3], Step [2200/3236], Loss: 2.7824, Perplexity: 16.1574\nEpoch [2/3], Step [2300/3236], Loss: 2.1612, Perplexity: 8.68132\nEpoch [2/3], Step [2400/3236], Loss: 2.0250, Perplexity: 7.57602\nEpoch [2/3], Step [2500/3236], Loss: 2.8415, Perplexity: 17.1420\nEpoch [2/3], Step [2600/3236], Loss: 2.0138, Perplexity: 7.49196\nEpoch [2/3], Step [2700/3236], Loss: 2.1041, Perplexity: 8.19960\nEpoch [2/3], Step [2800/3236], Loss: 2.0494, Perplexity: 7.76293\nEpoch [2/3], Step [2900/3236], Loss: 1.9698, Perplexity: 7.16928\nEpoch [2/3], Step [3000/3236], Loss: 2.1085, Perplexity: 8.23572\nEpoch [2/3], Step [3100/3236], Loss: 2.0151, Perplexity: 7.50161\nEpoch [2/3], Step [3200/3236], Loss: 1.8978, Perplexity: 6.67105\nEpoch [3/3], Step [100/3236], Loss: 1.9430, Perplexity: 6.979408\nEpoch [3/3], Step [200/3236], Loss: 2.1278, Perplexity: 8.39668\nEpoch [3/3], Step [300/3236], Loss: 1.9606, Perplexity: 7.10383\nEpoch [3/3], Step [400/3236], Loss: 1.8707, Perplexity: 6.49252\nEpoch [3/3], Step [500/3236], Loss: 1.9794, Perplexity: 7.23856\nEpoch [3/3], Step [600/3236], Loss: 2.0009, Perplexity: 7.39608\nEpoch [3/3], Step 
[700/3236], Loss: 1.8993, Perplexity: 6.68102\nEpoch [3/3], Step [800/3236], Loss: 1.9123, Perplexity: 6.76854\nEpoch [3/3], Step [900/3236], Loss: 2.7445, Perplexity: 15.5567\nEpoch [3/3], Step [1000/3236], Loss: 1.8934, Perplexity: 6.6417\nEpoch [3/3], Step [1100/3236], Loss: 2.1756, Perplexity: 8.80789\nEpoch [3/3], Step [1200/3236], Loss: 1.9674, Perplexity: 7.15219\nEpoch [3/3], Step [1300/3236], Loss: 1.8194, Perplexity: 6.16826\nEpoch [3/3], Step [1400/3236], Loss: 2.2362, Perplexity: 9.35803\nEpoch [3/3], Step [1500/3236], Loss: 1.8438, Perplexity: 6.32029\nEpoch [3/3], Step [1600/3236], Loss: 2.0412, Perplexity: 7.70011\nEpoch [3/3], Step [1700/3236], Loss: 1.8665, Perplexity: 6.46590\nEpoch [3/3], Step [1800/3236], Loss: 1.9141, Perplexity: 6.78106\nEpoch [3/3], Step [1900/3236], Loss: 1.9906, Perplexity: 7.31972\nEpoch [3/3], Step [2000/3236], Loss: 1.8777, Perplexity: 6.53881\nEpoch [3/3], Step [2100/3236], Loss: 1.9040, Perplexity: 6.71282\nEpoch [3/3], Step [2200/3236], Loss: 1.9244, Perplexity: 6.85118\nEpoch [3/3], Step [2300/3236], Loss: 1.8678, Perplexity: 6.47370\nEpoch [3/3], Step [2400/3236], Loss: 2.1070, Perplexity: 8.22378\nEpoch [3/3], Step [2500/3236], Loss: 1.8958, Perplexity: 6.65829\nEpoch [3/3], Step [2600/3236], Loss: 1.7855, Perplexity: 5.96253\nEpoch [3/3], Step [2700/3236], Loss: 1.9551, Perplexity: 7.06489\nEpoch [3/3], Step [2800/3236], Loss: 2.0558, Perplexity: 7.81299\nEpoch [3/3], Step [2900/3236], Loss: 2.1580, Perplexity: 8.65373\nEpoch [3/3], Step [3000/3236], Loss: 1.9254, Perplexity: 6.85805\nEpoch [3/3], Step [3100/3236], Loss: 1.8341, Perplexity: 6.25961\nEpoch [3/3], Step [3200/3236], Loss: 2.0032, Perplexity: 7.41304\nEpoch [3/3], Step [3236/3236], Loss: 1.9834, Perplexity: 7.26748" ] ], [ [ "<a id='step3'></a>\n## Step 3: (Optional) Validate your Model\n\nTo assess potential overfitting, one approach is to assess performance on a validation set. If you decide to do this **optional** task, you are required to first complete all of the steps in the next notebook in the sequence (**3_Inference.ipynb**); as part of that notebook, you will write and test code (specifically, the `sample` method in the `DecoderRNN` class) that uses your RNN decoder to generate captions. That code will prove incredibly useful here. \n\nIf you decide to validate your model, please do not edit the data loader in **data_loader.py**. Instead, create a new file named **data_loader_val.py** containing the code for obtaining the data loader for the validation data. You can access:\n- the validation images at filepath `'/opt/cocoapi/images/train2014/'`, and\n- the validation image caption annotation file at filepath `'/opt/cocoapi/annotations/captions_val2014.json'`.\n\nThe suggested approach to validating your model involves creating a json file such as [this one](https://github.com/cocodataset/cocoapi/blob/master/results/captions_val2014_fakecap_results.json) containing your model's predicted captions for the validation images. Then, you can write your own script or use one that you [find online](https://github.com/tylin/coco-caption) to calculate the BLEU score of your model. You can read more about the BLEU score, along with other evaluation metrics (such as METEOR and CIDEr) in section 4.1 of [this paper](https://arxiv.org/pdf/1411.4555.pdf). 
For more information about how to use the annotation file, check out the [website](http://cocodataset.org/#download) for the COCO dataset.", "_____no_output_____" ] ], [ [ "# (Optional) TODO: Validate your model.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
cb9673455e52a399b9474b7117e1bbed03e4a01c
174,190
ipynb
Jupyter Notebook
notebooks/week 2/C1_W2_Assignment.ipynb
mattborghi/practical-data-science-specialization
71ffab6d6a402409cd7b0795efbdc5367fabb3d4
[ "MIT" ]
null
null
null
notebooks/week 2/C1_W2_Assignment.ipynb
mattborghi/practical-data-science-specialization
71ffab6d6a402409cd7b0795efbdc5367fabb3d4
[ "MIT" ]
null
null
null
notebooks/week 2/C1_W2_Assignment.ipynb
mattborghi/practical-data-science-specialization
71ffab6d6a402409cd7b0795efbdc5367fabb3d4
[ "MIT" ]
null
null
null
117.616475
64,060
0.84421
[ [ [ "# Detect data bias with Amazon SageMaker Clarify\n\n### Introduction\n\n\nBias can be present in your data before any model training occurs. Inspecting the dataset for bias can help detect collection gaps, inform your feature engineering, and understand societal biases the dataset may reflect. In this lab you will analyze bias on the dataset, generate and analyze bias report, and prepare the dataset for the model training.", "_____no_output_____" ], [ "### Table of Contents\n\n- [1. Analyze the dataset](#c1w2-1.)\n - [1.1. Create a pandas data frame from the CSV file](#c1w2-1.1.)\n - [1.2. Upload the dataset to S3 bucket](#c1w2-1.2.)\n- [2. Analyze class imbalance on the dataset with Amazon SageMaker Clarify](#c1w2-2.)\n - [2.1. Configure a `DataConfig`](#c1w2-2.1.)\n - [Exercise 1](#c1w2-ex-1)\n - [2.2. Configure `BiasConfig`](#c1w2-2.2.)\n - [2.3. Configure Amazon SageMaker Clarify as a processing job](#c1w2-2.3.)\n - [2.4. Run the Amazon SageMaker Clarify processing job](#c1w2-2.4.)\n - [Exercise 2](#c1w2-ex-2)\n - [2.5. Run and review the Amazon SageMaker Clarify processing job on the unbalanced dataset](#c1w2-2.5.)\n - [2.6. Analyze unbalanced bias report](#c1w2-2.6.)\n- [3. Balance the dataset by `product_category` and `sentiment`](#c1w2-3.)\n- [4. Analyze bias on balanced dataset with Amazon SageMaker Clarify](#c1w2-4.)\n - [4.1. Configure a `DataConfig`](#c1w2-4.1.)\n - [Exercise 3](#c1w2-ex-3)\n - [4.2. Configure `BiasConfig`](#c1w2-4.2.)\n - [4.3. Configure SageMaker Clarify as a processing job](#c1w2-4.3.)\n - [4.4. Run the Amazon SageMaker Clarify processing job](#c1w2-4.4.)\n - [Exercise 4](#c1w2-ex-4)\n - [4.5. Run and review the Clarify processing job on the balanced dataset](#c1w2-4.5.)\n - [4.6. Analyze balanced bias report](#c1w2-4.6.)", "_____no_output_____" ], [ "First, let's install and import required modules.", "_____no_output_____" ] ], [ [ "# please ignore warning messages during the installation\n!pip install --disable-pip-version-check -q sagemaker==2.35.0", "/opt/conda/lib/python3.7/site-packages/secretstorage/dhcrypto.py:16: CryptographyDeprecationWarning: int_from_bytes is deprecated, use int.from_bytes instead\n from cryptography.utils import int_from_bytes\n/opt/conda/lib/python3.7/site-packages/secretstorage/util.py:25: CryptographyDeprecationWarning: int_from_bytes is deprecated, use int.from_bytes instead\n from cryptography.utils import int_from_bytes\n\u001b[33mWARNING: Running pip as root will break packages and permissions. You should install packages reliably by using venv: https://pip.pypa.io/warnings/venv\u001b[0m\n" ], [ "import boto3\nimport sagemaker\nimport pandas as pd\nimport numpy as np\n\nsess = sagemaker.Session()\nbucket = sess.default_bucket()\nrole = sagemaker.get_execution_role()\nregion = boto3.Session().region_name", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\n%matplotlib inline\n%config InlineBackend.figure_format='retina'", "_____no_output_____" ] ], [ [ "<a name='c1w2-1.'></a>\n# 1. Analyze the dataset", "_____no_output_____" ], [ "<a name='c1w2-1.1.'></a>\n### 1.1. 
Create a pandas data frame from the CSV file", "_____no_output_____" ], [ "Create a pandas dataframe from the transformed CSV file of reviews.", "_____no_output_____" ] ], [ [ "!aws s3 cp 's3://dlai-practical-data-science/data/transformed/womens_clothing_ecommerce_reviews_transformed.csv' ./", "download: s3://dlai-practical-data-science/data/transformed/womens_clothing_ecommerce_reviews_transformed.csv to ./womens_clothing_ecommerce_reviews_transformed.csv\n" ], [ "path = './womens_clothing_ecommerce_reviews_transformed.csv'\n\ndf = pd.read_csv(path)\ndf.head()", "_____no_output_____" ] ], [ [ "As you saw in the previous lab, there are way more positive reviews than negative or neutral. Such a dataset is called unbalanced. \n\nIn this case, with a relatively small data subset, you can visualize the imbalances directly. At scale, you would need to perform bias analysis. Let's use this dataset as an example.", "_____no_output_____" ] ], [ [ "import seaborn as sns\n\nsns.countplot(data=df, x='sentiment', hue='product_category')\n\nplt.legend(loc='upper right',bbox_to_anchor=(1.3, 1.1))", "_____no_output_____" ] ], [ [ "<a name='c1w2-1.2.'></a>\n### 1.2. Upload the dataset to S3 bucket\n\nUpload the dataset to a private S3 bucket in a folder called `bias/unbalanced`.", "_____no_output_____" ] ], [ [ "data_s3_uri_unbalanced = sess.upload_data(bucket=bucket, \n                               key_prefix='bias/unbalanced', \n                               path='./womens_clothing_ecommerce_reviews_transformed.csv')\ndata_s3_uri_unbalanced", "_____no_output_____" ] ], [ [ "You can review the uploaded CSV file in the S3 bucket.\n\n**Instructions**: \n- open the link\n- click on the S3 bucket name `sagemaker-us-east-1-ACCOUNT`\n- go to the folder `bias/unbalanced`\n- check the existence of the file `womens_clothing_ecommerce_reviews_transformed.csv`", "_____no_output_____" ] ], [ [ "from IPython.core.display import display, HTML\n\ndisplay(HTML('<b>Review <a target=\"top\" href=\"https://s3.console.aws.amazon.com/s3/home?region={}#\">Amazon S3 bucket</a></b>'.format(region)))", "_____no_output_____" ] ], [ [ "<a name='c1w2-2.'></a>\n# 2. Analyze class imbalance on the dataset with Amazon SageMaker Clarify\nLet's analyze bias in `sentiment` with respect to the `product_category` facet on the dataset.", "_____no_output_____" ], [ "<a name='c1w2-2.1.'></a>\n### 2.1. Configure a `DataConfig`\n\nInformation about the input data needs to be provided to the processor. This can be done with the `DataConfig` of the Clarify container. 
It stores information about the dataset to be analyzed, for example the dataset file, its format, headers and labels.", "_____no_output_____" ], [ "<a name='c1w2-ex-1'></a>\n### Exercise 1\n\nConfigure a `DataConfig` for Clarify.\n\n**Instructions**: Use `DataConfig` to configure the target column (`'sentiment'` label), data input (`data_s3_uri_unbalanced`) and output paths (`bias_report_unbalanced_output_path`) with their formats (header names and the dataset type):\n\n```python\ndata_config_unbalanced = clarify.DataConfig(\n    s3_data_input_path=..., # S3 object path containing the unbalanced dataset\n    s3_output_path=..., # path to store the output\n    label='...', # target column\n    headers=df_unbalanced.columns.to_list(),\n    dataset_type='text/csv'\n)\n```", "_____no_output_____" ] ], [ [ "from sagemaker import clarify\n\nbias_report_unbalanced_output_path = 's3://{}/bias/generated_bias_report/unbalanced'.format(bucket)\n\ndata_config_unbalanced = clarify.DataConfig(\n    ### BEGIN SOLUTION - DO NOT delete this comment for grading purposes\n    s3_data_input_path=data_s3_uri_unbalanced, # Replace None\n    s3_output_path=bias_report_unbalanced_output_path, # Replace None\n    label='sentiment', # Replace None\n    ### END SOLUTION - DO NOT delete this comment for grading purposes\n    headers=df.columns.to_list(),\n    dataset_type='text/csv'\n)", "_____no_output_____" ] ], [ [ "<a name='c1w2-2.2.'></a>\n### 2.2. Configure `BiasConfig`\nBias is measured by calculating a metric and comparing it across groups. To compute it, you will specify the required information in the `BiasConfig` API. SageMaker Clarify needs the sensitive columns (`facet_name`) and the desirable outcomes (`label_values_or_threshold`). Here `product_category` is the sensitive facet and the desired outcome is `sentiment==1`.\n\n\nSageMaker Clarify can handle both categorical and continuous data for `label_values_or_threshold`. In this case you are using categorical data.", "_____no_output_____" ] ], [ [ "bias_config_unbalanced = clarify.BiasConfig(\n    label_values_or_threshold=[1], # desired sentiment\n    facet_name='product_category' # sensitive column (facet)\n)", "_____no_output_____" ] ], [ [ "<a name='c1w2-2.3.'></a>\n### 2.3. Configure Amazon SageMaker Clarify as a processing job", "_____no_output_____" ], [ "Now you need to construct an object called `SageMakerClarifyProcessor`. This allows you to scale the process of data bias detection using two parameters, `instance_count` and `instance_type`. `instance_count` represents how many nodes you want in the distributed cluster during the bias detection. `instance_type` specifies the processing capability (compute capacity, memory capacity) available for each one of those nodes.", "_____no_output_____" ] ], [ [ "clarify_processor_unbalanced = clarify.SageMakerClarifyProcessor(role=role,\n                                                      instance_count=1,\n                                                      instance_type='ml.m5.large',\n                                                      sagemaker_session=sess)", "_____no_output_____" ] ], [ [ "<a name='c1w2-2.4.'></a>\n### 2.4. 
Run the Amazon SageMaker Clarify processing job", "_____no_output_____" ], [ "<a name='c1w2-ex-2'></a>\n### Exercise 2\n\nRun the configured processing job to compute the requested bias `methods` of the input data\n\n**Instructions**: Apply the `run_pre_training_bias` method to the configured Clarify processor, passing the configured input/output data (`data_config_unbalanced`), configuration of sensitive groups (`bias_config_unbalanced`) with the other job setup parameters:\n```python\nclarify_processor_unbalanced.run_pre_training_bias(\n data_config=..., # configured input/output data\n data_bias_config=..., # configured sensitive groups\n methods=[\"CI\", \"DPL\", \"KL\", \"JS\", \"LP\", \"TVD\", \"KS\"], # selector of a subset of potential metrics\n wait=False, # whether the call should wait until the job completes (default: True)\n logs=False # whether to show the logs produced by the job. Only meaningful when wait is True (default: True)\n)\n```", "_____no_output_____" ] ], [ [ "clarify_processor_unbalanced.run_pre_training_bias(\n ### BEGIN SOLUTION - DO NOT delete this comment for grading purposes\n data_config=data_config_unbalanced, # Replace None\n data_bias_config=bias_config_unbalanced, # Replace None\n ### END SOLUTION - DO NOT delete this comment for grading purposes\n methods=[\"CI\", \"DPL\", \"KL\", \"JS\", \"LP\", \"TVD\", \"KS\"],\n wait=False,\n logs=False\n)", "\nJob Name: Clarify-Pretraining-Bias-2021-06-25-15-52-31-970\nInputs: [{'InputName': 'dataset', 'AppManaged': False, 'S3Input': {'S3Uri': 's3://sagemaker-us-east-1-390574811984/bias/unbalanced/womens_clothing_ecommerce_reviews_transformed.csv', 'LocalPath': '/opt/ml/processing/input/data', 'S3DataType': 'S3Prefix', 'S3InputMode': 'File', 'S3DataDistributionType': 'FullyReplicated', 'S3CompressionType': 'None'}}, {'InputName': 'analysis_config', 'AppManaged': False, 'S3Input': {'S3Uri': 's3://sagemaker-us-east-1-390574811984/bias/generated_bias_report/unbalanced/analysis_config.json', 'LocalPath': '/opt/ml/processing/input/config', 'S3DataType': 'S3Prefix', 'S3InputMode': 'File', 'S3DataDistributionType': 'FullyReplicated', 'S3CompressionType': 'None'}}]\nOutputs: [{'OutputName': 'analysis_result', 'AppManaged': False, 'S3Output': {'S3Uri': 's3://sagemaker-us-east-1-390574811984/bias/generated_bias_report/unbalanced', 'LocalPath': '/opt/ml/processing/output', 'S3UploadMode': 'EndOfJob'}}]\n" ], [ "run_unbalanced_bias_processing_job_name = clarify_processor_unbalanced.latest_job.job_name\nprint(run_unbalanced_bias_processing_job_name)", "Clarify-Pretraining-Bias-2021-06-25-15-52-31-970\n" ] ], [ [ "<a name='c1w2-2.5.'></a>\n### 2.5. 
Run and review the Amazon SageMaker Clarify processing job on the unbalanced dataset\n\nReview the created Amazon SageMaker Clarify processing job and the Cloud Watch logs.\n\n**Instructions**: \n- open the link\n- note that you are in the section Amazon SageMaker -> Processing jobs\n- check the processing job name\n- note which other properties of the processing job you can see in the console", "_____no_output_____" ] ], [ [ "from IPython.core.display import display, HTML\n\ndisplay(HTML('<b>Review <a target=\"blank\" href=\"https://console.aws.amazon.com/sagemaker/home?region={}#/processing-jobs/{}\">processing job</a></b>'.format(region, run_unbalanced_bias_processing_job_name)))\n", "_____no_output_____" ] ], [ [ "**Instructions**: \n- open the link\n- open the log stream with the name, which starts from the processing job name\n- have a quick look at the log messages", "_____no_output_____" ] ], [ [ "from IPython.core.display import display, HTML\n\ndisplay(HTML('<b>Review <a target=\"blank\" href=\"https://console.aws.amazon.com/cloudwatch/home?region={}#logStream:group=/aws/sagemaker/ProcessingJobs;prefix={};streamFilter=typeLogStreamPrefix\">CloudWatch logs</a> after about 5 minutes</b>'.format(region, run_unbalanced_bias_processing_job_name)))", "_____no_output_____" ], [ "running_processor = sagemaker.processing.ProcessingJob.from_processing_name(processing_job_name=run_unbalanced_bias_processing_job_name,\n sagemaker_session=sess)", "_____no_output_____" ] ], [ [ "### _This cell will take approximately 5-10 minutes to run._", "_____no_output_____" ] ], [ [ "%%time\n\nrunning_processor.wait(logs=False)", "......!CPU times: user 44.3 ms, sys: 4.44 ms, total: 48.7 ms\nWall time: 30.3 s\n" ] ], [ [ "<a name='c1w2-2.6.'></a>\n### 2.6. Analyze unbalanced bias report\nIn this run, you analyzed bias for `sentiment` relative to the `product_category` for the unbalanced data. 
Let's have a look at the bias report.", "_____no_output_____" ], [ "List the files in the output path `bias_report_unbalanced_output_path`:", "_____no_output_____" ] ], [ [ "!aws s3 ls $bias_report_unbalanced_output_path/", "2021-06-25 15:57:49      31732 analysis.json\n2021-06-25 15:52:33        346 analysis_config.json\n2021-06-25 15:57:49     387134 report.html\n2021-06-25 15:57:49     121999 report.ipynb\n2021-06-25 15:57:49     139371 report.pdf\n" ] ], [ [ "Download generated bias report from S3 bucket:", "_____no_output_____" ] ], [ [ "!aws s3 cp --recursive $bias_report_unbalanced_output_path ./generated_bias_report/unbalanced/", "download: s3://sagemaker-us-east-1-390574811984/bias/generated_bias_report/unbalanced/analysis_config.json to generated_bias_report/unbalanced/analysis_config.json\ndownload: s3://sagemaker-us-east-1-390574811984/bias/generated_bias_report/unbalanced/analysis.json to generated_bias_report/unbalanced/analysis.json\ndownload: s3://sagemaker-us-east-1-390574811984/bias/generated_bias_report/unbalanced/report.ipynb to generated_bias_report/unbalanced/report.ipynb\ndownload: s3://sagemaker-us-east-1-390574811984/bias/generated_bias_report/unbalanced/report.html to generated_bias_report/unbalanced/report.html\ndownload: s3://sagemaker-us-east-1-390574811984/bias/generated_bias_report/unbalanced/report.pdf to generated_bias_report/unbalanced/report.pdf\n" ] ], [ [ "Review the downloaded bias report (in HTML format):", "_____no_output_____" ] ], [ [ "from IPython.core.display import display, HTML\n\ndisplay(HTML('<b>Review <a target=\"blank\" href=\"./generated_bias_report/unbalanced/report.html\">unbalanced bias report</a></b>'))", "_____no_output_____" ] ], [ [ "The bias report shows a number of metrics, but here you can focus on just two of them: \n- Class Imbalance (CI). Measures the imbalance in the number of members between different facet values. Answers the question, does a `product_category` have disproportionately more reviews than others? Values of CI will become equal for even distribution between facets. Here, different CI values show the existence of imbalance.\n- Difference in Positive Proportions in Labels (DPL). Measures the imbalance of positive outcomes between different facet values. Answers the question, does a `product_category` have disproportionately higher ratings than others? With the range over the interval from -1 to 1, if there is no bias, you want to see this value as close as possible to zero. Here, non-zero values indicate the imbalances.", "_____no_output_____", "<a name='c1w2-3.'></a>\n# 3. Balance the dataset by `product_category` and `sentiment`\nLet's balance the dataset by `product_category` and `sentiment`. Then you can configure and run a SageMaker Clarify processing job to analyze its bias. Which metric values do you expect to see in the bias report?", "_____no_output_____" ] ], [ [ "df_grouped_by = df.groupby(['product_category', 'sentiment'])\ndf_balanced = df_grouped_by.apply(lambda x: x.sample(df_grouped_by.size().min()).reset_index(drop=True))", "_____no_output_____" ], [ "df_balanced", "_____no_output_____" ] ], [ [ "Visualize the distribution of review sentiment in the balanced dataset.", "_____no_output_____" ] ], [ [ "import seaborn as sns\n\nsns.countplot(data=df_balanced, x='sentiment', hue='product_category')\n\nplt.legend(loc='upper right',bbox_to_anchor=(1.3, 1.1))\n", "_____no_output_____" ] ], [ [ "<a name='c1w2-4.'></a>\n# 4. 
Analyze bias on balanced dataset with Amazon SageMaker Clarify\nLet's analyze bias in `sentiment` with respect to the `product_category` facet on your balanced dataset.", "_____no_output_____" ], [ "Save and upload balanced data to S3 bucket.", "_____no_output_____" ] ], [ [ "path_balanced = './womens_clothing_ecommerce_reviews_balanced.csv'\ndf_balanced.to_csv(path_balanced, index=False, header=True)\n\ndata_s3_uri_balanced = sess.upload_data(bucket=bucket, key_prefix='bias/balanced', path=path_balanced)\ndata_s3_uri_balanced", "_____no_output_____" ] ], [ [ "You can review the uploaded CSV file in the S3 bucket and prefix `bias/balanced`.", "_____no_output_____" ] ], [ [ "from IPython.core.display import display, HTML\n\ndisplay(HTML('<b>Review <a target=\"top\" href=\"https://s3.console.aws.amazon.com/s3/home?region={}#\">Amazon S3 bucket</a></b>'.format(region)))", "_____no_output_____" ] ], [ [ "<a name='c1w2-4.1.'></a>\n### 4.1. Configure a `DataConfig`", "_____no_output_____" ], [ "<a name='c1w2-ex-3'></a>\n### Exercise 3\n\nConfigure a `DataConfig` for Clarify to analyze bias on the balanced dataset.\n\n**Instructions**: Pass the S3 object path containing the balanced dataset, the path to store the output (`bias_report_balanced_output_path`) and the target column. You can use exercise 1 as an example.", "_____no_output_____" ] ], [ [ "from sagemaker import clarify\n\nbias_report_balanced_output_path = 's3://{}/bias/generated_bias_report/balanced'.format(bucket)\n\ndata_config_balanced = clarify.DataConfig(\n ### BEGIN SOLUTION - DO NOT delete this comment for grading purposes\n s3_data_input_path=data_s3_uri_balanced, # Replace None\n s3_output_path=bias_report_balanced_output_path, # Replace None\n label='sentiment', # Replace None\n ### END SOLUTION - DO NOT delete this comment for grading purposes\n headers=df_balanced.columns.to_list(),\n dataset_type='text/csv'\n)", "_____no_output_____" ] ], [ [ "<a name='c1w2-4.2.'></a>\n### 4.2. Configure `BiasConfig`\n\n`BiasConfig` for the balanced dataset will have the same settings as before.", "_____no_output_____" ] ], [ [ "bias_config_balanced = clarify.BiasConfig(\n label_values_or_threshold=[1], # desired sentiment\n facet_name='product_category' # sensitive column (facet)\n)", "_____no_output_____" ] ], [ [ "<a name='c1w2-4.3.'></a>\n### 4.3. Configure SageMaker Clarify as a processing job\n\n`SageMakerClarifyProcessor` object will also have the same parameters.", "_____no_output_____" ] ], [ [ "clarify_processor_balanced = clarify.SageMakerClarifyProcessor(role=role,\n instance_count=1,\n instance_type='ml.m5.large',\n sagemaker_session=sess)", "_____no_output_____" ] ], [ [ "<a name='c1w2-4.4.'></a>\n### 4.4. Run the Amazon SageMaker Clarify processing job", "_____no_output_____" ], [ "<a name='c1w2-ex-4'></a>\n### Exercise 4\n\nRun the configured processing job for the balanced dataset.\n\n**Instructions**: Apply the `run_pre_training_bias` method to the configured Clarify processor, passing the input/output data, configuration of sensitive groups with the other job setup parameters. 
You can use exercise 2 as an example.\n", "_____no_output_____" ] ], [ [ "clarify_processor_balanced.run_pre_training_bias(\n ### BEGIN SOLUTION - DO NOT delete this comment for grading purposes\n data_config=data_config_balanced, # Replace None\n data_bias_config=bias_config_balanced, # Replace None\n ### END SOLUTION - DO NOT delete this comment for grading purposes\n methods=[\"CI\", \"DPL\", \"KL\", \"JS\", \"LP\", \"TVD\", \"KS\"],\n wait=False,\n logs=False\n)", "\nJob Name: Clarify-Pretraining-Bias-2021-06-25-16-05-21-917\nInputs: [{'InputName': 'dataset', 'AppManaged': False, 'S3Input': {'S3Uri': 's3://sagemaker-us-east-1-390574811984/bias/balanced/womens_clothing_ecommerce_reviews_balanced.csv', 'LocalPath': '/opt/ml/processing/input/data', 'S3DataType': 'S3Prefix', 'S3InputMode': 'File', 'S3DataDistributionType': 'FullyReplicated', 'S3CompressionType': 'None'}}, {'InputName': 'analysis_config', 'AppManaged': False, 'S3Input': {'S3Uri': 's3://sagemaker-us-east-1-390574811984/bias/generated_bias_report/balanced/analysis_config.json', 'LocalPath': '/opt/ml/processing/input/config', 'S3DataType': 'S3Prefix', 'S3InputMode': 'File', 'S3DataDistributionType': 'FullyReplicated', 'S3CompressionType': 'None'}}]\nOutputs: [{'OutputName': 'analysis_result', 'AppManaged': False, 'S3Output': {'S3Uri': 's3://sagemaker-us-east-1-390574811984/bias/generated_bias_report/balanced', 'LocalPath': '/opt/ml/processing/output', 'S3UploadMode': 'EndOfJob'}}]\n" ], [ "run_balanced_bias_processing_job_name = clarify_processor_balanced.latest_job.job_name\nprint(run_balanced_bias_processing_job_name)", "Clarify-Pretraining-Bias-2021-06-25-16-05-21-917\n" ] ], [ [ "<a name='c1w2-4.5.'></a>\n### 4.5. Run and review the Clarify processing job on the balanced dataset\nReview the results of the run following the links:", "_____no_output_____" ] ], [ [ "from IPython.core.display import display, HTML\n\ndisplay(HTML('<b>Review <a target=\"blank\" href=\"https://console.aws.amazon.com/sagemaker/home?region={}#/processing-jobs/{}\">processing job</a></b>'.format(region, run_balanced_bias_processing_job_name)))\n", "_____no_output_____" ], [ "from IPython.core.display import display, HTML\n\ndisplay(HTML('<b>Review <a target=\"blank\" href=\"https://console.aws.amazon.com/cloudwatch/home?region={}#logStream:group=/aws/sagemaker/ProcessingJobs;prefix={};streamFilter=typeLogStreamPrefix\">CloudWatch logs</a> after about 5 minutes</b>'.format(region, run_balanced_bias_processing_job_name)))\n", "_____no_output_____" ], [ "running_processor = sagemaker.processing.ProcessingJob.from_processing_name(processing_job_name=run_balanced_bias_processing_job_name,\n sagemaker_session=sess)", "_____no_output_____" ] ], [ [ "### _This cell will take approximately 5-10 minutes to run._", "_____no_output_____" ] ], [ [ "%%time\n\nrunning_processor.wait(logs=False)", "...............................................................!CPU times: user 277 ms, sys: 33.8 ms, total: 311 ms\nWall time: 5min 16s\n" ] ], [ [ "<a name='c1w2-4.6.'></a>\n### 4.6. 
Analyze balanced bias report", "_____no_output_____" ], [ "List the files in the output path `bias_report_balanced_output_path`:", "_____no_output_____" ] ], [ [ "!aws s3 ls $bias_report_balanced_output_path/", "2021-06-25 16:10:40 29889 analysis.json\n2021-06-25 16:05:22 346 analysis_config.json\n2021-06-25 16:10:40 394888 report.html\n2021-06-25 16:10:40 129753 report.ipynb\n2021-06-25 16:10:40 141422 report.pdf\n" ] ], [ [ "Download generated bias report from S3 bucket:", "_____no_output_____" ] ], [ [ "!aws s3 cp --recursive $bias_report_balanced_output_path ./generated_bias_report/balanced/", "download: s3://sagemaker-us-east-1-390574811984/bias/generated_bias_report/balanced/analysis_config.json to generated_bias_report/balanced/analysis_config.json\ndownload: s3://sagemaker-us-east-1-390574811984/bias/generated_bias_report/balanced/report.ipynb to generated_bias_report/balanced/report.ipynb\ndownload: s3://sagemaker-us-east-1-390574811984/bias/generated_bias_report/balanced/analysis.json to generated_bias_report/balanced/analysis.json\ndownload: s3://sagemaker-us-east-1-390574811984/bias/generated_bias_report/balanced/report.pdf to generated_bias_report/balanced/report.pdf\ndownload: s3://sagemaker-us-east-1-390574811984/bias/generated_bias_report/balanced/report.html to generated_bias_report/balanced/report.html\n" ] ], [ [ "Review the downloaded bias report (in HTML format):", "_____no_output_____" ] ], [ [ "from IPython.core.display import display, HTML\n\ndisplay(HTML('<b>Review <a target=\"blank\" href=\"./generated_bias_report/balanced/report.html\">balanced bias report</a></b>'))", "_____no_output_____" ] ], [ [ "In this run, you analyzed bias for `sentiment` relative to the `product_category` for the balanced data. Note that the Class Imbalance (CI) metric is equal across all product categories for the target label, `sentiment`. And Difference in Positive Proportions in Labels (DPL) metric values are zero.", "_____no_output_____" ], [ "Upload the notebook into S3 bucket for grading purposes.\n\n**Note**: you may need to click on \"Save\" button before the upload.", "_____no_output_____" ] ], [ [ "!aws s3 cp ./C1_W2_Assignment.ipynb s3://$bucket/C1_W2_Assignment_Learner.ipynb", "upload: ./C1_W2_Assignment.ipynb to s3://sagemaker-us-east-1-390574811984/C1_W2_Assignment_Learner.ipynb\n" ] ], [ [ "Please go to the main lab window and click on `Submit` button (see the `Finish the lab` section of the instructions).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
cb96744f75380bee2ead9f59f99bf17a6615e15c
12,733
ipynb
Jupyter Notebook
Recursivity.ipynb
dguari1/BME3240_2021
b069d6e6336f44dcb8d3ef79bbcf5410cde68dcc
[ "MIT" ]
4
2021-08-28T03:42:39.000Z
2021-11-04T17:14:29.000Z
Recursivity.ipynb
dguari1/BME3240_2021
b069d6e6336f44dcb8d3ef79bbcf5410cde68dcc
[ "MIT" ]
null
null
null
Recursivity.ipynb
dguari1/BME3240_2021
b069d6e6336f44dcb8d3ef79bbcf5410cde68dcc
[ "MIT" ]
null
null
null
27.80131
355
0.523522
[ [ [ "## Recursive Functions\n\nA recursive function is a function that makes calls to itself. It works like the loops we described before, but sometimes it the situation is better to use recursion than loops.\n\nEvery recursive function has two components: a base case and a recursive step. The base case is usually the smallest input and has an easily verifiable solution. This is also the mechanism that stops the function from calling itself forever. The recursive step is the set of all cases where a recursive call, or a function call to itself, is made.\n", "_____no_output_____" ], [ "Consider the example of computing the factorial of a number. For example, the factorial of a number $n$ is given by $f(n) = 1 \\ \\times \\ 2 \\ \\times \\ 3 \\ \\times \\ \\dots \\ \\times \\ (n-1) \\ \\times \\ n$. \n\nThe recursive from of a factorial is \n$$\nf(n) = \\left\\{ \\begin{array}{ll} 1 & if \\ n=1 \\\\\nn \\ \\times \\ f(n-1) & otherwise\\end{array} \\right.\n$$\n\nwhich can be expressed in code as", "_____no_output_____" ] ], [ [ "def factorial_n(n):\n \n assert type(n) == int, 'Input must be an integer'\n \n if n == 1: #this is the base case\n return 1\n else: #this is the recursive step\n return n * factorial_n(n-1)", "_____no_output_____" ], [ "factorial_n(1)", "_____no_output_____" ], [ "factorial_n(2)", "_____no_output_____" ], [ "factorial_n(5)", "_____no_output_____" ], [ "1*2*3*4*5", "_____no_output_____" ], [ "#We can use debbuging tools to understand the code\nfrom pdb import set_trace\n\ndef factorial_n(n):\n \n assert type(n) == int, 'Input must be an integer'\n \n set_trace()\n if n == 1: #this is the base case\n return 1\n else: #this is the recursive step\n return n * factorial_n(n-1)", "_____no_output_____" ], [ "factorial_n(1)", "> \u001b[0;32m<ipython-input-1-32ed92fcd0a4>\u001b[0m(9)\u001b[0;36mfactorial_n\u001b[0;34m()\u001b[0m\n\u001b[0;32m 7 \u001b[0;31m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 8 \u001b[0;31m \u001b[0mset_trace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m----> 9 \u001b[0;31m \u001b[0;32mif\u001b[0m \u001b[0mn\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m#this is the base case\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 10 \u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 11 \u001b[0;31m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m#this is the recursive step\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\n" ], [ "factorial_n(3)", "> \u001b[0;32m<ipython-input-1-32ed92fcd0a4>\u001b[0m(9)\u001b[0;36mfactorial_n\u001b[0;34m()\u001b[0m\n\u001b[0;32m 7 \u001b[0;31m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 8 \u001b[0;31m \u001b[0mset_trace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m----> 9 \u001b[0;31m \u001b[0;32mif\u001b[0m \u001b[0mn\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m#this is the base case\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 10 \u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 11 \u001b[0;31m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m#this is the recursive 
step\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\n" ] ], [ [ "## mini challenge 1\n\nFibonacci numbers were originally developed to model the idealized population growth of rabbits. Since then, they have been found to be significant in any naturally occurring phenomena.\n\nUse recursivity to compute the Fibonacci numbers.\n\nThe recursive form of the Fibonacci numbers.\n\n$$\nf(n) = \\left\\{ \\begin{array}{ll} 1 & if \\ n=1 \\\\\n1 & if \\ n=2 \\\\\nf(n-1) + f(n-2) & otherwise\\end{array} \\right.\n$$\n \n ", "_____no_output_____" ] ], [ [ "#examples\n\nfibonacci(1) = 1\nfibonacci(2) = 1\nfibonacci(3) = 2\nfibonacci(4) = 3\nfibonacci(5) = 5\nfibonacci(35) = 9227465", "_____no_output_____" ], [ "def fibonacci(n) :\n \n assert type(n) == int, 'Input must be an integer'\n \n if n == 1:\n return 1\n if n == 2:\n return 1\n else: \n return fibonacci(n-1) + fibonacci(n-2)", "_____no_output_____" ] ], [ [ "## mini challenge 2\n\nAn integer number $n$ is said to be **prime** if is divisible only by itself and one. If $n$ is divisible by any other number between $1$ and $n$, the the number is not prime. \n\nWrite a recursive function to verify if a number n is prime. ", "_____no_output_____" ] ], [ [ "def prime(N, div = 2):\n \n if N == 1:\n return True\n else:\n if N == 2:\n return True\n elif (N%div) == 0 :\n return False\n else:\n prime(N,div+1)\n \n return True", "_____no_output_____" ], [ "prime(7)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb967fb6e97077850f86bf78836fe24766ea12ec
12,684
ipynb
Jupyter Notebook
tests/coffea_bench_1.ipynb
oshadura/coffea-bench
97578fef5b17d3fed38d5be36dfaa2c593d95037
[ "BSD-3-Clause" ]
null
null
null
tests/coffea_bench_1.ipynb
oshadura/coffea-bench
97578fef5b17d3fed38d5be36dfaa2c593d95037
[ "BSD-3-Clause" ]
null
null
null
tests/coffea_bench_1.ipynb
oshadura/coffea-bench
97578fef5b17d3fed38d5be36dfaa2c593d95037
[ "BSD-3-Clause" ]
1
2021-07-05T16:19:05.000Z
2021-07-05T16:19:05.000Z
45.625899
242
0.572138
[ [ [ "!pip install pytest ipytest pytest-csv pytest-benchmark", "/bin/bash: /opt/conda/lib/libtinfo.so.6: no version information available (required by /bin/bash)\nRequirement already satisfied: pytest in /opt/conda/lib/python3.8/site-packages (6.2.4)\nRequirement already satisfied: ipytest in /opt/conda/lib/python3.8/site-packages (0.10.0)\nRequirement already satisfied: pytest-csv in /opt/conda/lib/python3.8/site-packages (3.0.0)\nRequirement already satisfied: pytest-benchmark in /opt/conda/lib/python3.8/site-packages (3.4.1)\nRequirement already satisfied: packaging in /opt/conda/lib/python3.8/site-packages (from pytest) (21.0)\nRequirement already satisfied: attrs>=19.2.0 in /opt/conda/lib/python3.8/site-packages (from pytest) (21.2.0)\nRequirement already satisfied: pluggy<1.0.0a1,>=0.12 in /opt/conda/lib/python3.8/site-packages (from pytest) (0.13.1)\nRequirement already satisfied: iniconfig in /opt/conda/lib/python3.8/site-packages (from pytest) (1.1.1)\nRequirement already satisfied: toml in /opt/conda/lib/python3.8/site-packages (from pytest) (0.10.2)\nRequirement already satisfied: py>=1.8.2 in /opt/conda/lib/python3.8/site-packages (from pytest) (1.10.0)\nRequirement already satisfied: ipython in /opt/conda/lib/python3.8/site-packages (from ipytest) (7.25.0)\nRequirement already satisfied: six>=1.0.0 in /opt/conda/lib/python3.8/site-packages (from pytest-csv) (1.15.0)\nRequirement already satisfied: py-cpuinfo in /opt/conda/lib/python3.8/site-packages (from pytest-benchmark) (8.0.0)\nRequirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.8/site-packages (from packaging->pytest) (2.4.7)\nRequirement already satisfied: setuptools>=18.5 in /opt/conda/lib/python3.8/site-packages (from ipython->ipytest) (57.1.0)\nRequirement already satisfied: pexpect>4.3; sys_platform != \"win32\" in /opt/conda/lib/python3.8/site-packages (from ipython->ipytest) (4.8.0)\nRequirement already satisfied: jedi>=0.16 in /opt/conda/lib/python3.8/site-packages (from ipython->ipytest) (0.18.0)\nRequirement already satisfied: decorator in /opt/conda/lib/python3.8/site-packages (from ipython->ipytest) (5.0.9)\nRequirement already satisfied: backcall in /opt/conda/lib/python3.8/site-packages (from ipython->ipytest) (0.2.0)\nRequirement already satisfied: pickleshare in /opt/conda/lib/python3.8/site-packages (from ipython->ipytest) (0.7.5)\nRequirement already satisfied: matplotlib-inline in /opt/conda/lib/python3.8/site-packages (from ipython->ipytest) (0.1.2)\nRequirement already satisfied: prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0 in /opt/conda/lib/python3.8/site-packages (from ipython->ipytest) (3.0.19)\nRequirement already satisfied: pygments in /opt/conda/lib/python3.8/site-packages (from ipython->ipytest) (2.9.0)\nRequirement already satisfied: traitlets>=4.2 in /opt/conda/lib/python3.8/site-packages (from ipython->ipytest) (5.0.5)\nRequirement already satisfied: ptyprocess>=0.5 in /opt/conda/lib/python3.8/site-packages (from pexpect>4.3; sys_platform != \"win32\"->ipython->ipytest) (0.7.0)\nRequirement already satisfied: parso<0.9.0,>=0.8.0 in /opt/conda/lib/python3.8/site-packages (from jedi>=0.16->ipython->ipytest) (0.8.2)\nRequirement already satisfied: wcwidth in /opt/conda/lib/python3.8/site-packages (from prompt-toolkit!=3.0.0,!=3.0.1,<3.1.0,>=2.0.0->ipython->ipytest) (0.2.5)\nRequirement already satisfied: ipython-genutils in /opt/conda/lib/python3.8/site-packages (from traitlets>=4.2->ipython->ipytest) (0.2.0)\n" ], [ "import numpy as np\nimport pytest\n%matplotlib 
inline\nfrom coffea import hist\nimport coffea.processor as processor\nimport awkward as ak\n\nfrom dask.distributed import Client, LocalCluster\nimport time\nimport os\nimport ipytest\n\nipytest.config(rewrite_asserts=True, magics=True)", "_____no_output_____" ], [ "fileset = {'SingleMu' : [\"root://eospublic.cern.ch//eos/root-eos/benchmark/Run2012B_SingleMu.root\"]}\n\nfrom dask.distributed import Client, Worker, WorkerPlugin\nfrom typing import List\nimport os\n\nclass DependencyInstaller(WorkerPlugin):\n def __init__(self, dependencies: List[str]):\n self._depencendies = \" \".join(f\"'{dep}'\" for dep in dependencies)\n\n def setup(self, worker: Worker):\n os.system(f\"pip install {self._depencendies}\")\n\n\ndependency_installer = DependencyInstaller([\n \"pytest-benchmark\",\n])\n\nclient = Client(\"tls://localhost:8786\")\n#Uncomment only if we would like to compare the same number of workers\n#cluster = CoffeaCasaCluster()\n#cluster.scale(10)\n#client = Client(cluster)\nclient.register_worker_plugin(dependency_installer)", "_____no_output_____" ], [ "# This program plots an event-level variable (in this case, MET, but switching it is as easy as a dict-key change). It also demonstrates an easy use of the book-keeping cutflow tool, to keep track of the number of events processed.\n\n# The processor class bundles our data analysis together while giving us some helpful tools. It also leaves looping and chunks to the framework instead of us.\nclass Processor(processor.ProcessorABC):\n def __init__(self):\n # Bins and categories for the histogram are defined here. For format, see https://coffeateam.github.io/coffea/stubs/coffea.hist.hist_tools.Hist.html && https://coffeateam.github.io/coffea/stubs/coffea.hist.hist_tools.Bin.html\n dataset_axis = hist.Cat(\"dataset\", \"\")\n MET_axis = hist.Bin(\"MET\", \"MET [GeV]\", 50, 0, 100)\n \n # The accumulator keeps our data chunks together for histogramming. It also gives us cutflow, which can be used to keep track of data.\n self._accumulator = processor.dict_accumulator({\n 'MET': hist.Hist(\"Counts\", dataset_axis, MET_axis),\n 'cutflow': processor.defaultdict_accumulator(int)\n })\n \n @property\n def accumulator(self):\n return self._accumulator\n \n def process(self, events):\n output = self.accumulator.identity()\n \n # This is where we do our actual analysis. The dataset has columns similar to the TTree's; events.columns can tell you them, or events.[object].columns for deeper depth.\n dataset = events.metadata[\"dataset\"]\n MET = events.MET.pt\n \n # We can define a new key for cutflow (in this case 'all events'). Then we can put values into it. We need += because it's per-chunk (demonstrated below)\n output['cutflow']['all events'] += ak.size(MET)\n output['cutflow']['number of chunks'] += 1\n \n # This fills our histogram once our data is collected. 
The hist key ('MET=') will be defined in the bin in __init__.\n        output['MET'].fill(dataset=dataset, MET=MET)\n        return output\n\n    def postprocess(self, accumulator):\n        return accumulator", "_____no_output_____" ], [ "# Function we want to benchmark; chunk_size changes depending on the iteration of the benchmark run.\ndef coffea_processor_adlexample1(chunk_size):\n    output = processor.run_uproot_job(fileset,\n                                      treename = 'Events',\n                                      processor_instance = Processor(),\n                                      executor = processor.dask_executor,\n                                      chunksize = chunk_size,\n                                      executor_args = {'schema': processor.NanoAODSchema,\n                                                       'client': client,\n                                                       'savemetrics': True}\n                                     )\n    return output", "_____no_output_____" ], [ "@pytest.mark.parametrize(\"chunk_size\", range(100000, 200000, 100000))\ndef test_coffea_processor_adlexample1(benchmark, chunk_size):\n    output = benchmark(coffea_processor_adlexample1, chunk_size)\n    # Custom metrics available with `savemetrics` option\n    benchmark.extra_info['events_s_thread'] = output[1]['entries'] / output[1]['processtime']", "_____no_output_____" ], [ "ipytest.run(\"-qq\")", ". [100%]\n\n---------------------------------------------------------- benchmark: 1 tests ---------------------------------------------------------\nName (time in s) Min Max Mean StdDev Median IQR Outliers OPS Rounds Iterations\n---------------------------------------------------------------------------------------------------------------------------------------\ntest_coffea_processor_adlexample1[100000] 55.1104 69.7225 61.5893 7.0034 57.3705 12.3335 2;0 0.0162 5 1\n---------------------------------------------------------------------------------------------------------------------------------------\n\nLegend:\n Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.\n OPS: Operations Per Second, computed as 1 / Mean\n" ], [ "# benchmark1.json - a sample of the saved benchmark summary (kept as comments so the cell stays runnable):\n# {\n#     \"min\": 55.1104,\n#     \"max\": 69.7225,\n#     \"mean\": 61.5893\n# }", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb968f87ab2b018077224c28d72731bf3f1cd182
49,989
ipynb
Jupyter Notebook
Coronavirus Data- Extract & Transform/world_o_meters_data_clean.ipynb
Grace-Bijun-Li/ETL_CoronavirusData
dadcfa1fc7cda1180fad5ea033a49dca3aef5a91
[ "Apache-2.0" ]
null
null
null
Coronavirus Data- Extract & Transform/world_o_meters_data_clean.ipynb
Grace-Bijun-Li/ETL_CoronavirusData
dadcfa1fc7cda1180fad5ea033a49dca3aef5a91
[ "Apache-2.0" ]
null
null
null
Coronavirus Data- Extract & Transform/world_o_meters_data_clean.ipynb
Grace-Bijun-Li/ETL_CoronavirusData
dadcfa1fc7cda1180fad5ea033a49dca3aef5a91
[ "Apache-2.0" ]
1
2021-03-27T07:44:48.000Z
2021-03-27T07:44:48.000Z
36.514974
470
0.327692
[ [ [ "import pandas as pd\nimport datetime as dt", "_____no_output_____" ], [ "clean_df = pd.read_csv('coronavirus_states_raw.csv',index_col=False).drop([\"#\",\"Source\",\"Projections\"],axis=1)\nclean_df", "_____no_output_____" ], [ "# Drop rows which are totals or non-state/territories e.g. Grand Princess Ship\nclean_df.drop(clean_df[(clean_df[\"USAState\"]==\"Total:\") | (clean_df[\"USAState\"]==\"USA Total\") | (clean_df[\"USAState\"]==\"Federal Prisons\") | (clean_df[\"USAState\"]==\"Navajo Nation\") | (clean_df[\"USAState\"]==\"Grand Princess Ship\")| (clean_df[\"USAState\"]==\"Wuhan Repatriated\")| (clean_df[\"USAState\"]==\"Diamond Princess Ship\")| (clean_df[\"USAState\"]==\"Veteran Affairs\")| (clean_df[\"USAState\"]==\"US Military\")].index, inplace=True)\nclean_df.rename(columns = {\"USAState\": \"state_name\",\"Population\": \"state_population\"},inplace=True)", "_____no_output_____" ], [ "# Drop columns which could be calculated in the database\ncols=[7,8,10]\nclean_df.drop(clean_df.columns[cols],axis=1,inplace=True)", "_____no_output_____" ], [ "# Convert all data types to integers/floats by removing commas, special characters etc. \nclean_new_cases = pd.concat([clean_df[\"NewCases\"].str.split()\n .str[0]\n .str.replace(',','').astype(float) for col in clean_df], axis=1)", "_____no_output_____" ], [ "# replace the old columns with the new\nclean_df[\"NewCases\"] = clean_new_cases", "_____no_output_____" ], [ "# Add a 'date' column to the data frame to show that the covid data is as of 2021-03-20\nclean_df['date'] = pd.Timestamp('2021-03-20')", "_____no_output_____" ], [ "# check the types\nclean_df.dtypes", "_____no_output_____" ], [ "# Merge the data frame with states_id_df to add the 'state_id' column\nstates_id_df = pd.read_csv('States.csv',index_col=False)\nclean_df = clean_df.merge(states_id_df, on='state_name')\n\n# Add 'pop_earnings_id' column to the data frame\nclean_df['covid_id'] = list(range(1,57))\nclean_df = clean_df[['covid_id','state_id','TotalCases','NewCases','TotalDeaths','NewDeaths','TotalRecovered','ActiveCases','TotalTests','state_population','date']]\nclean_df", "_____no_output_____" ], [ "# Export the clean data to a csv\nclean_df.to_csv('covid_data.csv',index=False)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb96907f2b300b33e20edc6789abf6f6d1615495
168,574
ipynb
Jupyter Notebook
notebooks/Simulate-twobody-data.ipynb
katiechambe/yellowCard
aa5100d25d11bbfba79951299b7a59c5dfbd5098
[ "MIT" ]
3
2021-07-21T15:26:01.000Z
2021-11-26T03:30:11.000Z
notebooks/Simulate-twobody-data.ipynb
katiechambe/yellowCard
aa5100d25d11bbfba79951299b7a59c5dfbd5098
[ "MIT" ]
6
2022-02-15T18:24:28.000Z
2022-03-31T17:44:12.000Z
notebooks/Simulate-twobody-data.ipynb
katiechambe/yellowCard
aa5100d25d11bbfba79951299b7a59c5dfbd5098
[ "MIT" ]
null
null
null
157.545794
54,340
0.887391
[ [ [ "from astropy.constants import G\nimport astropy.coordinates as coord\nimport astropy.table as at\nimport astropy.units as u\nfrom astropy.time import Time\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport numpy as np\n\nfrom gala.units import galactic, UnitSystem\nfrom twobody import TwoBodyKeplerElements, KeplerOrbit\nfrom twobody.anomaly import mean_anomaly_from_eccentric_anomaly\n\nusys = UnitSystem(1e12*u.Msun, u.kpc, u.Gyr, u.radian)", "_____no_output_____" ], [ "true_m31_sky_c = coord.SkyCoord(\n 10.64628564*u.deg,\n 41.23456631*u.deg\n)", "_____no_output_____" ] ], [ [ "## Simulate some Keplerian data", "_____no_output_____" ] ], [ [ "M1 = 1.4e12 * u.Msun\nM2 = 2.4e12 * u.Msun\nM = M1 + M2\na = 511 * u.kpc\neta = 4.3 * u.rad\ne = 0.981", "_____no_output_____" ], [ "mean_anomaly = mean_anomaly_from_eccentric_anomaly(eta, e)", "_____no_output_____" ], [ "elem = TwoBodyKeplerElements(\n a=a, m1=M1, m2=M2, e=e, \n omega=0*u.rad, i=90*u.deg,\n M0=0.*u.rad, t0=0. * u.Gyr,\n units=galactic\n)", "_____no_output_____" ], [ "orb1 = KeplerOrbit(elem.primary)\norb2 = KeplerOrbit(elem.secondary)\nRomega = coord.matrix_utilities.rotation_matrix(elem.secondary.omega, 'z')", "_____no_output_____" ], [ "xyz1 = orb1.orbital_plane(0. * u.Gyr)\nxyz2 = orb2.orbital_plane(0. * u.Gyr).transform(Romega)\nxyz1, xyz2", "_____no_output_____" ], [ "time = (elem.P * (mean_anomaly / (2*np.pi*u.rad))).to(u.Gyr)\nxyz1 = orb1.orbital_plane(time)\nxyz2 = orb2.orbital_plane(time).transform(Romega)\n\n(xyz1.without_differentials() \n - xyz2.without_differentials()).norm().to(u.kpc)", "_____no_output_____" ], [ "a * (1 - e * np.cos(eta))", "_____no_output_____" ], [ "times = np.linspace(0, 1, 1024) * elem.P\nxyzs1 = orb1.orbital_plane(times)\nxyzs2 = orb2.orbital_plane(times).transform(Romega)\n\nrs = (xyzs1.without_differentials() \n - xyzs2.without_differentials()).norm().to(u.kpc)", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, 1, figsize=(8, 8))\nax.plot(xyzs1.x, xyzs1.y, marker='')\nax.plot(xyzs2.x, xyzs2.y, marker='')\n\nax.plot(xyz1.x, xyz1.y, zorder=100, ms=10, color='tab:orange')\nax.plot(xyz2.x, xyz2.y, zorder=100, ms=10, color='tab:red')\n\nax.set_xlim(-2*a.value, 2*a.value)\nax.set_ylim(-2*a.value, 2*a.value)", "_____no_output_____" ], [ "plt.plot(times.value, rs.value)", "_____no_output_____" ], [ "dxs = xyzs1.without_differentials() - xyzs2.without_differentials()\ndvs = xyzs1.differentials['s'] - xyzs2.differentials['s']\n\ndx_cyl = dxs.represent_as(coord.CylindricalRepresentation)\ndv_cyl = dvs.represent_as(coord.CylindricalDifferential, dxs)\n\nvrads = dv_cyl.d_rho\nvtans = (dx_cyl.rho * dv_cyl.d_phi).to(u.km/u.s, u.dimensionless_angles())", "_____no_output_____" ], [ "etas = np.linspace(0, 2*np.pi, 1024) * u.rad\nmean_anoms = mean_anomaly_from_eccentric_anomaly(etas, e)\neq_times = elem.P * (mean_anoms / (2*np.pi*u.rad))\neq_vrad = np.sqrt(G * M / a) * (e * np.sin(etas)) / (1 - e * np.cos(etas))\neq_vtan = np.sqrt(G * M / a) * np.sqrt(1 - e**2) / (1 - e * np.cos(etas))", "_____no_output_____" ], [ "plt.plot(times.value, vrads.to_value(u.km/u.s))\nplt.plot(times.value, vtans.to_value(u.km/u.s))\n\nplt.plot(eq_times.value, eq_vrad.to_value(u.km/u.s))\nplt.plot(eq_times.value, eq_vtan.to_value(u.km/u.s))\n\nplt.ylim(-500, 500)", "_____no_output_____" ] ], [ [ "### Transform to ICRS", "_____no_output_____" ] ], [ [ "dx = xyz1.without_differentials() - xyz2.without_differentials()\ndv = xyz1.differentials['s'] - xyz2.differentials['s']\n\ndx_cyl = 
dx.represent_as(coord.CylindricalRepresentation)\ndv_cyl = dv.represent_as(coord.CylindricalDifferential, dx)\n\nvrad = dv_cyl.d_rho.to(u.km/u.s)\nvtan = (dx_cyl.rho * dv_cyl.d_phi).to(u.km/u.s, u.dimensionless_angles())\n\nr = dx.norm()\n\nsun_galcen_dist = coord.Galactocentric().galcen_distance\ngamma = coord.Galactocentric().galcen_coord.separation(true_m31_sky_c)\nsun_m31_dist = (sun_galcen_dist * np.cos(gamma)) + np.sqrt(\n r**2 - sun_galcen_dist**2 * np.sin(gamma)**2\n)\n\nr, sun_m31_dist", "_____no_output_____" ], [ "vscale = np.sqrt(G * M / a)\nprint(vscale.decompose(usys).value, \n vrad.decompose(usys).value, \n vtan.decompose(usys).value)", "182.90067763093654 -117.99130053868998 25.469820155258876\n" ], [ "alpha = 32.4 * u.deg\ngalcen_pos = coord.SkyCoord(true_m31_sky_c.ra, \n true_m31_sky_c.dec, \n distance=sun_m31_dist)\ngalcen_pos = galcen_pos.transform_to(coord.Galactocentric())\n# galcen_pos = coord.CartesianRepresentation(\n# -375 * u.kpc, 605 * u.kpc, -279 * u.kpc)\n# galcen_pos = coord.Galactocentric(galcen_pos / galcen_pos.norm() * r)\n\ngalcen_sph = galcen_pos.represent_as('spherical')\ngc_Rz = coord.matrix_utilities.rotation_matrix(-galcen_sph.lon, 'z')\ngc_Ry = coord.matrix_utilities.rotation_matrix(galcen_sph.lat, 'y')\ngc_Rx = coord.matrix_utilities.rotation_matrix(alpha, 'x')\nR = gc_Rz @ gc_Ry @ gc_Rx", "_____no_output_____" ], [ "fake_X = R @ [r.value, 0, 0] * r.unit\nfake_V = R @ [vrad.to_value(u.km/u.s), vtan.to_value(u.km/u.s), 0.] * u.km/u.s\nfake_galcen = coord.Galactocentric(*fake_X, *fake_V)", "_____no_output_____" ], [ "fake_icrs = fake_galcen.transform_to(coord.ICRS())", "_____no_output_____" ], [ "fake_icrs", "_____no_output_____" ] ], [ [ "## Check roundtripping", "_____no_output_____" ] ], [ [ "def tt_sph_to_xyz(r, lon, lat):\n return [\n r * np.cos(lon) * np.cos(lat),\n r * np.sin(lon) * np.cos(lat),\n r * np.sin(lat)\n ]\n\ndef tt_cross(a, b):\n return np.array([\n a[1]*b[2] - a[2]*b[1],\n a[2]*b[0] - a[0]*b[2],\n a[0]*b[1] - a[1]*b[0]\n ])\n\ndef tt_rotation_matrix(angle_rad, axis):\n s = np.sin(angle_rad)\n c = np.cos(angle_rad)\n \n if axis == 'x':\n R = np.array([\n 1., 0, 0,\n 0, c, s,\n 0, -s, c\n ])\n \n elif axis == 'y':\n R = np.array([\n c, 0, -s,\n 0, 1., 0,\n s, 0, c\n ])\n \n elif axis == 'z':\n R = np.array([\n c, s, 0,\n -s, c, 0,\n 0, 0, 1.\n ])\n \n else:\n raise ValueError('bork')\n \n return np.reshape(R, (3, 3))", "_____no_output_____" ], [ "def ugh(m31_ra_rad, m31_dec_rad, m31_distance_kpc, r, vrad, vtan):\n galcen_frame = coord.Galactocentric()\n \n # tangent bases: ra, dec, r\n M = np.array([\n [-np.sin(m31_ra_rad), np.cos(m31_ra_rad), 0.],\n [-np.sin(m31_dec_rad)*np.cos(m31_ra_rad), -np.sin(m31_dec_rad)*np.sin(m31_ra_rad), np.cos(m31_dec_rad)],\n [np.cos(m31_dec_rad)*np.cos(m31_ra_rad), np.cos(m31_dec_rad)*np.sin(m31_ra_rad), np.sin(m31_dec_rad)]\n ])\n\n # Matrix to go from ICRS to Galactocentric\n R_I2G, offset_I2G = coord.builtin_frames.galactocentric.get_matrix_vectors(\n galcen_frame, inverse=False)\n dxyz_I2G = offset_I2G.xyz.to_value(usys['length'])\n dvxyz_I2G = offset_I2G.differentials['s'].d_xyz.to_value(usys['velocity'])\n\n # Matrix to go from Galactocentric to ICRS\n R_G2I, offset_G2I = coord.builtin_frames.galactocentric.get_matrix_vectors(\n galcen_frame, inverse=True)\n dxyz_G2I = offset_G2I.xyz.to_value(usys['length'])\n dvxyz_G2I = offset_G2I.differentials['s'].d_xyz.to_value(usys['velocity'])\n \n m31_icrs_xyz = tt_sph_to_xyz(m31_distance_kpc, \n m31_ra_rad, m31_dec_rad)\n m31_galcen_xyz = np.dot(R_I2G, 
m31_icrs_xyz) + dxyz_I2G\n    m31_galcen_lon = np.arctan2(m31_galcen_xyz[1], m31_galcen_xyz[0])\n    m31_galcen_lat = np.arcsin(m31_galcen_xyz[2] / r)\n\n    xhat = m31_galcen_xyz / r\n\n    Rz = tt_rotation_matrix(-m31_galcen_lon, 'z')\n    print(gc_Ry)\n    Ry = tt_rotation_matrix(m31_galcen_lat, 'y')\n    print(Ry)\n    Rx = tt_rotation_matrix(alpha, 'x')\n    yhat = np.dot(np.dot(Rz, np.dot(Ry, Rx)), [0, 1, 0.])\n    zhat = tt_cross(xhat, yhat)\n    R_LGtoG = np.stack((xhat, yhat, zhat), axis=1)\n    print(R_LGtoG - R)\n\n    x_LG = np.array([r, 0., 0.])\n    v_LG = np.array([vrad, vtan, 0.])\n\n    x_I = np.dot(R_G2I, np.dot(R_LGtoG, x_LG)) + dxyz_G2I\n    v_I = np.dot(R_G2I, np.dot(R_LGtoG, v_LG)) + dvxyz_G2I\n    v_I_tangent_plane = np.dot(M, v_I) # alpha, delta, radial\n    \n    # build a Galactocentric SkyCoord from the rotation R defined earlier, for cross-checking\n    pos_rep = coord.CartesianRepresentation(*((R @ x_LG) * usys['length']))\n    vel_dif = coord.CartesianDifferential(*((R @ v_LG) * usys['velocity']))\n    galcen_sc = coord.SkyCoord(pos_rep.with_differentials(vel_dif), frame=coord.Galactocentric())\n    \n    return x_I, v_I, galcen_sc.transform_to(coord.ICRS()).velocity", "_____no_output_____" ], [ "ugh(fake_icrs.ra.radian, fake_icrs.dec.radian, fake_icrs.distance.to_value(u.kpc), \n    r.decompose(usys).value, vrad.decompose(usys).value, vtan.decompose(usys).value)", "[[ 0.93103307  0.          0.36493481]\n [ 0.          1.          0.        ]\n [-0.36493481  0.          0.93103307]]\n[[ 0.93103307  0.          0.36493481]\n [ 0.          1.          0.        ]\n [-0.36493481  0.          0.93103307]]\n[[-5.55111512e-17 -1.11022302e-16  0.00000000e+00]\n [ 1.11022302e-16  1.11022302e-16 -5.89805982e-17]\n [ 1.11022302e-16  0.00000000e+00  1.11022302e-16]]\n" ], [ "fake_icrs.velocity", "_____no_output_____" ], [ "# Same computation as ugh() above, without the debug prints. The inputs are made explicit\n# arguments and the tangent-basis matrix M is rebuilt locally (the original version referred\n# to names that were never defined in this notebook).\ndef ugh2(m31_ra_rad, m31_dec_rad, m31_distance_kpc, r, vrad, vtan):\n    galcen_frame = coord.Galactocentric()\n\n    # tangent bases: ra, dec, r\n    M = np.array([\n        [-np.sin(m31_ra_rad), np.cos(m31_ra_rad), 0.],\n        [-np.sin(m31_dec_rad)*np.cos(m31_ra_rad), -np.sin(m31_dec_rad)*np.sin(m31_ra_rad), np.cos(m31_dec_rad)],\n        [np.cos(m31_dec_rad)*np.cos(m31_ra_rad), np.cos(m31_dec_rad)*np.sin(m31_ra_rad), np.sin(m31_dec_rad)]\n    ])\n\n    # Matrix to go from ICRS to Galactocentric\n    R_I2G, offset_I2G = coord.builtin_frames.galactocentric.get_matrix_vectors(\n        galcen_frame, inverse=False)\n    dxyz_I2G = offset_I2G.xyz.to_value(usys['length'])\n    dvxyz_I2G = offset_I2G.differentials['s'].d_xyz.to_value(usys['velocity'])\n\n    # Matrix to go from Galactocentric to ICRS\n    R_G2I, offset_G2I = coord.builtin_frames.galactocentric.get_matrix_vectors(\n        galcen_frame, inverse=True)\n    dxyz_G2I = offset_G2I.xyz.to_value(usys['length'])\n    dvxyz_G2I = offset_G2I.differentials['s'].d_xyz.to_value(usys['velocity'])\n    \n    m31_icrs_xyz = tt_sph_to_xyz(m31_distance_kpc, \n                                 m31_ra_rad, m31_dec_rad)\n    m31_galcen_xyz = np.dot(R_I2G, m31_icrs_xyz) + dxyz_I2G\n    m31_galcen_lon = np.arctan2(m31_galcen_xyz[1], m31_galcen_xyz[0])\n    m31_galcen_lat = np.arcsin(m31_galcen_xyz[2] / r)\n\n    xhat = m31_galcen_xyz / r\n\n    Rz = tt_rotation_matrix(-m31_galcen_lon, 'z')\n    Ry = tt_rotation_matrix(m31_galcen_lat, 'y')\n    Rx = tt_rotation_matrix(alpha, 'x')\n    yhat = np.dot(np.dot(Rz, np.dot(Ry, Rx)), [0, 1, 0.])\n    zhat = tt_cross(xhat, yhat)\n    R_LGtoG = np.stack((xhat, yhat, zhat), axis=1)\n\n    x_LG = np.array([r, 0., 0.])\n    v_LG = np.array([vrad, vtan, 0.])\n\n    x_I = np.dot(R_G2I, np.dot(R_LGtoG, x_LG)) + dxyz_G2I\n    v_I = np.dot(R_G2I, np.dot(R_LGtoG, v_LG)) + dvxyz_G2I\n    v_I_tangent_plane = np.dot(M, v_I) # alpha, delta, radial\n    \n    pos_rep = coord.CartesianRepresentation(*((R @ x_LG) * usys['length']))\n    vel_dif = coord.CartesianDifferential(*((R @ v_LG) * usys['velocity']))\n    galcen_sc = coord.SkyCoord(pos_rep.with_differentials(vel_dif), frame=coord.Galactocentric())\n    \n    return x_I, v_I, galcen_sc.transform_to(coord.ICRS()).velocity", "_____no_output_____" ] ] ], [ [ "## Write data to files:", "_____no_output_____" ] ], [ [ "rng = np.random.default_rng(seed=42)\n\ndist_err = 11. 
* u.kpc\npmra_err = 3 * u.microarcsecond / u.yr\npmdec_err = 4 * u.microarcsecond / u.yr\nrv_err = 2. * u.km/u.s\nt_err = 0.11 * u.Gyr\n\ntbl = {}\ntbl['ra'] = u.Quantity(fake_icrs.ra)\ntbl['dec'] = u.Quantity(fake_icrs.dec)\n\ntbl['distance'] = rng.normal(fake_icrs.distance.to_value(u.kpc),\n dist_err.to_value(u.kpc)) * u.kpc\ntbl['distance_err'] = dist_err\n\ntbl['pm_ra_cosdec'] = rng.normal(\n fake_icrs.pm_ra_cosdec.to_value(pmra_err.unit),\n pmra_err.value) * pmra_err.unit\ntbl['pm_ra_cosdec_err'] = pmra_err\n\ntbl['pm_dec'] = rng.normal(\n fake_icrs.pm_dec.to_value(pmdec_err.unit),\n pmdec_err.value) * pmdec_err.unit\ntbl['pm_dec_err'] = pmdec_err\n\ntbl['radial_velocity'] = rng.normal(\n fake_icrs.radial_velocity.to_value(rv_err.unit),\n rv_err.value) * rv_err.unit\ntbl['radial_velocity_err'] = rv_err\n\ntbl['tperi'] = rng.normal(\n time.to_value(t_err.unit),\n t_err.value) * t_err.unit\ntbl['tperi_err'] = t_err\n\nt = at.QTable({k: [] * tbl[k].unit for k in tbl})\nt.add_row(tbl)\n\nt.meta['title'] = 'Simulated Two-body'\n\nt.write('../datasets/apw-simulated.ecsv', overwrite=True)", "_____no_output_____" ], [ "rng = np.random.default_rng(seed=42)\n\ndist_err = 1. * u.kpc\npmra_err = 0.1 * u.microarcsecond / u.yr\npmdec_err = 0.1 * u.microarcsecond / u.yr\nrv_err = 0.1 * u.km/u.s\nt_err = 0.02 * u.Gyr\n\ntbl = {}\ntbl['ra'] = u.Quantity(fake_icrs.ra)\ntbl['dec'] = u.Quantity(fake_icrs.dec)\n\ntbl['distance'] = rng.normal(fake_icrs.distance.to_value(u.kpc),\n dist_err.to_value(u.kpc)) * u.kpc\ntbl['distance_err'] = dist_err\n\ntbl['pm_ra_cosdec'] = rng.normal(\n fake_icrs.pm_ra_cosdec.to_value(pmra_err.unit),\n pmra_err.value) * pmra_err.unit\ntbl['pm_ra_cosdec_err'] = pmra_err\n\ntbl['pm_dec'] = rng.normal(\n fake_icrs.pm_dec.to_value(pmdec_err.unit),\n pmdec_err.value) * pmdec_err.unit\ntbl['pm_dec_err'] = pmdec_err\n\ntbl['radial_velocity'] = rng.normal(\n fake_icrs.radial_velocity.to_value(rv_err.unit),\n rv_err.value) * rv_err.unit\ntbl['radial_velocity_err'] = rv_err\n\ntbl['tperi'] = rng.normal(\n time.to_value(t_err.unit),\n t_err.value) * t_err.unit\ntbl['tperi_err'] = t_err\n\nt = at.QTable({k: [] * tbl[k].unit for k in tbl})\nt.add_row(tbl)\n\nt.meta['title'] = 'Simulated Two-body - precise'\n\nt.write('../datasets/apw-simulated-precise.ecsv', overwrite=True)", "_____no_output_____" ], [ "rng = np.random.default_rng(42)\n\ntbl = {}\n\nvrad_err = 1 * u.km/u.s\nvtan_err = 1 * u.km/u.s\nt_err = 0.1 * u.Gyr\nr_err = 1 * u.kpc\n\ntbl['vrad'] = rng.normal(\n vrad.to_value(vrad_err.unit),\n vrad_err.value) * vrad_err.unit\ntbl['vrad_err'] = vrad_err\n\ntbl['vtan'] = rng.normal(\n vtan.to_value(vtan_err.unit),\n vtan_err.value) * vtan_err.unit\ntbl['vtan_err'] = vtan_err\n\ntbl['r'] = rng.normal(\n r.to_value(r_err.unit),\n r_err.value) * r_err.unit\ntbl['r_err'] = r_err\n\ntbl['tperi'] = rng.normal(\n time.to_value(t_err.unit),\n t_err.value) * t_err.unit\ntbl['tperi_err'] = t_err\n\nt = at.QTable({k: [] * tbl[k].unit for k in tbl})\nt.add_row(tbl)\n\nt.meta['title'] = 'Simulated Two-body - simple vrad, vtan'\n\nt.write('../datasets/apw-simulated-simple.ecsv', overwrite=True)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
cb96a1023f91ee4703c455d9d2e23b92913eafa3
232,980
ipynb
Jupyter Notebook
Quick Viz.ipynb
lucasdurand/simple-voila
616ddc317b9309c526c8032cbafdda520c52acd9
[ "MIT" ]
null
null
null
Quick Viz.ipynb
lucasdurand/simple-voila
616ddc317b9309c526c8032cbafdda520c52acd9
[ "MIT" ]
null
null
null
Quick Viz.ipynb
lucasdurand/simple-voila
616ddc317b9309c526c8032cbafdda520c52acd9
[ "MIT" ]
null
null
null
96.59204
48,252
0.569942
[ [ [ "# Popularity of Python", "_____no_output_____" ], [ "## Google Trends", "_____no_output_____" ] ], [ [ "from pytrends.request import TrendReq\npytrends = TrendReq(hl='en-US', timeout=(10,30))", "_____no_output_____" ], [ "kw_list = [\"Python\",\"Java\",\"Javascript\",\"C++\",\".NET\"]", "_____no_output_____" ], [ "def get_trends(kw_list=kw_list, cat=0):\n pytrends.build_payload(kw_list, cat=cat, timeframe='all', geo='', gprop='')\n trends = pytrends.interest_over_time()\n return trends", "_____no_output_____" ], [ "get_trends().plot()", "BokehDeprecationWarning: 'legend' keyword is deprecated, use explicit 'legend_label', 'legend_field', or 'legend_group' keywords instead\nBokehDeprecationWarning: 'legend' keyword is deprecated, use explicit 'legend_label', 'legend_field', or 'legend_group' keywords instead\nBokehDeprecationWarning: 'legend' keyword is deprecated, use explicit 'legend_label', 'legend_field', or 'legend_group' keywords instead\nBokehDeprecationWarning: 'legend' keyword is deprecated, use explicit 'legend_label', 'legend_field', or 'legend_group' keywords instead\nBokehDeprecationWarning: 'legend' keyword is deprecated, use explicit 'legend_label', 'legend_field', or 'legend_group' keywords instead\n" ], [ "pytrends.categories()", "_____no_output_____" ], [ "trends = get_trends(cat=31)", "_____no_output_____" ], [ "import pandas as pd\npd.set_option('plotting.backend', 'pandas_bokeh')\npd.plotting.output_notebook()", "_____no_output_____" ], [ "trends.plot();", "BokehDeprecationWarning: 'legend' keyword is deprecated, use explicit 'legend_label', 'legend_field', or 'legend_group' keywords instead\nBokehDeprecationWarning: 'legend' keyword is deprecated, use explicit 'legend_label', 'legend_field', or 'legend_group' keywords instead\nBokehDeprecationWarning: 'legend' keyword is deprecated, use explicit 'legend_label', 'legend_field', or 'legend_group' keywords instead\nBokehDeprecationWarning: 'legend' keyword is deprecated, use explicit 'legend_label', 'legend_field', or 'legend_group' keywords instead\nBokehDeprecationWarning: 'legend' keyword is deprecated, use explicit 'legend_label', 'legend_field', or 'legend_group' keywords instead\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb96a2946c8252f774ae7207f4c30bf4eeefd8ef
134,487
ipynb
Jupyter Notebook
Chapter03/deployModel.ipynb
paulgholin/PyTorch-Computer-Vision-Cookbook
5163c26ccf9f8f70cef847b8efb75e60ae28892b
[ "MIT" ]
null
null
null
Chapter03/deployModel.ipynb
paulgholin/PyTorch-Computer-Vision-Cookbook
5163c26ccf9f8f70cef847b8efb75e60ae28892b
[ "MIT" ]
null
null
null
Chapter03/deployModel.ipynb
paulgholin/PyTorch-Computer-Vision-Cookbook
5163c26ccf9f8f70cef847b8efb75e60ae28892b
[ "MIT" ]
null
null
null
362.498652
125,388
0.935369
[ [ [ "from torch import nn\nfrom torchvision import models\n\n# load model\nmodel_resnet18 = models.resnet18(pretrained=False)\nnum_ftrs = model_resnet18.fc.in_features\n# change last layer\nnum_classes=10\nmodel_resnet18.fc = nn.Linear(num_ftrs, num_classes)", "_____no_output_____" ], [ "import torch \n\n# load state_dict into model\npath2weights=\"./models/resnet18_pretrained.pt\"\nmodel_resnet18.load_state_dict(torch.load(path2weights))", "_____no_output_____" ], [ "# set model in evaluation mode\nmodel_resnet18.eval()", "_____no_output_____" ], [ "# move model to cuda/gpu device\nif torch.cuda.is_available():\n device = torch.device(\"cuda\")\n model_resnet18=model_resnet18.to(device)", "_____no_output_____" ], [ "def deploy_model(model,dataset,device, num_classes=10,sanity_check=False):\n\n len_data=len(dataset)\n \n # initialize output tensor on CPU: due to GPU memory limits\n y_out=torch.zeros(len_data,num_classes)\n \n # initialize ground truth on CPU: due to GPU memory limits\n y_gt=np.zeros((len_data),dtype=\"uint8\")\n \n # move model to device\n model=model.to(device)\n \n elapsed_times=[]\n with torch.no_grad():\n for i in range(len_data):\n x,y=dataset[i]\n y_gt[i]=y\n start=time.time() \n yy=model(x.unsqueeze(0).to(device))\n y_out[i]=torch.softmax(yy,dim=1)\n elapsed=time.time()-start\n elapsed_times.append(elapsed)\n\n if sanity_check is True:\n break\n\n inference_time=np.mean(elapsed_times)*1000\n print(\"average inference time per image on %s: %.2f ms \" %(device,inference_time))\n return y_out.numpy(),y_gt", "_____no_output_____" ] ], [ [ "## Loading Test Dataset", "_____no_output_____" ] ], [ [ "from torchvision import datasets\nimport torchvision.transforms as transforms\n\n# define transformation\ndata_transformer = transforms.Compose([transforms.ToTensor()])\n\npath2data=\"./data\"\n\n# loading data\ntest0_ds=datasets.STL10(path2data, split='test', download=True,transform=data_transformer)\nprint(test0_ds.data.shape)", "Files already downloaded and verified\n(8000, 3, 96, 96)\n" ], [ "from sklearn.model_selection import StratifiedShuffleSplit\n\nsss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0)\n\nindices=list(range(len(test0_ds)))\ny_test0=[y for _,y in test0_ds]\nfor test_index, val_index in sss.split(indices, y_test0):\n print(\"test:\", test_index, \"val:\", val_index)\n print(len(val_index),len(test_index))", "test: [2096 4321 2767 ... 3206 3910 2902] val: [6332 6852 1532 ... 
5766 4469 1011]\n1600 6400\n" ], [ "from torch.utils.data import Subset\n\nval_ds=Subset(test0_ds,val_index)\ntest_ds=Subset(test0_ds,test_index)", "_____no_output_____" ], [ "mean=[0.4467106, 0.43980986, 0.40664646]\nstd=[0.22414584,0.22148906,0.22389975]", "_____no_output_____" ], [ "test0_transformer = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean, std),\n ]) ", "_____no_output_____" ], [ "test0_ds.transform=test0_transformer", "_____no_output_____" ], [ "import time\nimport numpy as np\n\n# deploy model \ny_out,y_gt=deploy_model(model_resnet18,val_ds,device=device,sanity_check=False)\nprint(y_out.shape,y_gt.shape)\n", "average inference time per image on cuda: 3.62 ms \n(1600, 10) (1600,)\n" ], [ "from sklearn.metrics import accuracy_score\n\n# get predictions\ny_pred = np.argmax(y_out,axis=1)\nprint(y_pred.shape,y_gt.shape)\n\n# compute accuracy \nacc=accuracy_score(y_pred,y_gt)\nprint(\"accuracy: %.2f\" %acc)\n", "(1600,) (1600,)\naccuracy: 0.88\n" ], [ "y_out,y_gt=deploy_model(model_resnet18,test_ds,device=device)\n\ny_pred = np.argmax(y_out,axis=1)\nacc=accuracy_score(y_pred,y_gt)\nprint(acc)", "average inference time per image on cuda: 3.53 ms \n0.87828125\n" ], [ "from torchvision import utils\nimport matplotlib.pyplot as plt\nimport numpy as np\n%matplotlib inline\nnp.random.seed(1)\n\ndef imshow(inp, title=None):\n mean=[0.4467106, 0.43980986, 0.40664646]\n std=[0.22414584,0.22148906,0.22389975]\n inp = inp.numpy().transpose((1, 2, 0))\n mean = np.array(mean)\n std = np.array(std)\n inp = std * inp + mean\n inp = np.clip(inp, 0, 1)\n plt.imshow(inp)\n if title is not None:\n plt.title(title)\n plt.pause(0.001) # pause a bit so that plots are updated \n\ngrid_size=4\nrnd_inds=np.random.randint(0,len(test_ds),grid_size)\nprint(\"image indices:\",rnd_inds)\n\nx_grid_test=[test_ds[i][0] for i in rnd_inds]\ny_grid_test=[(y_pred[i],y_gt[i]) for i in rnd_inds]\n\nx_grid_test=utils.make_grid(x_grid_test, nrow=4, padding=2)\nprint(x_grid_test.shape)\n\nplt.rcParams['figure.figsize'] = (10, 5)\nimshow(x_grid_test,y_grid_test)", "image indices: [5157 235 3980 5192]\ntorch.Size([3, 100, 394])\n" ], [ "device_cpu = torch.device(\"cpu\")\ny_out,y_gt=deploy_model(model_resnet18,val_ds,device=device_cpu,sanity_check=False)\nprint(y_out.shape,y_gt.shape)", "average inference time per image on cpu: 13.95 ms \n(1600, 10) (1600,)\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb96a9d8225919569071770738b311742af9b3bb
6,848
ipynb
Jupyter Notebook
docs/dialect/graphblas_dialect_tutorials/graphblas_optimize/fuse_multiply_apply.ipynb
chelini/mlir-graphblas-1
b5b957065b3bda8f7f5ce9a1a742606261b7a4c0
[ "Apache-2.0" ]
null
null
null
docs/dialect/graphblas_dialect_tutorials/graphblas_optimize/fuse_multiply_apply.ipynb
chelini/mlir-graphblas-1
b5b957065b3bda8f7f5ce9a1a742606261b7a4c0
[ "Apache-2.0" ]
null
null
null
docs/dialect/graphblas_dialect_tutorials/graphblas_optimize/fuse_multiply_apply.ipynb
chelini/mlir-graphblas-1
b5b957065b3bda8f7f5ce9a1a742606261b7a4c0
[ "Apache-2.0" ]
null
null
null
36.620321
273
0.558995
[ [ [ "# Fusing graphblas.matrix_multiply with graphblas.matrix_apply\n\nThis example will go over how to use the `--graphblas-optimize` pass from `graphblas-opt` to fuse `graphblas.matrix_multiply` ops with `graphblas.matrix_apply` ops into `graphblas.matrix_multiply` ops with a region attached.\n\nLet's first import some necessary libraries.", "_____no_output_____" ] ], [ [ "import tempfile\nfrom mlir_graphblas.cli import GRAPHBLAS_OPT_EXE", "_____no_output_____" ] ], [ [ "Since [sparse tensor encodings](https://mlir.llvm.org/docs/Dialects/SparseTensorOps/#sparsetensorencodingattr) can be very verbose in MLIR, let's write some helpers to make the MLIR code more readable.", "_____no_output_____" ] ], [ [ "def tersify_mlir(input_string: str) -> str:\n terse_string = input_string\n terse_string = terse_string.replace(\n '''#sparse_tensor.encoding<{ '''\n '''dimLevelType = [ \"dense\", \"compressed\" ], '''\n '''dimOrdering = affine_map<(d0, d1) -> (d0, d1)>, '''\n '''pointerBitWidth = 64, '''\n '''indexBitWidth = 64 '''\n '''}>''', \n \"#CSR64\")\n terse_string = terse_string.replace(\n '''#sparse_tensor.encoding<{ '''\n '''dimLevelType = [ \"dense\", \"compressed\" ], '''\n '''dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, '''\n '''pointerBitWidth = 64, '''\n '''indexBitWidth = 64 '''\n '''}>''', \n \"#CSC64\")\n return terse_string", "_____no_output_____" ] ], [ [ "## Fusion Details\n\nRecall that `graphblas.matrix_multiply` can take an optional region, e.g. this code squares each element of the matrix multiply product:\n```\n%answer = graphblas.matrix_multiply %argA, %argB { semiring = \"plus_times\" } : (tensor<?x?xf64, #CSR64>, tensor<?x?xf64, #CSC64>) to tensor<?x?xf64, #CSR64> {\n ^bb0(%value: f64):\n %result = std.mulf %value, %value: f64\n graphblas.yield %result : f64\n }\n```\n\nSince `graphblas.matrix_apply` ops only change tensors in an element-wise fashion, we can perform these element-wise changes in the region of a `graphblas.matrix_multiply` op if the `graphblas.matrix_apply` op is run on the result of a `graphblas.matrix_multiply` op.", "_____no_output_____" ], [ "## Simple Fusion\n\nHere, we'll show the simplest example of how we can fuse a `graphblas.matrix_multiply` op with a `graphblas.matrix_apply` op.", "_____no_output_____" ] ], [ [ "mlir_text = \"\"\"\n#CSR64 = #sparse_tensor.encoding<{\n dimLevelType = [ \"dense\", \"compressed\" ],\n dimOrdering = affine_map<(i,j) -> (i,j)>,\n pointerBitWidth = 64,\n indexBitWidth = 64\n}>\n\n#CSC64 = #sparse_tensor.encoding<{\n dimLevelType = [ \"dense\", \"compressed\" ],\n dimOrdering = affine_map<(i,j) -> (j,i)>,\n pointerBitWidth = 64,\n indexBitWidth = 64\n}>\n\nfunc @fuse_adjacent(%A: tensor<?x?xf64, #CSR64>, %B: tensor<?x?xf64, #CSC64>, %thunk: f64) -> tensor<?x?xf64, #CSR64> {\n %C = graphblas.matrix_multiply %A, %B { semiring = \"plus_plus\" } : (tensor<?x?xf64, #CSR64>, tensor<?x?xf64, #CSC64>) to tensor<?x?xf64, #CSR64> \n %apply_result = graphblas.matrix_apply %C, %thunk { apply_operator = \"min\" } : (tensor<?x?xf64, #CSR64>, f64) to tensor<?x?xf64, #CSR64>\n return %apply_result : tensor<?x?xf64, #CSR64>\n}\n\"\"\"\n\nwith tempfile.NamedTemporaryFile() as temp:\n temp_file_name = temp.name\n with open(temp_file_name, 'w') as f:\n f.write(mlir_text)\n temp.flush()\n\n output_mlir = ! 
cat $temp_file_name | $GRAPHBLAS_OPT_EXE --graphblas-optimize\n    output_mlir = \"\\n\".join(output_mlir)\n    output_mlir = tersify_mlir(output_mlir)\n\nprint(output_mlir)", "module {\n  func @fuse_adjacent(%arg0: tensor<?x?xf64, #CSR64>, %arg1: tensor<?x?xf64, #CSC64>, %arg2: f64) -> tensor<?x?xf64, #CSR64> {\n    %0 = graphblas.matrix_multiply %arg0, %arg1 {semiring = \"plus_plus\"} : (tensor<?x?xf64, #CSR64>, tensor<?x?xf64, #CSC64>) to tensor<?x?xf64, #CSR64> {\n    ^bb0(%arg3: f64):  // no predecessors\n      %1 = cmpf olt, %arg3, %arg2 : f64\n      %2 = select %1, %arg3, %arg2 : f64\n      graphblas.yield %2 : f64\n    }\n    return %0 : tensor<?x?xf64, #CSR64>\n  }\n}\n\n" ] ], [ [ "The code in the region attached to the `graphblas.matrix_multiply` in the lowered MLIR here may seem confusing at first, but it's simply calculating the minimum of each element (i.e. `%arg3`) and the thunk (i.e. `%thunk` or `%arg2`).\n\nIt's noteworthy that this fusion also works if the `graphblas.matrix_multiply` op takes a mask. Rather than explicitly demonstrating this, we'll leave it as an exercise for the reader as it's fairly straightforward. \n\nSimilar to our previous `graphblas.matrix_multiply_reduce_to_scalar` examples, if the intermediate result from the `graphblas.matrix_multiply` op is used in other places outside of the `graphblas.matrix_apply` op, this fusion cannot be applied. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
cb96ac93524f713579a02fcbe5e5bf3146938277
16,301
ipynb
Jupyter Notebook
cgatpipelines/tools/pipeline_docs/pipeline_peakcalling/notebooks/template_peakcalling_filtering_Report_insert_sizes.ipynb
kevinrue/cgat-flow
02b5a1867253c2f6fd6b4f3763e0299115378913
[ "MIT" ]
11
2018-09-07T11:33:23.000Z
2022-01-07T12:16:11.000Z
cgatpipelines/tools/pipeline_docs/pipeline_peakcalling/notebooks/template_peakcalling_filtering_Report_insert_sizes.ipynb
kevinrue/cgat-flow
02b5a1867253c2f6fd6b4f3763e0299115378913
[ "MIT" ]
102
2018-03-22T15:35:26.000Z
2022-03-23T17:46:16.000Z
cgatpipelines/tools/pipeline_docs/pipeline_peakcalling/notebooks/template_peakcalling_filtering_Report_insert_sizes.ipynb
kevinrue/cgat-flow
02b5a1867253c2f6fd6b4f3763e0299115378913
[ "MIT" ]
7
2018-06-11T15:01:41.000Z
2020-03-31T09:29:33.000Z
33.541152
365
0.597693
[ [ [ "Peakcalling Bam Stats and Filtering Report - Insert Sizes\n================================================================\n\nThis notebook is for the analysis of outputs from the peakcalling pipeline \n\nThere are severals stats that you want collected and graphed (topics covered in this notebook in bold).\n\nThese are: \n\n- how many reads input\n- how many reads removed at each step (numbers and percentages)\n- how many reads left after filtering\n- inset size distribution pre filtering for PE reads \n- how many reads mapping to each chromosome before filtering? \n- how many reads mapping to each chromosome after filtering?\n- X:Y reads ratio \n- **inset size distribution after filtering for PE reads** \n- samtools flags - check how many reads are in categories they shouldn't be \n- picard stats - check how many reads are in categories they shouldn't be \n\n\nThis notebook takes the sqlite3 database created by cgat peakcalling_pipeline.py and uses it for plotting the above statistics \n\nIt assumes a file directory of: \n\n location of database = project_folder/csvdb\n\n location of this notebook = project_folder/notebooks.dir/", "_____no_output_____" ], [ "Firstly lets load all the things that might be needed", "_____no_output_____" ], [ "Insert size distribution\n------------------------\nThis section get the size distribution of the fragements that have been sequeced in paired-end sequencing. The pipeline calculates the size distribution by caluculating the distance between the most 5' possition of both reads, for those mapping to the + stand this is the left-post possition, for those mapping to the - strand is the rightmost coordinate. \n\nThis plot is especially useful for ATAC-Seq experiments as good samples should show peaks with a period approximately equivelent to the length of a nucleosome (~ 146bp) a lack of this phasing might indicate poor quality samples and either over (if lots of small fragments) or under intergration (if an excess of large fragments) of the topoisomerase. 
", "_____no_output_____" ] ], [ [ "import sqlite3\n\nimport pandas as pd\nimport numpy as np\n%matplotlib inline\n\nimport matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\n#import cgatcore.pipeline as P\nimport os\nimport statistics\n#import collections\n#load R and the R packages required\n#%load_ext rpy2.ipython\n#%R require(ggplot2)\n\n# use these functions to display tables nicely as html \nfrom IPython.display import display, HTML\nplt.style.use('ggplot')\n#plt.style.available", "_____no_output_____" ] ], [ [ "This is where we are and when the notebook was run\n", "_____no_output_____" ] ], [ [ "!pwd\n!date", "_____no_output_____" ] ], [ [ "First lets set the output path for where we want our plots to be saved and the database path and see what tables it contains", "_____no_output_____" ] ], [ [ "database_path = '../csvdb'\noutput_path = '.'\n#database_path= \"/ifs/projects/charlotteg/pipeline_peakcalling/csvdb\"", "_____no_output_____" ] ], [ [ "This code adds a button to see/hide code in html ", "_____no_output_____" ] ], [ [ "\nHTML('''<script>\ncode_show=true; \nfunction code_toggle() {\n if (code_show){\n $('div.input').hide();\n } else {\n $('div.input').show();\n }\n code_show = !code_show\n} \n$( document ).ready(code_toggle);\n</script>\n<form action=\"javascript:code_toggle()\"><input type=\"submit\" value=\"Click here to toggle on/off the raw code.\"></form>''')\n", "_____no_output_____" ] ], [ [ "The code below provides functions for accessing the project database and extract a table names so you can see what tables have been loaded into the database and are available for plotting. It also has a function for geting table from the database and indexing the table with the track name", "_____no_output_____" ] ], [ [ "def getTableNamesFromDB(database_path):\n # Create a SQL connection to our SQLite database\n con = sqlite3.connect(database_path)\n cur = con.cursor()\n # the result of a \"cursor.execute\" can be iterated over by row\n cur.execute(\"SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;\")\n available_tables = (cur.fetchall())\n #Be sure to close the connection.\n con.close()\n return available_tables\n\ndb_tables = getTableNamesFromDB(database_path)\nprint('Tables contained by the database:')\nfor x in db_tables: \n print('\\t\\t%s' % x[0])\n \n#This function retrieves a table from sql database and indexes it with track name\ndef getTableFromDB(statement,database_path):\n '''gets table from sql database depending on statement\n and set track as index if contains track in column names'''\n conn = sqlite3.connect(database_path)\n df = pd.read_sql_query(statement,conn)\n if 'track' in df.columns:\n df.index = df['track']\n return df", "_____no_output_____" ] ], [ [ "Insert Size Summary\n====================", "_____no_output_____" ], [ "1) lets getthe insert_sizes table from database\n\nFirsly lets look at the summary statistics that us the mean fragment size, sequencing type and mean read length. 
This table is produced using macs2 for PE data, or bamtools for SE data \n\n\nIf IDR has been run the insert_size table will contain entries for the pooled and pseudo replicates too - we don't really want this as it will duplicate the data from the origional samples so we subset this out ", "_____no_output_____" ] ], [ [ "insert_df = getTableFromDB('select * from insert_sizes;',database_path)\ninsert_df = insert_df[insert_df[\"filename\"].str.contains('pseudo')==False].copy()\ninsert_df = insert_df[insert_df[\"filename\"].str.contains('pooled')==False].copy()", "_____no_output_____" ], [ "def add_expt_to_insertdf(dataframe):\n ''' splits track name for example HsTh1-RATotal-R1.star into expt\n featues, expt, sample_treatment and replicate and adds these as \n collumns to the dataframe'''\n expt = []\n treatment = []\n replicate = []\n for value in dataframe.filename:\n x = value.split('/')[-1]\n x = x.split('_insert')[0]\n # split into design features\n y = x.split('-')\n expt.append(y[-3])\n treatment.append(y[-2])\n replicate.append(y[-1])\n\n if len(expt) == len(treatment) and len(expt)== len(replicate):\n print ('all values in list correctly')\n else:\n print ('error in loading values into lists')\n\n #add collums to dataframe \n dataframe['expt_name'] = expt\n dataframe['sample_treatment'] = treatment\n dataframe['replicate'] = replicate\n\n return dataframe\n\ninsert_df = add_expt_to_insertdf(insert_df)\ninsert_df", "_____no_output_____" ] ], [ [ "lets graph the fragment length mean and tag size grouped by sample so we can see if they are much different", "_____no_output_____" ] ], [ [ "ax = insert_df.boxplot(column='fragmentsize_mean', by='sample_treatment')\nax.set_title('for mean fragment size',size=10)\nax.set_ylabel('mean fragment length')\nax.set_xlabel('sample treatment')\n\nax = insert_df.boxplot(column='tagsize', by='sample_treatment')\nax.set_title('for tag size',size=10)\nax.set_ylabel('tag size')\nax.set_xlabel('sample treatment')\nax.set_ylim(((insert_df.tagsize.min()-2),(insert_df.tagsize.max()+2)))", "_____no_output_____" ] ], [ [ "Ok now get get the fragment length distributiions for each sample and plot them ", "_____no_output_____" ] ], [ [ "def getFraglengthTables(database_path):\n '''Takes path to sqlite3 database and retrieves fraglengths tables for individual samples\n , returns a dictionary where keys = sample table names, values = fraglengths dataframe'''\n frag_tabs = []\n db_tables = getTableNamesFromDB(database_path)\n for table_name in db_tables:\n if 'fraglengths' in str(table_name[0]):\n tab_name = str(table_name[0])\n statement ='select * from %s;' % tab_name\n df = getTableFromDB(statement,database_path)\n frag_tabs.append((tab_name,df))\n print('detected fragment length distribution tables for %s files: \\n' % len(frag_tabs))\n for val in frag_tabs:\n print(val[0])\n return frag_tabs\n\ndef getDFofFragLengths(database_path):\n ''' this takes a path to database and gets a dataframe where length of fragments is the index,\n each column is a sample and values are the number of reads that have that fragment length in that \n sample\n '''\n fraglength_dfs_list = getFraglengthTables(database_path)\n dfs=[]\n for item in fraglength_dfs_list:\n track = item[0].split('_filtered_fraglengths')[0]\n df = item[1]\n #rename collumns so that they are correct - correct this in the pipeline then delete this\n #df.rename(columns={'frequency':'frag_length', 'frag_length':'frequency'}, inplace=True)\n df.index = df.frag_length\n 
df.drop('frag_length',axis=1,inplace=True)\n df.rename(columns={'frequency':track},inplace=True)\n dfs.append(df)\n \n frag_length_df = pd.concat(dfs,axis=1)\n frag_length_df.fillna(0, inplace=True)\n return frag_length_df\n\n\n#Note the frequency and fragment lengths are around the wrong way! \n#frequency is actually fragment length, and fragement length is the frequency \n\n#This gets the tables from db and makes master df of all fragment length frequencies \nfrag_length_df = getDFofFragLengths(database_path)\n\n#plot fragment length frequencies \nax = frag_length_df.divide(1000).plot()\nax.set_ylabel('Number of fragments\\n(thousands)')\nax.legend(loc=2,bbox_to_anchor=(1.05, 1),borderaxespad=0. )\nax.set_title('fragment length distribution')\nax.set_xlabel('fragment length (bp)')\nax.set_xlim()\n\n", "_____no_output_____" ] ], [ [ "Now lets zoom in on the interesting region of the plot (the default in the code looks at fragment lengths from 0 to 800bp - you can change this below by setting the tuple in the ax.set_xlim() function", "_____no_output_____" ] ], [ [ "ax = frag_length_df.divide(1000).plot(figsize=(9,9))\nax.set_ylabel('Number of fragments\\n(thousands)')\nax.legend(loc=2,bbox_to_anchor=(1.05, 1),borderaxespad=0. )\nax.set_title('fragment length distribution')\nax.set_xlabel('fragment length (bp)')\nax.set_xlim((0,800))", "_____no_output_____" ] ], [ [ "it is a bit trickly to see differences between samples of different library sizes so lets look and see if the reads for each fragment length is similar ", "_____no_output_____" ] ], [ [ "percent_frag_length_df = pd.DataFrame(index=frag_length_df.index)\n\nfor column in frag_length_df:\n total_frags = frag_length_df[column].sum()\n percent_frag_length_df[column] = frag_length_df[column].divide(total_frags)*100\n \n\nax = percent_frag_length_df.plot(figsize=(9,9))\nax.set_ylabel('Percentage of fragments')\nax.legend(loc=2,bbox_to_anchor=(1.05, 1),borderaxespad=0. )\nax.set_title('percentage fragment length distribution')\nax.set_xlabel('fragment length (bp)')\nax.set_xlim((0,800))\n", "_____no_output_____" ] ], [ [ "SUMMARISE HERE\n==============\nFrom these plots you should be able to tell wether there are any distinctive patterns in the size of the fragment lengths,this is especially important for ATAC-Seq data as in successful experiments you should be able to detect nucleosome phasing - it can also indicate over fragmentation or biases in cutting.", "_____no_output_____" ], [ "Lets looks at the picard insert size metrics also ", "_____no_output_____" ] ], [ [ "insert_df = getTableFromDB('select * from picard_stats_insert_size_metrics;',database_path)\nfor c in insert_df.columns:\n print (c)\ninsert_df", "_____no_output_____" ] ], [ [ "These metrics are actually quite different to the ones we calculate themselves - for some reason it seems to split the files into 2 and dives a distribution for smaller fragments and for larger fragments- not sure why at the moment ", "_____no_output_____" ] ] ]
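For the phasing check promised above, a quick numerical test is to look for periodicity in each sample's fragment length distribution rather than eyeballing the plots. The sketch below is not part of the pipeline output - it assumes the frag_length_df built above, assumes the fraglengths tables are binned at 1bp, and the 150-200bp lag where ATAC-Seq periodicity is usually expected is a rule of thumb, not something stored in the database.

```python
import numpy as np

def phasing_autocorrelation(series, max_lag=400):
    '''autocorrelation of one sample's fragment length counts at lags 1..max_lag-1'''
    x = series.values.astype(float)
    x = x - x.mean()
    denom = (x * x).sum()
    return [(x[:-lag] * x[lag:]).sum() / denom for lag in range(1, max_lag)]

for column in frag_length_df:
    ac = phasing_autocorrelation(frag_length_df[column])
    plt.plot(range(1, len(ac) + 1), ac, label=column)
plt.xlabel('lag (bp)')
plt.ylabel('autocorrelation')
plt.title('periodicity in fragment length distributions')
plt.legend(loc=2, bbox_to_anchor=(1.05, 1), borderaxespad=0.)
```

Peaks repeating at roughly 180-200bp intervals would support nucleosome phasing; a featureless decay suggests phasing is absent or masked by over-fragmentation.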
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
cb96b70509722fac6f4edae6abcfb1025642c4ee
344,836
ipynb
Jupyter Notebook
examples/splicing_cnn_perturbed_multicell.ipynb
lafleur1/isolearn
36ae6eaa07a19f2281641ce2bd4b63f87d827e6b
[ "MIT" ]
5
2019-05-31T06:23:19.000Z
2021-03-13T05:27:31.000Z
examples/splicing_cnn_perturbed_multicell.ipynb
lafleur1/isolearn
36ae6eaa07a19f2281641ce2bd4b63f87d827e6b
[ "MIT" ]
null
null
null
examples/splicing_cnn_perturbed_multicell.ipynb
lafleur1/isolearn
36ae6eaa07a19f2281641ce2bd4b63f87d827e6b
[ "MIT" ]
5
2019-06-13T18:55:14.000Z
2022-02-08T13:32:51.000Z
530.516923
81,280
0.941987
[ [ [ "import keras\nfrom keras.models import Sequential, Model, load_model\nfrom keras.layers import Dense, Dropout, Flatten, Input, Lambda, Concatenate\nfrom keras.layers import Conv1D, MaxPooling1D\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\nfrom keras import backend as K\nimport keras.losses\n\nimport tensorflow as tf\n\nimport pandas as pd\n\nimport os\nimport numpy as np\n\nimport scipy.sparse as sp\nimport scipy.io as spio\n\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\nimport isolearn.io as isoio\nimport isolearn.keras as iso\n\nfrom scipy.stats import pearsonr\n", "Using TensorFlow backend.\n" ] ], [ [ "<h2>Load 5' Alternative Splicing Data</h2>\n\n- Load a Pandas DataFrame + Matlab Matrix of measured Splicing Sequences<br/>\n- isolearn.io loads all .csv and .mat files of a directory into memory as a dictionary<br/>\n- The DataFrame has one column - padded_sequence - containing the splice donor sequence<br/>\n- The Matrix contains RNA-Seq counts of measured splicing at each position across the sequence<br/>\n", "_____no_output_____" ] ], [ [ "#Load Splicing Data\n\nsplicing_dict = isoio.load('data/processed_data/splicing_5ss_data/splicing_5ss_data')\n", "_____no_output_____" ] ], [ [ "<h2>Create a Training and Test Set</h2>\n\n- We create an index containing row numbers corresponding to training and test sequences<br/>\n- Notice that we do not alter the underlying DataFrame, we only make lists of pointers to rows<br/>\n", "_____no_output_____" ] ], [ [ "#Generate training, validation and test set indexes\n\nvalid_set_size = 0.10\ntest_set_size = 0.10\n\ndata_index = np.arange(len(splicing_dict['df']), dtype=np.int)\n\ntrain_index = data_index[:-int(len(data_index) * (valid_set_size + test_set_size))]\nvalid_index = data_index[train_index.shape[0]:-int(len(data_index) * test_set_size)]\ntest_index = data_index[train_index.shape[0] + valid_index.shape[0]:]\n\nprint('Training set size = ' + str(train_index.shape[0]))\nprint('Validation set size = ' + str(valid_index.shape[0]))\nprint('Test set size = ' + str(test_index.shape[0]))", "Training set size = 211718\nValidation set size = 26465\nTest set size = 26464\n" ] ], [ [ "<h2>Create Data Generators</h2>\n\n- In Isolearn, we always build data generators that will encode and feed us the data on the fly<br/>\n- Here, for example, we create a training and test generator separately (using list comprehension)<br/>\n- First argument: The list of row indices (of data points) for this generator<br/>\n- Second argument: Dictionary or data sources<br/>\n- Third argument: Batch size for the data generator\n- Fourth argument: List of inputs, where each input is specified as a dictionary of attributes<br/>\n- Fifth argument: List of outputs<br/>\n- Sixth argument: List of any randomizers (see description below)<br/>\n- Seventh argument: Shuffle the dataset or not<br/>\n- Eight argument: True if some data source matrices are in sparse format<br/>\n- Ninth argument: In Keras, we typically want to specfiy the Outputs as Inputs when training. 
<br/>This argument achieves this by moving the outputs over to the input list and replaces the output with a dummy encoder.<br/>\n\nIn this example, we specify a One-Hot encoder as the input encoder for the entire splice donor sequence (centered on the splice donor).<br/>\nWe also specify the target output as the normalized RNA-Seq count at position 120 in the count matrix for each cell line (4 outputs).<br/>\n\nBesides the canonical splice donor at position 120 in the sequence, there are many other splice donors inserted randomly at neighboring positions. If we wanted to learn a general model of splicing, it would be a lot better if we could stochastically \"align\" sequences on any of the possible splice donors, perturbing both the input sequence and the RNA-Seq count matrix that we estimate splice donor usage from.<br/>\n\nThis is achieved using the built-in CutAlignSampler class, which allows us to randomly sample a position in the sequence with supporting splice junction counts, and shift both the sequence and splice count vector to be centered around that position. In this example, we specfiy the sampling rate of splice donors to be 0.5 (p_pos) and the rate of sampling some other, non-splice-site, position at a rate of 0.5 (p_neg).<br/>\n", "_____no_output_____" ] ], [ [ "#Create a One-Hot data generator, to be used for a convolutional net to regress SD1 Usage\n\ntotal_cuts = splicing_dict['hek_count'] + splicing_dict['hela_count'] + splicing_dict['mcf7_count'] + splicing_dict['cho_count']\nshifter = iso.CutAlignSampler(total_cuts, 240, 120, [], 0.0, p_pos=0.5, p_neg=0.5, sparse_source=True)\n\nsplicing_gens = {\n gen_id : iso.DataGenerator(\n idx,\n {\n 'df' : splicing_dict['df'],\n 'hek_count' : splicing_dict['hek_count'],\n 'hela_count' : splicing_dict['hela_count'],\n 'mcf7_count' : splicing_dict['mcf7_count'],\n 'cho_count' : splicing_dict['cho_count'],\n },\n batch_size=32,\n inputs = [\n {\n 'id' : 'seq',\n 'source_type' : 'dataframe',\n 'source' : 'df',\n 'extractor' : iso.SequenceExtractor('padded_sequence', start_pos=0, end_pos=240, shifter=shifter if gen_id == 'train' else None),\n 'encoder' : iso.OneHotEncoder(seq_length=240),\n 'dim' : (240, 4),\n 'sparsify' : False\n }\n ],\n outputs = [\n {\n 'id' : cell_type + '_sd1_usage',\n 'source_type' : 'matrix',\n 'source' : cell_type + '_count',\n 'extractor' : iso.CountExtractor(start_pos=0, end_pos=240, static_poses=[-1], shifter=shifter if gen_id == 'train' else None, sparse_source=False),\n 'transformer' : lambda t: t[120] / np.sum(t)\n } for cell_type in ['hek', 'hela', 'mcf7', 'cho']\n ],\n randomizers = [shifter] if gen_id in ['train'] else [],\n shuffle = True if gen_id in ['train'] else False,\n densify_batch_matrices=True,\n move_outputs_to_inputs=True if gen_id in ['train', 'valid'] else False\n ) for gen_id, idx in [('train', train_index), ('valid', valid_index), ('test', test_index)]\n}\n", "_____no_output_____" ] ], [ [ "<h2>Keras Loss Functions</h2>\n\nHere we specfiy a few loss function (Cross-Entropy and KL-divergence) to be used when optimizing our Splicing CNN.<br/>\n", "_____no_output_____" ] ], [ [ "#Keras loss functions\n\ndef sigmoid_entropy(inputs) :\n y_true, y_pred = inputs\n y_pred = K.clip(y_pred, K.epsilon(), 1. - K.epsilon())\n \n return -K.sum(y_true * K.log(y_pred) + (1.0 - y_true) * K.log(1.0 - y_pred), axis=-1)\n\ndef mean_sigmoid_entropy(inputs) :\n y_true, y_pred = inputs\n y_pred = K.clip(y_pred, K.epsilon(), 1. 
- K.epsilon())\n \n return -K.mean(y_true * K.log(y_pred) + (1.0 - y_true) * K.log(1.0 - y_pred), axis=-1)\n\ndef sigmoid_kl_divergence(inputs) :\n y_true, y_pred = inputs\n y_pred = K.clip(y_pred, K.epsilon(), 1. - K.epsilon())\n y_true = K.clip(y_true, K.epsilon(), 1. - K.epsilon())\n \n return K.sum(y_true * K.log(y_true / y_pred) + (1.0 - y_true) * K.log((1.0 - y_true) / (1.0 - y_pred)), axis=-1)\n\ndef mean_sigmoid_kl_divergence(inputs) :\n y_true, y_pred = inputs\n y_pred = K.clip(y_pred, K.epsilon(), 1. - K.epsilon())\n y_true = K.clip(y_true, K.epsilon(), 1. - K.epsilon())\n \n return K.mean(y_true * K.log(y_true / y_pred) + (1.0 - y_true) * K.log((1.0 - y_true) / (1.0 - y_pred)), axis=-1)\n", "_____no_output_____" ] ], [ [ "<h2>Splicing Model Definition</h2>\n\nHere we specfiy the Keras Inputs that we expect to receive from the data generators.<br/>\nWe also define the model architecture (2 convolutional-layer CNN with MaxPooling).<br/>", "_____no_output_____" ] ], [ [ "#Splicing Model Definition (CNN)\n\n#Inputs\n\nseq_input = Input(shape=(240, 4))\n\n#Outputs\n\ntrue_usage_hek = Input(shape=(1,))\ntrue_usage_hela = Input(shape=(1,))\ntrue_usage_mcf7 = Input(shape=(1,))\ntrue_usage_cho = Input(shape=(1,))\n\n#Shared Model Definition (Applied to each randomized sequence region)\n\nlayer_1 = Conv1D(64, 8, padding='valid', activation='relu')\nlayer_1_pool = MaxPooling1D(pool_size=2)\nlayer_2 = Conv1D(128, 6, padding='valid', activation='relu')\n\ndef shared_model(seq_input) :\n return Flatten()(\n layer_2(\n layer_1_pool(\n layer_1(\n seq_input\n )\n )\n )\n )\n\n\nshared_out = shared_model(seq_input)\n\n#Layers applied to the concatenated hidden representation\n\nlayer_dense = Dense(256, activation='relu')\nlayer_drop = Dropout(0.2)\n\ndropped_dense_out = layer_drop(layer_dense(shared_out))\n\n#Final cell-line specific regression layers\n\nlayer_usage_hek = Dense(1, activation='sigmoid', kernel_initializer='zeros')\nlayer_usage_hela = Dense(1, activation='sigmoid', kernel_initializer='zeros')\nlayer_usage_mcf7 = Dense(1, activation='sigmoid', kernel_initializer='zeros')\nlayer_usage_cho = Dense(1, activation='sigmoid', kernel_initializer='zeros')\n\npred_usage_hek = layer_usage_hek(dropped_dense_out)\npred_usage_hela = layer_usage_hela(dropped_dense_out)\npred_usage_mcf7 = layer_usage_mcf7(dropped_dense_out)\npred_usage_cho = layer_usage_cho(dropped_dense_out)\n\n#Compile Splicing Model\n\nsplicing_model = Model(\n inputs=[\n seq_input\n ],\n outputs=[\n pred_usage_hek,\n pred_usage_hela,\n pred_usage_mcf7,\n pred_usage_cho\n ]\n)\n", "_____no_output_____" ] ], [ [ "<h2>Loss Model Definition</h2>\n\nHere we specfiy our loss function, and we build it as a separate Keras Model.<br/>\nIn our case, our loss model averages the KL-divergence of predicted vs. 
true Splice Donor Usage across the 4 different cell types.<br/>", "_____no_output_____" ] ], [ [ "#Loss Model Definition\n\nloss_hek = Lambda(sigmoid_kl_divergence, output_shape = (1,))([true_usage_hek, pred_usage_hek])\nloss_hela = Lambda(sigmoid_kl_divergence, output_shape = (1,))([true_usage_hela, pred_usage_hela])\nloss_mcf7 = Lambda(sigmoid_kl_divergence, output_shape = (1,))([true_usage_mcf7, pred_usage_mcf7])\nloss_cho = Lambda(sigmoid_kl_divergence, output_shape = (1,))([true_usage_cho, pred_usage_cho])\n\n\ntotal_loss = Lambda(\n lambda l: (l[0] + l[1] + l[2] + l[3]) / 4.,\n output_shape = (1,)\n)(\n [\n loss_hek,\n loss_hela,\n loss_mcf7,\n loss_cho\n ]\n)\n\nloss_model = Model([\n #Inputs\n seq_input,\n \n #Target SD Usages\n true_usage_hek,\n true_usage_hela,\n true_usage_mcf7,\n true_usage_cho\n], total_loss)", "_____no_output_____" ] ], [ [ "<h2>Optimize the Loss Model</h2>\n\nHere we use SGD to optimize the Loss Model (defined in the previous notebook cell).<br/>\nSince our Loss Model indirectly depends on predicted outputs from our CNN Splicing Model, SGD will optimize the weights of our CNN<br/>\n<br/>\n\nNote that we very easily pass the data generators, and run them in parallel, by simply calling Keras fit_generator.<br/>\n", "_____no_output_____" ] ], [ [ "#Optimize CNN with Keras using the Data Generators to stream genomic data features\n\nopt = keras.optimizers.SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)\n\nloss_model.compile(loss=lambda true, pred: pred, optimizer=opt)\n\ncallbacks =[\n EarlyStopping(monitor='val_loss', min_delta=0.001, patience=2, verbose=0, mode='auto')\n]\n\nloss_model.fit_generator(\n generator=splicing_gens['train'],\n validation_data=splicing_gens['valid'],\n epochs=10,\n use_multiprocessing=True,\n workers=4,\n callbacks=callbacks\n)\n", "Epoch 1/10\n6615/6616 [============================>.] 
- ETA: 0s - loss: 0.0754\n6616/6616 [==============================] - 470s 71ms/step - loss: 0.0754 - val_loss: 0.1041\nEpoch 2/10\nEpoch 1/10\n6616/6616 [==============================] - 452s 68ms/step - loss: 0.0561 - val_loss: 0.0950\nEpoch 3/10\n6616/6616 [==============================] - 449s 68ms/step - loss: 0.0536 - val_loss: 0.0928\nEpoch 4/10\n6616/6616 [==============================] - 462s 70ms/step - loss: 0.0509 - val_loss: 0.0913\nEpoch 5/10\n6616/6616 [==============================] - 466s 70ms/step - loss: 0.0497 - val_loss: 0.0912\nEpoch 6/10\n6616/6616 [==============================] - 459s 69ms/step - loss: 0.0489 - val_loss: 0.0883\nEpoch 7/10\n6616/6616 [==============================] - 455s 69ms/step - loss: 0.0482 - val_loss: 0.0881\nEpoch 8/10\n6616/6616 [==============================] - 472s 71ms/step - loss: 0.0471 - val_loss: 0.0821\nEpoch 9/10\n6616/6616 [==============================] - 475s 72ms/step - loss: 0.0467 - val_loss: 0.0855\nEpoch 10/10\n6616/6616 [==============================] - 472s 71ms/step - loss: 0.0465 - val_loss: 0.0828\n" ], [ "#Save model\nsave_dir = os.path.join(os.getcwd(), 'saved_models')\n\nif not os.path.isdir(save_dir):\n os.makedirs(save_dir)\n\nmodel_name = 'splicing_cnn_perturbed_multicell.h5'\nmodel_path = os.path.join(save_dir, model_name)\nsplicing_model.save(model_path)\n\nprint('Saved trained model at %s ' % model_path)", "Saved trained model at /home/johli/isolearn/example/saved_models/splicing_cnn_perturbed_multicell.h5 \n" ], [ "#Load model\n\nsave_dir = os.path.join(os.getcwd(), 'saved_models')\nmodel_name = 'splicing_cnn_perturbed_multicell.h5'\nmodel_path = os.path.join(save_dir, model_name)\n\nsplicing_model = load_model(model_path)", "/home/johli/anaconda3/envs/aparent/lib/python3.6/site-packages/keras/engine/saving.py:292: UserWarning: No training configuration found in save file: the model was *not* compiled. 
Compile it manually.\n warnings.warn('No training configuration found in save file: '\n" ] ], [ [ "<h2>Evaluate the Splicing CNN</h2>\n\nHere we run our Splicing CNN on the Test set data generator (using Keras predict_generator).<br/>\nWe then compare our predictions of splice donor usage against the true RNA-Seq measurements.<br/>\n", "_____no_output_____" ] ], [ [ "#Evaluate predictions on test set\n\npredictions = splicing_model.predict_generator(splicing_gens['test'], workers=4, use_multiprocessing=True)\npred_usage_hek, pred_usage_hela, pred_usage_mcf7, pred_usage_cho = [np.ravel(prediction) for prediction in predictions]\n\ntargets = zip(*[splicing_gens['test'][i][1] for i in range(len(splicing_gens['test']))])\ntrue_usage_hek, true_usage_hela, true_usage_mcf7, true_usage_cho = [np.concatenate(list(target)) for target in targets]\n\ncell_lines = [\n ('hek', (pred_usage_hek, true_usage_hek)),\n ('hela', (pred_usage_hela, true_usage_hela)),\n ('mcf7', (pred_usage_mcf7, true_usage_mcf7)),\n ('cho', (pred_usage_cho, true_usage_cho))\n]\n\nfor cell_name, [y_true, y_pred] in cell_lines :\n \n r_val, p_val = pearsonr(y_pred, y_true)\n print(\"Test set R^2 = \" + str(round(r_val * r_val, 2)) + \", p = \" + str(p_val))\n\n #Plot test set scatter\n f = plt.figure(figsize=(4, 4))\n\n plt.scatter(y_pred, y_true, color='black', s=5, alpha=0.05)\n \n plt.xticks([0.0, 0.25, 0.5, 0.75, 1.0], fontsize=14)\n plt.yticks([0.0, 0.25, 0.5, 0.75, 1.0], fontsize=14)\n plt.xlabel('Predicted SD1 Usage', fontsize=14)\n plt.ylabel('True SD1 Usage', fontsize=14)\n plt.title(str(cell_name), fontsize=16)\n \n plt.xlim(0, 1)\n plt.ylim(0, 1)\n \n plt.tight_layout()\n plt.show()\n", "Test set R^2 = 0.86, p = 0.0\n" ] ] ]
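<h2>Predicting Usage for a New Sequence</h2>

As a final illustration, here is a minimal sketch of querying the trained model for a single sequence. To stay independent of isolearn internals it one-hot encodes by hand; the 240nt input below is arbitrary filler, not a real splice donor - substitute your own sequence.

```python
import numpy as np

def one_hot(seq):
    # standalone one-hot encoder matching the (len(seq), 4) layout used above
    mapping = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    x = np.zeros((len(seq), 4))
    for i, nt in enumerate(seq):
        if nt in mapping:
            x[i, mapping[nt]] = 1.
    return x

seq = 'ACGT' * 60                 # placeholder 240nt sequence
x = one_hot(seq)[None, ...]       # add batch dimension -> (1, 240, 4)

usage_hek, usage_hela, usage_mcf7, usage_cho = [float(u) for u in splicing_model.predict(x)]
print(usage_hek, usage_hela, usage_mcf7, usage_cho)
```

Whether this A/C/G/T channel order matches iso.OneHotEncoder is an assumption worth checking before trusting the numbers.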
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb96c0c935e6e510439557feff38323ac116f9ee
52,164
ipynb
Jupyter Notebook
Project 2 - Image Captioning/1_Preliminaries.ipynb
taimurzahid/Computer-Vision-Nanodegree
7820ba20c979f8319145c65c823c95e8d9e46052
[ "MIT" ]
null
null
null
Project 2 - Image Captioning/1_Preliminaries.ipynb
taimurzahid/Computer-Vision-Nanodegree
7820ba20c979f8319145c65c823c95e8d9e46052
[ "MIT" ]
null
null
null
Project 2 - Image Captioning/1_Preliminaries.ipynb
taimurzahid/Computer-Vision-Nanodegree
7820ba20c979f8319145c65c823c95e8d9e46052
[ "MIT" ]
null
null
null
45.163636
704
0.553351
[ [ [ "# Computer Vision Nanodegree\n\n## Project: Image Captioning\n\n---\n\nIn this notebook, you will learn how to load and pre-process data from the [COCO dataset](http://cocodataset.org/#home). You will also design a CNN-RNN model for automatically generating image captions.\n\nNote that **any amendments that you make to this notebook will not be graded**. However, you will use the instructions provided in **Step 3** and **Step 4** to implement your own CNN encoder and RNN decoder by making amendments to the **models.py** file provided as part of this project. Your **models.py** file **will be graded**. \n\nFeel free to use the links below to navigate the notebook:\n- [Step 1](#step1): Explore the Data Loader\n- [Step 2](#step2): Use the Data Loader to Obtain Batches\n- [Step 3](#step3): Experiment with the CNN Encoder\n- [Step 4](#step4): Implement the RNN Decoder", "_____no_output_____" ], [ "<a id='step1'></a>\n## Step 1: Explore the Data Loader\n\nWe have already written a [data loader](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader) that you can use to load the COCO dataset in batches. \n\nIn the code cell below, you will initialize the data loader by using the `get_loader` function in **data_loader.py**. \n\n> For this project, you are not permitted to change the **data_loader.py** file, which must be used as-is.\n\nThe `get_loader` function takes as input a number of arguments that can be explored in **data_loader.py**. Take the time to explore these arguments now by opening **data_loader.py** in a new window. Most of the arguments must be left at their default values, and you are only allowed to amend the values of the arguments below:\n1. **`transform`** - an [image transform](http://pytorch.org/docs/master/torchvision/transforms.html) specifying how to pre-process the images and convert them to PyTorch tensors before using them as input to the CNN encoder. For now, you are encouraged to keep the transform as provided in `transform_train`. You will have the opportunity later to choose your own image transform to pre-process the COCO images.\n2. **`mode`** - one of `'train'` (loads the training data in batches) or `'test'` (for the test data). We will say that the data loader is in training or test mode, respectively. While following the instructions in this notebook, please keep the data loader in training mode by setting `mode='train'`.\n3. **`batch_size`** - determines the batch size. When training the model, this is number of image-caption pairs used to amend the model weights in each training step.\n4. **`vocab_threshold`** - the total number of times that a word must appear in the in the training captions before it is used as part of the vocabulary. Words that have fewer than `vocab_threshold` occurrences in the training captions are considered unknown words. \n5. **`vocab_from_file`** - a Boolean that decides whether to load the vocabulary from file. \n\nWe will describe the `vocab_threshold` and `vocab_from_file` arguments in more detail soon. For now, run the code cell below. 
Be patient - it may take a couple of minutes to run!", "_____no_output_____" ] ], [ [ "import sys\nsys.path.append('/opt/cocoapi/PythonAPI')\nfrom pycocotools.coco import COCO\n!pip install nltk\nimport nltk\nnltk.download('punkt')\nfrom data_loader import get_loader\nfrom torchvision import transforms\n\n# Define a transform to pre-process the training images.\ntransform_train = transforms.Compose([ \n transforms.Resize(256), # smaller edge of image resized to 256\n transforms.RandomCrop(224), # get 224x224 crop from random location\n transforms.RandomHorizontalFlip(), # horizontally flip image with probability=0.5\n transforms.ToTensor(), # convert the PIL Image to a tensor\n transforms.Normalize((0.485, 0.456, 0.406), # normalize image for pre-trained model\n (0.229, 0.224, 0.225))])\n\n# Set the minimum word count threshold.\nvocab_threshold = 5\n\n# Specify the batch size.\nbatch_size = 10\n\n# Obtain the data loader.\ndata_loader = get_loader(transform=transform_train,\n mode='train',\n batch_size=batch_size,\n vocab_threshold=vocab_threshold,\n vocab_from_file=False)", "Requirement already satisfied: nltk in /opt/conda/lib/python3.6/site-packages (3.2.5)\nRequirement already satisfied: six in /opt/conda/lib/python3.6/site-packages (from nltk) (1.11.0)\n[nltk_data] Downloading package punkt to /root/nltk_data...\n[nltk_data] Unzipping tokenizers/punkt.zip.\nloading annotations into memory...\nDone (t=0.91s)\ncreating index...\nindex created!\n[0/414113] Tokenizing captions...\n[100000/414113] Tokenizing captions...\n[200000/414113] Tokenizing captions...\n[300000/414113] Tokenizing captions...\n[400000/414113] Tokenizing captions...\nloading annotations into memory...\nDone (t=0.95s)\ncreating index...\n" ] ], [ [ "When you ran the code cell above, the data loader was stored in the variable `data_loader`. \n\nYou can access the corresponding dataset as `data_loader.dataset`. This dataset is an instance of the `CoCoDataset` class in **data_loader.py**. If you are unfamiliar with data loaders and datasets, you are encouraged to review [this PyTorch tutorial](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html).\n\n### Exploring the `__getitem__` Method\n\nThe `__getitem__` method in the `CoCoDataset` class determines how an image-caption pair is pre-processed before being incorporated into a batch. This is true for all `Dataset` classes in PyTorch; if this is unfamiliar to you, please review [the tutorial linked above](http://pytorch.org/tutorials/beginner/data_loading_tutorial.html). \n\nWhen the data loader is in training mode, this method begins by first obtaining the filename (`path`) of a training image and its corresponding caption (`caption`).\n\n#### Image Pre-Processing \n\nImage pre-processing is relatively straightforward (from the `__getitem__` method in the `CoCoDataset` class):\n```python\n# Convert image to tensor and pre-process using transform\nimage = Image.open(os.path.join(self.img_folder, path)).convert('RGB')\nimage = self.transform(image)\n```\nAfter loading the image in the training folder with name `path`, the image is pre-processed using the same transform (`transform_train`) that was supplied when instantiating the data loader. \n\n#### Caption Pre-Processing \n\nThe captions also need to be pre-processed and prepped for training. 
In this example, for generating captions, we are aiming to create a model that predicts the next token of a sentence from previous tokens, so we turn the caption associated with any image into a list of tokenized words, before casting it to a PyTorch tensor that we can use to train the network.\n\nTo understand in more detail how COCO captions are pre-processed, we'll first need to take a look at the `vocab` instance variable of the `CoCoDataset` class. The code snippet below is pulled from the `__init__` method of the `CoCoDataset` class:\n```python\ndef __init__(self, transform, mode, batch_size, vocab_threshold, vocab_file, start_word, \n end_word, unk_word, annotations_file, vocab_from_file, img_folder):\n ...\n self.vocab = Vocabulary(vocab_threshold, vocab_file, start_word,\n end_word, unk_word, annotations_file, vocab_from_file)\n ...\n```\nFrom the code snippet above, you can see that `data_loader.dataset.vocab` is an instance of the `Vocabulary` class from **vocabulary.py**. Take the time now to verify this for yourself by looking at the full code in **data_loader.py**. \n\nWe use this instance to pre-process the COCO captions (from the `__getitem__` method in the `CoCoDataset` class):\n\n```python\n# Convert caption to tensor of word ids.\ntokens = nltk.tokenize.word_tokenize(str(caption).lower()) # line 1\ncaption = [] # line 2\ncaption.append(self.vocab(self.vocab.start_word)) # line 3\ncaption.extend([self.vocab(token) for token in tokens]) # line 4\ncaption.append(self.vocab(self.vocab.end_word)) # line 5\ncaption = torch.Tensor(caption).long() # line 6\n```\n\nAs you will see soon, this code converts any string-valued caption to a list of integers, before casting it to a PyTorch tensor. To see how this code works, we'll apply it to the sample caption in the next code cell.", "_____no_output_____" ] ], [ [ "sample_caption = 'A person doing a trick on a rail while riding a skateboard.'", "_____no_output_____" ] ], [ [ "In **`line 1`** of the code snippet, every letter in the caption is converted to lowercase, and the [`nltk.tokenize.word_tokenize`](http://www.nltk.org/) function is used to obtain a list of string-valued tokens. Run the next code cell to visualize the effect on `sample_caption`.", "_____no_output_____" ] ], [ [ "import nltk\n\nsample_tokens = nltk.tokenize.word_tokenize(str(sample_caption).lower())\nprint(sample_tokens)", "['a', 'person', 'doing', 'a', 'trick', 'on', 'a', 'rail', 'while', 'riding', 'a', 'skateboard', '.']\n" ] ], [ [ "In **`line 2`** and **`line 3`** we initialize an empty list and append an integer to mark the start of a caption. The [paper](https://arxiv.org/pdf/1411.4555.pdf) that you are encouraged to implement uses a special start word (and a special end word, which we'll examine below) to mark the beginning (and end) of a caption.\n\nThis special start word (`\"<start>\"`) is decided when instantiating the data loader and is passed as a parameter (`start_word`). 
You are **required** to keep this parameter at its default value (`start_word=\"<start>\"`).\n\nAs you will see below, the integer `0` is always used to mark the start of a caption.", "_____no_output_____" ] ], [ [ "sample_caption = []\n\nstart_word = data_loader.dataset.vocab.start_word\nprint('Special start word:', start_word)\nsample_caption.append(data_loader.dataset.vocab(start_word))\nprint(sample_caption)", "Special start word: <start>\n[0]\n" ] ], [ [ "In **`line 4`**, we continue the list by adding integers that correspond to each of the tokens in the caption.", "_____no_output_____" ] ], [ [ "sample_caption.extend([data_loader.dataset.vocab(token) for token in sample_tokens])\nprint(sample_caption)", "[0, 3, 98, 754, 3, 396, 39, 3, 1009, 207, 139, 3, 753, 18]\n" ] ], [ [ "In **`line 5`**, we append a final integer to mark the end of the caption. \n\nIdentical to the case of the special start word (above), the special end word (`\"<end>\"`) is decided when instantiating the data loader and is passed as a parameter (`end_word`). You are **required** to keep this parameter at its default value (`end_word=\"<end>\"`).\n\nAs you will see below, the integer `1` is always used to mark the end of a caption.", "_____no_output_____" ] ], [ [ "end_word = data_loader.dataset.vocab.end_word\nprint('Special end word:', end_word)\n\nsample_caption.append(data_loader.dataset.vocab(end_word))\nprint(sample_caption)", "Special end word: <end>\n[0, 3, 98, 754, 3, 396, 39, 3, 1009, 207, 139, 3, 753, 18, 1]\n" ] ], [ [ "Finally, in **`line 6`**, we convert the list of integers to a PyTorch tensor and cast it to [long type](http://pytorch.org/docs/master/tensors.html#torch.Tensor.long). You can read more about the different types of PyTorch tensors on the [website](http://pytorch.org/docs/master/tensors.html).", "_____no_output_____" ] ], [ [ "import torch\n\nsample_caption = torch.Tensor(sample_caption).long()\nprint(sample_caption)", "tensor([ 0, 3, 98, 754, 3, 396, 39, 3, 1009,\n 207, 139, 3, 753, 18, 1])\n" ] ], [ [ "And that's it! In summary, any caption is converted to a list of tokens, with _special_ start and end tokens marking the beginning and end of the sentence:\n```\n[<start>, 'a', 'person', 'doing', 'a', 'trick', 'on', 'a', 'rail', 'while', 'riding', 'a', 'skateboard', '.', <end>]\n```\nThis list of tokens is then turned into a list of integers, where every distinct word in the vocabulary has an associated integer value:\n```\n[0, 3, 98, 754, 3, 396, 39, 3, 1009, 207, 139, 3, 753, 18, 1]\n```\nFinally, this list is converted to a PyTorch tensor.  All of the captions in the COCO dataset are pre-processed using this same procedure from **`lines 1-6`** described above.  \n\nAs you saw, in order to convert a token to its corresponding integer, we call `data_loader.dataset.vocab` as a function.  The details of how this call works can be explored in the `__call__` method in the `Vocabulary` class in **vocabulary.py**.  \n\n```python\ndef __call__(self, word):\n    if not word in self.word2idx:\n        return self.word2idx[self.unk_word]\n    return self.word2idx[word]\n```\n\nThe `word2idx` instance variable is a Python [dictionary](https://docs.python.org/3/tutorial/datastructures.html#dictionaries) that is indexed by string-valued keys (mostly tokens obtained from training captions). 
For each key, the corresponding value is the integer that the token is mapped to in the pre-processing step.\n\nUse the code cell below to view a subset of this dictionary.", "_____no_output_____" ] ], [ [ "# Preview the word2idx dictionary.\ndict(list(data_loader.dataset.vocab.word2idx.items())[:10])", "_____no_output_____" ] ], [ [ "We also print the total number of keys.", "_____no_output_____" ] ], [ [ "# Print the total number of keys in the word2idx dictionary.\nprint('Total number of tokens in vocabulary:', len(data_loader.dataset.vocab))", "Total number of tokens in vocabulary: 8855\n" ] ], [ [ "As you will see if you examine the code in **vocabulary.py**, the `word2idx` dictionary is created by looping over the captions in the training dataset. If a token appears no less than `vocab_threshold` times in the training set, then it is added as a key to the dictionary and assigned a corresponding unique integer. You will have the option later to amend the `vocab_threshold` argument when instantiating your data loader. Note that in general, **smaller** values for `vocab_threshold` yield a **larger** number of tokens in the vocabulary. You are encouraged to check this for yourself in the next code cell by decreasing the value of `vocab_threshold` before creating a new data loader. ", "_____no_output_____" ] ], [ [ "# Modify the minimum word count threshold.\nvocab_threshold = 4\n\n# Obtain the data loader.\ndata_loader = get_loader(transform=transform_train,\n mode='train',\n batch_size=batch_size,\n vocab_threshold=vocab_threshold,\n vocab_from_file=False)", "loading annotations into memory...\nDone (t=0.90s)\ncreating index...\nindex created!\n[0/414113] Tokenizing captions...\n[100000/414113] Tokenizing captions...\n[200000/414113] Tokenizing captions...\n[300000/414113] Tokenizing captions...\n[400000/414113] Tokenizing captions...\nloading annotations into memory...\n" ], [ "# Print the total number of keys in the word2idx dictionary.\nprint('Total number of tokens in vocabulary:', len(data_loader.dataset.vocab))", "Total number of tokens in vocabulary: 9955\n" ] ], [ [ "There are also a few special keys in the `word2idx` dictionary. You are already familiar with the special start word (`\"<start>\"`) and special end word (`\"<end>\"`). There is one more special token, corresponding to unknown words (`\"<unk>\"`). All tokens that don't appear anywhere in the `word2idx` dictionary are considered unknown words. In the pre-processing step, any unknown tokens are mapped to the integer `2`.", "_____no_output_____" ] ], [ [ "unk_word = data_loader.dataset.vocab.unk_word\nprint('Special unknown word:', unk_word)\n\nprint('All unknown words are mapped to this integer:', data_loader.dataset.vocab(unk_word))", "Special unknown word: <unk>\nAll unknown words are mapped to this integer: 2\n" ] ], [ [ "Check this for yourself below, by pre-processing the provided nonsense words that never appear in the training captions. ", "_____no_output_____" ] ], [ [ "print(data_loader.dataset.vocab('jfkafejw'))\nprint(data_loader.dataset.vocab('ieowoqjf'))", "2\n2\n" ] ], [ [ "The final thing to mention is the `vocab_from_file` argument that is supplied when creating a data loader. 
To understand this argument, note that when you create a new data loader, the vocabulary (`data_loader.dataset.vocab`) is saved as a [pickle](https://docs.python.org/3/library/pickle.html) file in the project folder, with filename `vocab.pkl`.\n\nIf you are still tweaking the value of the `vocab_threshold` argument, you **must** set `vocab_from_file=False` to have your changes take effect. \n\nBut once you are happy with the value that you have chosen for the `vocab_threshold` argument, you need only run the data loader *one more time* with your chosen `vocab_threshold` to save the new vocabulary to file. Then, you can henceforth set `vocab_from_file=True` to load the vocabulary from file and speed the instantiation of the data loader. Note that building the vocabulary from scratch is the most time-consuming part of instantiating the data loader, and so you are strongly encouraged to set `vocab_from_file=True` as soon as you are able.\n\nNote that if `vocab_from_file=True`, then any supplied argument for `vocab_threshold` when instantiating the data loader is completely ignored.", "_____no_output_____" ] ], [ [ "# Obtain the data loader (from file). Note that it runs much faster than before!\ndata_loader = get_loader(transform=transform_train,\n mode='train',\n batch_size=batch_size,\n vocab_from_file=True)", "Vocabulary successfully loaded from vocab.pkl file!\nloading annotations into memory...\n" ] ], [ [ "In the next section, you will learn how to use the data loader to obtain batches of training data.", "_____no_output_____" ], [ "<a id='step2'></a>\n## Step 2: Use the Data Loader to Obtain Batches\n\nThe captions in the dataset vary greatly in length. You can see this by examining `data_loader.dataset.caption_lengths`, a Python list with one entry for each training caption (where the value stores the length of the corresponding caption). \n\nIn the code cell below, we use this list to print the total number of captions in the training data with each length. As you will see below, the majority of captions have length 10. Likewise, very short and very long captions are quite rare. 
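As an aside, the same `Counter` pattern used in the next code cell is, conceptually, all that the `vocab_threshold` logic from Step 1 needs. A rough sketch (the variable names here are hypothetical - the real logic lives in **vocabulary.py**):

```python
from collections import Counter

# counts: a hypothetical Counter mapping each token to its number of
# occurrences in the training captions (vocabulary.py builds this itself)
word2idx = {'<start>': 0, '<end>': 1, '<unk>': 2}
for token, count in counts.items():
    if count >= vocab_threshold:
        word2idx[token] = len(word2idx)
```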
", "_____no_output_____" ] ], [ [ "from collections import Counter\n\n# Tally the total number of training captions with each length.\ncounter = Counter(data_loader.dataset.caption_lengths)\nlengths = sorted(counter.items(), key=lambda pair: pair[1], reverse=True)\nfor value, count in lengths:\n print('value: %2d --- count: %5d' % (value, count))", "value: 10 --- count: 86334\nvalue: 11 --- count: 79948\nvalue: 9 --- count: 71934\nvalue: 12 --- count: 57637\nvalue: 13 --- count: 37645\nvalue: 14 --- count: 22335\nvalue: 8 --- count: 20771\nvalue: 15 --- count: 12841\nvalue: 16 --- count: 7729\nvalue: 17 --- count: 4842\nvalue: 18 --- count: 3104\nvalue: 19 --- count: 2014\nvalue: 7 --- count: 1597\nvalue: 20 --- count: 1451\nvalue: 21 --- count: 999\nvalue: 22 --- count: 683\nvalue: 23 --- count: 534\nvalue: 24 --- count: 383\nvalue: 25 --- count: 277\nvalue: 26 --- count: 215\nvalue: 27 --- count: 159\nvalue: 28 --- count: 115\nvalue: 29 --- count: 86\nvalue: 30 --- count: 58\nvalue: 31 --- count: 49\nvalue: 32 --- count: 44\nvalue: 34 --- count: 39\nvalue: 37 --- count: 32\nvalue: 33 --- count: 31\nvalue: 35 --- count: 31\nvalue: 36 --- count: 26\nvalue: 38 --- count: 18\nvalue: 39 --- count: 18\nvalue: 43 --- count: 16\nvalue: 44 --- count: 16\nvalue: 48 --- count: 12\nvalue: 45 --- count: 11\nvalue: 42 --- count: 10\nvalue: 40 --- count: 9\nvalue: 49 --- count: 9\nvalue: 46 --- count: 9\nvalue: 47 --- count: 7\nvalue: 50 --- count: 6\nvalue: 51 --- count: 6\nvalue: 41 --- count: 6\nvalue: 52 --- count: 5\nvalue: 54 --- count: 3\nvalue: 56 --- count: 2\nvalue: 6 --- count: 2\nvalue: 53 --- count: 2\nvalue: 55 --- count: 2\nvalue: 57 --- count: 1\n" ] ], [ [ "To generate batches of training data, we begin by first sampling a caption length (where the probability that any length is drawn is proportional to the number of captions with that length in the dataset). Then, we retrieve a batch of size `batch_size` of image-caption pairs, where all captions have the sampled length. This approach for assembling batches matches the procedure in [this paper](https://arxiv.org/pdf/1502.03044.pdf) and has been shown to be computationally efficient without degrading performance.\n\nRun the code cell below to generate a batch. The `get_train_indices` method in the `CoCoDataset` class first samples a caption length, and then samples `batch_size` indices corresponding to training data points with captions of that length. These indices are stored below in `indices`.\n\nThese indices are supplied to the data loader, which then is used to retrieve the corresponding data points. 
The pre-processed images and captions in the batch are stored in `images` and `captions`.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport torch.utils.data as data\n\n# Randomly sample a caption length, and sample indices with that length.\nindices = data_loader.dataset.get_train_indices()\nprint('sampled indices:', indices)\n\n# Create and assign a batch sampler to retrieve a batch with the sampled indices.\nnew_sampler = data.sampler.SubsetRandomSampler(indices=indices)\ndata_loader.batch_sampler.sampler = new_sampler\n \n# Obtain the batch.\nimages, captions = next(iter(data_loader))\n \nprint('images.shape:', images.shape)\nprint('captions.shape:', captions.shape)\n\n# (Optional) Uncomment the lines of code below to print the pre-processed images and captions.\nprint('images:', images)\nprint('captions:', captions)", "sampled indices: [67304, 38034, 113580, 75989, 58838, 406186, 134359, 112305, 56503, 347324]\nimages.shape: torch.Size([10, 3, 224, 224])\ncaptions.shape: torch.Size([10, 10])\nimages: tensor([[[[ 0.2453, -0.6452, -1.2445, ..., -0.2171, 0.6906, 0.6906],\n [ 1.1700, -0.9705, -1.4158, ..., -1.8610, -1.2959, 0.7762],\n [ 0.2967, -1.3987, -1.5014, ..., -1.9809, -1.7069, 0.1939],\n ...,\n [-0.7479, -0.8507, -0.9363, ..., -0.9877, -1.0904, -1.3473],\n [-0.8507, -0.9877, -1.0733, ..., -1.1247, -1.1418, -1.1075],\n [-1.0048, -1.0562, -1.1075, ..., -1.0562, -1.1075, -1.0904]],\n\n [[-0.5826, -1.1604, -1.5455, ..., -0.7402, -0.2675, -0.3375],\n [-0.3200, -1.4230, -1.6506, ..., -1.4230, -1.2304, -0.1625],\n [-0.7752, -1.6155, -1.7381, ..., -1.4755, -1.3704, -0.3200],\n ...,\n [-1.0903, -1.1253, -1.1779, ..., -1.1954, -1.2654, -1.4755],\n [-1.1078, -1.1954, -1.2304, ..., -1.2829, -1.3179, -1.2304],\n [-1.1954, -1.1954, -1.2304, ..., -1.2479, -1.3004, -1.2129]],\n\n [[-0.7936, -1.1770, -1.6302, ..., -0.5321, -0.8110, -1.2119],\n [-1.0898, -1.5604, -1.6476, ..., -0.3055, -0.2707, -0.7238],\n [-1.4036, -1.6650, -1.6650, ..., -0.5495, -0.3055, -0.6541],\n ...,\n [-1.2990, -1.3164, -1.3164, ..., -1.1247, -1.1421, -1.3513],\n [-1.2641, -1.3164, -1.3339, ..., -1.1596, -1.2293, -1.1247],\n [-1.2816, -1.2641, -1.2816, ..., -1.0898, -1.1770, -1.0898]]],\n\n\n [[[ 0.2111, 0.2111, 0.2111, ..., 0.3481, 0.3309, 0.3481],\n [ 0.2111, 0.2111, 0.2111, ..., 0.3652, 0.3481, 0.3481],\n [ 0.2282, 0.2282, 0.2453, ..., 0.3481, 0.3481, 0.3481],\n ...,\n [-1.2445, -1.2445, -1.1932, ..., -2.0494, -2.0152, -2.0152],\n [-1.2103, -1.1932, -1.1418, ..., -2.1179, -2.1008, -2.0837],\n [-1.7412, -1.6042, -1.3815, ..., -2.1179, -2.1179, -2.1008]],\n\n [[ 0.9230, 0.9230, 0.9230, ..., 1.0805, 1.0630, 1.0805],\n [ 0.9230, 0.9230, 0.9055, ..., 1.0980, 1.0805, 1.0805],\n [ 0.9230, 0.9230, 0.9405, ..., 1.0980, 1.0805, 1.0630],\n ...,\n [-0.8803, -0.9153, -0.8627, ..., -1.5455, -1.5105, -1.5280],\n [-0.9503, -0.9153, -0.8102, ..., -1.7731, -1.7731, -1.7556],\n [-1.6155, -1.4755, -1.2129, ..., -1.7731, -1.8081, -1.8081]],\n\n [[ 1.8557, 1.8557, 1.8557, ..., 1.9603, 1.9428, 1.9603],\n [ 1.8557, 1.8557, 1.8383, ..., 1.9603, 1.9428, 1.9603],\n [ 1.8557, 1.8557, 1.8731, ..., 1.9603, 1.9428, 1.9428],\n ...,\n [-0.2010, -0.2358, -0.1661, ..., -0.6367, -0.6018, -0.6715],\n [-0.2184, -0.1835, -0.0790, ..., -1.0724, -1.0724, -1.0724],\n [-1.0201, -0.8284, -0.4973, ..., -1.0898, -1.1073, -1.1247]]],\n\n\n [[[ 1.0159, 1.0844, 1.2385, ..., 1.3413, 1.3584, 1.3584],\n [ 0.8447, 0.9817, 1.1529, ..., 1.3070, 1.3584, 1.3413],\n [ 0.8618, 0.8961, 0.9132, ..., 1.3584, 1.4098, 1.4098],\n ...,\n [-0.4397, -0.4397, -0.3369, ..., 
0.2624, 0.2453, 0.1768],\n [-0.0287, 0.1768, 0.2967, ..., 0.0912, 0.2282, 0.2796],\n [ 0.4508, 0.4508, 0.4679, ..., 0.0398, 0.1083, 0.1768]],\n\n [[ 0.9405, 1.0980, 1.1856, ..., 1.5882, 1.5882, 1.6583],\n [ 0.8179, 0.9930, 1.1681, ..., 1.6583, 1.6583, 1.6758],\n [ 0.8179, 0.8704, 1.0455, ..., 1.6933, 1.7108, 1.6583],\n ...,\n [-0.4076, -0.3550, -0.3375, ..., 0.1702, 0.2052, 0.0826],\n [ 0.0651, 0.2227, 0.3627, ..., 0.1527, 0.1001, 0.0826],\n [ 0.6604, 0.6604, 0.5728, ..., 0.1702, 0.2227, 0.2402]],\n\n [[ 0.9319, 1.1411, 1.2108, ..., 2.0823, 2.0474, 2.0997],\n [ 0.7576, 0.9668, 1.1237, ..., 2.0474, 2.0997, 2.1520],\n [ 0.7228, 0.7054, 0.9494, ..., 2.0823, 2.1171, 2.0125],\n ...,\n [-0.2532, -0.4101, -0.2184, ..., 0.3045, 0.2522, 0.2173],\n [ 0.1651, 0.2522, 0.4439, ..., 0.1651, 0.2348, 0.3045],\n [ 0.8448, 0.8622, 0.7054, ..., 0.1825, 0.1651, 0.3045]]],\n\n\n ...,\n\n\n [[[ 0.6734, 0.7248, 0.6906, ..., 1.0844, 1.2043, 1.1187],\n [ 0.6221, 0.6563, 0.6906, ..., 1.1187, 1.2385, 1.1872],\n [ 0.6221, 0.6563, 0.6906, ..., 1.1358, 1.1700, 1.1872],\n ...,\n [-0.1314, 0.6049, 0.7077, ..., -0.5082, -0.4568, -0.5596],\n [-0.0458, 0.6221, 0.6221, ..., -0.4397, -0.3369, -0.3198],\n [ 0.2453, 0.7591, 0.5364, ..., -0.4911, -0.4397, -0.3198]],\n\n [[ 0.7304, 0.7829, 0.8354, ..., 1.2731, 1.3606, 1.2731],\n [ 0.7129, 0.7479, 0.7829, ..., 1.3081, 1.3957, 1.3606],\n [ 0.7479, 0.7829, 0.7654, ..., 1.3256, 1.3431, 1.3606],\n ...,\n [-0.0049, 0.7654, 0.6779, ..., -0.3550, -0.3025, -0.4426],\n [ 0.0651, 0.7304, 0.5203, ..., -0.2850, -0.1975, -0.1975],\n [ 0.3627, 0.8179, 0.4853, ..., -0.3200, -0.2675, -0.1975]],\n\n [[ 0.8622, 0.9145, 0.9842, ..., 1.5420, 1.4374, 1.5071],\n [ 0.8099, 0.8274, 0.8971, ..., 1.5768, 1.4374, 1.4548],\n [ 0.8971, 0.8971, 0.9145, ..., 1.5942, 1.4897, 1.4374],\n ...,\n [ 0.1476, -0.1312, 0.0082, ..., 0.0605, 0.0779, -0.0790],\n [-0.0615, -0.2532, -0.1487, ..., 0.1999, 0.1999, 0.0953],\n [-0.2010, -0.1661, -0.3055, ..., -0.0267, 0.0605, 0.1128]]],\n\n\n [[[ 1.4269, 1.4440, 1.4440, ..., -1.0390, -0.9705, -0.9363],\n [ 1.4269, 1.4269, 1.4269, ..., -0.9534, -1.0562, -0.9534],\n [ 1.4440, 1.4612, 1.4612, ..., -0.9363, -1.0219, -0.8678],\n ...,\n [ 0.2282, 0.2111, 0.2282, ..., -1.4329, -1.4329, -1.4329],\n [ 0.5022, 0.4166, 0.3823, ..., -1.4329, -1.4500, -1.4329],\n [ 0.8961, 0.8618, 0.8618, ..., -1.4500, -1.4500, -1.4500]],\n\n [[ 1.7283, 1.7283, 1.7283, ..., -0.5826, -0.4951, -0.4426],\n [ 1.7283, 1.7458, 1.7458, ..., -0.5826, -0.6702, -0.5476],\n [ 1.7108, 1.7283, 1.7283, ..., -0.5651, -0.6352, -0.3901],\n ...,\n [ 0.3102, 0.3102, 0.3102, ..., -1.2829, -1.2829, -1.2829],\n [ 0.4853, 0.4328, 0.3978, ..., -1.2829, -1.3004, -1.2829],\n [ 0.8179, 0.7829, 0.7829, ..., -1.2654, -1.3004, -1.3004]],\n\n [[ 1.9951, 1.9951, 1.9951, ..., -1.0027, -1.0376, -0.9853],\n [ 1.9951, 1.9951, 1.9951, ..., -0.9853, -1.0724, -1.0550],\n [ 1.9951, 1.9951, 2.0125, ..., -1.0376, -1.0898, -1.0550],\n ...,\n [ 0.4439, 0.4091, 0.4265, ..., -0.9678, -0.9678, -0.9678],\n [ 0.4788, 0.4614, 0.4439, ..., -0.9678, -0.9853, -0.9678],\n [ 0.7054, 0.6705, 0.6182, ..., -0.9678, -0.9853, -0.9853]]],\n\n\n [[[-1.4843, -1.4158, -1.4329, ..., -1.5185, -1.4672, -1.4158],\n [-1.6213, -1.4500, -1.4158, ..., -1.5014, -1.5699, -1.5014],\n [-1.5185, -1.5185, -1.4500, ..., -1.5357, -1.5528, -1.5699],\n ...,\n [-1.3815, -1.3473, -1.4500, ..., -1.7925, -1.7925, -1.7925],\n [-1.3644, -1.3130, -1.3815, ..., -1.8268, -1.8782, -1.8097],\n [-1.3815, -1.3473, -1.3987, ..., -1.7583, -1.8439, -1.8439]],\n\n [[-1.1779, -1.1253, -1.1429, ..., 
-1.2479, -1.1954, -1.1253],\n [-1.3004, -1.1779, -1.1078, ..., -1.2304, -1.3004, -1.2479],\n [-1.2479, -1.2304, -1.1429, ..., -1.2479, -1.2654, -1.3704],\n ...,\n [-1.0728, -1.0903, -1.2304, ..., -1.5980, -1.6155, -1.6331],\n [-1.0553, -1.0553, -1.1253, ..., -1.6681, -1.7031, -1.6331],\n [-1.0728, -1.0203, -1.1078, ..., -1.6331, -1.6681, -1.6856]],\n\n [[-0.7413, -0.6715, -0.7587, ..., -0.8807, -0.8633, -0.8110],\n [-0.9678, -0.7238, -0.7238, ..., -0.8807, -0.9853, -0.8633],\n [-0.9156, -0.8110, -0.7064, ..., -0.9504, -0.9504, -1.0027],\n ...,\n [-0.6367, -0.6890, -0.7936, ..., -1.3339, -1.3339, -1.3687],\n [-0.6018, -0.6193, -0.7761, ..., -1.3861, -1.4210, -1.3513],\n [-0.6193, -0.6193, -0.8110, ..., -1.3339, -1.4036, -1.3687]]]])\ncaptions: tensor([[ 0, 3, 399, 13, 1554, 52, 131, 508, 489,\n 1],\n [ 0, 3, 178, 6034, 21, 3, 372, 39, 46,\n 1],\n [ 0, 3, 174, 130, 900, 54, 3, 228, 33,\n 1],\n [ 0, 3, 134, 6, 8818, 7440, 417, 32, 1716,\n 1],\n [ 0, 3, 80, 13, 51, 355, 2359, 508, 18,\n 1],\n [ 0, 50, 2755, 52, 206, 508, 77, 32, 665,\n 1],\n [ 0, 3, 399, 13, 4094, 170, 192, 337, 136,\n 1],\n [ 0, 3, 584, 165, 166, 326, 1598, 50, 749,\n 1],\n [ 0, 228, 1224, 6057, 1261, 39, 3, 706, 33,\n 1],\n [ 0, 47, 327, 4884, 21, 201, 44, 6, 63,\n 1]])\n" ] ], [ [ "Each time you run the code cell above, a different caption length is sampled, and a different batch of training data is returned. Run the code cell multiple times to check this out!\n\nYou will train your model in the next notebook in this sequence (**2_Training.ipynb**). This code for generating training batches will be provided to you.\n\n> Before moving to the next notebook in the sequence (**2_Training.ipynb**), you are strongly encouraged to take the time to become very familiar with the code in **data_loader.py** and **vocabulary.py**. **Step 1** and **Step 2** of this notebook are designed to help facilitate a basic introduction and guide your understanding. However, our description is not exhaustive, and it is up to you (as part of the project) to learn how to best utilize these files to complete the project. __You should NOT amend any of the code in either *data_loader.py* or *vocabulary.py*.__\n\nIn the next steps, we focus on learning how to specify a CNN-RNN architecture in PyTorch, towards the goal of image captioning.", "_____no_output_____" ], [ "<a id='step3'></a>\n## Step 3: Experiment with the CNN Encoder\n\nRun the code cell below to import `EncoderCNN` and `DecoderRNN` from **model.py**. ", "_____no_output_____" ] ], [ [ "# Watch for any changes in model.py, and re-load it automatically.\n% load_ext autoreload\n% autoreload 2\n\n# Import EncoderCNN and DecoderRNN. \nfrom model import EncoderCNN, DecoderRNN", "_____no_output_____" ] ], [ [ "In the next code cell we define a `device` that you will use move PyTorch tensors to GPU (if CUDA is available). Run this code cell before continuing.", "_____no_output_____" ] ], [ [ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")", "_____no_output_____" ] ], [ [ "Run the code cell below to instantiate the CNN encoder in `encoder`. \n\nThe pre-processed images from the batch in **Step 2** of this notebook are then passed through the encoder, and the output is stored in `features`.", "_____no_output_____" ] ], [ [ "# Specify the dimensionality of the image embedding.\nembed_size = 256\n\n#-#-#-# Do NOT modify the code below this line. #-#-#-#\n\n# Initialize the encoder. 
(Optional: Add additional arguments if necessary.)\nencoder = EncoderCNN(embed_size)\n\n# Move the encoder to GPU if CUDA is available.\nencoder.to(device)\n \n# Move last batch of images (from Step 2) to GPU if CUDA is available. \nimages = images.to(device)\n\n# Pass the images through the encoder.\nfeatures = encoder(images)\n\nprint('type(features):', type(features))\nprint('features.shape:', features.shape)\n\n# Check that your encoder satisfies some requirements of the project! :D\nassert type(features)==torch.Tensor, \"Encoder output needs to be a PyTorch Tensor.\" \nassert (features.shape[0]==batch_size) & (features.shape[1]==embed_size), \"The shape of the encoder output is incorrect.\"", "Downloading: \"https://download.pytorch.org/models/resnet50-19c8e357.pth\" to /root/.torch/models/resnet50-19c8e357.pth\n100%|██████████| 102502400/102502400 [00:02<00:00, 43334772.33it/s]\n" ] ], [ [ "The encoder that we provide to you uses the pre-trained ResNet-50 architecture (with the final fully-connected layer removed) to extract features from a batch of pre-processed images. The output is then flattened to a vector, before being passed through a `Linear` layer to transform the feature vector to have the same size as the word embedding.\n\n![Encoder](images/encoder.png)\n\nYou are welcome (and encouraged) to amend the encoder in **model.py**, to experiment with other architectures. In particular, consider using a [different pre-trained model architecture](http://pytorch.org/docs/master/torchvision/models.html). You may also like to [add batch normalization](http://pytorch.org/docs/master/nn.html#normalization-layers). \n\n> You are **not** required to change anything about the encoder.\n\nFor this project, you **must** incorporate a pre-trained CNN into your encoder. Your `EncoderCNN` class must take `embed_size` as an input argument, which will also correspond to the dimensionality of the input to the RNN decoder that you will implement in Step 4. When you train your model in the next notebook in this sequence (**2_Training.ipynb**), you are welcome to tweak the value of `embed_size`.\n\nIf you decide to modify the `EncoderCNN` class, save **model.py** and re-execute the code cell above. If the code cell returns an assertion error, then please follow the instructions to modify your code before proceeding. The assert statements ensure that `features` is a PyTorch tensor with shape `[batch_size, embed_size]`.", "_____no_output_____" ], [ "<a id='step4'></a>\n## Step 4: Implement the RNN Decoder\n\nBefore executing the next code cell, you must write `__init__` and `forward` methods in the `DecoderRNN` class in **model.py**. (Do **not** write the `sample` method yet - you will work with this method when you reach **3_Inference.ipynb**.)\n\n> The `__init__` and `forward` methods in the `DecoderRNN` class are the only things that you **need** to modify as part of this notebook. You will write more implementations in the notebooks that appear later in the sequence.\n\nYour decoder will be an instance of the `DecoderRNN` class and must accept as input:\n- the PyTorch tensor `features` containing the embedded image features (outputted in Step 3, when the last batch of images from Step 2 was passed through `encoder`), along with\n- a PyTorch tensor corresponding to the last batch of captions (`captions`) from Step 2.\n\nNote that the way we have written the data loader should simplify your code a bit. 
In particular, every training batch will contain pre-processed captions where all have the same length (`captions.shape[1]`), so **you do not need to worry about padding**. \n> While you are encouraged to implement the decoder described in [this paper](https://arxiv.org/pdf/1411.4555.pdf), you are welcome to implement any architecture of your choosing, as long as it uses at least one RNN layer, with hidden dimension `hidden_size`. \n\nAlthough you will test the decoder using the last batch that is currently stored in the notebook, your decoder should be written to accept an arbitrary batch (of embedded image features and pre-processed captions [where all captions have the same length]) as input. \n\n![Decoder](images/decoder.png)\n\nIn the code cell below, `outputs` should be a PyTorch tensor with size `[batch_size, captions.shape[1], vocab_size]`. Your output should be designed such that `outputs[i,j,k]` contains the model's predicted score, indicating how likely the `j`-th token in the `i`-th caption in the batch is the `k`-th token in the vocabulary. In the next notebook of the sequence (**2_Training.ipynb**), we provide code to supply these scores to the [`torch.nn.CrossEntropyLoss`](http://pytorch.org/docs/master/nn.html#torch.nn.CrossEntropyLoss) optimizer in PyTorch.", "_____no_output_____" ] ], [ [ "# Specify the number of features in the hidden state of the RNN decoder.\nhidden_size = 512\n\n#-#-#-# Do NOT modify the code below this line. #-#-#-#\n\n# Store the size of the vocabulary.\nvocab_size = len(data_loader.dataset.vocab)\n\n# Initialize the decoder.\ndecoder = DecoderRNN(embed_size, hidden_size, vocab_size)\n\n# Move the decoder to GPU if CUDA is available.\ndecoder.to(device)\n \n# Move last batch of captions (from Step 1) to GPU if CUDA is available \ncaptions = captions.to(device)\n\n# Pass the encoder output and captions through the decoder.\noutputs = decoder(features, captions)\n\nprint('type(outputs):', type(outputs))\nprint('outputs.shape:', outputs.shape)\n\n# Check that your decoder satisfies some requirements of the project! :D\nassert type(outputs)==torch.Tensor, \"Decoder output needs to be a PyTorch Tensor.\"\nassert (outputs.shape[0]==batch_size) & (outputs.shape[1]==captions.shape[1]) & (outputs.shape[2]==vocab_size), \"The shape of the decoder output is incorrect.\"", "type(outputs): <class 'torch.Tensor'>\noutputs.shape: torch.Size([10, 10, 9955])\n" ] ], [ [ "When you train your model in the next notebook in this sequence (**2_Training.ipynb**), you are welcome to tweak the value of `hidden_size`.", "_____no_output_____" ] ] ]
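One possible shape for the **model.py** classes that would satisfy the assert statements in this notebook - a minimal sketch under the stated requirements (pre-trained CNN encoder; at least one RNN layer with hidden dimension `hidden_size`), not a tuned solution. Dropout, weight initialization and extra layers are left open:

```python
import torch
import torch.nn as nn
import torchvision.models as models

class EncoderCNN(nn.Module):
    def __init__(self, embed_size):
        super(EncoderCNN, self).__init__()
        resnet = models.resnet50(pretrained=True)
        for param in resnet.parameters():
            param.requires_grad_(False)          # freeze the pre-trained backbone
        modules = list(resnet.children())[:-1]   # drop the final fully-connected layer
        self.resnet = nn.Sequential(*modules)
        self.embed = nn.Linear(resnet.fc.in_features, embed_size)

    def forward(self, images):
        features = self.resnet(images)
        features = features.view(features.size(0), -1)
        return self.embed(features)              # (batch_size, embed_size)

class DecoderRNN(nn.Module):
    def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
        super(DecoderRNN, self).__init__()
        self.word_embed = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, vocab_size)

    def forward(self, features, captions):
        # drop the <end> token; the image feature acts as the first time step
        embeddings = self.word_embed(captions[:, :-1])
        inputs = torch.cat((features.unsqueeze(1), embeddings), dim=1)
        hiddens, _ = self.lstm(inputs)
        return self.fc(hiddens)                  # (batch_size, captions.shape[1], vocab_size)
```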
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
cb96c1289a17b9bcc4c5b31123e0140147a7c050
8,823
ipynb
Jupyter Notebook
examples/en/Introduction_B.ipynb
dprada/molsysmt
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
[ "MIT" ]
3
2020-06-02T03:55:52.000Z
2022-03-21T04:43:52.000Z
examples/en/Introduction_B.ipynb
dprada/molsysmt
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
[ "MIT" ]
28
2020-06-24T00:55:53.000Z
2021-07-16T22:09:19.000Z
examples/en/Introduction_B.ipynb
dprada/molsysmt
83f150bfe3cfa7603566a0ed4aed79d9b0c97f5d
[ "MIT" ]
1
2021-06-17T18:55:25.000Z
2021-06-17T18:55:25.000Z
29.607383
1,488
0.597869
[ [ [ "# Introduction", "_____no_output_____" ] ], [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "import moldynmt as mdmt\nimport os", "/home/diego/Myopt/Miniconda/miniconda3/envs/UIBCDFLab_dev/lib/python3.6/site-packages/MDAnalysis/due.py:88: UserWarning: No module named 'duecredit'\n warnings.warn(str(err))\n" ], [ "dir_examples = mdmt.utils.path_examples", "_____no_output_____" ], [ "#example_pdb_files=os.listdir(dir_examples_pdb)\n#print('Files preloaded from the PDB in the examples directory: \\n {} \\n'.format(example_pdb_files))\n#\n#example_mol2_files=os.listdir(dir_examples_mol2)\n#print('Files preloaded from the MOL2 in the examples directory: \\n {} \\n'.format(example_mol2_files))", "_____no_output_____" ], [ "#PDB_example_file = os.path.join(dir_examples_pdb,\"1li2.pdb\")\n#MOL2_example_file = os.path.join(dir_examples_mol2,\"caffeine.mol2\")", "_____no_output_____" ], [ "## Lists of engines, files and classes MolSysMT can work with.", "_____no_output_____" ], [ "#mdmt.info_forms()", "_____no_output_____" ] ], [ [ "## Loading systems", "_____no_output_____" ] ], [ [ "#mdmt.info_load(to_form='mdtraj')", "_____no_output_____" ], [ "#mdmt.info_load(from_form='xtc')", "_____no_output_____" ], [ "md_mdtraj = msmt.load(PDB_example_file,'mdtraj')\nmsmt.get_form(md_mdtraj)", "_____no_output_____" ], [ "## Getting the form of an item", "_____no_output_____" ], [ "system_unknown_form = msmt.load(PDB_example_file,'mdtraj')\nmsmt.get_form(system_unknown_form)", "_____no_output_____" ], [ "msmt.get_form(\"PDB:1sux\")", "_____no_output_____" ], [ "msmt.get_form(MOL2_example_file)", "_____no_output_____" ], [ "## Converting items", "_____no_output_____" ], [ "MOL2_file=os.path.join(dir_examples_mol2,'foo.mol2')\nmsmt.convert(PDB_example_file,MOL2_file)", "_____no_output_____" ], [ "msmt.info_convert(from_form='parmed')", "_____no_output_____" ], [ "msmt.info_convert(to_form='pdb')", "_____no_output_____" ], [ "msmt.info_convert()", "_____no_output_____" ], [ "## Selection", "_____no_output_____" ], [ "## Extract part of a molecular system", "_____no_output_____" ], [ "## Viewing molecular systems\n", "_____no_output_____" ], [ "## Structural Observables", "_____no_output_____" ], [ "mdmt.rmsd(XTC_example_file,select='name CA')", "_____no_output_____" ], [ "XTC_mdmt.load(XTC_example_file,'mdtraj')\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb96ef41ecc1515b53d13c7e0bdd100f5a5f563b
362,232
ipynb
Jupyter Notebook
05 Dot coms.ipynb
tamedia-ddj/phishtank
449f0a55fbfb13427a383233cf2e7ed8ba58986b
[ "MIT" ]
null
null
null
05 Dot coms.ipynb
tamedia-ddj/phishtank
449f0a55fbfb13427a383233cf2e7ed8ba58986b
[ "MIT" ]
null
null
null
05 Dot coms.ipynb
tamedia-ddj/phishtank
449f0a55fbfb13427a383233cf2e7ed8ba58986b
[ "MIT" ]
null
null
null
46.291629
245
0.421051
[ [ [ "Im Phishing-Register Phishtank.com sind auch Schweizer Top-Level-Domains, die von der Melani gemeldet wurden, zu finden. Welche davon waren von Phishing wirklich betroffen? Und was sagen Sie dazu, dass Sie auf Phishtank.com zu finden sind?", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport progressbar", "_____no_output_____" ], [ "import matplotlib.pyplot as plt\nimport matplotlib\nplt.style.use('fivethirtyeight')\n%matplotlib inline", "_____no_output_____" ], [ "ph = pd.read_csv('d/goverCERTphishes.csv')\nph.index = pd.to_datetime(ph['Date'], format='%b %d %Y %I:%M %p')", "_____no_output_____" ], [ "ph.info()", "<class 'pandas.core.frame.DataFrame'>\nDatetimeIndex: 16198 entries, 2016-08-03 06:36:00 to 2016-07-27 06:29:00\nData columns (total 4 columns):\nDate 16198 non-null object\nID 16198 non-null int64\nURL 16198 non-null object\nValid Phish 16198 non-null object\ndtypes: int64(1), object(3)\nmemory usage: 632.7+ KB\n" ] ], [ [ "## Finding Swiss sites", "_____no_output_____" ] ], [ [ "def com(elem):\n if \".com\" in elem:\n return True\n else:\n return False\n \nph['com check'] = ph['URL'].apply(com)\nch = ph[ph['com check']==True]", "_____no_output_____" ], [ "ch.info()", "<class 'pandas.core.frame.DataFrame'>\nDatetimeIndex: 8736 entries, 2016-08-02 07:53:00 to 2016-07-27 08:31:00\nData columns (total 5 columns):\nDate 8736 non-null object\nID 8736 non-null int64\nURL 8736 non-null object\nValid Phish 8736 non-null object\ncom check 8736 non-null bool\ndtypes: bool(1), int64(1), object(3)\nmemory usage: 349.8+ KB\n" ] ], [ [ "Finding Top Level Domain", "_____no_output_____" ] ], [ [ "def toplevel(elem):\n elem = elem.replace(\"https://\",\"\").replace(\"http://\",\"\").split(\"/\")[0]\n return elem", "_____no_output_____" ], [ "ch['toplevel'] = ch['URL'].apply(toplevel)", "/Users/barneyjs/.virtualenvs/master/lib/python3.5/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "ch", "_____no_output_____" ], [ "def endinginch(elem):\n if elem[-4:] == '.com':\n return True\n else: \n return False", "_____no_output_____" ], [ "ch['ch toplevel'] = ch['toplevel'].apply(endinginch)", "/Users/barneyjs/.virtualenvs/master/lib/python3.5/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\"\"Entry point for launching an IPython kernel.\n" ] ], [ [ "# Examining Swiss toplevel domains", "_____no_output_____" ] ], [ [ "chtl = ch[ch['ch toplevel']==True]", "_____no_output_____" ], [ "chtl.info()", "<class 'pandas.core.frame.DataFrame'>\nDatetimeIndex: 7490 entries, 2016-08-02 07:53:00 to 2016-07-27 08:31:00\nData columns (total 7 columns):\nDate 7490 non-null object\nID 7490 non-null int64\nURL 7490 non-null object\nValid Phish 7490 non-null object\ncom check 7490 non-null bool\ntoplevel 7490 non-null object\nch toplevel 7490 non-null bool\ndtypes: bool(2), int64(1), object(4)\nmemory usage: 365.7+ KB\n" ], [ "chtl[chtl['toplevel']=='www.restauranteabiss.com']", "_____no_output_____" 
], [ "chtl['toplevel'].value_counts()", "_____no_output_____" ] ], [ [ "## Grundlage.ch", "_____no_output_____" ] ], [ [ "chtl[chtl['toplevel']=='www.grundlage.ch']", "_____no_output_____" ], [ "#Links: \n#https://www.phishtank.com/phish_detail.php?phish_id=4676051\n#https://www.phishtank.com/phish_detail.php?phish_id=4648313", "_____no_output_____" ] ], [ [ "## Singleactive.ch", "_____no_output_____" ] ], [ [ "chtl[chtl['toplevel']=='dating.singleactive.ch']", "_____no_output_____" ], [ "#Links\n#https://www.phishtank.com/phish_detail.php?phish_id=4576196\n#https://www.phishtank.com/phish_detail.php?phish_id=4556999", "_____no_output_____" ] ], [ [ "## Der Bund", "_____no_output_____" ] ], [ [ "chtl[chtl['toplevel']=='der-bund.ch']", "_____no_output_____" ] ], [ [ "Domain name:\nder-bund.ch\n\nHolder of domain name:\nSchlagwort AG\nMassat Remo Reimut\nAbteilung Technik\nSchauenberg 99\nCH-7421 Summaprada - Cazis\nSwitzerland\n\nTechnical contact:\nSchlagwort AG\nMassat Remo Reimut\nAbteilung Technik\nSchauenberg 99\nCH-7421 Summaprada - Cazis\nSwitzerland\n\nRegistrar:\nEuroDNS S.A.\n\nFirst registration date:\n2008-11-07", "_____no_output_____" ], [ "## Nach Datum sortiert", "_____no_output_____" ] ], [ [ "pd.options.display.max_rows = 999\nchtl.sort_index(ascending=False)", "_____no_output_____" ], [ "#kruesi-ag.ch\n#https://www.phishtank.com/phish_detail.php?phish_id=5580598\n#http://kruesi-ag.ch/fileadmin/", "_____no_output_____" ], [ "#https://www.furtbaechler.ch/\n#https://www.phishtank.com/phish_detail.php?phish_id=5550973\n#http://www.furtbaechler.ch/europe/", "_____no_output_____" ], [ "#Lorente.ch\n#https://whois.domaintools.com/lorente.ch\n#https://www.phishtank.com/phish_detail.php?phish_id=5546192\n#http://www.lorente.ch/wildturkeyrocks/guestbook/go.php?url=https://redirect.billing.info-live.cgi-bin.webapps-mpp.home.verified-suiss.ch/iService/", "_____no_output_____" ], [ "#doctorbook24\n#https://doctorbook24.ch/assets/data/Login/\n#https://www.phishtank.com/phish_detail.php?phish_id=5261716\n#https://doctorbook24.ch/assets/data/Login/", "_____no_output_____" ], [ "#https://www.stu.ch/\n#https://www.phishtank.com/phish_detail.php?phish_id=5216948\n#https://www.stu.ch/dd.php\n#https://docs.google.com/document/d/1NAh5sciO_q2VoTLKJk6Y5O_pmCmGwXkufsDBAvzp2nU/edit", "_____no_output_____" ], [ "#http://buergisserweb.ch/\n#https://www.phishtank.com/phish_detail.php?phish_id=5089593\n#http://buergisserweb.ch/de/index.php\n#https://docs.google.com/document/d/1NAh5sciO_q2VoTLKJk6Y5O_pmCmGwXkufsDBAvzp2nU/edit", "_____no_output_____" ], [ "#lovelysilk.ch\n#https://www.phishtank.com/phish_detail.php?phish_id=4869162\n#http://lovelysilk.ch/add/dpbx/index.php, Joomla.\n#https://docs.google.com/document/d/1NAh5sciO_q2VoTLKJk6Y5O_pmCmGwXkufsDBAvzp2nU/edit\n#Metanet AG in Zürich, Hardstrasse 235 Zürich. Heidi Ullmann, Tel.: +41 79 357 88 82. Heutiger Inhaber\n#Herr Zahnd weiss nichts davon. Auch Heidi Ullmann. Wie steht es um die Metanet AG. Metanet AG sagt \n#allerdings, dass sie die Site nur registriert hat. Zuständig ist eventNET.ch: Guido Blanke. +41 71 560 5445.\n#Was sagt eventNET. 
Frau Tilia Schnarwiler, [email protected]\n# DNS: \"0848 830 740\"", "_____no_output_____" ], [ "#https://www.clubmove.ch/\n#https://www.phishtank.com/phish_detail.php?phish_id=4844761\n#http://clubmove.ch/8NPjX4N/index.php\n#https://docs.google.com/document/d/1NAh5sciO_q2VoTLKJk6Y5O_pmCmGwXkufsDBAvzp2nU/edit", "_____no_output_____" ], [ "#Gamelab.ch\n#https://www.phishtank.com/phish_detail.php?phish_id=4756603\n#http://gamelab.ch/wp-content/uploads/2pQEzvfeZT8/index.php\n##https://docs.google.com/document/d/1NAh5sciO_q2VoTLKJk6Y5O_pmCmGwXkufsDBAvzp2nU/edit", "_____no_output_____" ], [ "#teatime.ch\n#https://www.phishtank.com/phish_detail.php?phish_id=4717935\n#https://www.teatime.ch/modules/blockadvertising/2016/client_data/\n#https://docs.google.com/document/d/1NAh5sciO_q2VoTLKJk6Y5O_pmCmGwXkufsDBAvzp2nU/edit", "_____no_output_____" ], [ "#hin.ch\n#https://www.phishtank.com/phish_detail.php?phish_id=4662453\n#http://my-hin.ch/?uid=WVEJINJE\n#https://docs.google.com/document/d/1RAqrNdV-2hY01ZDEZDpKhWiL-WrfkJgHeIr7K9_aBlM/edit", "_____no_output_____" ], [ "#world-of-grappa.ch\n#http://www.world-of-grappa.ch/Googledocwwq/Googledoc1/index.html\n#https://www.phishtank.com/phish_detail.php?phish_id=2644206\n#https://www.phishtank.com/phish_detail.php?phish_id=2644206", "_____no_output_____" ] ] ]
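As an aside on the `toplevel()` helper used in the cells above: stripping the scheme prefix by string replacement works for these URLs, but the standard library's `urlparse` is more robust (it also strips ports and tolerates a missing scheme). A small hypothetical alternative, stdlib only:

```python
from urllib.parse import urlparse

def hostname(url):
    # urlparse only fills netloc when a scheme is present, so add one if missing
    if '://' not in url:
        url = 'http://' + url
    return urlparse(url).netloc.split(':')[0].lower()

# e.g. hostname('http://www.grundlage.ch/foo') -> 'www.grundlage.ch'
# a suffix check then becomes hostname(u).endswith('.com')
```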
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb971787408e84ee8260d1a867b3b600d4ca4db1
8,080
ipynb
Jupyter Notebook
.ipynb_checkpoints/RemoveSnidDuplicates-checkpoint.ipynb
Hallflower20/supernova-spectrum-analysis
e852f23b11677fdcd8c95f2df6e267bb7afd093c
[ "MIT" ]
null
null
null
.ipynb_checkpoints/RemoveSnidDuplicates-checkpoint.ipynb
Hallflower20/supernova-spectrum-analysis
e852f23b11677fdcd8c95f2df6e267bb7afd093c
[ "MIT" ]
null
null
null
.ipynb_checkpoints/RemoveSnidDuplicates-checkpoint.ipynb
Hallflower20/supernova-spectrum-analysis
e852f23b11677fdcd8c95f2df6e267bb7afd093c
[ "MIT" ]
2
2020-10-07T20:10:30.000Z
2021-05-09T23:16:36.000Z
28.754448
127
0.438861
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport glob\nfrom pathlib import Path", "_____no_output_____" ], [ "tmpl_files = glob.glob('/Users/adamamiller/Downloads/templates-2.0/*lnw')", "_____no_output_____" ], [ "for tf in tmpl_files:\n if '87A' in tf:\n print(tf)\n with open(tf) as f:\n ll = f.readlines()", "/Users/adamamiller/Downloads/templates-2.0/sn87A.lnw\n/Users/adamamiller/Downloads/templates-2.0/sn1987A.lnw\n" ] ], [ [ "### Loop over all files to find matches", "_____no_output_____" ] ], [ [ "sn_name = np.empty(len(tmpl_files)).astype(str)\nfor sn_num, tf in enumerate(tmpl_files):\n name = tf.split('/')[-1].split('.lnw')[0]\n if len(name.split('sn')) > 1:\n this_sn = name.split('sn')[1].split('_b')[0]\n if this_sn[0:2] == '19' or this_sn[0:2] == '20':\n sn_name[sn_num] = this_sn[2:]\n else:\n sn_name[sn_num] = this_sn\n else:\n sn_name[sn_num] = name", "_____no_output_____" ], [ "match_list = []\nfor snn in sn_name:\n match = np.where(sn_name == snn)\n if len(match[0]) > 1 and snn not in match_list:\n match_list.append(snn)", "_____no_output_____" ] ], [ [ "### Given 2 random files, check for matching dates and remove them", "_____no_output_____" ] ], [ [ "match = match_list[-4]\nprint(match)", "03du\n" ], [ "for match in match_list:\n\n matches = np.where(sn_name == match)\n nspec = np.empty(len(matches[0])).astype(int)\n for m_num, m in enumerate(matches[0]):\n line1 = pd.read_csv(tmpl_files[m], nrows=1, header=None, delim_whitespace=True)\n nspec[m_num] = int(line1[0])\n\n order = np.argsort(nspec)[::-1]\n\n for m_num, m in enumerate(matches[0][order]):\n line1 = pd.read_csv(tmpl_files[m], nrows=1, header=None, delim_whitespace=True)\n nskip = int(line1[4])\n line_ages = pd.read_csv(tmpl_files[m], nrows=1, \n skiprows = nskip + 2,\n header=None, delim_whitespace=True)\n\n if m_num == 0:\n ages = line_ages.iloc[0].values[1:]\n continue\n else:\n dup_idx = []\n for spec_num, age in enumerate(line_ages.iloc[0].values[1:]):\n if age in list(ages):\n dup_idx.append(spec_num)\n if len(dup_idx) > 0:\n print('warning ', sn_name[m])\n # loop to create new file\n nspec_this_sn = int(line1[0])\n\n with open(tmpl_files[m]) as tf:\n ll = tf.readlines()\n\n with open(tmpl_files[m].replace('.lnw','_new.lnw'), 'w') as tfw:\n new_line1 = '{0:>5}'.format(nspec_this_sn - len(dup_idx)) + ll[0][5:]\n print(new_line1[:-1], file=tfw)\n\n for l in ll[1:nskip+2]:\n new_line = l[0:7]\n for dup_num, di in enumerate(dup_idx):\n if dup_num == 0:\n new_line += l[7:7 + di*16]\n else: \n new_line += l[23 + dup_idx[dup_num-1]*16:7 + di*16]\n if dup_num == len(dup_idx) - 1:\n new_line += l[23 + di*16:]\n print(new_line[:-1], file=tfw)\n\n for l in ll[nskip+2:]:\n new_line = l[0:8]\n for dup_num, di in enumerate(dup_idx):\n if dup_num == 0:\n new_line += l[8:8 + di*9]\n else: \n new_line += l[17 + dup_idx[dup_num-1]*9:8 + di*9]\n if dup_num == len(dup_idx) - 1:\n new_line += l[17 + di*9:]\n print(new_line[:-1], file=tfw)\n\n Path(tmpl_files[m]).rename(tmpl_files[m].replace('templates-2.0','templates-2.0/old_with_duplicates'))\n\n\n if m_num + 1 < len(matches[0]):\n ages = np.append(ages, line_ages.iloc[0].values[1:])", "warning 01el\nwarning 04aw\nwarning 94Q\nwarning 97ef\nwarning 97dc\nwarning 05gj\nwarning 02ap\nwarning 98dt\nwarning 98S\nwarning 90U\nwarning 90B\nwarning 92H\nwarning 83V\nwarning 05hk\nwarning 99dn\nwarning 91ar\nwarning 91A\nwarning 02bo\nwarning 98T\nwarning 99ex\nwarning 87A\nwarning 93ac\nwarning 97dd\nwarning 84L\nwarning 97cy\nwarning 
97cn\nwarning 98aq\nwarning 93J\nwarning 80K\nwarning 97dq\nwarning 88L\nwarning 99em\nwarning 86G\nwarning 99di\nwarning 95F\nwarning 96cb\nwarning 00H\nwarning 98bw\nwarning 92ar\nwarning 79C\nwarning 97ei\nwarning 90N\nwarning 99aa\nwarning 97br\nwarning 99gi\nwarning 83N\nwarning 04dj\nwarning 90K\nwarning 91N\nwarning 90aa\nwarning 90I\nwarning 91M\nwarning 92A\nwarning 03cg\nwarning 04et\nwarning 06gz\nwarning 05cs\nwarning 02ic\nwarning 06bp\nwarning 03du\n" ] ] ]
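The column-removal slicing in the loop above is dense; the same idea, factored into a small hypothetical helper for fixed-width SNID template lines (the original uses 7 leading characters with 16-character age fields in the header block, and 8 leading characters with 9-character flux fields in the data block):

```python
def drop_fields(line, dup_idx, lead, width):
    """Return line with the fixed-width fields at positions dup_idx removed."""
    new_line = line[:lead]
    start = 0                      # index of the next field to keep
    for col in sorted(dup_idx):
        new_line += line[lead + start * width:lead + col * width]
        start = col + 1
    new_line += line[lead + start * width:]
    return new_line

# header lines: drop_fields(l, dup_idx, lead=7, width=16)
# data lines:   drop_fields(l, dup_idx, lead=8, width=9)
```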
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
cb972fcb46ae896966b25db46323d5a530051239
393,905
ipynb
Jupyter Notebook
Bantaba Project on Convolutional Neural Network with TensorFlow.ipynb
IndupriyaKompi/MNIST-KannadaDigits
60e067827703f408bec7669d6d7911a0c123e67b
[ "MIT" ]
null
null
null
Bantaba Project on Convolutional Neural Network with TensorFlow.ipynb
IndupriyaKompi/MNIST-KannadaDigits
60e067827703f408bec7669d6d7911a0c123e67b
[ "MIT" ]
null
null
null
Bantaba Project on Convolutional Neural Network with TensorFlow.ipynb
IndupriyaKompi/MNIST-KannadaDigits
60e067827703f408bec7669d6d7911a0c123e67b
[ "MIT" ]
null
null
null
157.751302
115,332
0.866407
[ [ [ "# INFO 7390\n# Advances in Data Science and Architecture", "_____no_output_____" ], [ "# Bantaba Project on Convolutional Neural Network Using TensorFlow\n\n![title](images/dl1.jpg)", "_____no_output_____" ], [ "# 1. Abstract\nThe focus of the project is to understand Convolutional Neural Network(CNN) as a beginner. A very easy way to explain and interpret a CNN is using classification model. In this project, we will be learning how CNN classifies Kannada digits, from 1 through 9\n\nThis is a 5 layers Sequential Convolutional Neural Network for digits recognition trained on Kannada digits dataset. I have chosen to build it with keras API (Tensorflow backend) which is very intuitive. To ensure the model did not overfit, we used Keras callbacks\n\n# 2. Table of Contents \n\n\n1. Abstract\n\n\n2. Table of Contents\n\n\n3. Acknowledgement\n\n\n4. Introduction\n\n\n5. Running the Notebook\n\n\n6. Importing packages and collecting data\n\n\n7. Data Description and preparation\n\n\n 7.1 Checking the Target Distribution\n\n\n8. Data pre-processing\n\n 8.1 Image Normalization\n\n 8.2 Reshaping the images\n\n 8.3 Splitting training and Validation Set\n\n 8.4 Label Encoding target variables\n \n 8.5 Data Augmentation\n\n\n9. Building the Neural Network\n\n 9.1 Defining and Outlining the model\n\n 9.2 Complie and Traine the model\n\n 9.3 Visualizing the Accuracy and Loss\n\n 9.4 Prediction\n \n 9.4.1 Evaluating Some Wrong Predictions\n \n 9.4.2 Evaluating Right Predictions\n\n\n10. Conclusion\n\n\n11. Citation\n\n\n12. Licensing", "_____no_output_____" ], [ "# 3. Acknowledgment\n\nI acknowledge that the kannada_MNIST dataset was taken from https://www.kaggle.com/c/Kannada-MNIST/overview/description and was created and provided by Vinay Uday Prabhu\n\nhttps://arxiv.org/abs/1908.01242\n\nhttps://vinayprabhu.github.io/", "_____no_output_____" ], [ "# 4. Introduction\n\n![title](images/KannadaNumbers.png)\n\nThere are hundreds of thousand languages world wide. Some have scripts, some don't. **Kannada** is one of the oldest languages that originated in **South India** and is still spoken by majority of the people espicially in **Karnataka**. I am a kannadiga and I can speak, read and write kannada. \n\nThe main focus of the project is to write a chapter on CNN that could be read and understood easily; especially for a beginner.\nThe sole purpose for having chosen this data set is because it is easier to understand the operation of CNNs through MNIST datasets. \n\nThe dataset has 3 parts namely, train,test and Dig-MNIST. Dig-MNIST is a real world handwritten dataset (with 10k images), that can serve as an out-of-domain test dataset.\n", "_____no_output_____" ], [ "# 5. Running the Notebook\n\nThis file was ran on **Google CoLab** with runtime set to **GPU**. For all who don't what a google colab is, here is a breif inroduction- Google Colab is a free cloud service and now it supports free GPU! You can; improve your Python programming language coding skills. develop deep learning applications using popular libraries such as Keras, TensorFlow, PyTorch, and OpenCV.\n\n1. Open **google colab** Jupyter Notebook\n2. **Rename** the file\n3. Set the runtime to **GPU** [Runtime>change runtime type>hardware accelerator>GPU]\n4. To the right side of the page, there is an option to upload files from the local system. Click on upload button and upload the zip file\n5. Run the file", "_____no_output_____" ], [ "# 6. 
Importing packages and collecting data\n\n### We will mainly use 4 libraries.\n\npandas and numpy : They're used to handle our csv files.\n\nmatplotlib & seaborn : Used for charting and plotting.\n\nsklearn : Popular ML library. We will use it for splitting our data.\n\nKeras : Popular deep learning library; we will use it to build our CNN network.", "_____no_output_____" ] ], [ [ "'''Importing data manipulation modules'''\nimport numpy as np\nimport pandas as pd\n\n'''Seaborn and Matplotlib visualization'''\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline\n\n'''Importing preprocessing libraries'''\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\n\nfrom zipfile import ZipFile\nfile_name = \"Kannada-MNIST.zip\"\n\nimport os\nimport cv2\n\n'''Display markdown formatted output like bold, italic bold etc.'''\nfrom IPython.display import Markdown\ndef bold(string):\n display(Markdown(string))", "_____no_output_____" ], [ "import tensorflow as tf \nprint(tf.__version__)\n\nfrom tensorflow.keras import layers, models\nfrom keras.models import Sequential\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.utils import to_categorical\nfrom keras.layers import Dense,Conv2D,Flatten,MaxPooling2D,Dropout,BatchNormalization\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ReduceLROnPlateau\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint", "2.2.0-rc3\n" ], [ "with ZipFile(file_name,'r') as zip:\n zip.extractall()\n print('Done')", "Done\n" ], [ "train_kannadaMnist = pd.read_csv(\"train.csv\")\ntest_kannadaMnist = pd.read_csv(\"test.csv\")\ndig_kannadaMnist = pd.read_csv(\"Dig-MNIST.csv\")", "_____no_output_____" ] ], [ [ "# 7. Data Description and Preparation", "_____no_output_____" ] ], [ [ "'''Train and test data at a glance.'''\nbold('**Preview of Train Data:**')\ndisplay(train_kannadaMnist.head(3))\nbold('**Preview of Test Data:**')\ndisplay(test_kannadaMnist.head(3))", "_____no_output_____" ], [ "'''Checking for null and missing values'''\nbold('**Train Data**')\ndisplay(train_kannadaMnist.isnull().any().describe())\nbold('**Test Data**')\ndisplay(test_kannadaMnist.isnull().any().
describe())", "_____no_output_____" ], [ "'''Seting X and Y'''\ny_train = train_kannadaMnist['label']\n\n# Drop 'label' column\nX_train = train_kannadaMnist.drop('label', axis = 1)\n\nX_test = test_kannadaMnist.drop('id', axis = 1)\n\ndig_img = dig_kannadaMnist.drop('label', axis = 1)", "_____no_output_____" ], [ "\"\"\"Let's have a final look at our data\"\"\"\nbold('**Data Dimension for Model Building:**')\nprint('Input matrix dimension:', X_train.shape)\nprint('Output vector dimension:',y_train.shape)\nprint('Test data dimension:', X_test.shape)", "_____no_output_____" ] ], [ [ "## 7.1 Checking the Target Distribution", "_____no_output_____" ] ], [ [ "'''Visualizating the taget distribution'''\nplt.figure(figsize = (8,8))\nsns.countplot(y_train, palette='cubehelix')\nplt.show()\nprint(\"Cool\")", "_____no_output_____" ], [ "images = train_kannadaMnist.iloc[:,1:].values\nimages = images.astype(np.float)\n\n# convert from [0:255] => [0.0:1.0]\nimages = np.multiply(images, 1.0 / 255.0)\n\nimage_size = images.shape[1]\nprint('image_size => {0}'.format(image_size))\n\n# in this case all images are square\nimage_width = image_height = np.ceil(np.sqrt(image_size)).astype(np.uint8)\n\nprint('image_width => {0}\\nimage_height => {1}'.format(image_width, image_height))", "image_size => 784\nimage_width => 28\nimage_height => 28\n" ], [ "'''Displaying image'''\n# display image\ndef display(img):\n \n # (784) => (28,28)\n one_image = img.reshape(image_width,image_height)\n \n plt.axis('off')\n plt.imshow(one_image, cmap='binary')\n\n# output image \ndisplay(images[4])", "_____no_output_____" ], [ "'''Converting X_train to numpy array'''\nX_train_array = X_train.to_numpy()", "_____no_output_____" ], [ "'''Displaying images'''\nn=10\nfig = plt.figure(figsize=(10,10))\n\nfor i in range(n):\n ax = fig.add_subplot(2, n, i+1, xticks=[], yticks=[])\n ax.imshow(X_train_array[i].reshape(image_width,image_height), cmap='viridis')\n ax.axis(\"off\")\n\nplt.tight_layout()\nplt.show()", "_____no_output_____" ] ], [ [ "# 8. Data Preprocessing", "_____no_output_____" ], [ "## 8.1 Image Normalization\n\nNormalize Pixel Values\nFor most image data, the pixel values are integers with values between 0 and 255.\n\nNeural networks process inputs using small weight values, and inputs with large integer values can disrupt or slow down the learning process. As such it is good practice to normalize the pixel values so that each pixel value has a value between 0 and 1.\n\nIt is valid for images to have pixel values in the range 0-1 and images can be viewed normally.\n\nThis can be achieved by dividing all pixels values by the largest pixel value; that is 255. This is performed across all channels, regardless of the actual range of pixel values that are present in the image.", "_____no_output_____" ] ], [ [ "'''Normalizing the data'''\nX_train = X_train / 255.0\nX_test = X_test / 255.0\ndig_img = dig_img/255.0", "_____no_output_____" ] ], [ [ "## 8.2 Reshaping the images\n\nTrain and test images (28 x 28) has been stock into pandas.Dataframe as 1D vectors of 784 values. We reshape all data to 28x28x1 3D matrices.\n\nKeras requires an extra dimension in the end which correspond to channels. MNIST images are gray scaled so it use only one channel. 
For RGB images, there are 3 channels, so we would have reshaped the 784px vectors to 28x28x3 3D matrices.", "_____no_output_____" ] ], [ [ "'''Reshape images in 3 dimensions (height = 28px, width = 28px, channels = 1)'''\nX_train = X_train.values.reshape(-1,28,28,1)\nX_test = X_test.values.reshape(-1,28,28,1)\ndig_img = dig_img.values.reshape(-1,28,28,1)", "_____no_output_____" ], [ "print(X_train.shape, X_test.shape, dig_img.shape)", "(60000, 28, 28, 1) (5000, 28, 28, 1) (10240, 28, 28, 1)\n" ] ], [ [ "Data reshaped into 60000 examples of height 28, width 28 and 1 channel.", "_____no_output_____" ], [ "## 8.3 Splitting Training and Validation set\n\nNow we will split our training data into train and validation sets; 10 percent of the training data will be used for validation purposes.", "_____no_output_____" ] ], [ [ "'''Set the random seed'''\nseed = 44\n'''Split the train and the validation set for the fitting'''\nX_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size = 0.1, random_state=seed)", "_____no_output_____" ] ], [ [ "## 8.4 Label encoding of Target Variable", "_____no_output_____" ] ], [ [ "'''convert class labels from scalars to one-hot vectors'''\n# 0 => [1 0 0 0 0 0 0 0 0 0]\n# 1 => [0 1 0 0 0 0 0 0 0 0]\n# ...\n# 9 => [0 0 0 0 0 0 0 0 0 1]\ny_train = tf.keras.utils.to_categorical(y_train, num_classes = 10, dtype='uint8')\n\ny_val = tf.keras.utils.to_categorical(y_val, num_classes = 10, dtype='uint8')", "_____no_output_____" ] ], [ [ "## 8.5 Data Augmentation\nTo get more training data and to avoid overfitting, data augmentation is used. Data augmentation is the creation of altered copies of each training instance (image) within a training dataset. \n\n1. Randomly rotate the images by 10 degrees\n2. Randomly zoom the images by 25%\n3. Randomly shift their height and width by 25%\n\nIn order to avoid the overfitting problem, we need to artificially expand our handwritten digit dataset, making the existing dataset even larger. The idea is to alter the training data with small transformations to reproduce the variations occurring when someone is writing a digit.\n\nFor example: the number is not centered, the scale is not the same (some write big/small numbers), the image is rotated, and so on.\n\nApproaches that alter the training data in ways that change the array representation while keeping the label the same are known as data augmentation techniques. Some popular augmentations people use are grayscales, horizontal flips, vertical flips, random crops, color jitters, translations, rotations, and much more.\n\nBy applying just a couple of these transformations to our training data, we can easily double or triple the number of training examples and create a very robust model.\n", "_____no_output_____" ] ], [ [ "# Artificially increase training set\n# note: the pixels were already scaled to [0,1] in Section 8.1, so no rescale here\ntrain_datagen = ImageDataGenerator(rotation_range=10,\n width_shift_range=0.25,\n height_shift_range=0.25,\n shear_range=0.1,\n zoom_range=0.25,\n horizontal_flip=False)\ntrain_datagen.fit(X_train)", "_____no_output_____" ] ], [ [ "# 9. CNN Model\n\n## 9.1 Defining and Outlining the model\n\nI used the Keras Sequential API, where you just add one layer at a time, starting from the input.\n\n![title](images/Cnn1.png)\n\nThe first is the ***convolutional (Conv2D) layer***. It is like a set of ***learnable filters***. I chose to set 32 filters for the first two Conv2D layers and 64 filters for the last two. 
Each filter transforms a part of the image (defined by the kernel size) using the kernel filter. The kernel filter matrix is applied across the whole image. Filters can be seen as a transformation of the image.\n\n\n![title](images/FilterGiphy.gif)\n\nThe CNN can extract features that are useful everywhere from these transformed images (feature maps).\n\n***Padding***\nWe can pad the image with an additional border, i.e., we add one pixel all around the edges. \n\n***Valid:*** It means no padding. If we are using valid padding, the output will be (n-f+1) X (n-f+1)\n***Same:*** Here, we apply padding so that the output size is the same as the input size, i.e.,\nn+2p-f+1 = n\nSo, p = (f-1)/2\n\n![title](images/Padding.png)\n\nThe second important layer in a CNN is the ***pooling (MaxPool2D) layer***. This layer simply acts as a ***downsampling filter***. It looks at each 2x2 neighborhood of pixels and picks the maximal value. These are used to reduce computational cost, and to some extent also reduce overfitting. We have to choose the pooling size (i.e. the area pooled each time): the larger the pooling dimension, the stronger the downsampling.\n\n![title](images/Pooling.png)\n\nCombining convolutional and pooling layers, CNNs are able to combine local features and learn more global features of the image.\n\n***Dropout is a regularization method***, where a proportion of nodes in the layer are randomly ignored (setting their outputs to zero) for each training sample. This randomly drops a proportion of the network and forces the network to learn features in a distributed way. This technique also improves generalization and reduces overfitting.\n\n![title](images/Dropout.png)\n\n***'relu'*** is the rectifier (activation function max(0,x)). The rectifier activation function is used to add non-linearity to the network.\n\n\nThe ***Flatten layer*** is used to convert the final feature maps into one single 1D vector. This flattening step is needed so that you can make use of fully connected layers after some convolutional/maxpool layers. It combines all the found local features of the previous convolutional layers.\n\n![title](images/Flat.png)\n\nIn the end I have used the features in two ***fully-connected (Dense) layers***, which act as an artificial neural network (ANN) classifier. In the last layer (Dense(10,activation=\"softmax\")) the net outputs a probability distribution over the classes.\n\n![title](images/Full.png)\n\nFully Connected Layer and Output Layer: Fully connected layers, or dense layers, are the same hidden layers we discussed for a simple ANN, consisting of a defined number of neurons connected with the elements of another layer. The output layer is the same, but its number of neurons depends on our task. \n\nIn summary, the architecture of a CNN consists of an input layer followed by a Conv layer. The dimensions of the Conv layer depend on the data and problem, so the dimensions change accordingly. After the Conv layer there is an activation layer, usually ReLU since it gives better results. After some Conv and ReLU combinations, a pooling layer is used to reduce the size. Then, after some combination of the previously defined layers, a flattening layer is used to flatten the input for the fully connected layers. 
After these layers, the last layer is the output layer.", "_____no_output_____" ] ], [ [ "'''Set the CNN model'''\n# CNN architecture is In -> [[Conv2D->relu]*2 -> MaxPool2D -> Dropout]*2 -> Flatten -> Dense -> Dropout -> Out\n#model = tensorflow.keras.Sequential()\nmodel = Sequential()\nmodel.add(Conv2D(32, (5, 5), activation='relu', padding='same', input_shape=(28,28,1)))\nmodel.add(Conv2D(32, (5, 5), activation='relu', padding='same'))\nmodel.add(BatchNormalization(momentum=0.15))\nmodel.add(MaxPooling2D((2, 2)))\nmodel.add(Dropout(0.15))\n \nmodel.add(Conv2D(64, (5, 5), activation='relu', padding='same'))\nmodel.add(Conv2D(64, (5, 5), activation='relu', padding='same'))\nmodel.add(BatchNormalization(momentum=0.15))\nmodel.add(MaxPooling2D((2, 2)))\nmodel.add(Dropout(0.25))\n \nmodel.add(Flatten())\nmodel.add(Dense(128, activation = \"relu\"))\nmodel.add(Dropout(0.40))\nmodel.add(Dense(64, activation = \"relu\"))\nmodel.add(Dropout(0.40))\nmodel.add(Dense(10, activation = \"softmax\"))", "_____no_output_____" ], [ "model.summary()", "Model: \"sequential_3\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d_9 (Conv2D) (None, 28, 28, 32) 832 \n_________________________________________________________________\nconv2d_10 (Conv2D) (None, 28, 28, 32) 25632 \n_________________________________________________________________\nbatch_normalization_3 (Batch (None, 28, 28, 32) 128 \n_________________________________________________________________\nmax_pooling2d_5 (MaxPooling2 (None, 14, 14, 32) 0 \n_________________________________________________________________\ndropout_9 (Dropout) (None, 14, 14, 32) 0 \n_________________________________________________________________\nconv2d_11 (Conv2D) (None, 14, 14, 64) 51264 \n_________________________________________________________________\nconv2d_12 (Conv2D) (None, 14, 14, 64) 102464 \n_________________________________________________________________\nbatch_normalization_4 (Batch (None, 14, 14, 64) 256 \n_________________________________________________________________\nmax_pooling2d_6 (MaxPooling2 (None, 7, 7, 64) 0 \n_________________________________________________________________\ndropout_10 (Dropout) (None, 7, 7, 64) 0 \n_________________________________________________________________\nflatten_3 (Flatten) (None, 3136) 0 \n_________________________________________________________________\ndense_7 (Dense) (None, 128) 401536 \n_________________________________________________________________\ndropout_11 (Dropout) (None, 128) 0 \n_________________________________________________________________\ndense_8 (Dense) (None, 64) 8256 \n_________________________________________________________________\ndropout_12 (Dropout) (None, 64) 0 \n_________________________________________________________________\ndense_9 (Dense) (None, 10) 650 \n=================================================================\nTotal params: 591,018\nTrainable params: 590,826\nNon-trainable params: 192\n_________________________________________________________________\n" ] ], [ [ "# 9.2 Compile and Train\n\nIn simpler terms, ***optimizers*** shape and mold your model into its most accurate possible form by futzing with the weights. 
The loss function is the guide to the terrain, telling the optimizer when it’s moving in the right or wrong direction.", "_____no_output_____" ] ], [ [ "model.compile(optimizer=\"adam\", loss=['categorical_crossentropy'], metrics=['accuracy'])", "_____no_output_____" ] ], [ [ "A ***callback*** is a set of functions to be applied at given stages of the training procedure. You can use callbacks to get a view on internal states and statistics of the model during training.\nKeras callbacks can help you fix bugs more quickly, and can help you build better models. They can help you visualize how your model’s training is going, and can even help prevent overfitting by implementing early stopping or customizing the learning rate on each iteration.\n\nHere we use 2 callback functions:\n\n***Early Stopping*** - One technique to reduce overfitting in neural networks is to use early stopping. Early stopping prevents overtraining of the model by terminating the training process if it’s not really learning anything.\n\n***Learning Rate Reduction*** - The learning rate determines the size of the steps taken during the gradient descent process.\nWith the ***ReduceLROnPlateau*** function from Keras.callbacks, I chose to halve the LR if the accuracy has not improved after 3 epochs.", "_____no_output_____" ] ], [ [ "# Set a learning rate annealer. The learning rate will be halved after 3 epochs if accuracy has not increased\nlearning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy', \n patience=3, \n verbose=1,\n factor=0.5, \n min_lr=0.00001)", "_____no_output_____" ] ], [ [ "### What is the Difference Between a Batch and an Epoch in a Neural Network?\n\nThe training process will run for a fixed number of iterations through the dataset, called epochs, which we must specify using the epochs argument. 
We must also set the number of dataset rows that are considered before the model weights are updated within each epoch, called the batch size and set using the batch_size argument.", "_____no_output_____" ] ], [ [ "batch_size=75\nepochs = 50\n#num_classes = 10\n#learning_rate = 0.001", "_____no_output_____" ], [ "filepath = \"model.h5\"\n\nearlystopper = EarlyStopping(patience=10, verbose=1)\n\ncheckpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, \n save_best_only=True, mode='min')\n\ncallbacks_list = [earlystopper, checkpoint]\n\nhistory = model.fit(X_train, y_train, batch_size = batch_size , epochs = epochs, \n validation_data=(X_val, y_val),\n verbose=1,\n callbacks=callbacks_list)", "Train on 54000 samples, validate on 6000 samples\nEpoch 1/50\n54000/54000 [==============================] - 549s 10ms/step - loss: 0.2827 - accuracy: 0.9133 - val_loss: 0.0465 - val_accuracy: 0.9858\n\nEpoch 00001: val_loss improved from inf to 0.04651, saving model to model.h5\nEpoch 2/50\n54000/54000 [==============================] - 551s 10ms/step - loss: 0.0808 - accuracy: 0.9799 - val_loss: 0.0269 - val_accuracy: 0.9933\n\nEpoch 00002: val_loss improved from 0.04651 to 0.02691, saving model to model.h5\nEpoch 3/50\n54000/54000 [==============================] - 547s 10ms/step - loss: 0.0535 - accuracy: 0.9872 - val_loss: 0.0217 - val_accuracy: 0.9938\n\nEpoch 00003: val_loss improved from 0.02691 to 0.02167, saving model to model.h5\nEpoch 4/50\n54000/54000 [==============================] - 551s 10ms/step - loss: 0.0429 - accuracy: 0.9888 - val_loss: 0.0207 - val_accuracy: 0.9942\n\nEpoch 00004: val_loss improved from 0.02167 to 0.02072, saving model to model.h5\nEpoch 5/50\n54000/54000 [==============================] - 552s 10ms/step - loss: 0.0400 - accuracy: 0.9903 - val_loss: 0.0275 - val_accuracy: 0.9930\n\nEpoch 00005: val_loss did not improve from 0.02072\nEpoch 6/50\n54000/54000 [==============================] - 554s 10ms/step - loss: 0.0348 - accuracy: 0.9912 - val_loss: 0.0239 - val_accuracy: 0.9935\n\nEpoch 00006: val_loss did not improve from 0.02072\nEpoch 7/50\n54000/54000 [==============================] - 557s 10ms/step - loss: 0.0319 - accuracy: 0.9919 - val_loss: 0.0162 - val_accuracy: 0.9957\n\nEpoch 00007: val_loss improved from 0.02072 to 0.01616, saving model to model.h5\nEpoch 8/50\n54000/54000 [==============================] - 555s 10ms/step - loss: 0.0294 - accuracy: 0.9929 - val_loss: 0.0161 - val_accuracy: 0.9953\n\nEpoch 00008: val_loss improved from 0.01616 to 0.01609, saving model to model.h5\nEpoch 9/50\n54000/54000 [==============================] - 553s 10ms/step - loss: 0.0269 - accuracy: 0.9936 - val_loss: 0.0179 - val_accuracy: 0.9960\n\nEpoch 00009: val_loss did not improve from 0.01609\nEpoch 10/50\n54000/54000 [==============================] - 552s 10ms/step - loss: 0.0222 - accuracy: 0.9945 - val_loss: 0.0153 - val_accuracy: 0.9963\n\nEpoch 00010: val_loss improved from 0.01609 to 0.01529, saving model to model.h5\nEpoch 11/50\n54000/54000 [==============================] - 552s 10ms/step - loss: 0.0239 - accuracy: 0.9939 - val_loss: 0.0185 - val_accuracy: 0.9947\n\nEpoch 00011: val_loss did not improve from 0.01529\nEpoch 12/50\n54000/54000 [==============================] - 551s 10ms/step - loss: 0.0230 - accuracy: 0.9940 - val_loss: 0.0159 - val_accuracy: 0.9963\n\nEpoch 00012: val_loss did not improve from 0.01529\nEpoch 13/50\n54000/54000 [==============================] - 555s 10ms/step - loss: 0.0219 - accuracy: 0.9949 - val_loss: 
0.0165 - val_accuracy: 0.9967\n\nEpoch 00013: val_loss did not improve from 0.01529\nEpoch 14/50\n54000/54000 [==============================] - 549s 10ms/step - loss: 0.0176 - accuracy: 0.9952 - val_loss: 0.0197 - val_accuracy: 0.9952\n\nEpoch 00014: val_loss did not improve from 0.01529\nEpoch 15/50\n54000/54000 [==============================] - 551s 10ms/step - loss: 0.0172 - accuracy: 0.9955 - val_loss: 0.0184 - val_accuracy: 0.9963\n\nEpoch 00015: val_loss did not improve from 0.01529\nEpoch 16/50\n54000/54000 [==============================] - 551s 10ms/step - loss: 0.0151 - accuracy: 0.9963 - val_loss: 0.0174 - val_accuracy: 0.9963\n\nEpoch 00016: val_loss did not improve from 0.01529\nEpoch 17/50\n54000/54000 [==============================] - 552s 10ms/step - loss: 0.0140 - accuracy: 0.9964 - val_loss: 0.0193 - val_accuracy: 0.9963\n\nEpoch 00017: val_loss did not improve from 0.01529\nEpoch 18/50\n54000/54000 [==============================] - 552s 10ms/step - loss: 0.0155 - accuracy: 0.9960 - val_loss: 0.0175 - val_accuracy: 0.9963\n\nEpoch 00018: val_loss did not improve from 0.01529\nEpoch 19/50\n54000/54000 [==============================] - 552s 10ms/step - loss: 0.0142 - accuracy: 0.9965 - val_loss: 0.0152 - val_accuracy: 0.9970\n\nEpoch 00019: val_loss improved from 0.01529 to 0.01524, saving model to model.h5\nEpoch 20/50\n54000/54000 [==============================] - 560s 10ms/step - loss: 0.0127 - accuracy: 0.9965 - val_loss: 0.0222 - val_accuracy: 0.9970\n\nEpoch 00020: val_loss did not improve from 0.01524\nEpoch 21/50\n54000/54000 [==============================] - 558s 10ms/step - loss: 0.0157 - accuracy: 0.9964 - val_loss: 0.0239 - val_accuracy: 0.9965\n\nEpoch 00021: val_loss did not improve from 0.01524\nEpoch 22/50\n54000/54000 [==============================] - 557s 10ms/step - loss: 0.0130 - accuracy: 0.9966 - val_loss: 0.0203 - val_accuracy: 0.9968\n\nEpoch 00022: val_loss did not improve from 0.01524\nEpoch 23/50\n54000/54000 [==============================] - 556s 10ms/step - loss: 0.0122 - accuracy: 0.9971 - val_loss: 0.0212 - val_accuracy: 0.9967\n\nEpoch 00023: val_loss did not improve from 0.01524\nEpoch 24/50\n54000/54000 [==============================] - 557s 10ms/step - loss: 0.0124 - accuracy: 0.9968 - val_loss: 0.0243 - val_accuracy: 0.9967\n\nEpoch 00024: val_loss did not improve from 0.01524\nEpoch 25/50\n54000/54000 [==============================] - 553s 10ms/step - loss: 0.0114 - accuracy: 0.9975 - val_loss: 0.0293 - val_accuracy: 0.9960\n\nEpoch 00025: val_loss did not improve from 0.01524\nEpoch 26/50\n54000/54000 [==============================] - 559s 10ms/step - loss: 0.0092 - accuracy: 0.9974 - val_loss: 0.0207 - val_accuracy: 0.9967\n\nEpoch 00026: val_loss did not improve from 0.01524\nEpoch 27/50\n54000/54000 [==============================] - 558s 10ms/step - loss: 0.0100 - accuracy: 0.9975 - val_loss: 0.0225 - val_accuracy: 0.9973\n\nEpoch 00027: val_loss did not improve from 0.01524\nEpoch 28/50\n54000/54000 [==============================] - 559s 10ms/step - loss: 0.0098 - accuracy: 0.9978 - val_loss: 0.0337 - val_accuracy: 0.9968\n\nEpoch 00028: val_loss did not improve from 0.01524\nEpoch 29/50\n54000/54000 [==============================] - 559s 10ms/step - loss: 0.0137 - accuracy: 0.9970 - val_loss: 0.0259 - val_accuracy: 0.9972\n\nEpoch 00029: val_loss did not improve from 0.01524\nEpoch 00029: early stopping\n" ] ], [ [ "## 9.3 Visulizing Accuracy and Loss", "_____no_output_____" ] ], [ [ 
"plt.plot(history.history['accuracy'])\nplt.plot(history.history['val_accuracy'])\nplt.title('Model accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Validation'], loc='upper left')\nplt.show()\n\n# Plot training & validation loss values\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Validation'], loc='upper left')\nplt.show()", "_____no_output_____" ], [ "# Predict the values from the validation dataset\nY_pred = model.predict(X_val)\n# Convert predictions classes to one hot vectors \nY_pred_classes = np.argmax(Y_pred,axis = 1) \n# Convert validation observations to one hot vectors\nY_true = np.argmax(y_val,axis = 1) ", "_____no_output_____" ], [ "'''confusion matrix'''\n# compute the confusion matrix\nconfusion_mtx = confusion_matrix(Y_true, Y_pred_classes) \n# plot the confusion matrix\nf,ax = plt.subplots(figsize=(8, 8))\nsns.heatmap(confusion_mtx, annot=True, linewidths=0.01,cmap=\"Greens\",linecolor=\"gray\", fmt= '.1f',ax=ax)\nplt.xlabel(\"Predicted Label\")\nplt.ylabel(\"True Label\")\nplt.title(\"Confusion Matrix\")\nplt.show()", "_____no_output_____" ] ], [ [ "## 9.4 Making Predictions", "_____no_output_____" ] ], [ [ "'''predict results'''\npred_dig = model.predict(dig_img)\ndig_kannadaMnist['pred'] = np.argmax(pred_dig, axis=1)\n\n#'''select the indix with the maximum probability'''\n#results = np.argmax(results,axis = 1)", "_____no_output_____" ], [ "# look at those that were classified wrongly in X_dig\ndig_kannadaMnist['correct'] = dig_kannadaMnist['label'] - dig_kannadaMnist['pred']\nerrors = dig_kannadaMnist[dig_kannadaMnist['correct'] != 0]\nerror_list = errors.index\nprint('Number of errors is ', len(errors))\nprint('The indices are ', error_list)", "Number of errors is 1894\nThe indices are Int64Index([ 17, 23, 27, 37, 57, 67, 87, 97, 106,\n 117,\n ...\n 10217, 10218, 10219, 10221, 10224, 10226, 10227, 10228, 10229,\n 10234],\n dtype='int64', length=1894)\n" ] ], [ [ "## 9.4.1 Some of the wrong predictions for dig dataset:", "_____no_output_____" ], [ "### Further Investigation..\nWe will go on to see some of the misclassified images.We will simply inspect them to understand if it was a tough one to predict or not.Let's see...", "_____no_output_____" ] ], [ [ "# plot images of some of the wrong predictions for X_dig\nplt.figure(figsize=(15,10))\nfor i in range(40):\n plt.subplot(6, 10, i+1)\n plt.imshow(dig_img[error_list[i]].reshape((28,28)),cmap=plt.cm.binary)\n plt.title(\"true={}\\npredict={}\".format(dig_kannadaMnist['label'][error_list[i]], \n dig_kannadaMnist['pred'][error_list[i]]), y=0.9)\n plt.axis('off')\nplt.subplots_adjust(wspace=0.3, hspace=-0.1)\nplt.show()", "_____no_output_____" ] ], [ [ "Looking at those that were predicted wrongly, there are quite several difficult and ambiguous ones.", "_____no_output_____" ], [ "## 9.4.2 Correct Predictions", "_____no_output_____" ], [ "**Some examples of predictions made**", "_____no_output_____" ] ], [ [ "# predict on test set\npredictions = model.predict(X_test)\nprint(predictions.shape)", "(5000, 10)\n" ], [ "# set the predicted labels to be the one with the highest probability\npredicted_labels = np.argmax(predictions, axis=1)", "_____no_output_____" ], [ "# look at some of the predictions for test_X\nplt.figure(figsize=(15,6))\nfor i in range(40): \n plt.subplot(4, 10, i+1)\n plt.imshow(X_test[i].reshape((28,28)),cmap=plt.cm.binary)\n plt.title(\"predict=%d\" % 
predicted_labels[i],y=0.9)\n plt.axis('off')\nplt.subplots_adjust(wspace=0.3, hspace=-0.1)\nplt.show()", "_____no_output_____" ] ], [ [ "**My eyeball reading of the prediction is:**\n\n3 0 2 6 7 7 1 9 3 4\n\n8 8 1 7 8 1 5 1 5 9 \n\n3 7 6 0 2 0 8 7 0 0 \n\n8 9 2 3 2 4 6 0 7 8\n\n**Looks reasonable.**\n\n***I am a Kannadiga (native Kannada speaker) and I am fluent in reading, writing and speaking this language.***\n", "_____no_output_____" ], [ "# 10. Conclusion\n\nThe above model works pretty well on the training, validation and test datasets. The notebook is designed in such a way that it can be easily understood by a newbie (beginner). I believe that I have covered and explained all the concepts of Convolutional Neural Networks. \n\nFurther, the model can be applied to a whole new image which was not previously seen/learnt by the model.", "_____no_output_____" ], [ "# 11. Author\n\nThis notebook was created by ***Indupriya Kompi Sadasivappa***, a currently enrolled student at ***Northeastern University, Boston***, as a contribution to ***Bantaba Projects*** in the month of ***April, 2020***, under the guidance and supervision of ***Nicholas Brown***, Assistant Teaching Professor, Multidisciplinary Graduate Engineering Programs at ***Northeastern University, Boston***\n\[email protected]\n\nBearing **NUID 001051831**\n\n![title](images/tq.gif)\n", "_____no_output_____" ], [ "# 12. Citation\n\n1. https://www.kaggle.com/c/Kannada-MNIST\n2. https://www.kaggle.com/kaushal2896/kannada-mnist-using-cnn/notebook\n3. https://www.kaggle.com/shahules/indian-way-to-learn-cnn\n4. https://towardsdatascience.com/\n5. Pictures are taken from https://www.google.com/imghp?hl=en; a few of the images/giphys are snipped from https://towardsdatascience.com/\n", "_____no_output_____" ], [ "# 13. Licensing\nCopyright 2020 Indupriya Kompi Sadasivappa\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.", "_____no_output_____" ] ] ]
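As a quick numerical check of the batch/epoch discussion in Section 9.2 of the notebook above (an illustrative calculation using the sizes reported by its training run):

```python
import math

n_train, batch_size = 54000, 75           # values from the training log above
steps_per_epoch = math.ceil(n_train / batch_size)
print(steps_per_epoch)                    # 720 weight updates per epoch
print(steps_per_epoch * 29)               # 20880 updates before early stopping at epoch 29
```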
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cb973261f303a39cbd137d0bac9fa5d48f746552
99,214
ipynb
Jupyter Notebook
preprocessing/graph_preprocessing.ipynb
Vloods/TransTTE_demo
e7493b1a548d2ff1573cbdeacb05bbdb82fc3b2f
[ "MIT" ]
null
null
null
preprocessing/graph_preprocessing.ipynb
Vloods/TransTTE_demo
e7493b1a548d2ff1573cbdeacb05bbdb82fc3b2f
[ "MIT" ]
null
null
null
preprocessing/graph_preprocessing.ipynb
Vloods/TransTTE_demo
e7493b1a548d2ff1573cbdeacb05bbdb82fc3b2f
[ "MIT" ]
null
null
null
31.446593
3,026
0.541305
[ [ [ "import multiprocessing as mp\nimport psutil\nimport pickle\nimport time\nimport sys\n\nimport pandas as pd\nimport numpy as np\nfrom tqdm.notebook import tqdm", "_____no_output_____" ], [ "# from google.colab import drive\n# drive.mount('/content/drive')", "Mounted at /content/drive\n" ], [ "# !xz -d -v Moscow_mkad.osm.xz", "Moscow_mkad.osm.xz (1/1)\nxz: Moscow_mkad.osm.xz: No such file or directory\n" ], [ "# парсинг XML\nimport xml.etree.ElementTree as ET\ntree = ET.parse('./Moscow_mkad.osm')\nroot=tree.getroot()\nelement = root[0]", "_____no_output_____" ], [ "\"\"\"\nСоздание матрицы высота\nМинимум: 75\nМаксимум: 317\nГенерация np матрицы высот\n\"\"\"\ncell_size = 0.00083333333333333\nlat_start = 55.0\nlon_start = 35.0\n\nmatrix_of_heights = []\nwith open('./srtm_44_01.asc') as f:\n data_str = f.readlines()\n\nfor row in data_str[6:]:\n row_list = list(map(int, row.split()))\n matrix_of_heights.append(row_list)\n\ndel row_list\ndel data_str\n\nmatrix_of_heights = np.array(matrix_of_heights, np.ushort)\n", "_____no_output_____" ], [ "def haversine_np(lon1, lat1, lon2, lat2):\n \"\"\"\n Calculate the great circle distance between two points\n on the earth (specified in decimal degrees)\n\n All args must be of equal length. \n\n \"\"\"\n lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2\n\n c = 2 * np.arcsin(np.sqrt(a))\n m = 6367 * c * 1000\n return m\n\n\ndef get_height(lat: float, lon: float) -> float:\n \"\"\"\n Получение высоты над уровнем моря по координатам\n :param lat: Долгота точки\n :param lon: Широта точки\n :return: Высота над уровнем моря\n \"\"\"\n semi_x = (lat - lat_start) / cell_size\n semi_y = (lon - lon_start) / cell_size\n if semi_x > 6000 or semi_y > 6000:\n raise Exception('Out of bounds, not in Moscow')\n\n points = [matrix_of_heights[int(np.floor(semi_x)), int(np.floor(semi_y))],\n matrix_of_heights[int(np.floor(semi_x)), int(np.ceil(semi_y))],\n matrix_of_heights[int(np.ceil(semi_x)), int(np.floor(semi_y))],\n matrix_of_heights[int(np.ceil(semi_x)), int(np.ceil(semi_y))]]\n floor_lat = np.floor(semi_x) * cell_size + lat_start\n floor_lon = np.floor(semi_y) * cell_size + lon_start\n ceil_lat = np.ceil(semi_x) * cell_size + lat_start\n ceil_lon = np.ceil(semi_y) * cell_size + lon_start\n coordinates = [[floor_lat, floor_lon], [floor_lat, ceil_lon], [ceil_lat, floor_lon],\n [ceil_lat, ceil_lon]]\n idx_min, min_ = 0, 200\n for idx, point in enumerate(coordinates):\n dist_ = haversine_np(lon, lat, point[1], point[0])\n if dist_ < min_:\n min_ = dist_\n idx_min = idx\n triangle = [points[idx_min-1], points[idx_min], points[(idx_min+1)%4]]\n return sum(triangle)/3\n\n\nget_height(55.7558, 37.6173)", "_____no_output_____" ], [ "\"\"\"\n Генерация листа эджей формата \n [{\"nodes\": [], \"highway\":\"\", \"surface\":\"asphalt\", \"lanes\":1, \"width\":2}]\n А также листа нод формата [{id, lat, lon}]\n\"\"\"\nnodes = []\nedges = []\nfor i in tqdm(range(1, len(root))):\n obj = root[i]\n if obj.tag == 'node':\n nodes.append({'id': obj.attrib['id'], \n 'lat': obj.attrib['lat'], 'lon': obj.attrib['lon']})\n if obj.tag == 'way':\n tmp = {\"nodes\": [], \"highway\":\"\", \"surface\":\"asphalt\", \"lanes\":1, \n \"width\":2}\n ok = False\n for child in obj:\n if child.tag == 'nd':\n tmp['nodes'].append(child.attrib[\"ref\"])\n if child.tag == 'tag':\n if child.attrib['k'] == 'highway':\n ok = True\n if child.attrib['k'] in tmp:\n 
tmp[child.attrib['k']] = child.attrib['v']\n        if ok and tmp['highway'] in ['pedestrian', 'bridleway', 'cycleway', \n                                     'footway', 'living_street', 'path', \n                                     'steps', 'residential', 'service']:\n            edges.append(tmp)\n\nprint(f'Edges: {len(edges):,}, Nodes: {len(nodes):,}')", "_____no_output_____" ], [ "# delete the XML parser\ndel tree\ndel root", "_____no_output_____" ], [ "# dictionary mapping node id to its coordinates\nid2cor = {}\n\nfor i in nodes:\n    id2cor[int(i['id'])] = {'lat': float(i['lat']), 'lon': float(i['lon'])}", "_____no_output_____" ], [ "# Generate the list of edge dicts\n# Slow!\ncount = 0\nedge_list = []\nfor edge in tqdm(edges):\n    for i in range(len(edge['nodes'])-1):\n        node_1 = id2cor[edge['nodes'][i]]\n        node_2 = id2cor[edge['nodes'][i+1]]\n        lon1 = float(node_1['lon'])\n        lat1 = float(node_1['lat'])\n        lon2 = float(node_2['lon'])\n        lat2 = float(node_2['lat']) \n        dist = haversine_np(lon1, lat1, lon2, lat2)\n        hight1 = get_height(lat1, lon1)\n        hight2 = get_height(lat2, lon2)\n        if isinstance(edge['width'], str):\n            try:\n                width_f = float(edge['width'])\n            except ValueError:\n                width_f = 1\n        else:\n            width_f = float(edge['width'])\n\n        edge_list.append({\"edge_id\":count, \"id1\":int(edge['nodes'][i]), \n                          \"id2\":int(edge['nodes'][i+1]), \"dist\": dist, \n                          \"highway\": edge['highway'], \"surface\":edge['surface'], \n                          \"lanes\": int(edge['lanes']), \"width\":width_f, \n                          'hight1': hight1, 'hight2': hight2})\n        count += 1\n    \nedge_list[22:25]", "_____no_output_____" ], [ "import pandas as pd\ngraph = pd.DataFrame(edge_list, columns=edge_list[0].keys())\ngraph.to_csv('edge_list.csv', index=False)\n\ngraph.head()", "_____no_output_____" ], [ "# Generate the edge location (midpoint of its two nodes)\nedge_cors = []\nfor i in tqdm(range(len(graph))):\n    cor1 = np.array([id2cor[graph.id1.iloc[i]]['lat'], id2cor[graph.id1.iloc[i]]['lon']])\n    cor2 = np.array([id2cor[graph.id2.iloc[i]]['lat'], id2cor[graph.id2.iloc[i]]['lon']])\n    cor = (np.sum([cor1, cor2], axis=0)) / 2\n    edge_cors.append(cor)", "_____no_output_____" ], [ "edge_cors[:5]", "_____no_output_____" ], [ "# Add the edge location to the graph\ngraph[\"lat\"] = np.array(edge_cors)[:, 0]\ngraph[\"lon\"] = np.array(edge_cors)[:, 1]\n\ngraph.head()", "_____no_output_____" ], [ "graph.to_csv(\"edge_with_location.csv\")", "_____no_output_____" ], [ "tmp_graph = graph.drop(columns=['highway', 'surface', 'width', 'hight1', 'hight2', 'dist', 'lanes'])\ngraph_np = tmp_graph.values\ndel tmp_graph\n\ngraph_np[:2], len(graph_np)", "_____no_output_____" ], [ "# uint\n# max id: 8912249766\n# min id: 27717690\nfast_graph_np = np.genfromtxt('fast_graph.csv', dtype=np.uint, delimiter=',')[1:]\n# fast_graph_np = []\n# for edge in tqdm(graph_np):\n#     fast_graph_np.append([edge[0], int(edge[1]), int(edge[2])])\n\n# fast_graph_np = np.array(fast_graph_np, dtype=np.uint)\nfast_graph_np[:5]", "_____no_output_____" ], [ "fast_graph_df = pd.DataFrame(data=fast_graph_np)\nfast_graph_df.to_csv('fast_graph.csv', index=False)\nfast_graph_np[:,1].min(), fast_graph_np[:,1].max(), fast_graph_np[:,2].min(), fast_graph_np[:,2].max()", "_____no_output_____" ], [ "from tqdm import tqdm\nsize = len(fast_graph_np)+1\nworkers_count = mp.cpu_count() - 10\npart_size = len(fast_graph_np[:size])//workers_count\n\nworkers_count\nprint(part_size, mp.cpu_count(), workers_count, len(fast_graph_np[:size]))\n\n\ndef spawn():\n    dataframe_list = []\n    procs = list()\n    \n    manager = mp.Manager()\n    return_list = manager.list()\n    \n    for cpu in range(workers_count):\n        up_border = part_size*(cpu+1)\n        p = mp.Process(target=run_child, 
args=(up_border, return_list))\n p.start()\n procs.append(p)\n for idx, p in enumerate(procs):\n p.join()\n print('Done: {}'.format(idx), end=' ')\n \n return return_list\n\nreturn_list = spawn()", "11537 96 86 992193\n" ], [ "def run_child(up_border, return_list):\n# print('Run child: ', up_border, end='\\t')\n adj = []\n\n for i in tqdm(range(up_border - part_size, up_border)):\n a = fast_graph_np[fast_graph_np[:,2] == fast_graph_np[i,1]][:, 0]\n b = fast_graph_np[fast_graph_np[:,1] == fast_graph_np[i,1]][:, 0].tolist() \n b.remove(i)\n df = b + a.tolist() \n adj.append(df)\n\n \n return_list.extend(adj)\n", "_____no_output_____" ], [ "adj = list(return_list) \nlen(adj)", "_____no_output_____" ], [ "# import pickle\n\n# with open('adj.pkl', 'wb') as fp:\n# pickle.dump(adj, fp, protocol=pickle.HIGHEST_PROTOCOL)\nadj[:10]", "_____no_output_____" ], [ "# adj_str = cast_to_string(return_list)\nadj_df = pd.DataFrame(data=adj)", "_____no_output_____" ], [ "adj_df.to_csv('duo_edge.csv', index=False)", "_____no_output_____" ], [ "def cast_to_string(routes_property):\n return [str(int).replace(\"[\", \"\").replace(\"]\", \"\") for int in routes_property]", "_____no_output_____" ], [ "adj_str = cast_to_string(adj)\nadj_df = pd.DataFrame(adj_str, columns = [\"adjacent\"])", "_____no_output_____" ], [ "import pickle\n\nwith open('nodes.pkl', 'wb') as fp:\n pickle.dump(nodes, fp, protocol=pickle.HIGHEST_PROTOCOL)", "_____no_output_____" ], [ "import pickle\n\nwith open('edges.pkl', 'wb') as fp:\n pickle.dump(edges, fp, protocol=pickle.HIGHEST_PROTOCOL)", "_____no_output_____" ], [ "count_pos = 0\ncount_neg = 0\nedge_list = []\nfor edge in edges:\n tmp = []\n for node in edge:\n if node in id2cor:\n count_pos += 1\n tmp.append(node)\n else:\n count_neg += 1\n edge_list.append(tmp)", "_____no_output_____" ], [ "count_pos", "_____no_output_____" ], [ "a = []\nfor edge in edges[102]:\n a.append({\"id\": edge, \"lat\": id2cor[edge]['lat'], \"lon\": id2cor[edge]['lon']})", "_____no_output_____" ], [ "df = pd.DataFrame(a, columns=[\"id\", \"lat\", 'lon'])\ndf.to_csv('list2.csv', index=False)", "_____no_output_____" ], [ "dist_edge = []\nfor edge in tqdm(edges):\n for i in range(len(edge)-1):\n dist = haversine_np(float(id2cor[edge[i]]['lon']), float(id2cor[edge[i]]['lat']), float(id2cor[edge[i + 1]]['lon']), float(id2cor[edge[i + 1]]['lat']))\n dist_edge.append({\"id1\": edge[i], \"id2\": edge[i + 1], 'd': dist * 1000})", "_____no_output_____" ], [ "import pickle\n\nwith open('edge_dist.pkl', 'wb') as fp:\n pickle.dump(dist_edge, fp, protocol=pickle.HIGHEST_PROTOCOL)", "_____no_output_____" ], [ "edge_list_for_kepler = []\nfor edge in tqdm(edges):\n for i in range(len(edge['nodes'])-1):\n node_1 = id2cor[edge['nodes'][i]]\n node_2 = id2cor[edge['nodes'][i+1]]\n lon1 = float(node_1['lon'])\n lat1 = float(node_1['lat'])\n lon2 = float(node_2['lon'])\n lat2 = float(node_2['lat']) \n dist = haversine_np(lon1, lat1, lon2, lat2)\n hight1 = get_height(lat1, lon1)\n hight2 = get_height(lat2, lon2)\n if isinstance(edge['width'], str):\n try:\n width_f = float(edge['width'])\n except ValueError:\n width_f = 1\n else:\n width_f = float(edge['width'])\n\n edge_list_for_kepler.append({\"dist\": dist, \"highway\": edge['highway'], \"surface\":edge['surface'], \n \"lanes\": int(edge['lanes']), \"width\":edge['width'], \n 'hight1': hight1, 'hight2': hight2, 'lat1': lat1, 'lon1': lon1, \n 'lat2': lat2, 'lon2': lon2})", "100%|██████████| 250642/250642 [02:07<00:00, 1962.75it/s]\n" ], [ "df = 
pd.DataFrame(edge_list_for_kepler, columns=edge_list_for_kepler[0].keys())\ndf.to_csv('edge_list_for_kepler.csv', index=False)", "_____no_output_____" ], [ "import pickle\n\nwith open('nodes.p', 'rb') as fp:\n nodes = pickle.load(fp)", "_____no_output_____" ], [ "edges_pickles = pickle.load(open('nodes.p', 'rb'))", "_____no_output_____" ], [ "pd.DataFrame(hist_nodes).to_csv(\"hist_nodes.csv\")", "_____no_output_____" ], [ "import json\nwith open('dtp_moskva.geojson') as fp:\n data = json.load(fp)", "_____no_output_____" ], [ "data['features'][1]['geometry']", "_____no_output_____" ], [ "len(data['features'])", "_____no_output_____" ], [ "dtp = []\nfor i in tqdm(range(len(data['features']))):\n dtp.append({\"lat\": data['features'][i]['geometry']['coordinates'][1], \"lon\":data['features'][i]['geometry']['coordinates'][0]})", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9737bd6d317f8febeefa417b3dd6448ef90184
16,695
ipynb
Jupyter Notebook
SQL-Hybrid-Cloud-Toolkit/content/provisioning/create-sqlvm.ipynb
meenal-gupta141/tigertoolbox
5c432392f7cab091121a8879ea886b39c54f519b
[ "MIT" ]
541
2019-05-07T11:41:25.000Z
2022-03-29T17:33:19.000Z
SQL-Hybrid-Cloud-Toolkit/content/provisioning/create-sqlvm.ipynb
sqlworldwide/tigertoolbox
2abcb62a09daf0116ab1ab9c9dd9317319b23297
[ "MIT" ]
89
2019-05-09T14:23:52.000Z
2022-01-13T20:21:04.000Z
SQL-Hybrid-Cloud-Toolkit/content/provisioning/create-sqlvm.ipynb
sqlworldwide/tigertoolbox
2abcb62a09daf0116ab1ab9c9dd9317319b23297
[ "MIT" ]
338
2019-05-08T05:45:16.000Z
2022-03-28T15:35:03.000Z
43.590078
489
0.481282
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb973b46f3f3e83ee0ba2c86feca3b61ceabf137
1,868
ipynb
Jupyter Notebook
embed_videos/video.ipynb
firasm/jupyter_rise_templates
8c80dc99272e3d8b84505957ea19d07b65820ef0
[ "MIT" ]
null
null
null
embed_videos/video.ipynb
firasm/jupyter_rise_templates
8c80dc99272e3d8b84505957ea19d07b65820ef0
[ "MIT" ]
null
null
null
embed_videos/video.ipynb
firasm/jupyter_rise_templates
8c80dc99272e3d8b84505957ea19d07b65820ef0
[ "MIT" ]
null
null
null
20.527473
79
0.461456
[ [ [ "from IPython.display import IFrame\nVideo(\"./download.mp4\")", "_____no_output_____" ], [ "from IPython.display import IFrame\nIFrame(\"./download.mp4\",500,500)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
cb97734e9972b162ff2b5ad526888db342de8779
420,096
ipynb
Jupyter Notebook
0finance/quant.ipynb
mayi140611/mayiexamples
221cf9e8916d81198df7355894ec59dc334ae0af
[ "Apache-2.0" ]
null
null
null
0finance/quant.ipynb
mayi140611/mayiexamples
221cf9e8916d81198df7355894ec59dc334ae0af
[ "Apache-2.0" ]
null
null
null
0finance/quant.ipynb
mayi140611/mayiexamples
221cf9e8916d81198df7355894ec59dc334ae0af
[ "Apache-2.0" ]
2
2020-03-09T12:48:07.000Z
2020-04-19T11:43:22.000Z
4,469.106383
418,324
0.96353
[ [ [ "https://mp.weixin.qq.com/s/zhJLfV5PiYFZoiHPRbk1gg", "_____no_output_____" ], [ "![image.png](attachment:image.png)", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown" ] ]
cb9787f80561febd3b9b118531eec84633c241da
104,106
ipynb
Jupyter Notebook
hcds-a1-data-curation.ipynb
Cain93/data-512-a1
d67b3eee3d81c7bfd1300a4c71db0ddcc45bc3d6
[ "MIT" ]
null
null
null
hcds-a1-data-curation.ipynb
Cain93/data-512-a1
d67b3eee3d81c7bfd1300a4c71db0ddcc45bc3d6
[ "MIT" ]
null
null
null
hcds-a1-data-curation.ipynb
Cain93/data-512-a1
d67b3eee3d81c7bfd1300a4c71db0ddcc45bc3d6
[ "MIT" ]
null
null
null
157.022624
82,376
0.85728
[ [ [ "# Stage 0: SETUP", "_____no_output_____" ], [ "The below libraries are used for this project. For a full list of requirements and versions, please see the requirements.txt file included in the repository.", "_____no_output_____" ] ], [ [ "import json\nimport requests\nimport os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.dates as mdates\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "# Stage 1: DATA ACQUISITION", "_____no_output_____" ], [ "## Overview\nData is acquired through the Wikimedia REST API and saved as json files. These files are included in the repository in the *data* folder; you made skip to Stage 2 and use the included files if desired, or skip to Stage 3 to skip processing entirely.", "_____no_output_____" ], [ "We will request data from both the [Legacy](https://wikitech.wikimedia.org/wiki/Analytics/AQS/Legacy_Pagecounts) and [Pageviews](https://wikitech.wikimedia.org/wiki/Analytics/AQS/Pageviews) API.\n\nWe define base templates for the parameters. English wikipedia with monthyl granularity will always be requested, and on the pageviews api we always request agent=user to filter out crawler and bot traffic. We also request consistent dateranges for each api", "_____no_output_____" ] ], [ [ "endpoint_legacy = 'https://wikimedia.org/api/rest_v1/metrics/legacy/pagecounts/aggregate/{project}/{site}/{granularity}/{start}/{end}'\nendpoint_pageviews = 'https://wikimedia.org/api/rest_v1/metrics/pageviews/aggregate/{project}/{site}/{agent}/{granularity}/{start}/{end}'\nparams_legacy = {\"project\" : \"en.wikipedia.org\",\n \"granularity\" : \"monthly\",\n \"start\" : \"2008010100\",\n \"end\" : \"2016080100\"\n }\n\nparams_pageviews = {\"project\" : \"en.wikipedia.org\",\n \"agent\" : \"user\",\n \"granularity\" : \"monthly\",\n \"start\" : \"2015070100\",\n \"end\" : \"2021090100\"\n }\n\n", "_____no_output_____" ] ], [ [ "We request each endpoint for each access type, except for aggregates. All data is saved in the *data* folder.\n ", "_____no_output_____" ] ], [ [ "def api_call(endpoint,parameters):\n headers = {\n 'User-Agent': 'https://github.com/Cain93',\n 'From': '[email protected]'\n }\n call = requests.get(endpoint.format(**parameters), headers=headers)\n response = call.json()\n \n return response", "_____no_output_____" ], [ "legacy_sites = [\"desktop-site\", \"mobile-site\"]\npageview_sites = [\"desktop\", \"mobile-app\", \"mobile-web\"]\nfile_template = \"data/{apiname}_{site}_{daterange}.json\"\nfor site in legacy_sites:\n data = api_call(endpoint_legacy, {**params_legacy, \"site\":site})\n fileName = file_template.format(apiname=\"pagecount\", site=site, daterange = \"200801-201607\")\n with open(fileName, 'w') as outfile:\n json.dump(data, outfile)\nfor site in pageview_sites:\n data = api_call(endpoint_pageviews, {**params_pageviews, \"site\":site})\n fileName = file_template.format(apiname=\"pageview\", site=site, daterange = \"201507-202108\")\n with open(fileName, 'w') as outfile:\n json.dump(data, outfile) \n \n", "_____no_output_____" ] ], [ [ "# Stage 2: DATA PROCESSING", "_____no_output_____" ], [ "Data is consoldiated and formatted into a single file. This file is included in the repository as *en-wikipedia_traffic_200712-202108.csv*; you may skip to Stage 3 and use the included file if desired.\n\nFirst we open each file and combine into a dataframe. 
While doing so, we rename columns to make them consistent between legacy and pageview data.", "_____no_output_____" ] ], [ [ "combined_data = pd.DataFrame()\ncol_names = {\n    \"access-site\": \"access\",\n    \"count\": \"views\"\n}\n\nfor filename in os.listdir(\"data\"):\n    file = open(\"data/\" + filename, \"r\")\n    file_data = json.loads(file.read())\n    file_df = pd.DataFrame.from_records(file_data[\"items\"]).rename(columns = col_names)\n    \n    combined_data = combined_data.append(file_df)\n\ncombined_data.head()", "_____no_output_____" ] ], [ [ "Then we parse the timestamp into year and month, and remove unused columns.", "_____no_output_____" ] ], [ [ "combined_data[\"year\"] = combined_data[\"timestamp\"].apply(lambda x: x[0:4])\ncombined_data[\"month\"] = combined_data[\"timestamp\"].apply(lambda x: x[4:6])\ncleaned_data = combined_data.drop(columns=[\"timestamp\", \"granularity\", \"project\", \"agent\"])\ncleaned_data.head()", "_____no_output_____" ] ], [ [ "Now data is pivoted to create a new column for each type of view. After pivoting:\n1. Mobile-web and mobile-app columns are combined into mobile\n1. Columns are renamed into more descriptive names\n1. Aggregate columns for all pageview and pagecount views are created\n1. Unused columns are dropped", "_____no_output_____" ] ], [ [ "# Pivot\npivot_data = cleaned_data.pivot(index = [\"year\", \"month\"], columns=[\"access\"])\npivot_data.columns = pivot_data.columns.droplevel()\n\n# Replace NaN with 0\npivot_data = pivot_data.fillna(0)\n\nprint(pivot_data.head())\n\n# Combine mobile views\npivot_data[\"mobile\"] = pivot_data[\"mobile-web\"] + pivot_data[\"mobile-app\"]\npivot_data = pivot_data.drop(columns = [\"mobile-web\", \"mobile-app\"])\n\n# Rename and aggregate\npivot_data = pivot_data.rename(columns = {\"desktop-site\":\"pagecount_desktop_views\",\n                                          \"mobile-site\": \"pagecount_mobile_views\",\n                                          \"desktop\":\"pageview_desktop_views\",\n                                          \"mobile\":\"pageview_mobile_views\",\n                                          })\npivot_data[\"pagecount_all_views\"] = pivot_data[\"pagecount_desktop_views\"] + pivot_data[\"pagecount_mobile_views\"]\npivot_data[\"pageview_all_views\"] = pivot_data[\"pageview_desktop_views\"] + pivot_data[\"pageview_mobile_views\"]\n\npivot_data.head()\n", "access        desktop  desktop-site  mobile-app  mobile-site  mobile-web\nyear month                                                              \n2008 01           0.0  4.930903e+09         0.0          0.0         0.0\n     02           0.0  4.818394e+09         0.0          0.0         0.0\n     03           0.0  4.955406e+09         0.0          0.0         0.0\n     04           0.0  5.159162e+09         0.0          0.0         0.0\n     05           0.0  5.584691e+09         0.0          0.0         0.0\n" ] ], [ [ "The data is converted to csv and saved.", "_____no_output_____" ] ], [ [ "pivot_data.to_csv('en-wikipedia_traffic_200712-202108.csv')", "_____no_output_____" ] ], [ [ "# Stage 3: ANALYSIS", "_____no_output_____" ], [ "Some minor processing is necessary for display. The year and month are combined into a datetime index, and 0's are replaced with NaN to avoid plotting them. Values are re-scaled to be in billions for easier interpretation.", "_____no_output_____" ] ], [ [ "display_data = pd.read_csv('en-wikipedia_traffic_200712-202108.csv')\n\n# Set the index as dates\ndisplay_data.index = pd.to_datetime(display_data[['year', 'month']].assign(DAY=1))\ndisplay_data.drop(columns=[\"year\", \"month\"], inplace=True)\n\n# Replace 0 with NaN\ndisplay_data = display_data.replace(0, np.nan)\n\n# Rescale to billions\ndisplay_data = display_data / 1000000000\n\n", "_____no_output_____" ] ], [ [ "With some minor formatting, we see the trend in views. 
In 2016 we can see the difference in the new pageview API totals compared to the legacy, attributable to the number of web crawlers accessing pages. \n\nAlso visible is the growth of mobile access from when it was first recorded in late 2014.", "_____no_output_____" ] ], [ [ "labels = [\"Desktop\", \"Desktop (Legacy)\", \"Mobile (Legacy)\", \"Mobile\", \"Total (Legacy)\", \"Total\"]\nstyles = [\"g--\",\"b--\", \"b:\", \"g:\", \"b\", \"g\"]\nwidths = [3,3,3,3,6,6]\n\nax = display_data.plot(figsize=(15, 6), style=styles, title=\"English Wikipedia Traffic\")\n\n# Reorder legend\nhandles = plt.gca().get_legend_handles_labels()[0]\norder = [5,0,3,4,1,2]\nplt.legend([handles[idx] for idx in order],[labels[idx] for idx in order])\n\n# Axis titles\nax.set_xlabel(\"Date\")\nax.set_ylabel(\"Views in Billions\")\n\nplt.show()", "_____no_output_____" ], [ "# Save plot\nax.get_figure().savefig(\"english_wikipedia_traffic.png\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
cb978c2232296e6be55e061af6ad10ec0daea62b
16,616
ipynb
Jupyter Notebook
examples/Notebooks/flopy3_nwt_options.ipynb
andrewcalderwood/flopy
0432ce96a0a5eec4d20adb4d384505632a2db3dc
[ "CC0-1.0", "BSD-3-Clause" ]
2
2019-12-17T16:00:44.000Z
2021-10-08T00:56:18.000Z
examples/Notebooks/flopy3_nwt_options.ipynb
jlarsen-usgs/flopy
6db70ac0b3da282e2e697909368d3204747bf2ca
[ "CC0-1.0", "BSD-3-Clause" ]
1
2018-09-28T17:08:51.000Z
2018-10-02T22:15:28.000Z
examples/Notebooks/flopy3_nwt_options.ipynb
jlarsen-usgs/flopy
6db70ac0b3da282e2e697909368d3204747bf2ca
[ "CC0-1.0", "BSD-3-Clause" ]
1
2022-02-06T20:05:17.000Z
2022-02-06T20:05:17.000Z
22.125166
235
0.492056
[ [ [ "# Working with MODFLOW-NWT v 1.1 option blocks\n\nIn MODFLOW-NWT an option block is present for the WEL file, UZF file, and SFR file. This block takes keyword arguments that are supplied in an option line in other versions of MODFLOW. \n\nThe `OptionBlock` class was created to provide combatibility with the MODFLOW-NWT option block and allow the user to easily edit values within the option block", "_____no_output_____" ] ], [ [ "import os\nimport sys\nimport platform\n\ntry:\n import flopy\nexcept:\n fpth = os.path.abspath(os.path.join(\"..\", \"..\"))\n sys.path.append(fpth)\n import flopy\n\nfrom flopy.utils import OptionBlock\n\nprint(sys.version)\nprint(\"flopy version: {}\".format(flopy.__version__))", "3.8.11 (default, Aug 6 2021, 08:56:27) \n[Clang 10.0.0 ]\nflopy version: 3.3.5\n" ], [ "load_ws = os.path.join(\"..\", \"data\", \"options\", \"sagehen\")\nmodel_ws = os.path.join(\"temp\", \"nwt_options\", \"output\")", "_____no_output_____" ] ], [ [ "## Loading a MODFLOW-NWT model that has option block options\n\nIt is critical to set the `version` flag in `flopy.modflow.Modflow.load()` to `version='mfnwt'` \n\nWe are going to load a modified version of the Sagehen test problem from GSFLOW to illustrate compatibility", "_____no_output_____" ] ], [ [ "mfexe = \"mfnwt\"\n\nif platform.system() == \"Windows\":\n mfexe += \".exe\"\n\nml = flopy.modflow.Modflow.load(\n \"sagehen.nam\", model_ws=load_ws, exe_name=mfexe, version=\"mfnwt\"\n)\nml.change_model_ws(new_pth=model_ws)\nml.write_input()\n\nsuccess, buff = ml.run_model(silent=True)\nif not success:\n print(\"Something bad happened.\")", " loading iuzfbnd array...\n loading vks array...\n loading eps array...\n loading thts array...\nstress period 1:\n loading finf array...\nstress period 2:\n" ] ], [ [ "## Let's look at the options attribute of the UZF object\n\nThe `uzf.options` attribute is an `OptionBlock` object. 
The representation of this object is the option block that will be written to output, which allows the user to easily check to make sure the block has the options they want.", "_____no_output_____" ] ], [ [ "uzf = ml.get_package(\"UZF\")\nuzf.options", "_____no_output_____" ] ], [ [ "The `OptionBlock` object also has attributes which correspond to the option names listed in the online guide to MODFLOW.\n\nThe user can call and edit the options within the option block.", "_____no_output_____" ] ], [ [ "print(uzf.options.nosurfleak)\nprint(uzf.options.savefinf)", "True\nTrue\n" ], [ "uzf.options.etsquare = False\nuzf.options", "_____no_output_____" ], [ "uzf.options.etsquare = True\nuzf.options", "_____no_output_____" ] ], [ [ "### The user can also see the single line representation of the options", "_____no_output_____" ] ], [ [ "uzf.options.single_line_options", "_____no_output_____" ] ], [ [ "### And the user can easily switch to single-line option writing", "_____no_output_____" ] ], [ [ "uzf.options.block = False\n\n# write out only the uzf file\nuzf_name = \"uzf_opt.uzf\"\nuzf.write_file(os.path.join(model_ws, uzf_name))", "_____no_output_____" ] ], [ [ "Now let's examine the first few lines of the new UZF file", "_____no_output_____" ] ], [ [ "f = open(os.path.join(model_ws, uzf_name))\nfor ix, line in enumerate(f):\n    if ix == 3:\n        break\n    else:\n        print(line)", "# UZF package for MODFLOW-NWT generated by Flopy 3.3.5\n\nNOSURFLEAK ETSQUARE 0.2 SAVEFINF\n\n         3         1         0         0         0         0        15       100         4  1.000000E+00 #NUZTOP IUZFOPT IRUNFLG IETFLG ipakcb IUZFCB2 NTRAIL NSETS NUZGAGES \n\n" ] ], [ [ "And let's load the new UZF file", "_____no_output_____" ] ], [ [ "uzf2 = flopy.modflow.ModflowUzf1.load(\n    os.path.join(model_ws, uzf_name), ml, check=False\n)", " loading iuzfbnd array...\n loading vks array...\n loading eps array...\n loading thts array...\nstress period 1:\n loading finf array...\nstress period 2:\n" ] ], [ [ "### Now we can look at the options object, and check if it's block or line format\n\n`block=False` indicates that options will be written as line format", "_____no_output_____" ] ], [ [ "print(uzf2.options)\nprint(uzf2.options.block)", "OPTIONS\nNOSURFLEAK\nETSQUARE 0.2\nSAVEFINF\nEND\n\nFalse\n" ] ], [ [ "### Finally we can convert back to block format", "_____no_output_____" ] ], [ [ "uzf2.options.block = True\nuzf2.write_file(os.path.join(model_ws, uzf_name))\nml.remove_package(\"UZF\")\n\nuzf3 = flopy.modflow.ModflowUzf1.load(\n    os.path.join(model_ws, uzf_name), ml, check=False\n)\nprint(\"\\n\")\nprint(uzf3.options)\nprint(uzf3.options.block)", " loading iuzfbnd array...\n loading vks array...\n loading eps array...\n loading thts array...\n" ] ], [ [ "## We can also look at the WEL object ", "_____no_output_____" ] ], [ [ "wel = ml.get_package(\"WEL\")\nwel.options", "_____no_output_____" ] ], [ [ "Let's write this out as a single line option block and examine the first few lines", "_____no_output_____" ] ], [ [ "wel_name = \"wel_opt.wel\"\nwel.options.block = False\n\nwel.write_file(os.path.join(model_ws, wel_name))\n\n\nf = open(os.path.join(model_ws, wel_name))\nfor ix, line in enumerate(f):\n    if ix == 4:\n        break\n    else:\n        print(line)", "# WEL package for MODFLOW-NWT generated by Flopy 3.3.5\n\n         5         0 NOPRINT \n\nSPECIFY 0.1 90\n\n         5         0 # stress period 1\n\n" ] ], [ [ "And we can load the new single line options WEL file and confirm that it is being read as an option line", "_____no_output_____" ] ], [ [ "ml.remove_package(\"WEL\")\nwel2 = flopy.modflow.ModflowWel.load(\n    
os.path.join(model_ws, wel_name), ml, nper=ml.nper, check=False\n)\n\nwel2.options\nwel2.options.block", "_____no_output_____" ] ], [ [ "# Building an OptionBlock from scratch\n\nThe user can also build an `OptionBlock` object from scratch to add to a `ModflowSfr2`, `ModflowUzf1`, or `ModflowWel` file.\n\nThe `OptionBlock` class has two required parameters and one optional parameter:\n\n`option_line`: a one line, string based representation of the options\n\n`package`: a modflow package object\n\n`block`: boolean flag for line based or block based options", "_____no_output_____" ] ], [ [ "opt_line = \"specify 0.1 20\"\noptions = OptionBlock(opt_line, flopy.modflow.ModflowWel, block=True)\noptions", "_____no_output_____" ] ], [ [ "From here we can set the noprint flag by using `options.noprint`.", "_____no_output_____" ] ], [ [ "options.noprint = True", "_____no_output_____" ] ], [ [ "And the user can also add auxiliary variables by using `options.auxillary` (note flopy's spelling of the attribute).", "_____no_output_____" ] ], [ [ "options.auxillary = [\"aux\", \"iface\"]", "_____no_output_____" ] ], [ [ "### Now we can create a new wel file using this `OptionBlock`\n\nand write it to output", "_____no_output_____" ] ], [ [ "wel3 = flopy.modflow.ModflowWel(\n    ml,\n    stress_period_data=wel.stress_period_data,\n    options=options,\n    unitnumber=99,\n)\n\nwel3.write_file(os.path.join(model_ws, wel_name))", "_____no_output_____" ] ], [ [ "And now let's examine the first few lines of the file", "_____no_output_____" ] ], [ [ "f = open(os.path.join(model_ws, wel_name))\nfor ix, line in enumerate(f):\n    if ix == 8:\n        break\n    else:\n        print(line)", "# WEL package for MODFLOW-NWT generated by Flopy 3.3.5\n\nOPTIONS\n\nSPECIFY 0.1 20\n\nEND\n\n         5         0 NOPRINT AUX IFACE\n\n         5         0 # stress period 1\n\n         1        35        12      20.0\n\n         1        36        13      21.0\n\n" ] ], [ [ "We can see that the `OptionBlock` class writes out all of the options in the correct locations.", "_____no_output_____" ], [ "### The user can also switch the options over to option line style and write out the output too!", "_____no_output_____" ] ], [ [ "wel3.options.block = False\nwel3.write_file(os.path.join(model_ws, wel_name))\n\nf = open(os.path.join(model_ws, wel_name))\nfor ix, line in enumerate(f):\n    if ix == 6:\n        break\n    else:\n        print(line)", "# WEL package for MODFLOW-NWT generated by Flopy 3.3.5\n\n         5         0 NOPRINT AUX IFACE\n\nSPECIFY 0.1 20\n\n         5         0 # stress period 1\n\n         1        35        12      20.0\n\n         1        36        13      21.0\n\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cb979519b2de5fc25eb1e6b6d04e0a6dbd819ebf
41,670
ipynb
Jupyter Notebook
StreetEYE Pure LSTM-Overfit.ipynb
druce/deeplearning20170805
6c3d4aecbb085df8d3b9a2cde31f0e160345aa62
[ "MIT" ]
null
null
null
StreetEYE Pure LSTM-Overfit.ipynb
druce/deeplearning20170805
6c3d4aecbb085df8d3b9a2cde31f0e160345aa62
[ "MIT" ]
null
null
null
StreetEYE Pure LSTM-Overfit.ipynb
druce/deeplearning20170805
6c3d4aecbb085df8d3b9a2cde31f0e160345aa62
[ "MIT" ]
null
null
null
49.725537
6,382
0.551308
[ [ [ "from __future__ import print_function\nimport collections\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport sklearn\nfrom keras.datasets import imdb\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.layers import LSTM\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing import sequence\nfrom keras.regularizers import L1L2\nfrom sklearn.model_selection import train_test_split\n\n# fix random seed for reproducibility\nnp.random.seed(7)\n\nfrom pprint import pprint\n\n", "Using TensorFlow backend.\n" ], [ "# Global config variables \nmodel_name = \"streeteye_lstm\"\n#data_file = \"lstm_dump_test.txt\"\ndata_file = \"dump_2017_words.txt\"\n\ncheckpoint_dir = \"/home/ubuntu/mount/Notebooks/checkpoints\"\ntensorboard_dir =\"/home/ubuntu/mount/Notebooks/tensorboard\"\n", "_____no_output_____" ], [ "############################################################\n# 1. load data\n############################################################\n\n# load dataset\nprint(\"Loading data...\")\ndata=[]\ny=[]\n\n# count words\nc = collections.Counter()\n\nwith open(data_file, \"r\") as infile:\n for line in infile:\n l = line.rstrip('\\n').split(\",\")\n label = l.pop(0)\n # skip empty headlines\n if len(l[0]) == 0:\n continue\n if '' in l:\n l = [w for w in l if w]\n data.append(l)\n y.append(label)\n c.update(l)\n \nprint(\"Loaded data.\")\n", "Loading data...\nLoaded data.\n" ], [ "# create a list of top words \nvocabulary_size = 10000 # set this to have ~20 for least popular\ncount = [['UNK', -1]]\ncount.extend(c.most_common(vocabulary_size - 1))\nprint(count[:10])\nprint(count[-10:])", "[['UNK', -1], ('domain_otherdomain', 119708), ('subsource_othersubsource', 47862), ('trump', 21141), ('with', 10761), ('domain_youtube.com', 8908), ('us', 8434), ('2017', 7862), ('from', 7768), ('subsource_memeorandum', 7712)]\n[('hazard', 17), ('alexei', 17), ('molly', 17), ('expel', 17), ('champ', 17), ('admiral', 17), ('conversational', 17), ('memorable', 17), ('wharton', 17), ('torn', 17)]\n" ], [ "dictionary = dict()\n# map words into a dict of ints\nfor word, _ in count:\n dictionary[word] = len(dictionary)\n\ndata_embeddings=[]\nunk_count = 0\n\nfor obs in data:\n embedlist = []\n for word in obs:\n if word in dictionary:\n index = dictionary[word]\n else:\n index = 0 # dictionary['UNK']\n unk_count = unk_count + 1\n embedlist.append(index)\n data_embeddings.append(embedlist)\n \ncount[0][1] = unk_count\nreverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))", "_____no_output_____" ], [ "print(dictionary['trump'])\nprint(reverse_dictionary[3])", "3\ntrump\n" ], [ "%matplotlib inline\nls = (map(len, data_embeddings))\npd.DataFrame(ls).hist()\n", "_____no_output_____" ], [ "MAX_LENGTH = 120\nX = sequence.pad_sequences(data_embeddings, maxlen=MAX_LENGTH)\nX[0]\nX.shape", "_____no_output_____" ], [ "y=np.array(np.float32(y))\n\ny=y.reshape((y.shape[0],1))\nprint(y.shape)\nnum_labels=1\n\nnum_obs, num_features = X.shape\nprint(\"Observations: %d\\nFeatures: %d\" % (num_obs, num_features))\n\n# split into training, xval, test, 60/20/20\nprint(\"Split into training, temp\")\nX_train, X_temp, y_train, y_temp = train_test_split(X, y, test_size=0.4)\nprint(\"Split into xval, test\")\nX_xval, X_test, y_xval, y_test = train_test_split(X_temp, y_temp, test_size=0.5)\n", "(218419, 1)\nObservations: 218419\nFeatures: 120\nSplit into training, temp\nSplit into xval, test\n" ], [ "\nprint(\"Training 
set\")\nprint(X_train.shape)\n\nprint(\"Xval set\")\nprint(X_xval.shape)\n\nprint(\"Test set\")\nprint(X_test.shape)\n\nnum_training_samples = X_train.shape[0]\nnum_xval_samples = X_xval.shape[0]\nnum_test_samples = X_test.shape[0]\n\nprint (\"\\nTraining observations: %d \\nXval observations: %d \\nTest observations: %d\\n\" % (num_training_samples, num_xval_samples, num_test_samples))\n", "Training set\n(131051, 120)\nXval set\n(43684, 120)\nTest set\n(43684, 120)\n\nTraining observations: 131051 \nXval observations: 43684 \nTest observations: 43684\n\n" ], [ "# initialize embeddings to Google vals\npkl_file = open('embeddings.pkl', 'rb')\nembeddings_dict, embeddings_reverse_dict, embeddings_data = pickle.load(pkl_file)", "_____no_output_____" ], [ "EMBEDDING_DIM=300\n\nembedding_matrix = np.zeros((len(dictionary) + 1, EMBEDDING_DIM))\n\ncount = 0\nfor word, i in dictionary.items():\n #print(word)\n embed_i = embeddings_dict.get(word)\n if embed_i is not None:\n embedding_vector = embeddings_data[i]\n count +=1\n embedding_matrix[i] = embedding_vector\n \nprint(\"initialized %d embeddings\" % count)", "initialized 10000 embeddings\n" ], [ "# function to generate model\n\ndef create_model(lstm_size=30, lstm_reg_penalty=0.0, sigmoid_dropout=(1.0/3.0), sigmoid_reg_penalty=0.0001):\n # create model\n model = Sequential()\n\n model.add(Embedding(len(dictionary) + 1, \n embedding_vector_length, \n weights=[embedding_matrix],\n input_length=MAX_LENGTH,\n trainable=False))\n \n # LSTM with lstm_size units\n model.add(LSTM(lstm_size,\n kernel_regularizer=L1L2(l1=lstm_reg_penalty, l2=lstm_reg_penalty)))\n model.add(Dropout(sigmoid_dropout))\n model.add(Dense(1, \n activation='sigmoid',\n kernel_initializer='TruncatedNormal', \n kernel_regularizer=L1L2(l1=sigmoid_reg_penalty, l2=sigmoid_reg_penalty)))\n model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n print(model.summary())\n return model\n", "_____no_output_____" ], [ "def selectThreshold (logits, labels, beta=(2.0/3)):\n # return threshold, f-score that yields best F-score\n # predict using true if >= threshold\n\n precision, recall, thresholds = sklearn.metrics.precision_recall_curve(labels, logits)\n bb = beta**2\n f1_scores = (1 + bb) * precision * recall / (bb * precision + recall)\n best_index = np.argmax(f1_scores)\n best_threshold = thresholds[best_index]\n best_score = f1_scores[best_index]\n return (best_threshold, best_score)", "_____no_output_____" ], [ "# create the model\nembedding_vector_length = EMBEDDING_DIM\n\nfor sig_reg_penalty in [0.00003]:\n for dropout in [0.5]:\n for lstm_units in [16,]:\n #, 32, 64]:\n for lstm_reg_penalty in [0.00000,]:\n #0.000001, 0.000003, 0.00001, 0.00003]:\n model = create_model(lstm_size=lstm_units, \n lstm_reg_penalty=lstm_reg_penalty, \n sigmoid_dropout=dropout, \n sigmoid_reg_penalty=sig_reg_penalty)\n print (\"LSTM units %d\" % lstm_units)\n print (\"LSTM reg_penalty %.8f\" % lstm_reg_penalty)\n print (\"Sigmoid dropout %.4f\" % dropout)\n print (\"Sigmoid reg_penalty %.8f\" % sig_reg_penalty)\n \n model.fit(X_train, y_train, validation_data=(X_xval, y_xval), epochs=100, batch_size=128)\n \n y_train_prob = model.predict(X_train)\n \n beta=(2.0/3.0) # penalize false positives more than false negatives\n thresh, score = selectThreshold(y_train_prob, y_train, beta=beta)\n y_train_pred = y_train_prob >= thresh\n \n print(\"Train Accuracy %.3f, Train F1 %.3f, f_score %.3f (beta %.3f)\" % \n (sklearn.metrics.accuracy_score(y_train_pred, y_train), \n 
sklearn.metrics.f1_score(y_train_pred, y_train),\n score, beta))\n \n print(sklearn.metrics.confusion_matrix(y_train_pred, y_train))\n\n y_xval_prob = model.predict(X_xval)\n \n thresh, score = selectThreshold(y_xval_prob, y_xval, beta=beta)\n y_xval_pred = y_xval_prob >= thresh\n \n print (\"LSTM units %d\" % lstm_units)\n print (\"LSTM reg_penalty %.8f\" % lstm_reg_penalty)\n print (\"Sigmoid dropout %.4f\" % dropout)\n print (\"Sigmoid reg_penalty %.8f\" % sig_reg_penalty) \n\n print(\"Xval Accuracy %.3f, Xval F1 %.3f, f_score %.3f (beta %.3f)\" % \n (sklearn.metrics.accuracy_score(y_xval_pred, y_xval), \n sklearn.metrics.f1_score(y_xval_pred, y_xval),\n score, beta))\n \n confusion_matrix = sklearn.metrics.confusion_matrix(y_xval_pred, y_xval)\n print(confusion_matrix)\n false_positive = confusion_matrix[1][0]\n false_negative = confusion_matrix[0][1]\n raw_score = (2.0*false_positive + false_negative) / np.sum(confusion_matrix)\n \n print (\"Raw score %f\" % raw_score)\n\n \n", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nembedding_22 (Embedding) (None, 120, 300) 3000300 \n_________________________________________________________________\nlstm_22 (LSTM) (None, 16) 20288 \n_________________________________________________________________\ndropout_22 (Dropout) (None, 16) 0 \n_________________________________________________________________\ndense_22 (Dense) (None, 1) 17 \n=================================================================\nTotal params: 3,020,605.0\nTrainable params: 20,305.0\nNon-trainable params: 3,000,300.0\n_________________________________________________________________\nNone\nLSTM units 16\nLSTM reg_penalty 0.00000000\nSigmoid dropout 0.5000\nSigmoid reg_penalty 0.00003000\nTrain on 131051 samples, validate on 43684 samples\nEpoch 1/100\n131051/131051 [==============================] - 229s - loss: 0.1207 - acc: 0.9755 - val_loss: 0.0596 - val_acc: 0.9803\nEpoch 2/100\n131051/131051 [==============================] - 319s - loss: 0.0610 - acc: 0.9813 - val_loss: 0.0534 - val_acc: 0.9825\nEpoch 3/100\n131051/131051 [==============================] - 318s - loss: 0.0549 - acc: 0.9827 - val_loss: 0.0509 - val_acc: 0.9831\nEpoch 4/100\n131051/131051 [==============================] - 318s - loss: 0.0522 - acc: 0.9838 - val_loss: 0.0500 - val_acc: 0.9832\nEpoch 5/100\n131051/131051 [==============================] - 293s - loss: 0.0504 - acc: 0.9841 - val_loss: 0.0505 - val_acc: 0.9835\nEpoch 6/100\n131051/131051 [==============================] - 264s - loss: 0.0491 - acc: 0.9845 - val_loss: 0.0483 - val_acc: 0.9837\nEpoch 7/100\n131051/131051 [==============================] - 318s - loss: 0.0479 - acc: 0.9849 - val_loss: 0.0484 - val_acc: 0.9835\nEpoch 8/100\n131051/131051 [==============================] - 318s - loss: 0.0472 - acc: 0.9852 - val_loss: 0.0482 - val_acc: 0.9836\nEpoch 9/100\n131051/131051 [==============================] - 316s - loss: 0.0461 - acc: 0.9852 - val_loss: 0.0482 - val_acc: 0.9837\nEpoch 10/100\n131051/131051 [==============================] - 295s - loss: 0.0450 - acc: 0.9855 - val_loss: 0.0485 - val_acc: 0.9835\nEpoch 11/100\n131051/131051 [==============================] - 196s - loss: 0.0436 - acc: 0.9857 - val_loss: 0.0491 - val_acc: 0.9833\nEpoch 12/100\n131051/131051 [==============================] - 182s - loss: 0.0434 - acc: 0.9861 - val_loss: 0.0487 - val_acc: 0.9840\nEpoch 13/100\n131051/131051 
[==============================] - 181s - loss: 0.0424 - acc: 0.9861 - val_loss: 0.0484 - val_acc: 0.9837\nEpoch 14/100\n131051/131051 [==============================] - 182s - loss: 0.0417 - acc: 0.9862 - val_loss: 0.0498 - val_acc: 0.9838\nEpoch 15/100\n131051/131051 [==============================] - 181s - loss: 0.0409 - acc: 0.9863 - val_loss: 0.0488 - val_acc: 0.9838\nEpoch 16/100\n131051/131051 [==============================] - 182s - loss: 0.0399 - acc: 0.9868 - val_loss: 0.0505 - val_acc: 0.9840\nEpoch 17/100\n131051/131051 [==============================] - 182s - loss: 0.0388 - acc: 0.9872 - val_loss: 0.0494 - val_acc: 0.9833\nEpoch 18/100\n131051/131051 [==============================] - 182s - loss: 0.0385 - acc: 0.9874 - val_loss: 0.0500 - val_acc: 0.9836\nEpoch 19/100\n131051/131051 [==============================] - 182s - loss: 0.0378 - acc: 0.9874 - val_loss: 0.0515 - val_acc: 0.9836\nEpoch 20/100\n131051/131051 [==============================] - 181s - loss: 0.0370 - acc: 0.9880 - val_loss: 0.0534 - val_acc: 0.9828\nEpoch 21/100\n131051/131051 [==============================] - 181s - loss: 0.0362 - acc: 0.9880 - val_loss: 0.0573 - val_acc: 0.9839\nEpoch 22/100\n131051/131051 [==============================] - 182s - loss: 0.0363 - acc: 0.9879 - val_loss: 0.0534 - val_acc: 0.9826\nEpoch 23/100\n131051/131051 [==============================] - 181s - loss: 0.0350 - acc: 0.9883 - val_loss: 0.0532 - val_acc: 0.9832\nEpoch 24/100\n131051/131051 [==============================] - 182s - loss: 0.0344 - acc: 0.9883 - val_loss: 0.0644 - val_acc: 0.9814\nEpoch 25/100\n131051/131051 [==============================] - 182s - loss: 0.0336 - acc: 0.9889 - val_loss: 0.0567 - val_acc: 0.9835\nEpoch 26/100\n131051/131051 [==============================] - 181s - loss: 0.0329 - acc: 0.9889 - val_loss: 0.0559 - val_acc: 0.9835\nEpoch 27/100\n131051/131051 [==============================] - 181s - loss: 0.0321 - acc: 0.9891 - val_loss: 0.0550 - val_acc: 0.9831\nEpoch 28/100\n131051/131051 [==============================] - 182s - loss: 0.0313 - acc: 0.9894 - val_loss: 0.0593 - val_acc: 0.9832\nEpoch 29/100\n131051/131051 [==============================] - 182s - loss: 0.0306 - acc: 0.9897 - val_loss: 0.0592 - val_acc: 0.9822\nEpoch 30/100\n131051/131051 [==============================] - 182s - loss: 0.0303 - acc: 0.9898 - val_loss: 0.0619 - val_acc: 0.9823\nEpoch 31/100\n131051/131051 [==============================] - 182s - loss: 0.0295 - acc: 0.9903 - val_loss: 0.0631 - val_acc: 0.9832\nEpoch 32/100\n131051/131051 [==============================] - 182s - loss: 0.0297 - acc: 0.9899 - val_loss: 0.0636 - val_acc: 0.9826\nEpoch 33/100\n131051/131051 [==============================] - 181s - loss: 0.0277 - acc: 0.9908 - val_loss: 0.0654 - val_acc: 0.9822\nEpoch 34/100\n131051/131051 [==============================] - 182s - loss: 0.0274 - acc: 0.9910 - val_loss: 0.0674 - val_acc: 0.9831\nEpoch 35/100\n131051/131051 [==============================] - 181s - loss: 0.0262 - acc: 0.9915 - val_loss: 0.0661 - val_acc: 0.9830\nEpoch 36/100\n131051/131051 [==============================] - 181s - loss: 0.0261 - acc: 0.9914 - val_loss: 0.0670 - val_acc: 0.9811\nEpoch 37/100\n131051/131051 [==============================] - 181s - loss: 0.0255 - acc: 0.9916 - val_loss: 0.0730 - val_acc: 0.9828\nEpoch 38/100\n131051/131051 [==============================] - 181s - loss: 0.0255 - acc: 0.9917 - val_loss: 0.0717 - val_acc: 0.9820\nEpoch 39/100\n131051/131051 [==============================] - 182s - 
loss: 0.0244 - acc: 0.9923 - val_loss: 0.0777 - val_acc: 0.9830\nEpoch 40/100\n131051/131051 [==============================] - 183s - loss: 0.0241 - acc: 0.9918 - val_loss: 0.0740 - val_acc: 0.9815\nEpoch 41/100\n131051/131051 [==============================] - 181s - loss: 0.0234 - acc: 0.9923 - val_loss: 0.0710 - val_acc: 0.9821\nEpoch 42/100\n131051/131051 [==============================] - 181s - loss: 0.0225 - acc: 0.9928 - val_loss: 0.0853 - val_acc: 0.9824\nEpoch 43/100\n131051/131051 [==============================] - 181s - loss: 0.0248 - acc: 0.9917 - val_loss: 0.0753 - val_acc: 0.9825\nEpoch 44/100\n131051/131051 [==============================] - 181s - loss: 0.0217 - acc: 0.9932 - val_loss: 0.0762 - val_acc: 0.9814\nEpoch 45/100\n131051/131051 [==============================] - 181s - loss: 0.0210 - acc: 0.9931 - val_loss: 0.0858 - val_acc: 0.9807\nEpoch 46/100\n131051/131051 [==============================] - 181s - loss: 0.0211 - acc: 0.9935 - val_loss: 0.0796 - val_acc: 0.9820\nEpoch 47/100\n131051/131051 [==============================] - 182s - loss: 0.0202 - acc: 0.9938 - val_loss: 0.0866 - val_acc: 0.9813\nEpoch 48/100\n131051/131051 [==============================] - 181s - loss: 0.0198 - acc: 0.9936 - val_loss: 0.0862 - val_acc: 0.9810\nEpoch 49/100\n131051/131051 [==============================] - 181s - loss: 0.0193 - acc: 0.9939 - val_loss: 0.0921 - val_acc: 0.9814\nEpoch 50/100\n131051/131051 [==============================] - 182s - loss: 0.0196 - acc: 0.9937 - val_loss: 0.0879 - val_acc: 0.9807\nEpoch 51/100\n131051/131051 [==============================] - 182s - loss: 0.0186 - acc: 0.9940 - val_loss: 0.0920 - val_acc: 0.9811\nEpoch 52/100\n131051/131051 [==============================] - 181s - loss: 0.0182 - acc: 0.9944 - val_loss: 0.0922 - val_acc: 0.9820\nEpoch 53/100\n131051/131051 [==============================] - 182s - loss: 0.0174 - acc: 0.9948 - val_loss: 0.0985 - val_acc: 0.9815\nEpoch 54/100\n" ], [ "y_train_prob = model.predict(X_train)\nbeta=(2.0/3.0) # penalize false positives more than false negatives\n\nthresh, score = selectThreshold(y_train_prob, y_train, beta=beta)\ny_train_pred = y_train_prob >= thresh\nprint(\"Train Accuracy %.3f, Train F1 %.3f, f_score %.3f (beta %.3f)\" % \n (sklearn.metrics.accuracy_score(y_train_pred, y_train), \n sklearn.metrics.f1_score(y_train_pred, y_train),\n score, beta))\n \nprint(sklearn.metrics.confusion_matrix(y_train_pred, y_train))\n ", "Train Accuracy 0.996, Train F1 0.931, f_score 0.944 (beta 0.667)\n[[127340 358]\n [ 120 3233]]\n" ], [ " y_xval_prob = model.predict(X_xval)\n \n thresh, score = selectThreshold(y_xval_prob, y_xval, beta=beta)\n y_xval_pred = y_xval_prob >= thresh\n\n print(\"Xval Accuracy %.3f, Xval F1 %.3f, f_score %.3f (beta %.3f)\" % \n (sklearn.metrics.accuracy_score(y_xval_pred, y_xval), \n sklearn.metrics.f1_score(y_xval_pred, y_xval),\n score, beta))\n \n print(sklearn.metrics.confusion_matrix(y_xval_pred, y_xval))\n\n ", "Xval Accuracy 0.971, Xval F1 0.000, f_score nan (beta 0.667)\n[[42410 1273]\n [ 1 0]]\n" ], [ "y_test_prob = model.predict(X_test)\nbeta=(2.0/3.0) # penalize false positives more than false negatives\n\ny_test_pred = y_test_prob >= thresh\nprint(\"Test Accuracy %.3f, Test F1 %.3f, f_score %.3f (beta %.3f)\" % \n (sklearn.metrics.accuracy_score(y_test_pred, y_test), \n sklearn.metrics.f1_score(y_test_pred, y_test),\n score, beta))\n \nprint(sklearn.metrics.confusion_matrix(y_test_pred, y_test))\n", "Test Accuracy 0.971, Test F1 0.002, f_score nan (beta 
0.667)\n[[42437 1246]\n [ 0 1]]\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb97963741e5192d9c62079d2611f94a10eceb98
137,582
ipynb
Jupyter Notebook
*Python_Basics/Matplotlib/Matplotlib.ipynb
jasjung/Machine_Learnin
d2272a529b94935bd9dc25fe96871dfbd3e582ae
[ "MIT" ]
1
2020-06-08T10:58:36.000Z
2020-06-08T10:58:36.000Z
*Python_Basics/Matplotlib/Matplotlib.ipynb
jasjung/Machine_Learnin
d2272a529b94935bd9dc25fe96871dfbd3e582ae
[ "MIT" ]
null
null
null
*Python_Basics/Matplotlib/Matplotlib.ipynb
jasjung/Machine_Learnin
d2272a529b94935bd9dc25fe96871dfbd3e582ae
[ "MIT" ]
null
null
null
172.192741
34,228
0.902153
[ [ [ "<h1>Table of Contents<span class=\"tocSkip\"></span></h1>\n<div class=\"toc\"><ul class=\"toc-item\"><li><span><a href=\"#Matplotlib\" data-toc-modified-id=\"Matplotlib-1\"><span class=\"toc-item-num\">1&nbsp;&nbsp;</span>Matplotlib</a></span><ul class=\"toc-item\"><li><span><a href=\"#Customization\" data-toc-modified-id=\"Customization-1.1\"><span class=\"toc-item-num\">1.1&nbsp;&nbsp;</span>Customization</a></span></li></ul></li><li><span><a href=\"#subplot\" data-toc-modified-id=\"subplot-2\"><span class=\"toc-item-num\">2&nbsp;&nbsp;</span>subplot</a></span></li></ul></div>", "_____no_output_____" ], [ "# Intermediate Python for Data Science\n## Matplotlib\n\n- source: https://www.datacamp.com/courses/intermediate-python-for-data-science\n- color code: https://matplotlib.org/examples/color/named_colors.html", "_____no_output_____" ] ], [ [ "# Quick cheat sheet \n\n# to change plot size \nplt.figure(figsize=(20,8))", "_____no_output_____" ], [ "'''Line Plot'''\n# Print the last item from years and populations\nprint(year[-1])\nprint(pop[-1])\n\n# Import matplotlib.pyplot as plt\nimport matplotlib.pyplot as plt\n\n# Make a line plot: year on the x-axis, pop on the y-axis\nplt.plot(year, pop)\n\n# Display the plot with plt.show()\nplt.show()", "_____no_output_____" ], [ "# Print the last item of gdp_cap and life_exp\nprint(gdp_cap[-1])\nprint(life_exp[-1])\n\n# Make a line plot, gdp_cap on the x-axis, life_exp on the y-axis\nplt.plot(gdp_cap, life_exp)\n\n# Display the plot\nplt.show()", "_____no_output_____" ], [ "'''Scatter Plot'''\n# Change the line plot below to a scatter plot\nplt.scatter(gdp_cap, life_exp)\n\n# Put the x-axis on a logarithmic scale\nplt.xscale('log')\n\n# Show plot\nplt.show()", "_____no_output_____" ], [ "'''Scatter Plot'''\n# Import package\nimport matplotlib.pyplot as plt\n\n# Build Scatter plot\nplt.scatter(pop, life_exp)\n\n# Show plot\nplt.show()", "_____no_output_____" ], [ "'''Histogram'''\n# Create histogram of life_exp data\nplt.hist(life_exp)\n\n# Display histogram\nplt.show()", "_____no_output_____" ], [ "'''Histogram bins'''\n# Build histogram with 5 bins\nplt.hist(life_exp, bins = 5)\n\n# Show and clear plot\nplt.show()\nplt.clf() # cleans it up again so you can start afresh.\n\n# Build histogram with 20 bins\nplt.hist(life_exp, bins = 20)\n\n# Show and clear plot again\nplt.show()\nplt.clf()", "_____no_output_____" ], [ "'''Histogram compare'''\n# Histogram of life_exp, 15 bins\nplt.hist(life_exp, bins = 15)\n\n# Show and clear plot\nplt.show()\nplt.clf()\n\n# Histogram of life_exp1950, 15 bins\nplt.hist(life_exp1950, bins = 15)\n\n# Show and clear plot again\nplt.show()\nplt.clf()\n", "_____no_output_____" ] ], [ [ "### Customization", "_____no_output_____" ] ], [ [ "'''Label'''\n# Basic scatter plot, log scale\nplt.scatter(gdp_cap, life_exp)\nplt.xscale('log') \n\n# Strings\nxlab = 'GDP per Capita [in USD]'\nylab = 'Life Expectancy [in years]'\ntitle = 'World Development in 2007'\n\n# Add axis labels\nplt.xlabel(xlab)\nplt.ylabel(ylab)\n\n# Add title\nplt.title(title)\n\n# After customizing, display the plot\nplt.show()", "_____no_output_____" ], [ "'''Ticks'''\n# Scatter plot\nplt.scatter(gdp_cap, life_exp)\n\n# Previous customizations\nplt.xscale('log') \nplt.xlabel('GDP per Capita [in USD]')\nplt.ylabel('Life Expectancy [in years]')\nplt.title('World Development in 2007')\n\n# Definition of tick_val and tick_lab\ntick_val = [1000,10000,100000]\ntick_lab = ['1k','10k','100k']\n\n# Adapt the ticks on the x-axis\nplt.xticks(tick_val, 
tick_lab)\n\n# After customizing, display the plot\nplt.show()", "_____no_output_____" ], [ "'''Sizes\nWouldn't it be nice if the size of the dots corresponds to the population?\n'''\n# Import numpy as np\nimport numpy as np\n\n# Store pop as a numpy array: np_pop\nnp_pop = np.array(pop)\n\n# Double np_pop\nnp_pop = np_pop * 2\n\n# Update: set s argument to np_pop # s is size \nplt.scatter(gdp_cap, life_exp, s = np_pop) \n\n# Previous customizations\nplt.xscale('log') \nplt.xlabel('GDP per Capita [in USD]')\nplt.ylabel('Life Expectancy [in years]')\nplt.title('World Development in 2007')\nplt.xticks([1000, 10000, 100000],['1k', '10k', '100k'])\n\n# Display the plot\nplt.show()", "_____no_output_____" ], [ "'''Colors\n\nThe next step is making the plot more colorful! To do this, a list col has been created for you. It's a list with a color for each corresponding country, depending on the continent the country is part of.\n\nHow did we make the list col you ask? The Gapminder data contains a list continent with the continent each country belongs to. A dictionary is constructed that maps continents onto colors:\n'''\n\ndict = {\n 'Asia':'red',\n 'Europe':'green',\n 'Africa':'blue',\n 'Americas':'yellow',\n 'Oceania':'black'\n}\n\n# c = color, alpha = opacity \n# Specify c and alpha inside plt.scatter()\nplt.scatter(x = gdp_cap, y = life_exp, s = np.array(pop) * 2, c = col, alpha = 0.8)\n\n# Previous customizations\nplt.xscale('log') \nplt.xlabel('GDP per Capita [in USD]')\nplt.ylabel('Life Expectancy [in years]')\nplt.title('World Development in 2007')\nplt.xticks([1000,10000,100000], ['1k','10k','100k'])\n\n# Show the plot\nplt.show()", "_____no_output_____" ], [ "'''Additional Customizations'''\n# Scatter plot\nplt.scatter(x = gdp_cap, y = life_exp, s = np.array(pop) * 2, c = col, alpha = 0.8)\n\n# Previous customizations\nplt.xscale('log') \nplt.xlabel('GDP per Capita [in USD]')\nplt.ylabel('Life Expectancy [in years]')\nplt.title('World Development in 2007')\nplt.xticks([1000,10000,100000], ['1k','10k','100k'])\n\n# Additional customizations\nplt.text(1550, 71, 'India')\nplt.text(5700, 80, 'China')\n\n# Add grid() call\nplt.grid(True)\n\n# Show the plot\nplt.show()", "_____no_output_____" ], [ "from sklearn.datasets import load_iris\ndata = load_iris()\ndata.target[[10, 25, 50]]\nlist(data.target_names)", "_____no_output_____" ] ], [ [ "## subplot\nsource: https://matplotlib.org/examples/pylab_examples/subplot_demo.html", "_____no_output_____" ] ], [ [ "# subplot(nrows, ncols, plot_number)", "_____no_output_____" ], [ "import numpy as np\nimport matplotlib.pyplot as plt\n\nx1 = np.linspace(0.0, 5.0)\nx2 = np.linspace(0.0, 2.0)\n\ny1 = np.cos(2 * np.pi * x1) * np.exp(-x1)\ny2 = np.cos(2 * np.pi * x2)\n\nplt.subplot(2, 1, 1)\nplt.plot(x1, y1, 'ko-')\nplt.title('A tale of 2 subplots')\nplt.ylabel('Damped oscillation')\n\nplt.subplot(2, 1, 2)\nplt.plot(x2, y2, 'r.-')\nplt.xlabel('time (s)')\nplt.ylabel('Undamped')\n\nplt.show()", "_____no_output_____" ], [ "plt.subplot(2, 1, 1)\nplt.plot(x1, y1, 'ko-')\nplt.title('A tale of 2 subplots')\nplt.ylabel('Damped oscillation')\n\nplt.subplot(2, 1, 2)\nplt.plot(x2, y2, 'r.-')\nplt.xlabel('time (s)')\nplt.ylabel('Undamped')\n\nplt.show()", "_____no_output_____" ], [ "plt.subplots(2, 2, sharex=True, sharey=True)\nplt.show()", "_____no_output_____" ], [ "x = np.linspace(0, 2*np.pi, 400)\ny = np.sin(x**2)", "_____no_output_____" ], [ "fig, axes = plt.subplots(1,2, sharey=True)\naxes[0].plot(x, y)\naxes[1].scatter(x, y)\nplt.show()", "_____no_output_____" 
], [ "# Two subplots, unpack the axes array immediately\nf, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\nax1.plot(x, y)\nax1.set_title('Sharing Y axis')\nax2.scatter(x, y)\nplt.show()", "_____no_output_____" ], [ "fig, axes = plt.subplots(1,3, sharey=True, sharex=True)\n\nfor i in range(3):\n \n axes[i].scatter(center[i],xn)\n axes[i].set_title('Cluster ' + str(i+1))\n axes[i].grid(True)\n \nplt.yticks(xn,var)\nplt.subplots_adjust(wspace=0, hspace=0)\n#plt.grid(True)\nplt.show()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb979c3686305a44968b1feda65c50c9362bd8b8
591,423
ipynb
Jupyter Notebook
reports/monthly/2019-02-01_2019-03-01/github.ipynb
choldgraf/jupyter-activity-snapshot
080f8c34e1e3e5081c4b733592b114b01b09b8c0
[ "BSD-3-Clause" ]
7
2019-08-26T13:19:05.000Z
2021-11-18T16:34:01.000Z
reports/monthly/2019-02-01_2019-03-01/github.ipynb
choldgraf/jupyter-activity-snapshot
080f8c34e1e3e5081c4b733592b114b01b09b8c0
[ "BSD-3-Clause" ]
3
2019-11-27T19:25:27.000Z
2021-03-13T01:19:45.000Z
reports/monthly/2019-02-01_2019-03-01/github.ipynb
choldgraf/jupyter-activity-snapshot
080f8c34e1e3e5081c4b733592b114b01b09b8c0
[ "BSD-3-Clause" ]
4
2019-06-20T17:49:53.000Z
2021-05-21T21:06:18.000Z
90.101005
31,953
0.561659
[ [ [ "import seaborn as sns\nimport pandas as pd\nimport numpy as np\nimport altair as alt\nfrom markdown import markdown\nfrom IPython.display import Markdown\nfrom ipywidgets.widgets import HTML, Tab\nfrom ipywidgets import widgets\nfrom datetime import timedelta\nfrom matplotlib import pyplot as plt\nimport os.path as op\n\nfrom mod import load_data, alt_theme", "_____no_output_____" ], [ "def author_url(author):\n return f\"https://github.com/{author}\"", "_____no_output_____" ], [ "# Parameters\nfmt_date = \"{:%Y-%m-%d}\"\n\nn_days = 90\nstart_date = fmt_date.format(pd.datetime.today() - timedelta(days=n_days))\nend_date = fmt_date.format(pd.datetime.today())\n\nrenderer = \"jupyterlab\"\ngithub_orgs = [\"jupyterhub\", \"jupyter\", \"jupyterlab\", \"jupyter-widgets\", \"ipython\", \"binder-examples\", \"nteract\"]", "_____no_output_____" ], [ "# Parameters\nrenderer = \"kaggle\"\nstart_date = \"2019-02-01\"\nend_date = \"2019-03-01\"\n", "_____no_output_____" ], [ "comments, issues, prs = load_data('../data/')\nbot_names = pd.read_csv('bot_names.csv')['names'].tolist()\ncomments = comments.query('author not in @bot_names').drop_duplicates()\nissues = issues.query('author not in @bot_names').drop_duplicates()\nprs = prs.query('author not in @bot_names').drop_duplicates()", "/c/Users/chold/Dropbox/projects/jupyter-activity-snapshot/templates/mod/__init__.py:24: FutureWarning: Sorting because non-concatenation axis is not aligned. A future version\nof pandas will change to not sort by default.\n\nTo accept the future behavior, pass 'sort=False'.\n\nTo retain the current behavior and silence the warning, pass 'sort=True'.\n\n comments = pd.concat(comments)\n" ], [ "# Only keep the dates we want\ncomments = comments.query('updatedAt > @start_date and updatedAt < @end_date')\nissues = issues.query('updatedAt > @start_date and updatedAt < @end_date')\nprs = prs.query('updatedAt > @start_date and updatedAt < @end_date')", "_____no_output_____" ], [ "alt.renderers.enable(renderer);\nalt.themes.register('my_theme', alt_theme)\nalt.themes.enable(\"my_theme\")", "_____no_output_____" ], [ "# Information about out time window\ntime_delta = pd.to_datetime(end_date) - pd.to_datetime(start_date)\nn_days = time_delta.days\n\n# Information about the data we loaded\ngithub_orgs = comments['org'].unique()", "_____no_output_____" ] ], [ [ "# GitHub activity\n\nJupyter also has lots of activity across GitHub repositories. The following sections contain\noverviews of recent activity across the following GitHub organizations:", "_____no_output_____" ] ], [ [ "# Define colors we'll use for GitHub membership\nauthor_types = ['MEMBER', 'CONTRIBUTOR', 'COLLABORATOR', \"NONE\"]\n\nauthor_palette = sns.palettes.blend_palette([\"lightgrey\", \"lightgreen\", \"darkgreen\"], 4)\nauthor_colors = [\"rgb({}, {}, {}, {})\".format(*(ii*256)) for ii in author_palette]\nauthor_color_dict = {key: val for key, val in zip(author_types, author_palette)}", "_____no_output_____" ], [ "orgs_md = []\nfor org in github_orgs:\n orgs_md.append(f'* [github.com/{org}](https://github.com/{org})')\nMarkdown('\\n'.join(orgs_md))", "_____no_output_____" ], [ "Markdown(f\"Showing GitHub activity from **{start_date}** to **{end_date}**\")", "_____no_output_____" ] ], [ [ "## List of all contributors per organization\n\nFirst, we'll list each contributor that has contributed to each organization in the last several days.\nContributions to open source projects are diverse, and involve much more than just contributing code and\ncode review. 
Thanks to everybody in the Jupyter communities for all that they do.", "_____no_output_____" ] ], [ [ "n_plot = 5\ntabs = widgets.Tab(children=[])\nfor ii, org in enumerate(github_orgs):\n authors_comments = comments.query('org == @org')['author']\n authors_prs = prs.query('org == @org')['author']\n unique_participants = np.unique(np.hstack([authors_comments.values, authors_prs.values]).astype(str)).tolist()\n unique_participants.sort(key=lambda a: a.lower())\n \n all_participants = [f\"[{participant}](https://github.com/{participant})\" for participant in unique_participants]\n participants_md = \" | \".join(all_participants)\n md_html = HTML(\"<center>{}</center>\".format(markdown(participants_md)))\n \n children = list(tabs.children)\n children.append(md_html)\n tabs.children = tuple(children)\n tabs.set_title(ii, org)\n \ndisplay(Markdown(f\"All participants across issues and pull requests in each org in the last {n_days} days\"))\ndisplay(tabs)", "_____no_output_____" ] ], [ [ "## Merged Pull requests\n\nHere's an analysis of **merged pull requests** across each of the repositories in the Jupyter\necosystem.", "_____no_output_____" ] ], [ [ "merged = prs.query('state == \"MERGED\" and closedAt > @start_date and closedAt < @end_date')", "_____no_output_____" ], [ "prs_by_repo = merged.groupby(['org', 'repo']).count()['author'].reset_index().sort_values(['org', 'author'], ascending=False)\nalt.Chart(data=prs_by_repo, title=f\"Merged PRs in the last {n_days} days\").mark_bar().encode(\n x=alt.X('repo', sort=prs_by_repo['repo'].values.tolist()),\n y='author',\n color='org'\n)", "_____no_output_____" ] ], [ [ "### A list of merged PRs by project\n\nBelow is a tabbed readout of recently-merged PRs. Check out the title to get an idea for what they\nimplemented, and be sure to thank the PR author for their hard work!", "_____no_output_____" ] ], [ [ "tabs = widgets.Tab(children=[])\nmerged_by = {}\npr_by = {}\nfor ii, (org, idata) in enumerate(merged.groupby('org')):\n issue_md = []\n issue_md.append(f\"#### Closed PRs for org: `{org}`\")\n issue_md.append(\"\")\n for (org, repo), prs in idata.groupby(['org', 'repo']):\n issue_md.append(f\"##### [{org}/{repo}](https://github.com/{org}/{repo})\")\n for _, pr in prs.iterrows():\n user_name = pr['author']\n user_url = author_url(user_name)\n pr_number = pr['number']\n pr_html = pr['url']\n pr_title = pr['title']\n pr_closedby = pr['mergedBy']\n pr_closedby_url = f\"https://github.com/{pr_closedby}\"\n if user_name not in pr_by:\n pr_by[user_name] = 1\n else:\n pr_by[user_name] += 1\n \n if pr_closedby not in merged_by:\n merged_by[pr_closedby] = 1\n else:\n merged_by[pr_closedby] += 1\n text = f\"* [(#{pr_number})]({pr_html}): _{pr_title}_ by **[@{user_name}]({user_url})** merged by **[@{pr_closedby}]({pr_closedby_url})**\"\n issue_md.append(text)\n issue_md.append('')\n markdown_html = markdown('\\n'.join(issue_md))\n \n children = list(tabs.children)\n children.append(HTML(markdown_html))\n tabs.children = tuple(children)\n tabs.set_title(ii, org)\ntabs", "_____no_output_____" ] ], [ [ "### Authoring and merging stats by repository\n\nLet's see who has been doing most of the PR authoring and merging. The PR author is generally the\nperson that implemented a change in the repository (code, documentation, etc). 
The PR merger is\nthe person that \"pressed the green button\" and got the change into the main codebase.", "_____no_output_____" ] ], [ [ "# Prep our merging DF\nmerged_by_repo = merged.groupby(['org', 'repo', 'author'], as_index=False).agg({'id': 'count', 'authorAssociation': 'first'}).rename(columns={'id': \"authored\", 'author': 'username'})\nclosed_by_repo = merged.groupby(['org', 'repo', 'mergedBy']).count()['id'].reset_index().rename(columns={'id': \"closed\", \"mergedBy\": \"username\"})", "_____no_output_____" ], [ "n_plot = 50\ncharts = []\nfor ii, (iorg, idata) in enumerate(merged_by_repo.replace(np.nan, 0).groupby(['org'])):\n title = f\"PR authors for {iorg} in the last {n_days} days\"\n idata = idata.groupby('username', as_index=False).agg({'authored': 'sum', 'authorAssociation': 'first'})\n idata = idata.sort_values('authored', ascending=False).head(n_plot)\n ch = alt.Chart(data=idata, width=1000, title=title).mark_bar().encode(\n x='username',\n y='authored',\n color=alt.Color('authorAssociation', scale=alt.Scale(domain=author_types, range=author_colors))\n )\n charts.append(ch)\n\nalt.hconcat(*charts)", "_____no_output_____" ], [ "charts = []\nfor ii, (iorg, idata) in enumerate(closed_by_repo.replace(np.nan, 0).groupby(['org'])):\n title = f\"Merges for {iorg} in the last {n_days} days\"\n ch = alt.Chart(data=idata, width=1000, title=title).mark_bar().encode(\n x='username',\n y='closed',\n )\n charts.append(ch)\nalt.hconcat(*charts)", "_____no_output_____" ] ], [ [ "## Issues\n\nIssues are **conversations** that happen on our GitHub repositories. Here's an\nanalysis of issues across the Jupyter organizations.", "_____no_output_____" ] ], [ [ "created = issues.query('state == \"OPEN\" and createdAt > @start_date and createdAt < @end_date')\nclosed = issues.query('state == \"CLOSED\" and closedAt > @start_date and closedAt < @end_date')", "_____no_output_____" ], [ "created_counts = created.groupby(['org', 'repo']).count()['number'].reset_index()\ncreated_counts['org/repo'] = created_counts.apply(lambda a: a['org'] + '/' + a['repo'], axis=1)\nsorted_vals = created_counts.sort_values(['org', 'number'], ascending=False)['repo'].values\nalt.Chart(data=created_counts, title=f\"Issues created in the last {n_days} days\").mark_bar().encode(\n x=alt.X('repo', sort=alt.Sort(sorted_vals.tolist())),\n y='number',\n color='org',\n)", "_____no_output_____" ], [ "closed_counts = closed.groupby(['org', 'repo']).count()['number'].reset_index()\nclosed_counts['org/repo'] = closed_counts.apply(lambda a: a['org'] + '/' + a['repo'], axis=1)\nsorted_vals = closed_counts.sort_values(['org', 'number'], ascending=False)['repo'].values\nalt.Chart(data=closed_counts, title=f\"Issues closed in the last {n_days} days\").mark_bar().encode(\n x=alt.X('repo', sort=alt.Sort(sorted_vals.tolist())),\n y='number',\n color='org',\n)", "_____no_output_____" ], [ "created_closed = pd.merge(created_counts.rename(columns={'number': 'created'}).drop(columns='org/repo'),\n closed_counts.rename(columns={'number': 'closed'}).drop(columns='org/repo'),\n on=['org', 'repo'], how='outer')\n\ncreated_closed = pd.melt(created_closed, id_vars=['org', 'repo'], var_name=\"kind\", value_name=\"count\").replace(np.nan, 0)", "_____no_output_____" ], [ "charts = []\nfor org in github_orgs:\n # Pick the top 10 repositories\n this_issues = created_closed.query('org == @org')\n top_repos = this_issues.groupby(['repo']).sum().sort_values(by='count', ascending=False).head(10).index\n ch = alt.Chart(this_issues.query('repo in 
@top_repos'), width=120).mark_bar().encode(\n x=alt.X(\"kind\", axis=alt.Axis(labelFontSize=15, title=\"\")), \n y=alt.Y('count', axis=alt.Axis(titleFontSize=15, labelFontSize=12)),\n color='kind',\n column=alt.Column(\"repo\", header=alt.Header(title=f\"Issue activity, last {n_days} days for {org}\", titleFontSize=15, labelFontSize=12))\n )\n charts.append(ch)\nalt.hconcat(*charts)", "_____no_output_____" ], [ "# Set to datetime\nfor kind in ['createdAt', 'closedAt']:\n closed.loc[:, kind] = pd.to_datetime(closed[kind])\n \nclosed.loc[:, 'time_open'] = closed['closedAt'] - closed['createdAt']\nclosed.loc[:, 'time_open'] = closed['time_open'].dt.total_seconds()", "/home/choldgraf/anaconda/envs/dev/lib/python3.7/site-packages/pandas/core/indexing.py:480: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n self.obj[item] = s\n/home/choldgraf/anaconda/envs/dev/lib/python3.7/site-packages/pandas/core/indexing.py:362: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n self.obj[key] = _infer_fill_value(value)\n" ], [ "time_open = closed.groupby(['org', 'repo']).agg({'time_open': 'median'}).reset_index()\ntime_open['time_open'] = time_open['time_open'] / (60 * 60 * 24)\ntime_open['org/repo'] = time_open.apply(lambda a: a['org'] + '/' + a['repo'], axis=1)\nsorted_vals = time_open.sort_values(['org', 'time_open'], ascending=False)['repo'].values\nalt.Chart(data=time_open, title=f\"Time to close for issues closed in the last {n_days} days\").mark_bar().encode(\n x=alt.X('repo', sort=alt.Sort(sorted_vals.tolist())),\n y=alt.Y('time_open', title=\"Median Days Open\"),\n color='org',\n)", "_____no_output_____" ] ], [ [ "### A list of recent issues\n\nBelow is a list of issues with recent activity in each repository. 
If they seem of interest\nto you, click on their links and jump in to participate!", "_____no_output_____" ] ], [ [ "# Add comment count data to issues and PRs\ncomment_counts = (\n comments\n .query(\"createdAt > @start_date and createdAt < @end_date\")\n .groupby(['org', 'repo', 'issue_id'])\n .count().iloc[:, 0].to_frame()\n)\ncomment_counts.columns = ['n_comments']\ncomment_counts = comment_counts.reset_index()", "_____no_output_____" ], [ "n_plot = 5\ntabs = widgets.Tab(children=[])\nfor ii, (org, idata) in enumerate(comment_counts.groupby('org')):\n issue_md = []\n issue_md.append(f\"#### {org}\")\n issue_md.append(\"\")\n for repo, i_issues in idata.groupby('repo'):\n issue_md.append(f\"##### [{org}/{repo}](https://github.com/{org}/{repo})\")\n\n top_issues = i_issues.sort_values('n_comments', ascending=False).head(n_plot)\n top_issue_list = pd.merge(issues, top_issues, left_on=['org', 'repo', 'number'], right_on=['org', 'repo', 'issue_id'])\n for _, issue in top_issue_list.sort_values('n_comments', ascending=False).head(n_plot).iterrows():\n user_name = issue['author']\n user_url = author_url(user_name)\n issue_number = issue['number']\n issue_html = issue['url']\n issue_title = issue['title']\n\n text = f\"* [(#{issue_number})]({issue_html}): _{issue_title}_ by **[@{user_name}]({user_url})**\"\n issue_md.append(text)\n\n issue_md.append('')\n md_html = HTML(markdown('\\n'.join(issue_md)))\n \n children = list(tabs.children)\n children.append(HTML(markdown('\\n'.join(issue_md))))\n tabs.children = tuple(children)\n tabs.set_title(ii, org)\n \ndisplay(Markdown(f\"Here are the top {n_plot} active issues in each repository in the last {n_days} days\"))\ndisplay(tabs)", "_____no_output_____" ] ], [ [ "## Commenters across repositories\n\nThese are commenters across all issues and pull requests in the last several days.\nThese are colored by the commenter's association with the organization. 
For information\nabout what these associations mean, [see this StackOverflow post](https://stackoverflow.com/a/28866914/1927102).", "_____no_output_____" ] ], [ [ "commentors = (\n comments\n .query(\"createdAt > @start_date and createdAt < @end_date\")\n .groupby(['org', 'repo', 'author', 'authorAssociation'])\n .count().rename(columns={'id': 'count'})['count']\n .reset_index()\n .sort_values(['org', 'count'], ascending=False)\n)", "_____no_output_____" ], [ "n_plot = 50\ncharts = []\nfor ii, (iorg, idata) in enumerate(commentors.groupby(['org'])):\n title = f\"Top {n_plot} commentors for {iorg} in the last {n_days} days\"\n idata = idata.groupby('author', as_index=False).agg({'count': 'sum', 'authorAssociation': 'first'})\n idata = idata.sort_values('count', ascending=False).head(n_plot)\n ch = alt.Chart(data=idata.head(n_plot), width=1000, title=title).mark_bar().encode(\n x='author',\n y='count',\n color=alt.Color('authorAssociation', scale=alt.Scale(domain=author_types, range=author_colors))\n )\n charts.append(ch)\nalt.hconcat(*charts)", "_____no_output_____" ] ], [ [ "## First responders\n\nFirst responders are the first people to respond to a new issue in one of the repositories.\nThe following plots show first responders for recently-created issues.", "_____no_output_____" ] ], [ [ "first_comments = []\nfor (org, repo, issue_id), i_comments in comments.groupby(['org', 'repo', 'issue_id']):\n ix_min = pd.to_datetime(i_comments['createdAt']).idxmin()\n first_comment = i_comments.loc[ix_min]\n if isinstance(first_comment, pd.DataFrame):\n first_comment = first_comment.iloc[0]\n first_comments.append(first_comment)\nfirst_comments = pd.concat(first_comments, axis=1).T", "_____no_output_____" ], [ "first_responder_counts = first_comments.groupby(['org', 'author', 'authorAssociation'], as_index=False).\\\n count().rename(columns={'id': 'n_first_responses'}).sort_values(['org', 'n_first_responses'], ascending=False)\n", "_____no_output_____" ], [ "n_plot = 50\ncharts = []\nfor ii, (iorg, idata) in enumerate(first_responder_counts.groupby(['org'])):\n title = f\"Top {n_plot} first responders for {iorg} in the last {n_days} days\"\n idata = idata.groupby('author', as_index=False).agg({'n_first_responses': 'sum', 'authorAssociation': 'first'})\n idata = idata.sort_values('n_first_responses', ascending=False).head(n_plot)\n ch = alt.Chart(data=idata.head(n_plot), width=1000, title=title).mark_bar().encode(\n x='author',\n y='n_first_responses',\n color=alt.Color('authorAssociation', scale=alt.Scale(domain=author_types, range=author_colors))\n )\n charts.append(ch)\nalt.hconcat(*charts)", "_____no_output_____" ], [ "%%html\n<script src=\"https://cdn.rawgit.com/parente/4c3e6936d0d7a46fd071/raw/65b816fb9bdd3c28b4ddf3af602bfd6015486383/code_toggle.js\"></script>", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
cb97a1809f43472eec97e9327ad33a417301575a
1,424
ipynb
Jupyter Notebook
persist_model/Music_Projects_Persist_Model.ipynb
ptyadana/ML-Music-Recommender
769310d96c704b92a313f38eb3fbf6a6a2189e53
[ "MIT" ]
2
2020-12-16T17:59:56.000Z
2020-12-16T18:00:04.000Z
persist_model/Music_Projects_Persist_Model.ipynb
ptyadana/ML-Music-Recommender
769310d96c704b92a313f38eb3fbf6a6a2189e53
[ "MIT" ]
null
null
null
persist_model/Music_Projects_Persist_Model.ipynb
ptyadana/ML-Music-Recommender
769310d96c704b92a313f38eb3fbf6a6a2189e53
[ "MIT" ]
null
null
null
22.25
63
0.544944
[ [ [ "import pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.externals import joblib\n\nmusic_data = pd.read_csv('music.csv')\nX = music_data.drop(columns='genre')\ny = music_data['genre']\n\n# model = DecisionTreeClassifier()\n# model.fit(X, y)\n\n#create trained model, so that we can use it next time\n# joblib.dump(model, 'music-recommender.joblib')\n\n\n#use trained model and make predictions\nmodel = joblib.load('music-recommender.joblib')\npredictions = model.predict([[21, 1]])\npredictions", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
cb97a7b348a3e48438e0ff328fa3a08af2b44226
38,077
ipynb
Jupyter Notebook
4_8_Vehicle_Motion_and_Calculus/.ipynb_checkpoints/Speed from Position Data-checkpoint.ipynb
ramanpreet9/CVND_udacity
d70e3dd897c463eb594e7804bc4c323cc7b4a704
[ "MIT" ]
null
null
null
4_8_Vehicle_Motion_and_Calculus/.ipynb_checkpoints/Speed from Position Data-checkpoint.ipynb
ramanpreet9/CVND_udacity
d70e3dd897c463eb594e7804bc4c323cc7b4a704
[ "MIT" ]
5
2021-03-19T01:13:24.000Z
2022-03-11T23:49:57.000Z
4_8_Vehicle_Motion_and_Calculus/.ipynb_checkpoints/Speed from Position Data-checkpoint.ipynb
ramanpreet9/CVND_udacity
d70e3dd897c463eb594e7804bc4c323cc7b4a704
[ "MIT" ]
null
null
null
106.658263
14,264
0.846049
[ [ [ "# Speed from Position Data\n\nIn this Notebook you'll work with data just like the data you'll be using in the final project for this course. That data comes from CSVs that looks like this:\n\n| timestamp | displacement | yaw_rate | acceleration |\n| :-------: | :----------: | :------: | :----------: |\n| 0.0 | 0 | 0.0 | 0.0 |\n| 0.25 | 0.0 | 0.0 | 19.6 |\n| 0.5 | 1.225 | 0.0 | 19.6 |\n| 0.75 | 3.675 | 0.0 | 19.6 |\n| 1.0 | 7.35 | 0.0 | 19.6 |\n| 1.25 | 12.25 | 0.0 | 0.0 |\n| 1.5 | 17.15 | -2.82901631903 | 0.0 |\n| 1.75 | 22.05 | -2.82901631903 | 0.0 |\n| 2.0 | 26.95 | -2.82901631903 | 0.0 |\n| 2.25 | 31.85 | -2.82901631903 | 0.0 |", "_____no_output_____" ] ], [ [ "from helpers import process_data\nfrom matplotlib import pyplot as plt", "_____no_output_____" ], [ "PARALLEL_PARK_DATA = process_data(\"parallel_park.pickle\")", "_____no_output_____" ], [ "# This is what the first few entries in the parallel \n# park data look like. \n\nPARALLEL_PARK_DATA[:5]", "_____no_output_____" ], [ "# In this exercise we'll be differentiating (taking the\n# derivative of) displacement data. This will require \n# using only the first two columns of this data.\ntimestamps = [row[0] for row in PARALLEL_PARK_DATA]\ndisplacements = [row[1] for row in PARALLEL_PARK_DATA]\n\n# You'll use these data in the next lesson on integration\n# You can ignore them for now.\nyaw_rates = [row[2] for row in PARALLEL_PARK_DATA]\naccelerations = [row[3] for row in PARALLEL_PARK_DATA]", "_____no_output_____" ], [ "plt.title(\"Displacement vs Time while Parallel Parking\")\nplt.xlabel(\"Time (seconds)\")\nplt.ylabel(\"Displacement (meters)\")\nplt.scatter(timestamps, displacements)\nplt.show()", "_____no_output_____" ] ], [ [ "In the graph above, you can see displacement vs time data for a car as it parallel parks. Note that backwards motion winds back the odometer and reduces displacement (this isn't actually how odometers work on modern cars. Sorry Ferris Bueller)\n\nNote how for approximately 4 seconds the motion is backwards and then for the last two the car goes forwards.\n\nLet's look at some data somewhere in the middle of this trajectory", "_____no_output_____" ] ], [ [ "print(timestamps[20:22])\nprint(displacements[20:22])", "[1.25, 1.3125]\n[-1.4087500000000004, -1.5312500000000004]\n" ] ], [ [ "So you can see that at $t=1.25$ the car has displacement $x=-1.40875$ and at $t=1.3125$ the car has displacement $x=-1.53125$\n\nThis means we could calculate the speed / slope as follows:\n\n$$\\text{slope} = \\frac{\\text{vertical change}}{\\text{horizontal change}} = \\frac{\\Delta x}{\\Delta t}$$\n\nand for the numbers I just mentioned this would mean:\n\n$$\\frac{\\Delta x}{\\Delta t} = \\frac{-1.53125 - -1.40875}{1.3125 - 1.25} = \\frac{-0.1225 \\text{ meters}}{0.0625\\text{ seconds}} = -1.96 \\frac{m}{s}$$\n\nSo I can say the following:\n\n> Between $t=1.25$ and $t=1.3125$ the vehicle had an **average speed** of **-1.96 meters per second**\n\nI could make this same calculation in code as follows", "_____no_output_____" ] ], [ [ "delta_x = displacements[21] - displacements[20]\ndelta_t = timestamps[21] - timestamps[20]\nslope = delta_x / delta_t\n\nprint(slope)", "-1.9600000000000009\n" ] ], [ [ "Earlier in this lesson you worked with truly continuous functions. In that situation you could make $\\Delta t$ as small as you wanted!\n\nBut now we have real data, which means the size of $\\Delta t$ is dictated by how frequently we made measurements of displacement. 
In this case it looks like subsequent measurements are separated by\n\n$$\\Delta t = 0.0625 \\text{ seconds}$$\n\nIn the `get_derivative_from_data` function below, I demonstrate how to \"take a derivative\" of real data. Read through this code and understand how it works: in the next notebook you'll be asked to reproduce this code yourself.", "_____no_output_____" ] ], [ [ "def get_derivative_from_data(position_data, time_data):\n \"\"\"\n Calculates a list of speeds from position_data and \n time_data.\n \n Arguments:\n position_data - a list of values corresponding to \n vehicle position\n \n time_data - a list of values (equal in length to\n position_data) which give timestamps for each \n position measurement\n \n Returns:\n speeds - a list of values (which is shorter \n by ONE than the input lists) of speeds.\n \"\"\"\n # 1. Check to make sure the input lists have same length\n if len(position_data) != len(time_data):\n raise(ValueError, \"Data sets must have same length\")\n \n # 2. Prepare empty list of speeds\n speeds = []\n \n # 3. Get first values for position and time\n previous_position = position_data[0]\n previous_time = time_data[0]\n \n # 4. Begin loop through all data EXCEPT first entry\n for i in range(1, len(position_data)):\n \n # 5. get position and time data for this timestamp\n position = position_data[i]\n time = time_data[i]\n \n # 6. Calculate delta_x and delta_t\n delta_x = position - previous_position\n delta_t = time - previous_time\n \n # 7. Speed is slope. Calculate it and append to list\n speed = delta_x / delta_t\n speeds.append(speed)\n \n # 8. Update values for next iteration of the loop.\n previous_position = position\n previous_time = time\n \n return speeds\n\n# 9. Call this function with appropriate arguments\nspeeds = get_derivative_from_data(displacements, timestamps)\n\n# 10. Prepare labels for a plot\nplt.title(\"Speed vs Time while Parallel Parking\")\nplt.xlabel(\"Time (seconds)\")\nplt.ylabel(\"Speed (m / s)\")\n\n# 11. Make the plot! Note the slicing of timestamps!\nplt.scatter(timestamps[1:], speeds)\nplt.show()", "_____no_output_____" ] ], [ [ "Now that you've read through the code and seen how it's used (and what the resulting plot looks like), I want to discuss the numbered sections of the code.", "_____no_output_____" ], [ "1. The time and position data need to have equal lengths, since each position measurement is meant to correspond to one of those timestamps.\n\n2. The `speeds` list will eventually be returned at the end of the function.\n\n3. The use of the word \"previous\" in these variable names will be clearer in step 8. But basically we need to have TWO positions if we're ever going to calculate a delta X. This is where we grab the first position in the position_data list.\n\n4. Note that we loop from `range(1, len(position_data))`, which means that the first value for `i` will be `1` and **not** `0`. That's because we already grabbed element 0 in step 3.\n\n5. Get the data for this `i`.\n\n6. Calculate the change in position and time.\n\n7. Find the slope (which is the speed) and append it to the `speeds` list.\n\n8. This sets the values of `previous_position` and `previous_time` so that they are correct for the *next* iteration of this loop.\n\n9. Here we call the function with the `displacements` and `timestamps` data that we used before.\n\n10. Self-explanatory\n\n11. This part is interesting. Note that we only plot `timestamps[1:]`. This means \"every element in `timestamps` except the first one\". 
Remember how in step 4 we looped through every element except the first one? That means that our `speeds` array ends up being 1 element shorter than our original data.", "_____no_output_____" ], [ "## What to Remember\nYou don't need to memorize any of this. The important thing to remember is this: \n\nWhen you're working with real time-series data, you calculate the \"derivative\" by finding the slope between adjacent data points.\n\nYou'll be implementing this on your own in the next notebook. Feel free to come back here if you need help, but try your best to get it on your own.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
cb97a978ae79cf3fb04f372c5b7a72d0b78c44c1
6,627
ipynb
Jupyter Notebook
Data_Processing.ipynb
sonhmai/movielens
11c25071074339583d9da85246a8ae1ebdc927ca
[ "MIT" ]
null
null
null
Data_Processing.ipynb
sonhmai/movielens
11c25071074339583d9da85246a8ae1ebdc927ca
[ "MIT" ]
null
null
null
Data_Processing.ipynb
sonhmai/movielens
11c25071074339583d9da85246a8ae1ebdc927ca
[ "MIT" ]
null
null
null
27.384298
128
0.495096
[ [ [ "# MovieLens Data Processing", "_____no_output_____" ] ], [ [ "# Import packages\nimport os\nimport pandas as pd", "_____no_output_____" ], [ "# Define file directories\nMOVIELENS_DIR = 'dat'\nUSER_DATA_FILE = 'users.dat'\nMOVIE_DATA_FILE = 'movies.dat'\nRATING_DATA_FILE = 'ratings.dat'", "_____no_output_____" ], [ "# Specify User's Age and Occupation Column\nAGES = { 1: \"Under 18\", 18: \"18-24\", 25: \"25-34\", 35: \"35-44\", 45: \"45-49\", 50: \"50-55\", 56: \"56+\" }\nOCCUPATIONS = { 0: \"other or not specified\", 1: \"academic/educator\", 2: \"artist\", 3: \"clerical/admin\",\n 4: \"college/grad student\", 5: \"customer service\", 6: \"doctor/health care\",\n 7: \"executive/managerial\", 8: \"farmer\", 9: \"homemaker\", 10: \"K-12 student\", 11: \"lawyer\",\n 12: \"programmer\", 13: \"retired\", 14: \"sales/marketing\", 15: \"scientist\", 16: \"self-employed\",\n 17: \"technician/engineer\", 18: \"tradesman/craftsman\", 19: \"unemployed\", 20: \"writer\" }", "_____no_output_____" ], [ "# Define csv files to be saved into\nUSERS_CSV_FILE = 'users.csv'\nMOVIES_CSV_FILE = 'movies.csv'\nRATINGS_CSV_FILE = 'ratings.csv'", "_____no_output_____" ], [ "# Read the Ratings File\nratings = pd.read_csv(os.path.join(MOVIELENS_DIR, RATING_DATA_FILE), \n sep='::', \n engine='python', \n encoding='latin-1',\n names=['user_id', 'movie_id', 'rating', 'timestamp'])\n\n# Set max_userid to the maximum user_id in the ratings\nmax_userid = ratings['user_id'].drop_duplicates().max()\n# Set max_movieid to the maximum movie_id in the ratings\nmax_movieid = ratings['movie_id'].drop_duplicates().max()\n\n# Process ratings dataframe for Keras Deep Learning model\n# Add user_emb_id column whose values == user_id - 1\nratings['user_emb_id'] = ratings['user_id'] - 1\n# Add movie_emb_id column whose values == movie_id - 1\nratings['movie_emb_id'] = ratings['movie_id'] - 1\n\nprint(len(ratings), 'ratings loaded')", "1000209 ratings loaded\n" ], [ "# Save into ratings.csv\nratings.to_csv(RATINGS_CSV_FILE, \n sep='\\t', \n header=True, \n encoding='latin-1', \n columns=['user_id', 'movie_id', 'rating', 'timestamp', 'user_emb_id', 'movie_emb_id'])\nprint('Saved to', RATINGS_CSV_FILE)", "Saved to ratings.csv\n" ], [ "# Read the Users File\nusers = pd.read_csv(os.path.join(MOVIELENS_DIR, USER_DATA_FILE), \n sep='::', \n engine='python', \n encoding='latin-1',\n names=['user_id', 'gender', 'age', 'occupation', 'zipcode'])\nusers['age_desc'] = users['age'].apply(lambda x: AGES[x])\nusers['occ_desc'] = users['occupation'].apply(lambda x: OCCUPATIONS[x])\nprint(len(users), 'descriptions of', max_userid, 'users loaded.')", "6040 descriptions of 6040 users loaded.\n" ], [ "# Save into users.csv\nusers.to_csv(USERS_CSV_FILE, \n sep='\\t', \n header=True, \n encoding='latin-1',\n columns=['user_id', 'gender', 'age', 'occupation', 'zipcode', 'age_desc', 'occ_desc'])\nprint('Saved to', USERS_CSV_FILE)", "Saved to users.csv\n" ], [ "# Read the Movies File\nmovies = pd.read_csv(os.path.join(MOVIELENS_DIR, MOVIE_DATA_FILE), \n sep='::', \n engine='python', \n encoding='latin-1',\n names=['movie_id', 'title', 'genres'])\nprint(len(movies), 'descriptions of', max_movieid, 'movies loaded.')", "3883 descriptions of 3952 movies loaded.\n" ], [ "# Save into movies.csv\nmovies.to_csv(MOVIES_CSV_FILE, \n sep='\\t', \n header=True, \n columns=['movie_id', 'title', 'genres'])\nprint('Saved to', MOVIES_CSV_FILE)", "Saved to movies.csv\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb97aee03acee341279c28dae8ad9ebcd5729736
7,284
ipynb
Jupyter Notebook
test-lab/5 - appium_v2.ipynb
Manzanero/automatizar-python
3fc16ba9d73cf6857788205989849804fc5ef484
[ "MIT" ]
null
null
null
test-lab/5 - appium_v2.ipynb
Manzanero/automatizar-python
3fc16ba9d73cf6857788205989849804fc5ef484
[ "MIT" ]
null
null
null
test-lab/5 - appium_v2.ipynb
Manzanero/automatizar-python
3fc16ba9d73cf6857788205989849804fc5ef484
[ "MIT" ]
null
null
null
28.232558
131
0.553405
[ [ [ "## Herencia\nUna clase puede heredar todo los métodos y atributos de otra con esta sintaxis. Ejemplo de una clase heredada:", "_____no_output_____" ] ], [ [ "class ClaseA(object):\n a = 1\n b = 2\n \n def c(self):\n return 3\n \n def d(self):\n return 4\n\n\nclass ClaseB(ClaseA):\n b = 5\n \n def d(self):\n return 6\n\n \nclase_a = ClaseA()\nclase_b = ClaseB()\n \nprint('1a:', clase_a.a)\nprint('2a:', clase_a.b)\nprint('3a:', clase_a.c())\nprint('4a:', clase_a.d())\n\nprint('1b:', clase_b.a)\nprint('2b:', clase_b.b)\nprint('3b:', clase_b.c())\nprint('4b:', clase_b.d())", "_____no_output_____" ] ], [ [ "# Appium\n___\n## Conectar un dispositivo\n___\n### Pasos comunes\nPara conectar un dispositivo de Android hay que seguir los siguientes pasos:\n1. Descargar e instalar Java jdk 1.8: https://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html\n2. Añadir la variable de entorno JAVA_HOME = \"C:\\Program Files\\Java\\jdk {version} \"\n3. Descargar e instalar Android Studio: https://developer.android.com/studio\n4. Añadir la variable de entorno ANDROID_HOME = \"C:\\Users\\\\ {user} \\AppData\\Local\\Android\\Sdk\\\"\n5. Añadir el directorio \"C:\\Users\\\\ {user} \\AppData\\Local\\Android\\Sdk\\platform-tools\\\" al Path de Windows\n\n#### Emulador\nPara crear un emulador hay que seguir los siguientes pasos:\n1. Lanzar Android Studio, si pide crear un proyecto se crea un vacío (que no usaremos para nada)\n2. Dejar que se actualice con las actualizaciones por defecto (puede variar dependiendo de la versión)\n3. Ir a \"Tools\" > \"AVD Manager\"\n4. CLick en \"Create Virtual Device\".\n5. Seleccionar \"Phone\" > \"Nexus 5X\", \"Next\"\n6. Seleccionar \"Oreo\" (API Level 27, Android 8.1), si no está disponible click en descargar, \"Next\"\n7. Nombrar y \"Fisinsh\"\n\n#### Real\nPara conectar un dispositivo real hay que seguir los siguientes pasos (No todos los dispositivos son compatibles):\n1. En el dispositivo: Ir a \"Settings\" > \"About phone\" > \"Software information\" y pulsar \"Build number\" 7 veces\n2. En el dispositivo: Ir a \"Settings\" > \"Developer options\" y activar \"Stay awake\" y \"USB debugging\"\n3. Conectar por USB y aceptar permisos\n\n### Comprobar la conexión\nPar comprobar que todo funciona correctamente ejecutar:", "_____no_output_____" ] ], [ [ "! adb devices", "_____no_output_____" ] ], [ [ "___\n## Levantar un servidor de Appium en local\n___\n1. Descargar e instalar Appium-Desktop: https://github.com/appium/appium-desktop/releases/\n2. Iniciar Appium (tarda)\n3. Poner Host: 0.0.0.0 y Puerto: 4723, pulsar \"Start Server\"", "_____no_output_____" ], [ "___\n## Crear un script con el cliente de Appium para Python\n___\nSe instalan los sdk's de Appium para Python:", "_____no_output_____" ] ], [ [ "! 
pip install Appium-Python-Client --trusted-host pypi.python.org", "_____no_output_____" ] ], [ [ "___\nImportamos la librería:", "_____no_output_____" ] ], [ [ "from appium import webdriver", "_____no_output_____" ], [ "import os\n\ndesired_caps = {}\ndesired_caps['platformName'] = 'Android'\ndesired_caps['deviceName'] = 'Android Emulator'\ndesired_caps['app'] = os.path.join(os.getcwd(), 'example.apk') # ruta a una apk de ejemplo\n\ndriver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)", "_____no_output_____" ], [ "from appium.webdriver.common.mobileby import MobileBy\n\n\nclass CommonPage(object):\n \n def __init__(self, driver):\n self.driver = driver \n\n \n\nclass InitPage(CommonPage): \n \n def btn__add_contact(self):\n return driver.find_element(MobileBy.ACCESSIBILITY_ID, \"Add Contact\")\n \n\nclass AddContactPage(CommonPage):\n \n def tbx__contact_name(self):\n return driver.find_element(MobileBy.ID, \"com.example.android.contactmanager:id/contactNameEditText\")\n \n def tbx__contact_phone(self):\n return driver.find_element(MobileBy.ID, \"com.example.android.contactmanager:id/contactPhoneEditText\")", "_____no_output_____" ], [ "InitPage(driver).btn__add_contact().click()\n\nimport time\ntime.sleep(1)\n\npage__add_contact = AddContactPage(driver)\npage__add_contact.tbx__contact_name().send_keys('Alejandro')\npage__add_contact.tbx__contact_phone().send_keys('987654321')", "_____no_output_____" ], [ "driver.quit()", "_____no_output_____" ] ], [ [ "___\n## Obtener los localizadores de objectos manualmente\n___\n1. Desde Appium, ir a \"File\" > \"New Session Window...\"\n2. Rellenar la tabla con los valores:\n\nName | Type | Value\n-----|------|------\nplatformName | text | Android\ndeviceName | text | Android Emulator\napp | text | C:\\Users\\mtp1923\\test-lab\\example.apk\n\n3. Pulsar en \"Start Session\"\n\nSe abrirá una ventana que es similar a pulsar F12 en Chrome", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ] ]
cb97b6dbef21872a01f0fa2ddf4af62d896d8802
143,635
ipynb
Jupyter Notebook
courses/machine_learning/deepdive2/launching_into_ml/labs/intro_logistic_regression.ipynb
Glairly/introduction_to_tensorflow
aa0a44d9c428a6eb86d1f79d73f54c0861b6358d
[ "Apache-2.0" ]
2
2022-01-06T11:52:57.000Z
2022-01-09T01:53:56.000Z
courses/machine_learning/deepdive2/launching_into_ml/labs/intro_logistic_regression.ipynb
Glairly/introduction_to_tensorflow
aa0a44d9c428a6eb86d1f79d73f54c0861b6358d
[ "Apache-2.0" ]
null
null
null
courses/machine_learning/deepdive2/launching_into_ml/labs/intro_logistic_regression.ipynb
Glairly/introduction_to_tensorflow
aa0a44d9c428a6eb86d1f79d73f54c0861b6358d
[ "Apache-2.0" ]
null
null
null
156.806769
46,709
0.872336
[ [ [ "# Introduction to Logistic Regression \n\n\n\n## Learning Objectives\n\n1. Create Seaborn plots for Exploratory Data Analysis \n2. Train a Logistic Regression Model using Scikit-Learn\n\n\n## Introduction \n\nThis lab is an introduction to logistic regression using Python and Scikit-Learn. This lab serves as a foundation for more complex algorithms and machine learning models that you will encounter in the course. In this lab, we will use a synthetic advertising data set, indicating whether or not a particular internet user clicked on an Advertisement on a company website. We will try to create a model that will predict whether or not they will click on an ad based off the features of that user. \n\n\nEach learning objective will correspond to a __#TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solutions/intro_logistic_regression.ipynb). \n", "_____no_output_____" ], [ "### Import Libraries", "_____no_output_____" ] ], [ [ "import os\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n%matplotlib inline", "_____no_output_____" ] ], [ [ "### Load the Dataset\n\nWe will use a synthetic [advertising](https://www.kaggle.com/fayomi/advertising) dataset. This data set contains the following features:\n\n* 'Daily Time Spent on Site': consumer time on site in minutes\n* 'Age': customer age in years\n* 'Area Income': Avg. Income of geographical area of consumer\n* 'Daily Internet Usage': Avg. minutes a day consumer is on the internet\n* 'Ad Topic Line': Headline of the advertisement\n* 'City': City of consumer\n* 'Male': Whether or not consumer was male\n* 'Country': Country of consumer\n* 'Timestamp': Time at which consumer clicked on Ad or closed window\n* 'Clicked on Ad': 0 or 1 indicated clicking on Ad", "_____no_output_____" ] ], [ [ "# TODO 1: Read in the advertising.csv file and set it to a data frame called ad_data.\n# TODO: Your code goes here", "_____no_output_____" ] ], [ [ "**Check the head of ad_data**", "_____no_output_____" ] ], [ [ "ad_data.head()", "_____no_output_____" ] ], [ [ "**Use info and describe() on ad_data**", "_____no_output_____" ] ], [ [ "ad_data.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1000 entries, 0 to 999\nData columns (total 10 columns):\nDaily Time Spent on Site 1000 non-null float64\nAge 1000 non-null int64\nArea Income 1000 non-null float64\nDaily Internet Usage 1000 non-null float64\nAd Topic Line 1000 non-null object\nCity 1000 non-null object\nMale 1000 non-null int64\nCountry 1000 non-null object\nTimestamp 1000 non-null object\nClicked on Ad 1000 non-null int64\ndtypes: float64(3), int64(3), object(4)\nmemory usage: 78.2+ KB\n" ], [ "ad_data.describe()", "_____no_output_____" ] ], [ [ "Let's check for any null values.", "_____no_output_____" ] ], [ [ "ad_data.isnull().sum()", "_____no_output_____" ] ], [ [ "## Exploratory Data Analysis (EDA)\n\nLet's use seaborn to explore the data! Try recreating the plots shown below!", "_____no_output_____" ], [ "TODO 1: **Create a histogram of the Age**", "_____no_output_____" ] ], [ [ "# TODO: Your code goes here", "_____no_output_____" ] ], [ [ "TODO 1: **Create a jointplot showing Area Income versus Age.**", "_____no_output_____" ] ], [ [ "# TODO: Your code goes here", "_____no_output_____" ] ], [ [ "TODO 2: **Create a jointplot showing the kde distributions of Daily Time spent on site vs. 
Age.**", "_____no_output_____" ] ], [ [ "# TODO: Your code goes here", "_____no_output_____" ] ], [ [ "TODO 1: **Create a jointplot of 'Daily Time Spent on Site' vs. 'Daily Internet Usage'**", "_____no_output_____" ] ], [ [ "# TODO: Your code goes here", "_____no_output_____" ] ], [ [ "# Logistic Regression\n\nLogistic regression is a supervised machine learning process. It is similar to linear regression, but rather than predict a continuous value, we try to estimate probabilities by using a logistic function. Note that even though it has regression in the name, it is for classification.\nWhile linear regression is acceptable for estimating values, logistic regression is best for predicting the class of an observation\n\nNow it's time to do a train test split, and train our model! You'll have the freedom here to choose columns that you want to train on!", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ] ], [ [ "Next, let's define the features and label. Briefly, feature is input; label is output. This applies to both classification and regression problems.", "_____no_output_____" ] ], [ [ "X = ad_data[['Daily Time Spent on Site', 'Age', 'Area Income','Daily Internet Usage', 'Male']]\ny = ad_data['Clicked on Ad']", "_____no_output_____" ] ], [ [ "TODO 2: **Split the data into training set and testing set using train_test_split**", "_____no_output_____" ] ], [ [ "# TODO: Your code goes here", "_____no_output_____" ] ], [ [ "**Train and fit a logistic regression model on the training set.**", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LogisticRegression", "_____no_output_____" ], [ "logmodel = LogisticRegression()\nlogmodel.fit(X_train,y_train)", "_____no_output_____" ] ], [ [ "## Predictions and Evaluations\n**Now predict values for the testing data.**", "_____no_output_____" ] ], [ [ "predictions = logmodel.predict(X_test)", "_____no_output_____" ] ], [ [ "**Create a classification report for the model.**", "_____no_output_____" ] ], [ [ "from sklearn.metrics import classification_report", "_____no_output_____" ], [ "print(classification_report(y_test,predictions))", " precision recall f1-score support\n\n 0 0.87 0.96 0.91 162\n 1 0.96 0.86 0.91 168\n\navg / total 0.91 0.91 0.91 330\n\n" ] ], [ [ "Copyright 2021 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at\nhttp://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
cb97b95716bea9df5ad15489f1ebd76b696e5752
80,503
ipynb
Jupyter Notebook
gan-mnist/MNIST_GAN_Exercise.ipynb
noopur2805/deep-learning-v2-pytorch
80bd8c91a371f94f17a8cf91d2a4a692ced8033a
[ "MIT" ]
1
2020-07-15T14:01:32.000Z
2020-07-15T14:01:32.000Z
gan-mnist/MNIST_GAN_Exercise.ipynb
noopur2805/deep-learning-v2-pytorch
80bd8c91a371f94f17a8cf91d2a4a692ced8033a
[ "MIT" ]
null
null
null
gan-mnist/MNIST_GAN_Exercise.ipynb
noopur2805/deep-learning-v2-pytorch
80bd8c91a371f94f17a8cf91d2a4a692ced8033a
[ "MIT" ]
null
null
null
89.051991
16,504
0.787598
[ [ [ "# Generative Adversarial Network\n\nIn this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!\n\nGANs were [first reported on](https://arxiv.org/abs/1406.2661) in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:\n\n* [Pix2Pix](https://affinelayer.com/pixsrv/) \n* [CycleGAN & Pix2Pix in PyTorch, Jun-Yan Zhu](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix)\n* [A list of generative models](https://github.com/wiseodd/generative-models)\n\nThe idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes \"fake\" data to pass to the discriminator. The discriminator also sees real training data and predicts if the data it's received is real or fake. \n> * The generator is trained to fool the discriminator, it wants to output data that looks _as close as possible_ to real, training data. \n* The discriminator is a classifier that is trained to figure out which data is real and which is fake. \n\nWhat ends up happening is that the generator learns to make data that is indistinguishable from real data to the discriminator.\n\n<img src='assets/gan_pipeline.png' width=70% />\n\nThe general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector that the generator uses to construct its fake images. This is often called a **latent vector** and that vector space is called **latent space**. As the generator trains, it figures out how to map latent vectors to recognizable images that can fool the discriminator.\n\nIf you're interested in generating only new images, you can throw out the discriminator after training. In this notebook, I'll show you how to define and train these adversarial networks in PyTorch and generate new images!", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nimport numpy as np\nimport torch\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "from torchvision import datasets\nimport torchvision.transforms as transforms\n\n# number of subprocesses to use for data loading\nnum_workers = 0\n# how many samples per batch to load\nbatch_size = 64\n\n# convert data to torch.FloatTensor\ntransform = transforms.ToTensor()\n\n# get the training datasets\ntrain_data = datasets.MNIST(root='data', train=True,\n download=True, transform=transform)\n\n# prepare data loader\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,\n num_workers=num_workers)", "_____no_output_____" ] ], [ [ "### Visualize the data", "_____no_output_____" ] ], [ [ "# obtain one batch of training images\ndataiter = iter(train_loader)\nimages, labels = dataiter.next()\nimages = images.numpy()\n\n# get one image from the batch\nimg = np.squeeze(images[0])\n\nfig = plt.figure(figsize = (3,3)) \nax = fig.add_subplot(111)\nax.imshow(img, cmap='gray')", "_____no_output_____" ] ], [ [ "---\n# Define the Model\n\nA GAN is comprised of two adversarial networks, a discriminator and a generator.", "_____no_output_____" ], [ "## Discriminator\n\nThe discriminator network is going to be a pretty typical linear classifier. 
To make this network a universal function approximator, we'll need at least one hidden layer, and these hidden layers should have one key attribute:\n> All hidden layers will have a [Leaky ReLu](https://pytorch.org/docs/stable/nn.html#torch.nn.LeakyReLU) activation function applied to their outputs.\n\n<img src='assets/gan_network.png' width=70% />\n\n#### Leaky ReLu\n\nWe should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.\n\n<img src='assets/leaky_relu.png' width=40% />\n\n#### Sigmoid Output\n\nWe'll also take the approach of using a more numerically stable loss function on the outputs. Recall that we want the discriminator to output a value 0-1 indicating whether an image is _real or fake_. \n> We will ultimately use [BCEWithLogitsLoss](https://pytorch.org/docs/stable/nn.html#bcewithlogitsloss), which combines a `sigmoid` activation function **and** and binary cross entropy loss in one function. \n\nSo, our final output layer should not have any activation function applied to it.", "_____no_output_____" ] ], [ [ "import torch.nn as nn\nimport torch.nn.functional as F\n\nclass Discriminator(nn.Module):\n\n def __init__(self, input_size, hidden_dim, output_size):\n super(Discriminator, self).__init__()\n \n # define all layers\n self.fc1 = nn.Linear(input_size, hidden_dim * 4)\n self.fc2 = nn.Linear(hidden_dim * 4, hidden_dim * 2)\n self.fc3 = nn.Linear(hidden_dim * 2, hidden_dim)\n \n self.fc4 = nn.Linear(hidden_dim, output_size)\n self.dropout = nn.Dropout(0.3)\n \n def forward(self, x):\n # flatten image\n x = x.view(-1, 28 * 28)\n \n # pass x through all layers\n # apply leaky relu activation to all hidden layers\n x = F.leaky_relu(self.fc1(x), 0.2)\n x = self.dropout(x)\n x = F.leaky_relu(self.fc2(x), 0.2)\n x = self.dropout(x)\n x = F.leaky_relu(self.fc3(x), 0.2)\n x = self.dropout(x)\n out = self.fc4(x)\n\n return out\n", "_____no_output_____" ] ], [ [ "## Generator\n\nThe generator network will be almost exactly the same as the discriminator network, except that we're applying a [tanh activation function](https://pytorch.org/docs/stable/nn.html#tanh) to our output layer.\n\n#### tanh Output\nThe generator has been found to perform the best with $tanh$ for the generator output, which scales the output to be between -1 and 1, instead of 0 and 1. \n\n<img src='assets/tanh_fn.png' width=40% />\n\nRecall that we also want these outputs to be comparable to the *real* input pixel values, which are read in as normalized values between 0 and 1. \n> So, we'll also have to **scale our real input images to have pixel values between -1 and 1** when we train the discriminator. 
\n\nI'll do this in the training loop, later on.", "_____no_output_____" ] ], [ [ "class Generator(nn.Module):\n\n def __init__(self, input_size, hidden_dim, output_size):\n super(Generator, self).__init__()\n \n # define all layers\n self.fc1 = nn.Linear(input_size, hidden_dim)\n self.fc2 = nn.Linear(hidden_dim, hidden_dim * 2)\n self.fc3 = nn.Linear(hidden_dim * 2, hidden_dim * 4)\n \n self.fc4 = nn.Linear(hidden_dim * 4, output_size)\n self.dropout = nn.Dropout(0.2)\n\n def forward(self, x):\n # pass x through all layers\n x = F.leaky_relu(self.fc1(x), 0.2)\n x = self.dropout(x)\n x = F.leaky_relu(self.fc2(x), 0.2)\n x = self.dropout(x)\n x = F.leaky_relu(self.fc3(x), 0.2)\n x = self.dropout(x)\n # final layer should have tanh applied\n out = F.tanh(self.fc4(x))\n \n return out", "_____no_output_____" ] ], [ [ "## Model hyperparameters", "_____no_output_____" ] ], [ [ "# Discriminator hyperparams\n\n# Size of input image to discriminator (28*28)\ninput_size = 784\n# Size of discriminator output (real or fake)\nd_output_size = 1\n# Size of *last* hidden layer in the discriminator\nd_hidden_size = 32\n\n# Generator hyperparams\n\n# Size of latent vector to give to generator\nz_size = 100\n# Size of discriminator output (generated image)\ng_output_size = 784\n# Size of *first* hidden layer in the generator\ng_hidden_size = 32", "_____no_output_____" ] ], [ [ "## Build complete network\n\nNow we're instantiating the discriminator and generator from the classes defined above. Make sure you've passed in the correct input arguments.", "_____no_output_____" ] ], [ [ "# instantiate discriminator and generator\nD = Discriminator(input_size, d_hidden_size, d_output_size)\nG = Generator(z_size, g_hidden_size, g_output_size)\n\n# check that they are as you expect\nprint(D)\nprint()\nprint(G)", "Discriminator(\n (fc1): Linear(in_features=784, out_features=128, bias=True)\n (fc2): Linear(in_features=128, out_features=64, bias=True)\n (fc3): Linear(in_features=64, out_features=32, bias=True)\n (fc4): Linear(in_features=32, out_features=1, bias=True)\n (dropout): Dropout(p=0.3, inplace=False)\n)\n\nGenerator(\n (fc1): Linear(in_features=100, out_features=32, bias=True)\n (fc2): Linear(in_features=32, out_features=64, bias=True)\n (fc3): Linear(in_features=64, out_features=128, bias=True)\n (fc4): Linear(in_features=128, out_features=784, bias=True)\n (dropout): Dropout(p=0.2, inplace=False)\n)\n" ] ], [ [ "---\n## Discriminator and Generator Losses\n\nNow we need to calculate the losses. \n\n### Discriminator Losses\n\n> * For the discriminator, the total loss is the sum of the losses for real and fake images, `d_loss = d_real_loss + d_fake_loss`. \n* Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.\n\n<img src='assets/gan_pipeline.png' width=70% />\n\nThe losses will by binary cross entropy loss with logits, which we can get with [BCEWithLogitsLoss](https://pytorch.org/docs/stable/nn.html#bcewithlogitsloss). This combines a `sigmoid` activation function **and** and binary cross entropy loss in one function.\n\nFor the real images, we want `D(real_images) = 1`. That is, we want the discriminator to classify the the real images with a label = 1, indicating that these are real. To help the discriminator generalize better, the labels are **reduced a bit from 1.0 to 0.9**. For this, we'll use the parameter `smooth`; if True, then we should smooth our labels. 
In PyTorch, this looks like `labels = torch.ones(size) * 0.9`\n\nThe discriminator loss for the fake data is similar. We want `D(fake_images) = 0`, where the fake images are the _generator output_, `fake_images = G(z)`. \n\n### Generator Loss\n\nThe generator loss will look similar only with flipped labels. The generator's goal is to get `D(fake_images) = 1`. In this case, the labels are **flipped** to represent that the generator is trying to fool the discriminator into thinking that the images it generates (fakes) are real!", "_____no_output_____" ] ], [ [ "# Calculate losses\ndef real_loss(D_out, smooth=False):\n # compare logits to real labels\n # smooth labels if smooth=True\n batch_size = D_out.size(0)\n if smooth:\n labels = torch.ones(batch_size) * 0.9\n else:\n labels = torch.ones(batch_size)\n criterion = nn.BCEWithLogitsLoss()\n loss = criterion(D_out.squeeze(), labels)\n return loss\n\ndef fake_loss(D_out):\n # compare logits to fake labels\n batch_size = D_out.size(0)\n labels = torch.zeros(batch_size)\n criterion = nn.BCEWithLogitsLoss()\n loss = criterion(D_out.squeeze(), labels)\n return loss", "_____no_output_____" ] ], [ [ "## Optimizers\n\nWe want to update the generator and discriminator variables separately. So, we'll define two separate Adam optimizers.", "_____no_output_____" ] ], [ [ "import torch.optim as optim\n\n# learning rate for optimizers\nlr = 0.002\n\n# Create optimizers for the discriminator and generator\nd_optimizer = optim.Adam(D.parameters(), lr)\ng_optimizer = optim.Adam(G.parameters(), lr)", "_____no_output_____" ] ], [ [ "---\n## Training\n\nTraining will involve alternating between training the discriminator and the generator. We'll use our functions `real_loss` and `fake_loss` to help us calculate the discriminator losses in all of the following cases.\n\n### Discriminator training\n1. Compute the discriminator loss on real, training images \n2. Generate fake images\n3. Compute the discriminator loss on fake, generated images \n4. Add up real and fake loss\n5. Perform backpropagation + an optimization step to update the discriminator's weights\n\n### Generator training\n1. Generate fake images\n2. Compute the discriminator loss on fake images, using **flipped** labels!\n3. Perform backpropagation + an optimization step to update the generator's weights\n\n#### Saving Samples\n\nAs we train, we'll also print out some loss statistics and save some generated \"fake\" samples.", "_____no_output_____" ] ], [ [ "import pickle as pkl\n\n# training hyperparams\nnum_epochs = 40\n\n# keep track of loss and generated, \"fake\" samples\nsamples = []\nlosses = []\n\nprint_every = 400\n\n# Get some fixed data for sampling. These are images that are held\n# constant throughout training, and allow us to inspect the model's performance\nsample_size=16\nfixed_z = np.random.uniform(-1, 1, size=(sample_size, z_size))\nfixed_z = torch.from_numpy(fixed_z).float()\n\n# train the network\nD.train()\nG.train()\nfor epoch in range(num_epochs):\n \n for batch_i, (real_images, _) in enumerate(train_loader):\n \n batch_size = real_images.size(0)\n \n ## Important rescaling step ## \n real_images = real_images*2 - 1 # rescale input images from [0,1) to [-1, 1)\n \n # ============================================\n # TRAIN THE DISCRIMINATOR\n # ============================================\n d_optimizer.zero_grad()\n \n # 1. 
Train with real images\n\n # Compute the discriminator losses on real images\n # use smoothed labels\n D_real = D(real_images)\n d_real_loss = real_loss(D_real, smooth=True)\n \n # 2. Train with fake images\n \n # Generate fake images\n z = np.random.uniform(-1, 1, size=(batch_size, z_size))\n z = torch.from_numpy(z).float()\n fake_images = G(z)\n \n # Compute the discriminator losses on fake images \n D_fake = D(fake_images)\n d_fake_loss = fake_loss(D_fake)\n \n # add up real and fake losses and perform backprop\n d_loss = d_real_loss + d_fake_loss\n d_loss.backward()\n d_optimizer.step()\n \n # =========================================\n # TRAIN THE GENERATOR\n # =========================================\n g_optimizer.zero_grad()\n \n # 1. Train with fake images and flipped labels\n \n # Generate fake images\n z = np.random.uniform(-1, 1, size=(batch_size, z_size))\n z = torch.from_numpy(z).float()\n fake_images = G(z)\n \n # Compute the discriminator losses on fake images \n # using flipped labels!\n D_fake = D(fake_images)\n g_loss = real_loss(D_fake)\n \n # perform backprop\n g_loss.backward()\n g_optimizer.step() \n\n # Print some loss stats\n if batch_i % print_every == 0:\n # print discriminator and generator loss\n print('Epoch [{:5d}/{:5d}] | d_loss: {:6.4f} | g_loss: {:6.4f}'.format(\n epoch+1, num_epochs, d_loss.item(), g_loss.item()))\n\n \n ## AFTER EACH EPOCH##\n # append discriminator loss and generator loss\n losses.append((d_loss.item(), g_loss.item()))\n \n # generate and save sample, fake images\n G.eval() # eval mode for generating samples\n samples_z = G(fixed_z)\n samples.append(samples_z)\n G.train() # back to train mode\n\n\n# Save training generator samples\nwith open('train_samples.pkl', 'wb') as f:\n pkl.dump(samples, f)", "/home/noopur/anaconda3/envs/dl/lib/python3.7/site-packages/torch/nn/functional.py:1340: UserWarning: nn.functional.tanh is deprecated. Use torch.tanh instead.\n warnings.warn(\"nn.functional.tanh is deprecated. Use torch.tanh instead.\")\n" ] ], [ [ "## Training loss\n\nHere we'll plot the training losses for the generator and discriminator, recorded after each epoch.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots()\nlosses = np.array(losses)\nplt.plot(losses.T[0], label='Discriminator')\nplt.plot(losses.T[1], label='Generator')\nplt.title(\"Training Losses\")\nplt.legend()", "_____no_output_____" ] ], [ [ "## Generator samples from training\n\nHere we can view samples of images from the generator. First we'll look at the images we saved during training.", "_____no_output_____" ] ], [ [ "# helper function for viewing a list of passed in sample images\ndef view_samples(epoch, samples):\n fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)\n for ax, img in zip(axes.flatten(), samples[epoch]):\n img = img.detach()\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')", "_____no_output_____" ], [ "# Load samples from generator, taken while training\nwith open('train_samples.pkl', 'rb') as f:\n samples = pkl.load(f)", "_____no_output_____" ] ], [ [ "These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 1, 7, 3, 2. 
Since this is just a sample, it isn't representative of the full range of images this generator can make.", "_____no_output_____" ] ], [ [ "# -1 indicates final epoch's samples (the last in the list)\nview_samples(-1, samples)", "_____no_output_____" ] ], [ [ "Below I'm showing the generated images as the network was training, every 4 epochs.", "_____no_output_____" ] ], [ [ "rows = 10 # split the 40 epochs into 10 rows, so we show every 4th epoch\ncols = 6\nfig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)\n\nfor sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):\n    for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):\n        img = img.detach()\n        ax.imshow(img.reshape((28,28)), cmap='Greys_r')\n        ax.xaxis.set_visible(False)\n        ax.yaxis.set_visible(False)", "_____no_output_____" ] ], [ [ "It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number-like structures appear out of the noise, like 1s and 9s.", "_____no_output_____" ], [ "## Sampling from the generator\n\nWe can also get completely new images from the trained generator. **We just need to pass in a new latent vector $z$ and we'll get new samples**!", "_____no_output_____" ] ], [ [ "# randomly generated, new latent vectors\nsample_size=16\nrand_z = np.random.uniform(-1, 1, size=(sample_size, z_size))\nrand_z = torch.from_numpy(rand_z).float()\n\nG.eval() # eval mode\n# generated samples\nrand_images = G(rand_z)\n\n# 0 indicates the first set of samples in the passed in list\n# and we only have one batch of samples, here\nview_samples(0, [rand_images])", "_____no_output_____" ] ] ]
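For completeness, here is a minimal sketch of checkpointing the trained generator so new samples can be drawn later without retraining. The `Generator` constructor arguments (`g_hidden_size`, `g_output_size`) are assumptions standing in for whatever hyperparameters `G` was actually built with earlier in this notebook.

```python
# Minimal checkpointing sketch; the constructor arguments are assumed, not
# taken from the notebook, so adjust them to match the actual Generator.
import numpy as np
import torch

torch.save(G.state_dict(), 'G.pt')                # persist the trained weights

G_restored = Generator(z_size, g_hidden_size, g_output_size)  # hypothetical args
G_restored.load_state_dict(torch.load('G.pt'))    # reload into a fresh model
G_restored.eval()                                 # disable dropout for sampling

z = torch.from_numpy(np.random.uniform(-1, 1, size=(16, z_size))).float()
new_images = G_restored(z)                        # brand-new generated digits
```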
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cb97bb0423cfa2b903f402a5d1162d13af29abfb
51,987
ipynb
Jupyter Notebook
algorithms/regression01.ipynb
jorgealexandreb/Covid-19-ML-Project-
cc73dc2324f43127d1959f13ca01389473b2c224
[ "MIT" ]
1
2021-03-19T18:23:03.000Z
2021-03-19T18:23:03.000Z
algorithms/regression01.ipynb
jorgealexandreb/Covid-19-ML-Project-
cc73dc2324f43127d1959f13ca01389473b2c224
[ "MIT" ]
null
null
null
algorithms/regression01.ipynb
jorgealexandreb/Covid-19-ML-Project-
cc73dc2324f43127d1959f13ca01389473b2c224
[ "MIT" ]
null
null
null
77.246657
14,180
0.680439
[ [ [ "import pandas as pd", "_____no_output_____" ], [ "df=pd.read_excel(\"https://github.com/masterfloss/data/blob/main/socialmedia.xlsx?raw=true\")", "_____no_output_____" ], [ "df.dtypes", "_____no_output_____" ], [ "import statsmodels.api as sm", "_____no_output_____" ], [ "X=df[['Linux','Chrome','Twitter']]\ny=df[['Facebook']]\n", "_____no_output_____" ], [ "X = sm.add_constant(X)", "_____no_output_____" ], [ "result = sm.OLS(y,X).fit()\nresult.summary()", "_____no_output_____" ], [ "df.plot.scatter('Linux','Facebook')", "_____no_output_____" ], [ "df.plot.scatter('Chrome','Facebook')", "_____no_output_____" ], [ "X=df[['Linux']]\ny=df[['Facebook']]\nresult = sm.OLS(y,X).fit()\nresult.summary()", "_____no_output_____" ], [ "X=df[['Linux','Chrome','Twitter']]\ny=df[['Facebook']]", "_____no_output_____" ], [ "from statsmodels.stats.outliers_influence import variance_inflation_factor\n# VIF dataframe \nvif_data = pd.DataFrame() \nvif_data[\"feature\"] = X.columns \n\n# calculating VIF for each feature \nvif_data[\"VIF\"] = [variance_inflation_factor(X.values, i) for i in range(len(X.columns))] \n \nprint(vif_data)", " feature VIF\n0 Linux 9.195029\n1 Chrome 5.241991\n2 Twitter 6.951483\n" ], [ "len(X.columns)", "_____no_output_____" ], [ "X.corr()", "_____no_output_____" ], [ "XX=df[['Twitter']]**(1/2)\nXX['Linux']=df[['Linux']]**(1/2)\nXX['Chrome']=df[['Chrome']]**(1/2)", "_____no_output_____" ], [ "from statsmodels.stats.outliers_influence import variance_inflation_factor\n# VIF dataframe \nvif_data = pd.DataFrame() \nvif_data[\"feature\"] = XX.columns \n\n# calculating VIF for each feature \nvif_data[\"VIF\"] = [variance_inflation_factor(XX.values, i) for i in range(len(XX.columns))] \n \nprint(vif_data)", " feature VIF\n0 Twitter 26.187922\n1 Linux 36.248031\n2 Chrome 12.766491\n" ], [ "XX = sm.add_constant(XX)\nresult = sm.OLS(y,XX).fit()\nresult.summary()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb97cac731ccee68e71310c46a15b32d774769dc
8,608
ipynb
Jupyter Notebook
finch/tensorflow1/free_chat/chinese/main/transformer_export.ipynb
hengchao0248/tensorflow-nlp
844f7b092a92aa0a1fd8f6c24364243d60b8af80
[ "MIT" ]
248
2019-07-18T05:59:03.000Z
2022-03-29T21:57:24.000Z
finch/tensorflow1/free_chat/chinese/main/transformer_export.ipynb
NLP4Science/tensorflow-nlp
4944ace8e861d89282cbae3123016c71c0869a6c
[ "MIT" ]
5
2019-07-26T09:29:20.000Z
2020-11-01T08:40:13.000Z
finch/tensorflow1/free_chat/chinese/main/transformer_export.ipynb
NLP4Science/tensorflow-nlp
4944ace8e861d89282cbae3123016c71c0869a6c
[ "MIT" ]
68
2019-07-25T06:59:58.000Z
2022-03-22T06:44:30.000Z
8,608
8,608
0.577951
[ [ [ "\"\"\"\nWe use following lines because we are running on Google Colab\nIf you are running notebook on a local computer, you don't need this cell\n\"\"\"\nfrom google.colab import drive\ndrive.mount('/content/gdrive')\nimport os\nos.chdir('/content/gdrive/My Drive/finch/tensorflow1/free_chat/chinese/main')", "_____no_output_____" ], [ "%tensorflow_version 1.x\n!pip install texar", "_____no_output_____" ], [ "import tensorflow as tf\nimport texar.tf as tx\nimport numpy as np\nimport copy\n\nfrom texar.tf.modules import TransformerEncoder, TransformerDecoder\n\nprint(\"TensorFlow Version\", tf.__version__)\nprint('GPU Enabled:', tf.test.is_gpu_available())", "TensorFlow Version 1.15.0\nGPU Enabled: False\n" ], [ "def forward(features, labels, mode):\n if isinstance(features, dict):\n words = features['words']\n else:\n words = features\n \n words_len = tf.count_nonzero(words, 1, dtype=tf.int32)\n \n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n batch_sz = tf.shape(words)[0]\n \n \n with tf.variable_scope('Embedding'):\n embedding = tf.Variable(np.load('../vocab/char.npy'),\n dtype=tf.float32,\n name='fasttext_vectors')\n embedding = tf.concat([tf.zeros(shape=[1, params['embed_dim']]), embedding[1:, :]], axis=0)\n x = tf.nn.embedding_lookup(embedding, words)\n pos_embedder = tx.modules.SinusoidsPositionEmbedder(\n position_size = 2*params['max_len'],\n hparams = config_model.position_embedder_hparams)\n x = (x * config_model.hidden_dim ** 0.5) + pos_embedder(sequence_length=words_len)\n\n\n with tf.variable_scope('Encoder'):\n encoder = TransformerEncoder(hparams=config_model.encoder)\n enc_out = encoder(inputs=x, sequence_length=words_len)\n \n \n with tf.variable_scope('Decoder'):\n decoder = TransformerDecoder(vocab_size=len(params['char2idx'])+1,\n output_layer=tf.transpose(embedding, (1, 0)),\n hparams=config_model.decoder)\n \n start_tokens = tf.fill([batch_sz], 1)\n\n def _embedding_fn(x, y):\n x_w_embed = tf.nn.embedding_lookup(embedding, x)\n y_p_embed = pos_embedder(y)\n return x_w_embed * config_model.hidden_dim ** 0.5 + y_p_embed\n\n predictions = decoder(\n memory=enc_out,\n memory_sequence_length=words_len,\n beam_width=params['beam_width'],\n length_penalty=params['length_penalty'],\n start_tokens=start_tokens,\n end_token=2,\n embedding=_embedding_fn,\n max_decoding_length=params['max_len'],\n mode=tf.estimator.ModeKeys.PREDICT)\n \n return predictions['sample_id'][:, :, :params['top_k']]", "_____no_output_____" ], [ "def model_fn(features, labels, mode, params):\n logits_or_ids = forward(features, labels, mode)\n \n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode, predictions=logits_or_ids)", "_____no_output_____" ], [ "class config_model:\n hidden_dim = 300\n num_heads = 8\n dropout_rate = .2\n num_blocks = 6\n\n position_embedder_hparams = {\n 'dim': hidden_dim\n }\n\n encoder = {\n 'dim': hidden_dim,\n 'embedding_dropout': dropout_rate,\n 'residual_dropout': dropout_rate,\n 'num_blocks': num_blocks,\n 'initializer': {\n 'type': 'variance_scaling_initializer',\n 'kwargs': {\n 'scale': 1.0,\n 'mode': 'fan_avg',\n 'distribution': 'uniform',\n },\n },\n 'multihead_attention': {\n 'dropout_rate': dropout_rate,\n 'num_heads': num_heads,\n 'output_dim': hidden_dim,\n 'use_bias': True,\n },\n 'poswise_feedforward': {\n 'name': 'fnn',\n 'layers': [\n {\n 'type': 'Dense',\n 'kwargs': {\n 'name': 'conv1',\n 'units': hidden_dim * 2,\n 'activation': 'gelu',\n 'use_bias': True,\n },\n },\n {\n 'type': 'Dropout',\n 'kwargs': {\n 'rate': 
dropout_rate,\n }\n },\n {\n 'type': 'Dense',\n 'kwargs': {\n 'name': 'conv2',\n 'units': hidden_dim,\n 'use_bias': True,\n }\n }\n ],\n },\n }\n\n decoder = copy.deepcopy(encoder)\n decoder['output_layer_bias'] = True\n\n\nparams = {\n 'model_dir': '../model/transformer',\n 'export_dir': '../model/transformer_export',\n 'vocab_path': '../vocab/char.txt',\n 'max_len': 15,\n 'embed_dim': config_model.hidden_dim,\n 'beam_width': 5,\n 'top_k': 3,\n 'length_penalty': .6,\n}", "_____no_output_____" ], [ "def serving_input_receiver_fn():\n words = tf.placeholder(tf.int32, [None, None], 'words')\n \n features = {'words': words}\n receiver_tensors = features\n \n return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)", "_____no_output_____" ], [ "def get_vocab(f_path):\n word2idx = {}\n with open(f_path) as f:\n for i, line in enumerate(f):\n line = line.rstrip('\\n')\n word2idx[line] = i\n return word2idx", "_____no_output_____" ], [ "params['char2idx'] = get_vocab(params['vocab_path'])\nparams['idx2char'] = {idx: char for char, idx in params['char2idx'].items()}", "_____no_output_____" ], [ "estimator = tf.estimator.Estimator(model_fn, params['model_dir'])\nestimator.export_saved_model(params['export_dir'], serving_input_receiver_fn)", "_____no_output_____" ] ] ]
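A minimal sketch of loading the exported SavedModel back for inference, using the TF 1.x `tf.contrib.predictor` API. The timestamped-subdirectory lookup and the example token ids are illustrative assumptions, not values from this project:

```python
# Load the exported SavedModel and run one prediction (TF 1.x only).
import os
from tensorflow.contrib import predictor

export_dir = '../model/transformer_export'
# export_saved_model writes a timestamped subdirectory; pick the newest one
latest = os.path.join(export_dir, sorted(os.listdir(export_dir))[-1])
predict_fn = predictor.from_saved_model(latest)

# 'words' matches the placeholder name in serving_input_receiver_fn;
# the token ids below are made-up examples, not real vocabulary entries
print(predict_fn({'words': [[4, 5, 6, 2]]}))
```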
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb97dc745d0c0ef12d9bbbc46e8e75f70f5b7ba0
50,599
ipynb
Jupyter Notebook
chapter4/Chap4.ipynb
tuanlm173/mathematics-and-statistics-for-DS-HCMUS
a540fa589ae4b7158d5f52ff133e1ed8e7076baa
[ "MIT" ]
null
null
null
chapter4/Chap4.ipynb
tuanlm173/mathematics-and-statistics-for-DS-HCMUS
a540fa589ae4b7158d5f52ff133e1ed8e7076baa
[ "MIT" ]
null
null
null
chapter4/Chap4.ipynb
tuanlm173/mathematics-and-statistics-for-DS-HCMUS
a540fa589ae4b7158d5f52ff133e1ed8e7076baa
[ "MIT" ]
null
null
null
80.9584
16,788
0.777407
[ [ [ "from scipy.misc import derivative\nimport scipy.integrate\nfrom math import *\nimport sympy as sp\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport random\nfrom sklearn.datasets.samples_generator import make_regression \nfrom scipy import stats\nimport seaborn as sns", "_____no_output_____" ], [ "def f1(x):\n return sin(x) * cos(x) + exp(2*x) + 2*x**4 - 10", "_____no_output_____" ], [ "f1_result = derivative(f1, 2, dx = 1e-10)\nf1_result", "_____no_output_____" ], [ "def f2(x):\n return (5*(x**2))/sin(x)", "_____no_output_____" ], [ "f2_result = derivative(f2, 2, dx = 1e-10)\nf2_result", "_____no_output_____" ] ], [ [ "## Partial Derivative", "_____no_output_____" ] ], [ [ "x = sp.Symbol('x')\ny = sp.Symbol('y')\nz = sp.Symbol('z')", "_____no_output_____" ], [ "w = (x**2)*y - 10*(y**2)*(z**3) + 43*x - 7*sp.tan(4*y)", "_____no_output_____" ], [ "w_p = sp.diff(w, x)\nw_p", "_____no_output_____" ], [ "w_p_v = w_p.subs({x:3})\nw_p_v", "_____no_output_____" ] ], [ [ "## Derivarive Rule", "_____no_output_____" ] ], [ [ "def f(x):\n return 4*(x**3) + 3*(x**2)\ndef g(x):\n return 5*(x**6) + 4*x", "_____no_output_____" ], [ "x = 2\nfg_prime_of_x = derivative(f, x, dx=1e-10)*g(x) + f(x)*derivative(g, x, dx=1e-10)\nfg_prime_of_x", "_____no_output_____" ], [ "f_prime_g_prime_of_x = derivative(f, x, dx=1e-10) * derivative(g, x, dx=1e-10)\nf_prime_g_prime_of_x", "_____no_output_____" ] ], [ [ "## Definite Integral", "_____no_output_____" ] ], [ [ "f = lambda x,y: 1 + 8*x*y\ng = lambda x:0\nh = lambda x:3", "_____no_output_____" ], [ "i, e = scipy.integrate.dblquad(f, 1,2, g, h)\ni", "_____no_output_____" ] ], [ [ "## Gradient Descent", "_____no_output_____" ] ], [ [ "h = [74, 74, 72, 72, 73, 69, 69, 71, 76, 71, 73, 73, 74, 74, 69, 70, 73, 75, 78, 79, 76, 74, 76, 72, 71, 75, 77, 74, 73, 74, 78, 73, 75, 73, 75, 75, 74, 69, 71, 74, 73, 73, 76, 74, 74, 70, 72, 77, 74, 70, 73, 75, 76, 76, 78, 74, 74, 76, 77, 81, 78, 75, 77, 75, 76, 74, 72, 72, 75, 73, 73, 73, 70, 70, 70, 76, 68, 71, 72, 75, 75, 75, 75, 68, 74, 78, 71, 73, 76, 74, 74, 79, 75, 73, 76, 74, 74, 73, 72, 74, 73, 74, 72, 73, 69, 72, 73, 75, 75, 73, 72, 72, 76, 74, 72, 77, 74, 77, 75, 76, 80, 74, 74, 75, 78, 73, 73, 74, 75, 76, 71, 73, 74, 76, 76, 74, 73, 74, 70, 72, 73, 73, 73, 73, 71, 74, 74, 72, 74, 71, 74, 73, 75, 75, 79, 73, 75, 76, 74, 76, 78, 74, 76, 72, 74, 76, 74, 75, 78, 75, 72, 74, 72, 74, 70, 71, 70, 75, 71, 71, 73, 72, 71, 73, 72, 75, 74, 74, 75, 73, 77, 73, 76, 75, 74, 76, 75, 73, 71, 76, 75, 72, 71, 77, 73, 74, 71, 72, 74, 75, 73, 72, 75, 75, 74, 72, 74, 71, 70, 74, 77, 77, 75, 75, 78, 75, 76, 73, 75, 75, 79, 77, 76, 71, 75, 74, 69, 71, 76, 72, 72, 70, 72, 73, 71, 72, 71, 73, 72, 73, 74, 74, 72, 75, 74, 74, 77, 75, 73, 72, 71, 74, 77, 75, 75, 75, 78, 78, 74, 76, 78, 76, 70, 72, 80, 74, 74, 71, 70, 72, 71, 74, 71, 72, 71, 74, 69, 76, 75, 75, 76, 73, 76, 73, 77, 73, 72, 72, 77, 77, 71, 74, 74, 73, 78, 75, 73, 70, 74, 72, 73, 73, 75, 75, 74, 76, 73, 74, 75, 75, 72, 73, 73, 72, 74, 78, 76, 73, 74, 75, 70, 75, 71, 72, 78, 75, 73, 73, 71, 75, 77, 72, 69, 73, 74, 72, 70, 75, 70, 72, 72, 74, 73, 74, 76, 75, 80, 72, 75, 73, 74, 74, 73, 75, 75, 71, 73, 75, 74, 74, 72, 74, 74, 74, 73, 76, 75, 72, 73, 73, 73, 72, 72, 72, 72, 71, 75, 75, 74, 73, 75, 79, 74, 76, 73, 74, 74, 72, 74, 74, 75, 78, 74, 74, 74, 77, 70, 73, 74, 73, 71, 75, 71, 72, 77, 74, 70, 77, 73, 72, 76, 71, 76, 78, 75, 73, 78, 74, 79, 75, 76, 72, 75, 75, 70, 72, 70, 74, 71, 76, 73, 76, 71, 69, 72, 72, 69, 73, 69, 73, 74, 74, 72, 71, 72, 72, 76, 76, 76, 74, 76, 75, 
71, 72, 71, 73, 75, 76, 75, 71, 75, 74, 72, 73, 73, 73, 73, 76, 72, 76, 73, 73, 73, 75, 75, 77, 73, 72, 75, 70, 74, 72, 80, 71, 71, 74, 74, 73, 75, 76, 73, 77, 72, 73, 77, 76, 71, 75, 73, 74, 77, 71, 72, 73, 69, 73, 70, 74, 76, 73, 73, 75, 73, 79, 74, 73, 74, 77, 75, 74, 73, 77, 73, 77, 74, 74, 73, 77, 74, 77, 75, 77, 75, 71, 74, 70, 79, 72, 72, 70, 74, 74, 72, 73, 72, 74, 74, 76, 82, 74, 74, 70, 73, 73, 74, 77, 72, 76, 73, 73, 72, 74, 74, 71, 72, 75, 74, 74, 77, 70, 71, 73, 76, 71, 75, 74, 72, 76, 79, 76, 73, 76, 78, 75, 76, 72, 72, 73, 73, 75, 71, 76, 70, 75, 74, 75, 73, 71, 71, 72, 73, 73, 72, 69, 73, 78, 71, 73, 75, 76, 70, 74, 77, 75, 79, 72, 77, 73, 75, 75, 75, 73, 73, 76, 77, 75, 70, 71, 71, 75, 74, 69, 70, 75, 72, 75, 73, 72, 72, 72, 76, 75, 74, 69, 73, 72, 72, 75, 77, 76, 80, 77, 76, 79, 71, 75, 73, 76, 77, 73, 76, 70, 75, 73, 75, 70, 69, 71, 72, 72, 73, 70, 70, 73, 76, 75, 72, 73, 79, 71, 72, 74, 74, 74, 72, 76, 76, 72, 72, 71, 72, 72, 70, 77, 74, 72, 76, 71, 76, 71, 73, 70, 73, 73, 72, 71, 71, 71, 72, 72, 74, 74, 74, 71, 72, 75, 72, 71, 72, 72, 72, 72, 74, 74, 77, 75, 73, 75, 73, 76, 72, 77, 75, 72, 71, 71, 75, 72, 73, 73, 71, 70, 75, 71, 76, 73, 68, 71, 72, 74, 77, 72, 76, 78, 81, 72, 73, 76, 72, 72, 74, 76, 73, 76, 75, 70, 71, 74, 72, 73, 76, 76, 73, 71, 68, 71, 71, 74, 77, 69, 72, 76, 75, 76, 75, 76, 72, 74, 76, 74, 72, 75, 78, 77, 70, 72, 79, 74, 71, 68, 77, 75, 71, 72, 70, 72, 72, 73, 72, 74, 72, 72, 75, 72, 73, 74, 72, 78, 75, 72, 74, 75, 75, 76, 74, 74, 73, 74, 71, 74, 75, 76, 74, 76, 76, 73, 75, 75, 74, 68, 72, 75, 71, 70, 72, 73, 72, 75, 74, 70, 76, 71, 82, 72, 73, 74, 71, 75, 77, 72, 74, 72, 73, 78, 77, 73, 73, 73, 73, 73, 76, 75, 70, 73, 72, 73, 75, 74, 73, 73, 76, 73, 75, 70, 77, 72, 77, 74, 75, 75, 75, 75, 72, 74, 71, 76, 71, 75, 76, 83, 75, 74, 76, 72, 72, 75, 75, 72, 77, 73, 72, 70, 74, 72, 74, 72, 71, 70, 71, 76, 74, 76, 74, 74, 74, 75, 75, 71, 71, 74, 77, 71, 74, 75, 77, 76, 74, 76, 72, 71, 72, 75, 73, 68, 72, 69, 73, 73, 75, 70, 70, 74, 75, 74, 74, 73, 74, 75, 77, 73, 74, 76, 74, 75, 73, 76, 78, 75, 73, 77, 74, 72, 74, 72, 71, 73, 75, 73, 67, 67, 76, 74, 73, 70, 75, 70, 72, 77, 79, 78, 74, 75, 75, 78, 76, 75, 69, 75, 72, 75, 73, 74, 75, 75, 73]\n", "_____no_output_____" ], [ "w = [180, 215, 210, 210, 188, 176, 209, 200, 231, 180, 188, 180, 185, 160, 180, 185, 189, 185, 219, 230, 205, 230, 195, 180, 192, 225, 203, 195, 182, 188, 200, 180, 200, 200, 245, 240, 215, 185, 175, 199, 200, 215, 200, 205, 206, 186, 188, 220, 210, 195, 200, 200, 212, 224, 210, 205, 220, 195, 200, 260, 228, 270, 200, 210, 190, 220, 180, 205, 210, 220, 211, 200, 180, 190, 170, 230, 155, 185, 185, 200, 225, 225, 220, 160, 205, 235, 250, 210, 190, 160, 200, 205, 222, 195, 205, 220, 220, 170, 185, 195, 220, 230, 180, 220, 180, 180, 170, 210, 215, 200, 213, 180, 192, 235, 185, 235, 210, 222, 210, 230, 220, 180, 190, 200, 210, 194, 180, 190, 240, 200, 198, 200, 195, 210, 220, 190, 210, 225, 180, 185, 170, 185, 185, 180, 178, 175, 200, 204, 211, 190, 210, 190, 190, 185, 290, 175, 185, 200, 220, 170, 220, 190, 220, 205, 200, 250, 225, 215, 210, 215, 195, 200, 194, 220, 180, 180, 170, 195, 180, 170, 206, 205, 200, 225, 201, 225, 233, 180, 225, 180, 220, 180, 237, 215, 190, 235, 190, 180, 165, 195, 200, 190, 190, 185, 185, 205, 190, 205, 206, 220, 208, 170, 195, 210, 190, 211, 230, 170, 185, 185, 241, 225, 210, 175, 230, 200, 215, 198, 226, 278, 215, 230, 240, 184, 219, 170, 218, 190, 225, 220, 176, 190, 197, 204, 167, 180, 195, 220, 215, 185, 190, 205, 205, 200, 210, 215, 200, 205, 211, 190, 208, 
200, 210, 232, 230, 210, 220, 210, 202, 212, 225, 170, 190, 200, 237, 220, 170, 193, 190, 150, 220, 200, 190, 185, 185, 200, 172, 220, 225, 190, 195, 219, 190, 197, 200, 195, 210, 177, 220, 235, 180, 195, 195, 190, 230, 190, 200, 190, 190, 200, 200, 184, 200, 180, 219, 187, 200, 220, 205, 190, 170, 160, 215, 175, 205, 200, 214, 200, 190, 180, 205, 220, 190, 215, 235, 191, 200, 181, 200, 210, 240, 185, 165, 190, 185, 175, 155, 210, 170, 175, 220, 210, 205, 200, 205, 195, 240, 150, 200, 215, 202, 200, 190, 205, 190, 160, 215, 185, 200, 190, 210, 185, 220, 190, 202, 205, 220, 175, 160, 190, 200, 229, 206, 220, 180, 195, 175, 188, 230, 190, 200, 190, 219, 235, 180, 180, 180, 200, 234, 185, 220, 223, 200, 210, 200, 210, 190, 177, 227, 180, 195, 199, 175, 185, 240, 210, 180, 194, 225, 180, 205, 193, 230, 230, 220, 200, 249, 190, 208, 245, 250, 160, 192, 220, 170, 197, 155, 190, 200, 220, 210, 228, 190, 160, 184, 180, 180, 200, 176, 160, 222, 211, 195, 200, 175, 206, 240, 185, 260, 185, 221, 205, 200, 170, 201, 205, 185, 205, 245, 220, 210, 220, 185, 175, 170, 180, 200, 210, 175, 220, 206, 180, 210, 195, 200, 200, 164, 180, 220, 195, 205, 170, 240, 210, 195, 200, 205, 192, 190, 170, 240, 200, 205, 175, 250, 220, 224, 210, 195, 180, 245, 175, 180, 215, 175, 180, 195, 230, 230, 205, 215, 195, 180, 205, 180, 190, 180, 190, 190, 220, 210, 255, 190, 230, 200, 205, 210, 225, 215, 220, 205, 200, 220, 197, 225, 187, 245, 185, 185, 175, 200, 180, 188, 225, 200, 210, 245, 213, 231, 165, 228, 210, 250, 191, 190, 200, 215, 254, 232, 180, 215, 220, 180, 200, 170, 195, 210, 200, 220, 165, 180, 200, 200, 170, 224, 220, 180, 198, 240, 239, 185, 210, 220, 200, 195, 220, 230, 170, 220, 230, 165, 205, 192, 210, 205, 200, 210, 185, 195, 202, 205, 195, 180, 200, 185, 240, 185, 220, 205, 205, 180, 201, 190, 208, 240, 180, 230, 195, 215, 190, 195, 215, 215, 220, 220, 230, 195, 190, 195, 209, 204, 170, 185, 205, 175, 210, 190, 180, 180, 160, 235, 200, 210, 180, 190, 197, 203, 205, 170, 200, 250, 200, 220, 200, 190, 170, 190, 220, 215, 206, 215, 185, 235, 188, 230, 195, 168, 190, 160, 200, 200, 189, 180, 190, 200, 220, 187, 240, 190, 180, 185, 210, 220, 219, 190, 193, 175, 180, 215, 210, 200, 190, 185, 220, 170, 195, 205, 195, 210, 190, 190, 180, 220, 190, 186, 185, 190, 180, 190, 170, 210, 240, 220, 180, 210, 210, 195, 160, 180, 205, 200, 185, 245, 190, 210, 200, 200, 222, 215, 240, 170, 220, 156, 190, 202, 221, 200, 190, 210, 190, 200, 165, 190, 185, 230, 208, 209, 175, 180, 200, 205, 200, 250, 210, 230, 244, 202, 240, 200, 215, 177, 210, 170, 215, 217, 198, 200, 220, 170, 200, 230, 231, 183, 192, 167, 190, 180, 180, 215, 160, 205, 223, 175, 170, 190, 240, 175, 230, 223, 196, 167, 195, 190, 250, 190, 190, 190, 170, 160, 150, 225, 220, 209, 210, 176, 260, 195, 190, 184, 180, 195, 195, 219, 225, 212, 202, 185, 200, 209, 200, 195, 228, 210, 190, 212, 190, 218, 220, 190, 235, 210, 200, 188, 210, 235, 188, 215, 216, 220, 180, 185, 200, 210, 220, 185, 231, 210, 195, 200, 205, 200, 190, 250, 185, 180, 170, 180, 208, 235, 215, 244, 220, 185, 230, 190, 200, 180, 190, 196, 180, 230, 224, 160, 178, 205, 185, 210, 180, 190, 200, 257, 190, 220, 165, 205, 200, 208, 185, 215, 170, 235, 210, 170, 180, 170, 190, 150, 230, 203, 260, 246, 186, 210, 198, 210, 215, 180, 200, 245, 200, 192, 192, 200, 192, 205, 190, 186, 170, 197, 219, 200, 220, 207, 225, 207, 212, 225, 170, 190, 210, 230, 210, 200, 238, 234, 222, 200, 190, 170, 220, 223, 210, 215, 196, 175, 175, 189, 205, 210, 180, 180, 197, 220, 228, 190, 204, 165, 216, 220, 208, 210, 215, 
195, 200, 215, 229, 240, 207, 205, 208, 185, 190, 170, 208, 225, 190, 225, 185, 180, 165, 240, 220, 212, 163, 215, 175, 205, 210, 205, 208, 215, 180, 200, 230, 211, 230, 190, 220, 180, 205, 190, 180, 205, 190, 195]", "_____no_output_____" ], [ "heights = np.array(h)\nweights = np.array(w)\nheights = heights*0.0254\nweights = weights * 0.453592", "_____no_output_____" ], [ "plt.figure(figsize=(12,8))\nsns.jointplot(x=heights, y = weights)\nplt.show()", "c:\\users\\dell\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\scipy\\stats\\stats.py:1713: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.\n return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval\n" ], [ "# weights = m * heights + b", "_____no_output_____" ], [ "print(heights.shape)\nprint(weights.shape)", "(1015, 1)\n(1015, 1)\n" ], [ "weights = weights.reshape(weights.size, 1)\nheights = heights.reshape(weights.size, 1)", "_____no_output_____" ], [ "import sys\nsys.path.insert(\n 0,\n r\"C:\\Users\\DELL\\Desktop\\Sentifi\\machine_learning\\linear_regression_from_scratch\\gradient_descent\",\n)", "_____no_output_____" ], [ "import GD_linear_regression as glr\n\nbuilder = glr.GDLinearRegression(iterations=200)\nreg = builder.fit(heights, weights)\npredictions = reg.predict(heights)\ncosts = reg.costs\nthetas = reg.theta", "_____no_output_____" ], [ "print(len(costs))\nprint(reg.theta[-1])\nprint(costs[-1])", "200\n[[48.79217707]]\n34.286653301755706\n" ], [ "plt.plot(costs)\nplt.show()", "_____no_output_____" ] ] ]
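`GDLinearRegression` is imported from a local module whose source is not shown above. As a rough sketch of what such a batch-gradient-descent linear-regression fit typically looks like (the real class's internals and API may differ):

```python
# Batch gradient descent for linear regression; names are illustrative.
import numpy as np

def gd_fit(X, y, lr=0.1, iterations=200):
    X = np.hstack([np.ones((X.shape[0], 1)), X])  # prepend intercept column
    theta = np.zeros((X.shape[1], 1))
    m = len(y)
    costs = []
    for _ in range(iterations):
        residual = X @ theta - y                  # (m, 1) prediction errors
        costs.append(float((residual ** 2).sum() / (2 * m)))  # MSE/2 cost
        theta -= lr * (X.T @ residual) / m        # gradient step
    return theta, costs

theta, costs = gd_fit(heights, weights)           # arrays prepared above
```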
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb97df270a55caede040ec64cd8fa6d83ccadbf4
71,716
ipynb
Jupyter Notebook
jupyter_notebooks/pandas/WQ_Data.ipynb
manual123/Nacho-Jupyter-Notebooks
e75523434b1a90313a6b44e32b056f63de8a7135
[ "MIT" ]
2
2021-02-13T05:52:05.000Z
2022-02-08T09:52:35.000Z
pandas/WQ_Data.ipynb
manual123/Nacho-Jupyter-Notebooks
e75523434b1a90313a6b44e32b056f63de8a7135
[ "MIT" ]
null
null
null
pandas/WQ_Data.ipynb
manual123/Nacho-Jupyter-Notebooks
e75523434b1a90313a6b44e32b056f63de8a7135
[ "MIT" ]
null
null
null
95.240372
49,397
0.765645
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb97e84ed3fec07e1ccef5824c6ec5ecf88e5165
485,584
ipynb
Jupyter Notebook
docs/examples/UserModels/PyIndMach012/README.ipynb
dss-extensions/dss_python
f6c4440a14287d06f1bd10180484b349f764ba7e
[ "BSD-3-Clause" ]
24
2019-03-07T20:24:24.000Z
2022-03-23T17:58:00.000Z
docs/examples/UserModels/PyIndMach012/README.ipynb
dss-extensions/dss_python
f6c4440a14287d06f1bd10180484b349f764ba7e
[ "BSD-3-Clause" ]
32
2019-02-14T03:46:31.000Z
2022-03-23T00:01:28.000Z
docs/examples/UserModels/PyIndMach012/README.ipynb
PMeira/dss_python
2dbc72ed875108d3f98d21cb0a488bab6b0d7f4c
[ "BSD-3-Clause" ]
5
2019-02-19T04:54:49.000Z
2022-03-23T10:40:51.000Z
585.040964
36,980
0.919796
[ [ [ "# PyIndMach012: an example of user-model using DSS Python\n\nThis example runs a modified example from the OpenDSS distribution for the induction machine model with a sample PyIndMach012 implementation, written in Python, and the original, built-in IndMach012.\n\nCheck the `PyIndMach012.py` file for more comments. Comparing it to [the Pascal code for IndMach012](https://github.com/dss-extensions/dss_capi/blob/master/Version7/Source/PCElements/IndMach012.pas) can be useful to understand some of the inner workings of OpenDSS.\n\nThe user-model code in DSS Python is not stable yet but can be used to develop new ideas before commiting the final model in a traditional DLL user-model. Particularly, I (@PMeira) found some issues with callbacks with newer Version 8 COM DLLs, so changes related to that are expected.\n", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport os\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom dss.UserModels import GenUserModel # used to get the DLL path\nimport PyIndMach012 # we need to import the model so it gets registered", "_____no_output_____" ] ], [ [ "## The model class", "_____no_output_____" ] ], [ [ "??PyIndMach012", "_____no_output_____" ] ], [ [ "## OpenDSS setup", "_____no_output_____" ], [ "For this example, we can use either COM or DSS Python (DSS C-API). The IndMach012 model in DSS C-API seems to have a bug somewhere though -- this is being tracked in [dss_capi#62](https://github.com/dss-extensions/dss_capi/issues/62).", "_____no_output_____" ] ], [ [ "original_dir = os.getcwd() # same the original working directory since the COM module messes with it\nUSE_COM = True # toggle this value to run with DSS C-API\nif USE_COM:\n from dss import patch_dss_com\n import win32com.client\n DSS = patch_dss_com(win32com.client.gencache.EnsureDispatch('OpenDSSengine.DSS'))\n DSS.DataPath = original_dir\n os.chdir(original_dir)\nelse:\n from dss import DSS\n \nDSS.Version ", "_____no_output_____" ], [ "Text = DSS.Text\nMonitors = DSS.ActiveCircuit.Monitors", "_____no_output_____" ] ], [ [ "## Using the model\n\nTo use a Python model for generators:\n- the model class needs to be registered in advance\n- create a generator with `model=6`\n - pass a `usermodel=\"{dll_path}\"` as in the following DSS command in the `run` function\n - pass a `\"pymodel=MODELNAME\"` parameter in the userdata property, where MODELNAME is the name of the model class in Python\n", "_____no_output_____" ] ], [ [ "def run(pymodel):\n Text.Command = 'redirect \"master.dss\"'\n\n if pymodel:\n # This uses our custom user-model in Python\n Text.Command = 'New \"Generator.Motor1\" bus1=Bg2 kW=1200 conn=delta kVA=1500.000 H=6 model=6 kv=0.48 usermodel=\"{dll_path}\" userdata=(pymodel=PyIndMach012 purs=0.048 puxs=0.075 purr=0.018 puxr=0.12 puxm=3.8 slip=0.02 SlipOption=variableslip)'.format(\n dll_path=GenUserModel.dll_path,\n )\n Text.Command = 'New \"Monitor.mfr2\" element=Generator.Motor1 terminal=1 mode=3'\n else:\n # This uses the built-in model for comparison\n Text.Command = 'New \"IndMach012.Motor1\" bus1=Bg2 kW=1200 conn=delta kVA=1500.000 H=6 purs=0.048 puxs=0.075 purr=0.018 puxr=0.12 puxm=3.8 slip=0.02 SlipOption=variableslip kv=0.48'\n Text.Command = 'New \"Monitor.mfr2\" element=IndMach012.Motor1 terminal=1 mode=3'\n \n # This will run a power-flow solution\n Text.Command = 'Solve'\n \n # This will toggle to the dynamics mode\n Text.Command = 'Set mode=dynamics number=1 h=0.000166667'\n \n # And finally run 5000 steps for the dynamic simulation\n Text.Command 
= f'Solve number=5000'\n    ", "_____no_output_____" ], [ "# These are the channels from the Pascal/built-in IndMach012\nchannels_pas = (' Frequency', 'Theta (deg)', 'E1', 'dSpeed (deg/sec)', 'dTheta (deg)', 'Slip', 'Is1', 'Is2', 'Ir1', 'Ir2', 'Stator Losses', 'Rotor Losses', 'Shaft Power (hp)', 'Power Factor', 'Efficiency (%)')\n\n# These are the channels from the Python module -- we define part of these and part come from the generator model itself\nchannels_py = (' Frequency', 'Theta (Deg)', 'E1_pu', 'dSpeed (Deg/sec)', 'dTheta (Deg)', 'Slip', 'Is1', 'Is2', 'Ir1', 'Ir2', 'StatorLosses', 'RotorLosses', 'ShaftPower_hp', 'PowerFactor', 'Efficiency_pct')", "_____no_output_____" ] ], [ [ "## Running and saving the outputs", "_____no_output_____" ], [ "Let's run the Pascal/built-in version of IndMach012 and our custom Python version for comparison:", "_____no_output_____" ] ], [ [ "run(False)\nMonitors.Name = 'mfr2'\noutputs_pas = {channel: Monitors.Channel(Monitors.Header.index(channel) + 1) for channel in channels_pas}\n\nrun(True)\nMonitors.Name = 'mfr2'\noutputs_py = {channel: Monitors.Channel(Monitors.Header.index(channel) + 1) for channel in channels_py}\n\ntime = np.arange(1, 5000 + 1) * 0.000166667\noffset = int(0.1 / 0.000166667)", "_____no_output_____" ] ], [ [ "## Plotting the various output channels", "_____no_output_____" ], [ "The example circuit applies a fault at 0.3 s, isolating the machine at 0.4 s (check `master.dss` for more details).\n\nAs we can see from the figures below, the outputs match very closely. After the induction machine is isolated, the efficiency and power factor values can misbehave as the power goes to zero, seen especially in the Pascal version.", "_____no_output_____" ] ], [ [ "for ch_pas, ch_py in zip(channels_pas, channels_py):\n    plt.figure(figsize=(8,4))\n    plt.plot(time, outputs_pas[ch_pas], label='IndMach012', lw=3)\n    plt.plot(time, outputs_py[ch_py], label='PyIndMach012', ls='--', lw=2)\n    plt.axvline(0.3, linestyle=':', color='k', alpha=0.5, label='Fault occurs')\n    plt.axvline(0.4, linestyle='--', color='r', alpha=0.5, label='Relays operate')\n    plt.legend()\n    plt.xlabel('Time (s)')\n    plt.ylabel(ch_pas)\n    \n    if ch_pas == 'Efficiency (%)':\n        # Limit efficiency to 0-100\n        plt.ylim(0, 100)\n    \n    plt.xlim(0, time[-1])\n    plt.tight_layout()\n    ", "_____no_output_____" ] ] ]
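To put a number on the visual agreement, the per-channel difference between the two models can be summarized with an RMSE. A quick sketch reusing the arrays collected above:

```python
# Root-mean-square difference between the built-in and Python models,
# channel by channel, using the outputs_pas / outputs_py dicts from above.
import numpy as np

for ch_pas, ch_py in zip(channels_pas, channels_py):
    a = np.asarray(outputs_pas[ch_pas])
    b = np.asarray(outputs_py[ch_py])
    rmse = float(np.sqrt(np.mean((a - b) ** 2)))
    print('{:>20s}  RMSE = {:.4g}'.format(ch_pas.strip(), rmse))
```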
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cb97ebd86dead55b2b7b583507df89fb9211f286
236,281
ipynb
Jupyter Notebook
ipython/Factorial - Two-Level Six-Factor Design.ipynb
davidschlachter/empirical-model-building
71aa491f16428bd1aa264a61058574b4c933804d
[ "MIT" ]
2
2017-06-09T08:32:26.000Z
2019-05-19T07:28:19.000Z
ipython/Factorial - Two-Level Six-Factor Design.ipynb
davidschlachter/empirical-model-building
71aa491f16428bd1aa264a61058574b4c933804d
[ "MIT" ]
1
2021-04-05T00:34:54.000Z
2021-04-05T00:34:54.000Z
ipython/Factorial - Two-Level Six-Factor Design.ipynb
davidschlachter/empirical-model-building
71aa491f16428bd1aa264a61058574b4c933804d
[ "MIT" ]
2
2017-07-15T11:42:01.000Z
2021-04-03T18:34:56.000Z
138.907113
46,392
0.820303
[ [ [ "# A Two-Level, Six-Factor Full Factorial Design\n\n<br />\n<br />\n<br />\n\n### Table of Contents\n\n* [Introduction](#intro)\n* Factorial Experimental Design:\n * [Two-Level Six-Factor Full Factorial Design](#fullfactorial)\n * [Variables and Variable Labels](#varlabels)\n * [Computing Main and Interaction Effects](#computing_effects)\n* Analysis of results:\n * [Analyzing Effects](#analyzing_effects)\n * [Quantile-Quantile Effects Plot](#quantile_effects)\n * [Utilizing Degrees of Freedom](#dof)\n * [Ordinary Least Squares Regression Model](#ols)\n * [Goodness of Fit](#goodness_of_fit)\n * [Distribution of Error](#distribution_of_error)\n * [Aggregating Results](#aggregating)\n * [Distribution of Variance](#dist_variance)\n * [Residual vs. Response Plots](#residual)\n\n<br />\n<br />\n<br />\n", "_____no_output_____" ], [ "<a name=\"intro\"></a>\n## Introduction\n\nThis notebook roughly follows content from Box and Draper's _Empirical Model-Building and Response Surfaces_ (Wiley, 1984). This content is covered by Chapter 4 of Box and Draper.\n\nIn this notebook, we'll carry out an anaylsis of a full factorial design, and show how we can obtain inforomation about a system and its responses, and a quantifiable range of certainty about those values. This is the fundamental idea behind empirical model-building and allows us to construct cheap and simple models to represent complex, nonlinear systems.", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nimport pandas as pd\nimport numpy as np\nfrom numpy.random import rand, seed\nimport seaborn as sns\nimport scipy.stats as stats\nfrom matplotlib.pyplot import *\n\nseed(10)", "_____no_output_____" ] ], [ [ "<a name=\"fullfactorial\"></a>\n## Two-Level Six-Factor Full Factorial Design\n\nLet's start with our six-factor factorial design example. Six factors means there are six input variables; this is still a two-level experiment, so this is now a $2^6$-factorial experiment.\n\nAdditionally, there are now three response variables, $(y_1, y_2, y_3)$. \n\nTo generate a table of the 64 experiments to be run at each factor level, we will use the ```itertools.product``` function below. This is all put into a DataFrame.\n\nThis example generates some random response data, by multiplying a vector of random numbers by the vector of input variable values. 
(Nothing too complicated.)", "_____no_output_____" ] ], [ [ "import itertools\n\n# Create the inputs:\nencoded_inputs = list( itertools.product([-1,1],[-1,1],[-1,1],[-1,1],[-1,1],[-1,1]) )\n\n# Create the experiment design table:\ndoe = pd.DataFrame(encoded_inputs,columns=['x%d'%(i+1) for i in range(6)])", "_____no_output_____" ], [ "# \"Manufacture\" observed data y\ndoe['y1'] = doe.apply( lambda z : sum([ rand()*z[\"x%d\"%(i)]+0.01*(0.5-rand()) for i in range(1,7) ]), axis=1)\ndoe['y2'] = doe.apply( lambda z : sum([ 5*rand()*z[\"x%d\"%(i)]+0.01*(0.5-rand()) for i in range(1,7) ]), axis=1)\ndoe['y3'] = doe.apply( lambda z : sum([ 100*rand()*z[\"x%d\"%(i)]+0.01*(0.5-rand()) for i in range(1,7) ]), axis=1)\nprint(doe[['y1','y2','y3']])", " y1 y2 y3\n0 -2.953976 -16.995589 -229.814820\n1 -2.624777 -14.558143 -165.701057\n2 -1.693929 -13.034991 -237.887785\n3 -0.277497 -7.319593 -188.562237\n4 -2.959396 -16.067548 -243.198515\n5 -1.385948 -4.418310 -97.470198\n6 -0.651712 -1.928300 -73.312885\n7 -0.278417 2.396784 -62.784436\n8 -2.153633 -7.975643 -136.147108\n9 0.759198 -4.809812 -0.699339\n10 0.394272 -9.209924 -53.132653\n11 0.522598 1.005940 -29.222722\n12 -0.196592 -5.636971 -134.246284\n13 -0.788804 0.064581 -56.366364\n14 -0.109515 2.584266 -30.826161\n15 0.857284 4.937100 162.478927\n16 -1.717446 -9.544005 -278.879535\n17 -0.377420 -5.952183 -85.342715\n18 0.235274 -3.912253 -84.515613\n19 -0.438223 4.263438 -15.766010\n20 -0.719210 -6.169082 -38.400881\n21 0.874268 -0.500338 24.659802\n22 0.287457 3.135320 -8.054772\n23 1.516798 1.067475 49.077046\n24 -1.553102 -2.767240 -121.753634\n25 0.464493 6.216963 -72.732938\n26 1.043009 0.026061 151.820464\n27 0.833418 7.357876 243.057970\n28 0.298348 -4.827417 -65.585136\n29 0.941801 1.014842 187.835560\n.. ... ... 
...\n34  -1.017371   -3.644773 -159.850253\n35  -0.350332    6.069516 -145.127826\n36  -0.799106   -1.630720 -126.055080\n37   1.295705   -3.912424   -2.621840\n38  -0.089521    1.547411   79.416555\n39   0.560900    0.078900  184.277209\n40  -0.407593   -6.632601   63.675426\n41   0.618949   -5.512618 -105.304592\n42  -1.437565   -0.213920  -89.853846\n43   0.474758    7.741040   57.816207\n44   0.169640    0.452901   33.329942\n45   1.727513    5.280021  137.536553\n46   1.279086    2.998999   95.690736\n47   1.365180    9.161979  265.214703\n48   0.478457   -6.117942  -73.399285\n49   0.464257    1.971184  -16.842436\n50   0.077946   -2.338977   65.386939\n51   1.447433    0.026873   59.150766\n52   0.021663   -6.108921 -145.336880\n53   0.418467   -2.276366  103.339198\n54   0.833682    6.227068   33.308046\n55   2.102518   10.879208  127.471196\n56   1.474965   -1.836822  -52.282054\n57   1.737385    7.955251   65.447322\n58   0.307222    5.286811   44.223989\n59   2.794374   10.913180  225.621205\n60   1.087903    6.553223  255.150465\n61   2.440223   15.495756  223.083850\n62   1.162720   12.011973   68.035610\n63   3.291962   14.972866  161.953919\n\n[64 rows x 3 columns]\n" ] ], [ [ "<a name=\"varlabels\"></a>\n## Defining Variables and Variable Labels\n\nNext we'll define some containers for input variable labels, output variable labels, and any interaction terms that we'll be computing:", "_____no_output_____" ] ], [ [ "labels = {}\nlabels[1] = ['x1','x2','x3','x4','x5','x6']\nfor i in [2,3,4,5,6]:\n    labels[i] = list(itertools.combinations(labels[1], i))\n\nobs_list = ['y1','y2','y3']\n\nfor k in labels.keys():\n    print(str(k) + \" : \" + str(labels[k]))", "1 : ['x1', 'x2', 'x3', 'x4', 'x5', 'x6']\n2 : [('x1', 'x2'), ('x1', 'x3'), ('x1', 'x4'), ('x1', 'x5'), ('x1', 'x6'), ('x2', 'x3'), ('x2', 'x4'), ('x2', 'x5'), ('x2', 'x6'), ('x3', 'x4'), ('x3', 'x5'), ('x3', 'x6'), ('x4', 'x5'), ('x4', 'x6'), ('x5', 'x6')]\n3 : [('x1', 'x2', 'x3'), ('x1', 'x2', 'x4'), ('x1', 'x2', 'x5'), ('x1', 'x2', 'x6'), ('x1', 'x3', 'x4'), ('x1', 'x3', 'x5'), ('x1', 'x3', 'x6'), ('x1', 'x4', 'x5'), ('x1', 'x4', 'x6'), ('x1', 'x5', 'x6'), ('x2', 'x3', 'x4'), ('x2', 'x3', 'x5'), ('x2', 'x3', 'x6'), ('x2', 'x4', 'x5'), ('x2', 'x4', 'x6'), ('x2', 'x5', 'x6'), ('x3', 'x4', 'x5'), ('x3', 'x4', 'x6'), ('x3', 'x5', 'x6'), ('x4', 'x5', 'x6')]\n4 : [('x1', 'x2', 'x3', 'x4'), ('x1', 'x2', 'x3', 'x5'), ('x1', 'x2', 'x3', 'x6'), ('x1', 'x2', 'x4', 'x5'), ('x1', 'x2', 'x4', 'x6'), ('x1', 'x2', 'x5', 'x6'), ('x1', 'x3', 'x4', 'x5'), ('x1', 'x3', 'x4', 'x6'), ('x1', 'x3', 'x5', 'x6'), ('x1', 'x4', 'x5', 'x6'), ('x2', 'x3', 'x4', 'x5'), ('x2', 'x3', 'x4', 'x6'), ('x2', 'x3', 'x5', 'x6'), ('x2', 'x4', 'x5', 'x6'), ('x3', 'x4', 'x5', 'x6')]\n5 : [('x1', 'x2', 'x3', 'x4', 'x5'), ('x1', 'x2', 'x3', 'x4', 'x6'), ('x1', 'x2', 'x3', 'x5', 'x6'), ('x1', 'x2', 'x4', 'x5', 'x6'), ('x1', 'x3', 'x4', 'x5', 'x6'), ('x2', 'x3', 'x4', 'x5', 'x6')]\n6 : [('x1', 'x2', 'x3', 'x4', 'x5', 'x6')]\n" ] ], [ [ "Now that we have variable labels for each main effect and interaction effect, we can actually compute those effects.", "_____no_output_____" ], [ "<a name=\"computing_effects\"></a>\n## Computing Main and Interaction Effects\n\nWe'll start by finding the constant effect, which is the mean of each response:", "_____no_output_____" ] ], [ [ "effects = {}\n\n# Start with the constant effect: this is $\\overline{y}$\neffects[0] = {'x0' : [doe['y1'].mean(),doe['y2'].mean(),doe['y3'].mean()]}\nprint(effects[0])", "{'x0': [0.21434217402141204, -0.34071899604049039, -0.31091988519642655]}\n" ] ], [ [ "Next, compute the main effect of each variable, which quantifies the amount the response changes by when 
the input variable is changed from the -1 to +1 level. That is, it computes the average effect of an input variable $x_i$ on each of the three response variables $y_1, y_2, y_3$.", "_____no_output_____" ] ], [ [ "effects[1] = {}\nfor key in labels[1]:\n effects_result = []\n for obs in obs_list:\n effects_df = doe.groupby(key)[obs].mean()\n result = sum([ zz*effects_df.ix[zz] for zz in effects_df.index ])\n effects_result.append(result)\n effects[1][key] = effects_result\n\neffects[1]", "_____no_output_____" ] ], [ [ "Our next step is to crank through each variable interaction level: two-variable, three-variable, and on up to six-variable interaction effects. We compute interaction effects for each two-variable combination, three-variable combination, etc.", "_____no_output_____" ] ], [ [ "for c in [2,3,4,5,6]:\n effects[c] = {}\n for key in labels[c]:\n effects_result = []\n for obs in obs_list:\n effects_df = doe.groupby(key)[obs].mean()\n result = sum([ np.prod(zz)*effects_df.ix[zz]/(2**(len(zz)-1)) for zz in effects_df.index ])\n effects_result.append(result)\n effects[c][key] = effects_result", "_____no_output_____" ], [ "def printd(d):\n for k in d.keys():\n print(\"%25s : %s\"%(k,d[k]))\n\nfor i in range(1,7):\n printd(effects[i])\n", " x1 : [0.9433070339050128, 4.9958304988566571, 81.100835834869898]\n x2 : [1.1618193437509752, 5.2904519800700855, 88.974507036059862]\n x3 : [1.0098728146371736, 5.889504208769762, 117.98004192024942]\n x4 : [0.70708531571387601, 4.3129802767035228, 93.841283759812427]\n x5 : [0.73764230931195218, 5.9194227362518177, 80.560308830542212]\n x6 : [0.90984137041079882, 5.3509498773330124, 85.074447049008739]\n ('x1', 'x2') : [-0.016163435253460867, -0.403048707291632, -26.414521929296882]\n ('x1', 'x3') : [-0.12102391162066295, 0.37460806899997978, -17.291608673837352]\n ('x1', 'x4') : [0.02949032440056043, 0.33911161905856435, 12.403992911833804]\n ('x1', 'x5') : [-0.50925949665219905, -0.019045678054248061, -26.947410571506602]\n ('x1', 'x6') : [0.094773221600433111, -0.24795908645176201, -5.7366788045510191]\n ('x2', 'x3') : [-0.10760912883635443, 0.63819708384148699, 5.8907670647868429]\n ('x2', 'x4') : [-0.025586832491203748, -1.1474933181625455, -12.137920756689155]\n ('x2', 'x5') : [0.060980069476135323, -0.44877164689074345, 19.17227060262849]\n ('x2', 'x6') : [0.039223114426751304, 0.29160241201384451, 9.1053917694735276]\n ('x3', 'x4') : [-0.0026215026063064473, -0.047981745167774648, -0.29979369048000137]\n ('x3', 'x5') : [-0.12667195223876626, -1.0906581433977438, 9.1801847151461615]\n ('x3', 'x6') : [0.1322420431071285, 0.74116738324730314, 3.3131723909361455]\n ('x4', 'x5') : [0.094818079489672469, 0.54796615695441764, 5.7023611165682553]\n ('x4', 'x6') : [0.047419291282623011, -1.1264199481431265, 20.197538859153521]\n ('x5', 'x6') : [-0.047724684991834132, -0.31904863446769216, -5.6501374764128158]\n ('x1', 'x2', 'x3') : [0.27530166327235817, 1.733954361940508, -1.8096050710728981]\n ('x1', 'x2', 'x4') : [-0.38885140814812624, 1.7323075051908237, -30.394986190076096]\n ('x1', 'x2', 'x5') : [0.19745454785101421, -0.15865034503932174, -19.536232411247756]\n ('x1', 'x2', 'x6') : [0.11266963448078049, 0.38809920155731881, 5.824114033135686]\n ('x1', 'x3', 'x4') : [0.13626250047114513, 1.5493144711022879, 22.886032235724279]\n ('x1', 'x3', 'x5') : [-0.053116687676143581, 0.33000840607054371, -36.784881473744093]\n ('x1', 'x3', 'x6') : [0.21488908273913887, 0.079205564055645272, -5.9760781496625697]\n ('x1', 'x4', 'x5') : [0.19486413006789105, 
-0.9452264336865297, 7.8024613775946463]\n ('x1', 'x4', 'x6') : [0.14001630223708372, -0.52307026803632573, 13.804117497392205]\n ('x1', 'x5', 'x6') : [0.36443443064444409, -0.037821005672608043, 26.314819786681028]\n ('x2', 'x3', 'x4') : [0.0384496352024572, -0.26880979674065797, 9.9693260721351145]\n ('x2', 'x3', 'x5') : [0.097812953027926086, -0.19441635664880419, -1.4801464908074138]\n ('x2', 'x3', 'x6') : [0.0030381303831370499, 0.61287467341355728, -6.0881873520544332]\n ('x2', 'x4', 'x5') : [0.037696220126619329, 0.96045927363556127, -71.565404771127021]\n ('x2', 'x4', 'x6') : [0.07922131188191639, 0.024353629210014827, -20.016686830389673]\n ('x2', 'x5', 'x6') : [0.047802155789024414, -0.88131343590063693, -18.911846905266643]\n ('x3', 'x4', 'x5') : [0.01722440204670439, -0.50290008223611737, -29.727808977222878]\n ('x3', 'x4', 'x6') : [-0.32253564751424324, 0.44465005390528312, -3.9889855157301781]\n ('x3', 'x5', 'x6') : [-0.057794671772135986, 0.36422184395103652, 26.955452454662893]\n ('x4', 'x5', 'x6') : [-0.072511569035680679, -0.97920830963342009, -12.120243136884532]\n ('x1', 'x2', 'x3', 'x4') : [-0.077012094322499502, 0.2094205920322092, 10.035411098118757]\n ('x1', 'x2', 'x3', 'x5') : [-0.20089102717776863, -0.58353301160841298, -22.055307753405028]\n ('x1', 'x2', 'x3', 'x6') : [0.051106698276142615, -0.3854729384915565, 4.7283907963541374]\n ('x1', 'x2', 'x4', 'x5') : [0.041460887947165959, 1.750701699009062, -6.555319383737249]\n ('x1', 'x2', 'x4', 'x6') : [-0.13636392595350361, 0.93947451490494371, -7.0795127068068524]\n ('x1', 'x2', 'x5', 'x6') : [0.29265965432182628, -0.64319643804432403, -5.2094838959455068]\n ('x1', 'x3', 'x4', 'x5') : [0.062916636390418529, -1.3985879474919183, -24.336675466150226]\n ('x1', 'x3', 'x4', 'x6') : [0.06473668370391425, 1.0048577542812709, -22.791965084616375]\n ('x1', 'x3', 'x5', 'x6') : [0.043042064730141855, -0.2544156123000838, 23.832389040589028]\n ('x1', 'x4', 'x5', 'x6') : [-0.40259999058230878, 0.95945270111252345, -6.2673433582474303]\n ('x2', 'x3', 'x4', 'x5') : [-0.093545919492229568, -0.32389346462037683, -8.4877628800121343]\n ('x2', 'x3', 'x4', 'x6') : [0.14084577073008919, -0.27960443111756228, -14.633241377330609]\n ('x2', 'x3', 'x5', 'x6') : [0.073114655553117552, -0.55753258524931448, -8.0146826586229736]\n ('x2', 'x4', 'x5', 'x6') : [0.15162524219467147, 0.64853361709653257, -2.2297925906290565]\n ('x3', 'x4', 'x5', 'x6') : [0.20463936917883771, 0.016016700001278084, -5.4490521482551095]\n('x1', 'x2', 'x3', 'x4', 'x5') : [-0.096107140490898246, -1.3749450513766432, 0.93697102065876159]\n('x1', 'x2', 'x3', 'x4', 'x6') : [0.16965789974039819, -1.3629950919698559, -24.809989454143341]\n('x1', 'x2', 'x3', 'x5', 'x6') : [0.034879782216321153, -0.20773040015431132, 8.0966804525758178]\n('x1', 'x2', 'x4', 'x5', 'x6') : [0.07855311161227238, -0.037912435751633278, 16.942026490863817]\n('x1', 'x3', 'x4', 'x5', 'x6') : [-0.20519903998045583, -0.91064282445138434, 0.20968490755234548]\n('x2', 'x3', 'x4', 'x5', 'x6') : [-0.26774198202914368, 0.37550820821858089, -12.293289389979673]\n('x1', 'x2', 'x3', 'x4', 'x5', 'x6') : [0.1512825348691661, -0.525731817407734, 36.787280204702299]\n" ] ], [ [ "We've computed the main and interaction effects for every variable combination (whew!), but now we're at a point where we want to start doing things with these quantities.", "_____no_output_____" ], [ "<a name=\"analyzing_effects\"></a>\n## Analyzing Effects\n\nThe first and most important question is, what variable, or combination of 
variables, has the strongest effect on the three responses $y_1$? $y_2$? $y_3$?\n\nTo figure this out, we'll need to use the data we computed above. Python makes it easy to slice and dice data. In this case, we've constructed a nested dictionary, with the outer keys mapping to the number of variables and inner keys mapping to particular combinations of input variables. It's pretty easy to convert this to a flat data structure that we can use to sort by variable effects. We've got six \"levels\" of variable combinations, so we'll flatten ```effects``` by looping through all six dictionaries of variable combinations (from main effects to six-variable interaction effects), and adding each entry to a master dictionary.\n\nThe master dictionary will be a flat dictionary, and once we've populated it, we can use it to make a DataFrame for easier sorting, printing, manipulating, aggregating, and so on.", "_____no_output_____" ] ], [ [ "print(len(effects))", "7\n" ], [ "master_dict = {}\nfor nvars in effects.keys():\n\n    effect = effects[nvars]\n    for k in effect.keys():\n        v = effect[k]\n        master_dict[k] = v\n\nmaster_df = pd.DataFrame(master_dict).T\nmaster_df.columns = obs_list", "_____no_output_____" ], [ "y1 = master_df['y1'].copy()\ny1.sort_values(inplace=True,ascending=False)\n\nprint(\"Top 10 effects for observable y1:\")\nprint(y1[:10])", "Top 10 effects for observable y1:\nx2                  1.161819\nx3                  1.009873\nx1                  0.943307\nx6                  0.909841\nx5                  0.737642\nx4                  0.707085\n(x1, x5, x6)        0.364434\n(x1, x2, x5, x6)    0.292660\n(x1, x2, x3)        0.275302\n(x1, x3, x6)        0.214889\nName: y1, dtype: float64\n" ], [ "y2 = master_df['y2'].copy()\ny2.sort_values(inplace=True,ascending=False)\n\nprint(\"Top 10 effects for observable y2:\")\nprint(y2[:10])", "Top 10 effects for observable y2:\nx5                  5.919423\nx3                  5.889504\nx6                  5.350950\nx2                  5.290452\nx1                  4.995830\nx4                  4.312980\n(x1, x2, x4, x5)    1.750702\n(x1, x2, x3)        1.733954\n(x1, x2, x4)        1.732308\n(x1, x3, x4)        1.549314\nName: y2, dtype: float64\n" ], [ "y3 = master_df['y3'].copy()\ny3.sort_values(inplace=True,ascending=False)\n\nprint(\"Top 10 effects for observable y3:\")\nprint(y3[:10])\n", "Top 10 effects for observable y3:\nx3                          117.980042\nx4                           93.841284\nx2                           88.974507\nx6                           85.074447\nx1                           81.100836\nx5                           80.560309\n(x1, x2, x3, x4, x5, x6)     36.787280\n(x3, x5, x6)                 26.955452\n(x1, x5, x6)                 26.314820\n(x1, x3, x5, x6)             23.832389\nName: y3, dtype: float64\n" ] ], [ [ "If we were only to look at the list of rankings of each variable, we would see that each response is affected by different input variables, listed below in order of descending importance:\n* $y_1$: 231654\n* $y_2$: 536214\n* $y_3$: 342615\n\nThis is a somewhat mixed message that's hard to interpret - could we get rid of variable 4? It ranks last for $y_1$ and $y_2$, but second for $y_3$, so probably not - and none of the other variables looks safe to drop either. \n\nHowever, looking at the quantile-quantile plot of the effects answers the question in a more visual way.", "_____no_output_____" ], [ "<a name=\"quantile_effects\"></a>\n## Quantile-Quantile Effects Plot\n\nWe can examine the distribution of the various input variable effects using a quantile-quantile plot of the effects. Quantile-quantile plots arrange the effects in order from least to greatest, and can be applied in several contexts (as we'll see below, when assessing model fits). 
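As a quick sketch of what `stats.probplot` will compute in the plots below, the ranked-effects plot can also be built by hand from `master_df`:

```python
# Hand-rolled normal quantile-quantile plot of the y1 effects, making explicit
# what stats.probplot does: sorted effects against theoretical normal quantiles.
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt

vals = np.sort(master_df['y1'].values)                  # effects, smallest to largest
n = len(vals)
theo = stats.norm.ppf((np.arange(1, n + 1) - 0.5) / n)  # theoretical quantiles

plt.plot(theo, vals, 'o')
plt.xlabel('Theoretical normal quantiles')
plt.ylabel('Ordered effects on y1')
plt.show()
```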
If the quantities plotted on a quantile-quantile plot are normally distributed, they will fall on a straight line; data that do not fall on the straight line indicate significant deviations from normal behavior.\n\nIn the case of a quantile-quantile plot of effects, non-normal behavior means the effect is particularly strong. By identifying the outlier points on these quantile-quantile plots (they're ranked in order, so they correspond to the lists printed above), we can identify the input variables most likely to have a strong impact on the responses.\n\nWe need to look both at the top (the variables that have the largest overall positive effect) and the bottom (the variables that have the largest overall negative effect) for significant outliers. When we find outliers, we can add them to a list of variables that we have decided are important and will keep in our analysis.", "_____no_output_____" ] ], [ [ "# Quantify which effects are not normally distributed, \n# to assist in identifying important variables\n\nfig = figure(figsize=(14,4))\nax1 = fig.add_subplot(131)\nax2 = fig.add_subplot(132)\nax3 = fig.add_subplot(133)\n\nstats.probplot(y1, dist=\"norm\", plot=ax1)\nax1.set_title('y1')\n\nstats.probplot(y2, dist=\"norm\", plot=ax2)\nax2.set_title('y2')\n\nstats.probplot(y3, dist=\"norm\", plot=ax3)\nax3.set_title('y3')", "_____no_output_____" ] ], [ [ "Normally, we would use the main effects that were computed, and their rankings, to eliminate any variables that don't have a strong effect on any of our responses. However, this analysis shows that sometimes we can't eliminate any variables.\n\nAll six input variables are depicted as the effects that fall far from the red line - indicating all have a statistically meaningful (i.e., not normally distributed) effect on all three response variables. This means we should keep all six factors in our analysis.\n\nThere is also a point on the $y_3$ graph that appears significant on the bottom. Examining the output of the lists above, this point represents the effect for the six-way interaction of all input variables. High-order interactions are highly unlikely (and in this case it is a numerical artifact of the way the responses were generated), so we'll keep things simple and stick to a linear model.\n\nLet's continue our analysis without eliminating any of the six factors, since they are important to all of our responses.", "_____no_output_____" ], [ "<a name=\"dof\"></a>\n## Utilizing Degrees of Freedom\n\nOur very expensive, 64-experiment full factorial design (the data for which maps $(x_1,x_2,\\dots,x_6)$ to $(y_1,y_2,y_3)$) gives us 64 data points, and 64 degrees of freedom. What we do with those 64 degrees of freedom is up to us.\n\nWe _could_ fit an empirical model, or response surface, that has 64 independent parameters, and account for many of the high-order interaction terms - all the way up to six-variable interaction effects. 
However, high-order effects are rarely important, and are a waste of our degrees of freedom.\n\nAlternatively, we can fit an empirical model with fewer coefficients, using up fewer degrees of freedom, and use the remaining degrees of freedom to characterize the error introduced by our approximate model.\n\nTo describe a model with the 6 variables listed above and no other variable interaction effects would use only 6 degrees of freedom, plus 1 degree of freedom for the constant term, leaving 57 degrees of freedom available to quantify error, attribute variance, etc.\n\nOur goal is to use least squares to compute model equations for $(y_1,y_2,y_3)$ as functions of $(x_1,x_2,x_3,x_4,x_5,x_6)$. ", "_____no_output_____" ] ], [ [ "xlabs = ['x1','x2','x3','x4','x5','x6']\nylabs = ['y1','y2','y3']\nls_data = doe[xlabs+ylabs]", "_____no_output_____" ], [ "import statsmodels.api as sm\nimport numpy as np\n\nx = ls_data[xlabs]\nx = sm.add_constant(x)", "_____no_output_____" ] ], [ [ "The first ordinary least squares linear model is created to predict values of the first variable, $y_1$, as a function of each of our input variables, the list of which is contained in the ```xlabs``` variable. When we perform the linear regression fitting, we see much of the same information that we found in the prior two-level three-factor full factorial design, but here, everything is done automatically.\n\nThe model is linear, meaning it's fitting the coefficients of the function:\n\n$$\n\\hat{y} = a_0 + a_1 x_1 + a_2 x_2 + a_3 x_3 + a_4 x_4 + a_5 x_5 + a_6 x_6\n$$\n\n(here, the response $\\hat{y}$ and the coefficients $a_i$ are vectors, with one component for each response variable; in our case, they are three-dimensional vectors.) \n\nBecause there are 64 observations and 7 coefficients, the 57 extra observations give us extra degrees of freedom with which to assess how good the model is. That analysis can be done with an ordinary least squares (OLS) model, available through the statsmodels library in Python. ", "_____no_output_____" ], [ "<a name=\"ols\"></a>\n## Ordinary Least Squares Regression Model\n\nThis built-in OLS model will fit an input vector $(x_1,x_2,x_3,x_4,x_5,x_6)$ to an output vector $(y_1,y_2,y_3)$ using a linear model; the OLS model is designed to fit the model with more observations than coefficients, and utilize the remaining data to quantify the fit of the model.\n\nLet's run through one of these, and analyze the results:", "_____no_output_____" ] ], [ [ "y1 = ls_data['y1']\nest1 = sm.OLS(y1,x).fit()\nprint(est1.summary())", "                            OLS Regression Results                            \n==============================================================================\nDep. Variable:                     y1   R-squared:                       0.759\nModel:                            OLS   Adj. R-squared:                  0.734\nMethod:                 Least Squares   F-statistic:                     29.96\nDate:                Tue, 27 Jun 2017   Prob (F-statistic):           6.28e-16\nTime:                        18:45:41   Log-Likelihood:                -62.028\nNo. 
Observations:                  64   AIC:                             138.1\nDf Residuals:                      57   BIC:                             153.2\nDf Model:                           6                                         \nCovariance Type:            nonrobust                                         \n==============================================================================\n                 coef    std err          t      P>|t|      [0.025      0.975]\n------------------------------------------------------------------------------\nconst          0.2143      0.084      2.537      0.014       0.045       0.384\nx1             0.4717      0.084      5.583      0.000       0.302       0.641\nx2             0.5809      0.084      6.877      0.000       0.412       0.750\nx3             0.5049      0.084      5.977      0.000       0.336       0.674\nx4             0.3535      0.084      4.185      0.000       0.184       0.523\nx5             0.3688      0.084      4.366      0.000       0.200       0.538\nx6             0.4549      0.084      5.385      0.000       0.286       0.624\n==============================================================================\nOmnibus:                        0.004   Durbin-Watson:                   2.156\nProb(Omnibus):                  0.998   Jarque-Bera (JB):                0.094\nSkew:                          -0.017   Prob(JB):                        0.954\nKurtosis:                       2.815   Cond. No.                         1.00\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n" ] ], [ [ "The statsmodels OLS object prints out quite a bit of useful information, in a nicely-formatted table. Starting at the top, we see a couple of important pieces of information: specifically, the name of the dependent variable (the response) that we're looking at, the number of observations, and the number of degrees of freedom. \n\nWe can see an $R^2$ statistic, which indicates how well this data is fit with our linear model, and an adjusted $R^2$ statistic, which accounts for the large number of degrees of freedom. While an adjusted $R^2$ of 0.73 is not great, we have to remember that this linear model is trying to capture a wealth of complexity in six coefficients. Furthermore, the adjusted $R^2$ value is too broad to sum up how good our model actually is.\n\nThe table in the middle is where the most useful information is located. The `coef` column shows the coefficients $a_0, a_1, a_2, \\dots$ for the model equation:\n\n$$\n\\hat{y} = a_0 + a_1 x_1 + a_2 x_2 + a_3 x_3 + a_4 x_4 + a_5 x_5 + a_6 x_6\n$$\n\nUsing the extra degrees of freedom, an estimate $s^2$ of the variance in the regression coefficients is also computed, and its square root is reported in the `std err` column. Each linear term is attributed the same amount of uncertainty, $\\pm 0.084$.", "_____no_output_____" ] ], [ [ "y2 = ls_data['y2']\nest2 = sm.OLS(y2,x).fit()\nprint(est2.summary())", "                            OLS Regression Results                            \n==============================================================================\nDep. Variable:                     y2   R-squared:                       0.832\nModel:                            OLS   Adj. R-squared:                  0.814\nMethod:                 Least Squares   F-statistic:                     47.06\nDate:                Tue, 27 Jun 2017   Prob (F-statistic):           2.61e-20\nTime:                        18:45:41   Log-Likelihood:                -159.57\nNo. Observations:                  64   AIC:                             333.1\nDf Residuals:                      57   BIC:                             348.3\nDf Model:                           6                                         \nCovariance Type:            nonrobust                                         \n==============================================================================\n                 coef    std err          t      P>|t|      [0.025      0.975]\n------------------------------------------------------------------------------\nconst         -0.3407      0.388     -0.878      0.383      -1.117       0.436\nx1             2.4979      0.388      6.440      0.000       1.721       3.275\nx2             2.6452      0.388      6.820      0.000       1.869       3.422\nx3             2.9448      0.388      7.593      0.000       2.168       3.721\nx4             2.1565      0.388      5.560      0.000       1.380       2.933\nx5             2.9597      0.388      7.631      0.000       2.183       3.736\nx6             2.6755      0.388      6.898      0.000       1.899       3.452\n==============================================================================\nOmnibus:                        1.009   Durbin-Watson:                   1.388\nProb(Omnibus):                  0.604   Jarque-Bera (JB):                0.915\nSkew:                          -0.040   Prob(JB):                        0.633\nKurtosis:                       2.420   Cond. No. 
1.00\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n" ], [ "y3 = ls_data['y3']\nest3 = sm.OLS(y3,x).fit()\nprint(est3.summary())", " OLS Regression Results \n==============================================================================\nDep. Variable: y3 R-squared: 0.714\nModel: OLS Adj. R-squared: 0.684\nMethod: Least Squares F-statistic: 23.72\nDate: Tue, 27 Jun 2017 Prob (F-statistic): 7.57e-14\nTime: 18:45:41 Log-Likelihood: -364.01\nNo. Observations: 64 AIC: 742.0\nDf Residuals: 57 BIC: 757.1\nDf Model: 6 \nCovariance Type: nonrobust \n==============================================================================\n coef std err t P>|t| [0.025 0.975]\n------------------------------------------------------------------------------\nconst -0.3109 9.461 -0.033 0.974 -19.256 18.634\nx1 40.5504 9.461 4.286 0.000 21.605 59.496\nx2 44.4873 9.461 4.702 0.000 25.542 63.432\nx3 58.9900 9.461 6.235 0.000 40.045 77.935\nx4 46.9206 9.461 4.959 0.000 27.975 65.866\nx5 40.2802 9.461 4.258 0.000 21.335 59.225\nx6 42.5372 9.461 4.496 0.000 23.592 61.482\n==============================================================================\nOmnibus: 1.901 Durbin-Watson: 1.508\nProb(Omnibus): 0.387 Jarque-Bera (JB): 1.485\nSkew: 0.186 Prob(JB): 0.476\nKurtosis: 2.353 Cond. No. 1.00\n==============================================================================\n\nWarnings:\n[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.\n" ] ], [ [ "<a name=\"goodness_of_fit\"></a>\n## Quantifying Model Goodness-of-Fit\n\nWe can now use these linear models to evaluate each set of inputs and compare the model response $\\hat{y}$ to the actual observed response $y$. What we would expect to see, if our model does an adequate job of representing the underlying behavior of the model, is that in each of the 64 experiments, the difference between the model prediction $M$ and the measured data $d$, defined as the residual $r$,\n\n$$\nr = \\left| d - M \\right|\n$$\n\nshould be comparable across all experiments. If the residuals appear to have functional dependence on the input variables, it is an indication that our model is missing important effects and needs more or different terms. The way we determine this, mathematically, is by looking at a quantile-quantile plot of our errors (that is, a ranked plot of our error magnitudes). 
\n\nIf the residuals are normally distributed, they will follow a straight line; if the plot shows the data have significant wiggle and do not follow a line, it is an indication that the errors are not normally distributed, and instead have some structure to them (indicating terms missing from our OLS model).", "_____no_output_____" ] ], [ [ "%matplotlib inline\nimport seaborn as sns\nimport scipy.stats as stats\nfrom matplotlib.pyplot import *\n\n# Quantify goodness of fit\n\nfig = figure(figsize=(14,4))\nax1 = fig.add_subplot(131)\nax2 = fig.add_subplot(132)\nax3 = fig.add_subplot(133)\n\nr1 = y1 - est1.predict(x)\nr2 = y2 - est2.predict(x)\nr3 = y3 - est3.predict(x)\n\nstats.probplot(r1, dist=\"norm\", plot=ax1)\nax1.set_title('Residuals, y1')\n\nstats.probplot(r2, dist=\"norm\", plot=ax2)\nax2.set_title('Residuals, y2')\n\nstats.probplot(r3, dist=\"norm\", plot=ax3)\nax3.set_title('Residuals, y3')\n", "_____no_output_____" ] ], [ [ "Determining whether significant trends are being missed by the model depends on how many points deviate from the red line, and how significantly. If there is a single point that deviates, it does not necessarily indicate a problem; but if there is significant wiggle and most points deviate significantly from the red line, it means that there is something about the relationship between the inputs and the outputs that our model is missing.\n\nThere are only a few points deviating from the red line. We saw from the effect quantile for $y_3$ that there was an interaction variable that was important to modeling the response $y_3$, and it is likely this interaction that is leading to noise at the tail end of these residuals. This indicates residual errors (deviations of the model from data) that do not follow a natural, normal distribution, which indicates there is a _pattern_ in the deviations - namely, the interaction effect.\n\nThe conclusion about the error from the quantile plots above is that there are only a few points deviating from the line, and no particularly significant outliers. Our model can use some improvement, but it's a pretty good first-pass model.", "_____no_output_____", "<a name=\"distribution_of_error\"></a>\n## Distribution of Error\n\nAnother thing we can look at is the normalized error: what are the residual errors (differences between our model prediction and our data)? How are their values distributed? \n\nA kernel density estimate (KDE) plot, which is a smoothed histogram, shows the probability distribution of the normalized residual errors. As expected, they're bunched pretty close to zero. There are some bumps far from zero, corresponding to the outliers on the quantile-quantile plot of the errors above. 
However, they're pretty close to randomly distributed, and therefore it doesn't look like there is any systematic bias there.", "_____no_output_____" ] ], [ [ "fig = figure(figsize=(10,12))\nax1 = fig.add_subplot(311)\nax2 = fig.add_subplot(312)\nax3 = fig.add_subplot(313)\naxes = [ax1,ax2,ax3]\n\ncolors = sns.xkcd_palette([\"windows blue\", \"amber\", \"faded green\", \"dusty purple\",\"aqua blue\"])\n\n#resids = [r1, r2, r3]\nnormed_resids = [r1/y1, r2/y2, r3/y3]\n\nfor (dataa, axx, colorr) in zip(normed_resids,axes,colors):\n    sns.kdeplot(dataa, bw=1.0, ax=axx, color=colorr, shade=True, alpha=0.5);\n\nax1.set_title('Probability Distribution: Normalized Residual Error, y1')\nax2.set_title('Normalized Residual Error, y2')\nax3.set_title('Normalized Residual Error, y3')\n", "_____no_output_____" ] ], [ [ "Note that in these figures, the bumps at extreme values are caused by the fact that the interval containing the responses includes 0 and values close to 0, so the normalization factor is very tiny, leading to large values.", "_____no_output_____", "<a name=\"aggregating\"></a>\n## Aggregating Results\n\nLet's next aggregate experimental results, by taking the mean over various variables to compute the mean effect for regressed variables. For example, we may want to look at the effects of variables 2, 3, and 4, and take the mean over the other three variables.\n\nThis is simple to do with Pandas, by grouping the data by each variable, and applying the mean function to all of the results. The code looks like this:", "_____no_output_____" ] ], [ [ "# Our original regression variables\nxlabs = ['x2','x3','x4']\ndoe.groupby(xlabs)[ylabs].mean()", "_____no_output_____" ], [ "# If we decided to go for a different variable set\nxlabs = ['x2','x3','x4','x6']\ndoe.groupby(xlabs)[ylabs].mean()", "_____no_output_____" ] ], [ [ "This functionality can also be used to determine the variance in all of the experimental observations being aggregated. For example, here we aggregate over $x_3 \dots x_6$ and show the variance broken down by $x_1, x_2$ vs $y_1, y_2, y_3$.", "_____no_output_____" ] ], [ [ "xlabs = ['x1','x2']\ndoe.groupby(xlabs)[ylabs].var()", "_____no_output_____" ] ], [ [ "Or even the number of experimental observations being aggregated!", "_____no_output_____" ] ], [ [ "doe.groupby(xlabs)[ylabs].count()", "_____no_output_____" ] ], [ [ "<a name=\"dist_variance\"></a>\n## Distributions of Variance\n\nWe can convert these dataframes of averages, variances, and counts into data for plotting. 
For example, if we want to make a histogram of every value in the groupby dataframe, we can use the ```.values``` method, so that this:\n\n    doe.groupby(xlabs)[ylabs].mean()\n\nbecomes this:\n\n\n    doe.groupby(xlabs)[ylabs].mean().values\n\n\nThis $M \times N$ array can then be flattened into a vector using the ```ravel()``` method from numpy:\n\n    np.ravel( doe.groupby(xlabs)[ylabs].mean().values )\n\nThe resulting data can be used to generate histograms, as shown below:", "_____no_output_____" ] ], [ [ "# Histogram of means of response values, grouped by xlabs\n\nxlabs = ['x1','x2','x3','x4']\n\nprint(\"Grouping responses by %s\"%( \"-\".join(xlabs) ))\n\ndat = np.ravel(doe.groupby(xlabs)[ylabs].mean().values) / np.ravel(doe.groupby(xlabs)[ylabs].var().values)\n\nhist(dat, 10, density=False, color=colors[3]);\nxlabel(r'Relative Variance ($\mu$/$\sigma^2$)')\nshow()", "Grouping responses by x1-x2-x3-x4\n" ], [ "# Histogram of variances of response values, grouped by xlabs\n\nprint(\"Grouping responses by %s\"%( \"-\".join(xlabs) ))\n\ndat = np.ravel(doe.groupby(xlabs)['y1'].var().values)\n\nhist(dat, density=True, color=colors[4])\nxlabel(r'Variance in $y_{1}$ Response')\nylabel(r'Frequency')\nshow()", "Grouping responses by x1-x2-x3-x4\n" ] ], [ [ "The distribution of variance looks _mostly_ normal, with some outliers. These are the same outliers that showed up in our quantile-quantile plot, and they'll show up in the plots below as well.", "_____no_output_____", "<a name=\"residual\"></a>\n## Residual vs. Response Plots\n\nAnother thing we can do, to look for uncaptured effects, is to look at our residuals vs. $\hat{y}$. This is a further effort to look for underlying functional relationships between $\hat{y}$ and the residuals, which would indicate that our system exhibits behavior not captured by our linear model.", "_____no_output_____" ] ], [ [ "# normal plot of residuals\n\nfig = figure(figsize=(14,4))\nax1 = fig.add_subplot(131)\nax2 = fig.add_subplot(132)\nax3 = fig.add_subplot(133)\n\nax1.plot(y1,r1,'o',color=colors[0])\nax1.set_xlabel('Response value $y_1$')\nax1.set_ylabel('Residual $r_1$')\n\nax2.plot(y2,r2,'o',color=colors[1])\nax2.set_xlabel('Response value $y_2$')\nax2.set_ylabel('Residual $r_2$')\nax2.set_title('Response vs. Residual Plots')\n\nax3.plot(y3,r3,'o',color=colors[2])\nax3.set_xlabel('Response value $y_3$')\nax3.set_ylabel('Residual $r_3$')\n\nshow()", "_____no_output_____" ] ], [ [ "Notice that each plot is trending up and to the right - indicative of an underlying trend that our model $\hat{y}$ is not capturing. The trend is relatively weak, however, indicating that our linear model does a good job of capturing _most_ of the relevant effects of this system.", "_____no_output_____", "# Discussion \n\nThe analysis shows that there are some higher-order or nonlinear effects in the system that a purely linear model does not account for. Next steps would involve adding higher-order design points, so that a quadratic or higher-order polynomial model can be fit to the additional data.", "_____no_output_____" ] ] ]
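As a numerical complement to the visual Q-Q checks in this notebook, a formal normality test can be run on the same residual vectors. The sketch below is an illustrative addition (not part of the original analysis); it assumes the residuals `r1`, `r2`, `r3` computed above and uses SciPy's Shapiro-Wilk test.

```python
import numpy as np
import scipy.stats as stats

def check_residual_normality(residuals, alpha=0.05):
    """Shapiro-Wilk test on a residual vector.

    Returns (W statistic, p-value, is_normal); with p >= alpha we fail to
    reject the null hypothesis that the residuals are normally distributed.
    """
    residuals = np.asarray(residuals, dtype=float)
    stat, p_value = stats.shapiro(residuals)
    return stat, p_value, p_value >= alpha

# Hypothetical usage with the residuals computed above:
# for name, r in zip(('y1', 'y2', 'y3'), (r1, r2, r3)):
#     w, p, ok = check_residual_normality(r)
#     print('%s: W=%.3f  p=%.3f  normal at 5%%: %s' % (name, w, p, ok))
```

A low p-value for `r3` would be consistent with the interaction effect already suspected from the Q-Q tail for $y_3$.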
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ] ]
cb97f00987be0cc671483bf541d063ef369056dd
123,956
ipynb
Jupyter Notebook
Homework 3/domaci3.ipynb
NikolaZubic/AppliedGameTheoryHomeworkSolutions
4ba711cb06a6cf7bff22247d963e3d0bc382559a
[ "MIT" ]
2
2021-04-06T07:50:41.000Z
2021-12-14T09:19:05.000Z
Homework 3/domaci3.ipynb
NikolaZubic/AppliedGameTheoryHomeworkSolutions
4ba711cb06a6cf7bff22247d963e3d0bc382559a
[ "MIT" ]
null
null
null
Homework 3/domaci3.ipynb
NikolaZubic/AppliedGameTheoryHomeworkSolutions
4ba711cb06a6cf7bff22247d963e3d0bc382559a
[ "MIT" ]
1
2021-04-06T07:52:10.000Z
2021-04-06T07:52:10.000Z
143.96748
28,322
0.836474
[ [ [ "<a href=\"https://colab.research.google.com/github/NikolaZubic/AppliedGameTheoryHomeworkSolutions/blob/main/domaci3.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# TREĆI DOMAĆI ZADATAK iz predmeta \"Primenjena teorija igara\" (Applied Game Theory)", "_____no_output_____" ], [ "Razvoj bota za igranje igre Ajnc (BlackJack) koristeći \"Q-learning\" pristup.", "_____no_output_____" ], [ "# Potrebni import-i", "_____no_output_____" ] ], [ [ "import gym\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom gym import spaces\nimport seaborn as sns", "_____no_output_____" ] ], [ [ "# Definisanje Ajnc okruženja koristeći \"Open AI Gym\" toolkit", "_____no_output_____" ] ], [ [ "class BlackJackEnvironment(gym.Env):\n # Because of human-friendly output\n metadata = {'render.modes':['human']}\n \n def __init__(self):\n \"\"\"\n We will define possible number of states with observation_space.\n Player's sum can go from 4 to 32: Now when the sum is 22, and the player chooses to hit, he may get a card with value 10, resulting in a sum of 32, and thus loosing the game.\n Dealer's card can be from 1 to 10 and we have 2 actions.\n Total number of states: 29 * 10 * 2 = 580\n Total number of actions = 2 = len( {\"HIT\", \"STAND\"} )\n \"\"\"\n self.observation_space = spaces.Discrete(580)\n self.action_space = spaces.Discrete(2)\n self.step_count = 0 # at the beginning of the game we have 0 actions taken\n\n def check_usable_ace(self,hand):\n \"\"\"\n If someone has an usable ace, we will replace that ace (1) with 11.\n\n :param hand: player's or dealer's card\n :return: True if we have usable ace, False otherwise\n \"\"\"\n temp_hand = hand.copy()\n \n # Check if there is ace in hand\n if np.any(temp_hand == 1):\n # If we have any ace then replace it with 11, but if we have more than one ace replace the first one with 11\n temp_hand[np.where(temp_hand == 1)[0][0]] = 11\n\n # If the sum is less or equal than 21 then we can use it\n if temp_hand.sum() <= 21:\n return True\n\n return False\n \n def use_ace(self,hand):\n \"\"\"\n If there is usable ace in function above, then replace 1 with 11.\n\n :param hand: player's or dealer's hand\n :return: new hand where 1 is replaced with 11\n \"\"\"\n temp_hand = hand.copy()\n temp_hand[np.where(temp_hand == 1)[0][0]] = 11\n return temp_hand\n \n def reset(self):\n # Resets the environment after one game.\n \n # Initialize player's hand\n self.current_hand = np.random.choice(range(1,11),2)\n \n # Initialize usable Ace to False, since we don't have it at the very beginning\n self.usable_ace = False\n \n self.dealer_stand, self.player_stand = False, False\n \n # Replace usable ace in the player's hand \n if self.check_usable_ace(self.current_hand):\n self.usable_ace = True\n self.current_hand = self.use_ace(self.current_hand)\n \n # Player's current sum\n self.current_sum = self.current_hand.sum()\n \n # Dealer's hand\n self.dealer_hand = np.random.choice(range(1,11),2)\n \n # Dealer's sum\n self.dealer_sum = self.dealer_hand.sum()\n \n # First element of self.dealer_hand is the current showing card of dealer\n self.dealer_showing_card = self.dealer_hand[0]\n \n # Replace usable ace in the dealer's hand\n if self.check_usable_ace(self.dealer_hand):\n temp_dealer_hand = self.use_ace(self.dealer_hand)\n self.dealer_sum = temp_dealer_hand.sum()\n \n def take_turn(self, current_player):\n \"\"\"\n Play one turn for the player. 
This function will be called from step() function, directly depending on the game state.\n We will take new random card, add it to the current_player hand.\n\n :param player: {\"player\", \"dealer\"}\n :return: None\n \"\"\"\n if current_player == 'dealer':\n # Take new random card\n new_card = np.random.choice(range(1,11))\n \n # Add new card to the current_player hand\n new_dealer_hand = np.array(self.dealer_hand.tolist() + [new_card])\n \n # Check for usable ace and replace if found\n if self.check_usable_ace(new_dealer_hand):\n new_dealer_hand = self.use_ace(new_dealer_hand)\n \n self.dealer_hand = new_dealer_hand\n\n # Update his sum\n self.dealer_sum = self.dealer_hand.sum()\n \n if current_player == 'player': \n new_card = np.random.choice(range(1,11)) \n new_player_hand = np.array(self.current_hand.tolist()+ [new_card])\n \n if self.check_usable_ace(new_player_hand):\n self.usable_ace = True\n new_player_hand = self.use_ace(new_player_hand)\n \n self.current_hand = new_player_hand\n self.current_sum = self.current_hand.sum()\n \n def check_game_status(self, mode = 'normal'):\n \"\"\"\n Check the current status of the game.\n During the 'normal' we check after each turn whether we got in the terminal state.\n In the 'compare' mode we compare the totals of both players (player vs dealer) in order to pronounce the winner.\n \n :param mode: {'normal', 'compare'}\n :return: dictionary with the winner, whether the game is finished and the reward of the game\n \"\"\"\n result = {'winner':'',\n 'is_done': False,\n 'reward':0}\n \n if mode == 'normal':\n if self.current_sum > 21:\n result['winner'] = 'dealer'\n result['is_done'] = True\n result['reward'] = -1\n elif self.dealer_sum > 21:\n result['winner'] = 'player'\n result['is_done'] = True\n result['reward'] = 1\n elif self.current_sum == 21:\n result['winner'] = 'player'\n result['is_done'] = True\n result['reward'] = 1\n elif self.dealer_sum == 21:\n result['winner'] = 'dealer'\n result['is_done'] = True\n result['reward'] = -1\n \n elif mode == 'compare': \n result['is_done'] = True\n diff_21_player = 21 - self.current_sum \n diff_21_dealer = 21 - self.dealer_sum\n if diff_21_player > diff_21_dealer:\n result['reward'] = -1\n result['winner'] = 'dealer'\n elif diff_21_player < diff_21_dealer:\n result['reward'] = 1\n result['winner'] = 'player'\n else:\n result['reward'] = 0\n result['winner'] = 'draw'\n return result\n \n return result\n \n def step(self,action):\n \"\"\"\n Performs one action.\n\n :param action:\n :return: dictionary with the winner, whether the game is finished and the reward of the game\n \"\"\"\n \n # Increase number of actions that are taken during the game.\n self.step_count += 1\n \n result = {'winner':'',\n 'is_done': False,\n 'reward':0} \n \n \"\"\"\n Before taking the first step of the game, we need to ensure that there is no winning condition.\n Check if the initial two cards of the players are 21. If anyone has 21, then that player wins.\n If both players have 21, then the game is DRAW. 
Otherwise, we will continue with the game.\n        \"\"\"\n        if self.step_count == 1:\n            if self.check_usable_ace(self.current_hand):\n                self.current_hand = self.use_ace(self.current_hand)\n            if self.check_usable_ace(self.dealer_hand):\n                self.dealer_hand = self.use_ace(self.dealer_hand)\n            \n            if self.current_sum == 21 and self.dealer_sum == 21:\n                result['is_done'] = True\n                result['reward'] = 0\n                result['winner'] = 'draw'\n                return result\n            elif self.current_sum == 21 and self.dealer_sum < 21:\n                result['is_done'] = True\n                result['reward'] = 1\n                result['winner'] = 'player'\n                return result\n            elif self.dealer_sum == 21 and self.current_sum < 21:\n                result['is_done'] = True\n                result['reward'] = -1\n                result['winner'] = 'dealer'\n                return result\n        \n        if self.dealer_sum >= 17:\n            self.dealer_stand = True\n        \n        # action = 0 means \"HIT\"\n        if action == 0: \n            self.take_turn('player')\n            result = self.check_game_status()\n            if result['is_done'] == True:\n                return result\n        \n        # action = 1 means \"STAND\"\n        if action == 1:\n            if self.dealer_stand == True:\n                return self.check_game_status(mode = 'compare')\n            \n            \"\"\"\n            If the dealer hasn't stood, he will hit until his sum is greater than or equal to 17.\n            After that, he will stand.\n            \"\"\"\n            while self.dealer_sum < 17:\n                self.take_turn('dealer')\n                result = self.check_game_status()\n                # After the dealer's turn, check the game status.\n                if result['is_done'] == True:\n                    return result\n\n            # If the game hasn't finished yet, we set dealer_stand to True, so the player will either HIT or STAND \n            self.dealer_stand = True\n        \n        return result\n    \n    def get_current_state(self):\n        \"\"\"\n        Get current state, which is comprised of current player's sum, dealer's showing card and usable ace presence.\n        \n        :return: current state variables\n        \"\"\"\n        current_state = {}\n        \n        current_state['current_sum'] = self.current_sum\n        current_state['dealer_showing_card'] = self.dealer_showing_card\n        current_state['usable_ace'] = self.usable_ace\n        \n        return current_state\n    \n    \n    def render(self):\n        \n        print(\"OBSERVABLE STATES\")\n        print(\"Current player's sum: {}\".format(self.current_sum))\n        print(\"Dealer's showing card: {}\".format(self.dealer_showing_card))\n        print(\"Player has usable Ace: {}\".format(self.usable_ace))\n        \n        print(\"INFORMATION ABOUT CARDS AND DEALER'S SUM\")\n        print(\"Player's hand: {}\".format(self.current_hand))\n        print(\"Dealer's hand: {}\".format(self.dealer_hand))\n        print(\"Dealer's sum: {}\".format(self.dealer_sum))\n", "_____no_output_____" ] ], [ [ "# Helper functions for Q-learning", "_____no_output_____" ] ], [ [ "# dictionaries used for converting the state values to indexes in the Q table\ncurrent_sum_to_index = dict(zip(np.arange(4,33),np.arange(29))) \ndealer_showing_card_to_index = dict(zip(np.arange(1,11),np.arange(10)))\nusable_ace_index = dict(zip([False,True],[0,1]))\naction_index = dict(zip(['HIT','STAND'],[0,1]))\n\ndef get_state_q_indices(current_state):\n    \"\"\"\n    Get indexes of Q table for any given state.\n\n    :param current_state: comprised of current player's sum, dealer's showing card and usable ace presence.\n    :return: Q table indexes for a state\n    \"\"\"\n    current_sum_idx = current_sum_to_index[current_state['current_sum']]\n    dealer_showing_card_idx = dealer_showing_card_to_index[current_state['dealer_showing_card']]\n    usable_ace_idx = usable_ace_index[current_state['usable_ace']]\n    \n    return [current_sum_idx,dealer_showing_card_idx,usable_ace_idx]\n\ndef get_max_action(Q_sa, current_state):\n    \"\"\"\n    Get the action with the max Q-value for the 
given current state and the Q table.\n    \n    :param Q_sa: given Q table\n    :param current_state: current state\n    :return: best action for given state and Q table\n    \"\"\"\n    state_q_idxs = get_state_q_indices(current_state)\n    action = Q_sa[state_q_idxs[0],state_q_idxs[1],state_q_idxs[2],:].argmax()\n    \n    return action\n\ndef get_q_value(Q_sa, state, action):\n    \"\"\"\n    Get Q(s,a) value for state and action in a given Q table.\n    \n    :param Q_sa: given Q table\n    :param state: given state\n    :param action: given action\n    :return: Q(s, a)\n    \"\"\"\n    state_q_idxs = get_state_q_indices(state)\n    q_value = Q_sa[state_q_idxs[0],state_q_idxs[1],state_q_idxs[2],action]\n\n    return q_value\n", "_____no_output_____" ] ], [ [ "# Q-learning", "_____no_output_____", "Initialization of the Q table.", "_____no_output_____" ] ], [ [ "\"\"\"\nPlayer's current sum is ranging from 4 to 32 => 32 - 4 + 1 = 29\nDealer's showing card can be one from the following set {1, 2, 3, 4, 5, 6, 7, 8, 9, 10} => 10 values\nAce can be usable or not => 2\nActions are from the following set {\"HIT\", \"STAND\"} => 2\n\"\"\"\nQ = np.zeros((29,10,2,2))", "_____no_output_____" ] ], [ [ "The training process.", "_____no_output_____" ] ], [ [ "episode_count = 0\ntotal_episodes = 2000000\n\n# Discounting factor\ngamma = 0.9 \n\n# Learning rate (step size for the Q-value updates)\nLAMBDA = 0.1\n\n# Defined Black Jack Environment\nenvironment = BlackJackEnvironment()\n\nwhile episode_count < total_episodes:    \n    environment.reset()\n    \n    current_state = environment.get_current_state()\n    current_action = get_max_action(Q, current_state)\n    \n    # Take action\n    step_result = environment.step(current_action)\n    \n    # Get into next state and get the reward\n    next_state = environment.get_current_state()\n    next_max_action = get_max_action(Q, next_state)\n    immediate_reward = step_result['reward']\n    \n    next_state_q_idxs = get_state_q_indices(next_state)\n    \n    # Get the q-value for the next state and max action in the next state\n    q_max_s_a = get_q_value(Q, next_state, next_max_action)\n    \n    td_target = immediate_reward + gamma * q_max_s_a\n    \n    # Get the q-value for the current state and action\n    q_current_s_a = get_q_value(Q, current_state, current_action)\n    \n    td_error = td_target - q_current_s_a\n    \n    state_q_idxs = get_state_q_indices(current_state)\n    \n    # Update the current Q(s, a)\n    Q[state_q_idxs[0],state_q_idxs[1],state_q_idxs[2],current_action] = q_current_s_a + LAMBDA * td_error\n\n    # get into the next state \n    current_state = next_state\n    \n    if step_result['is_done']:\n        episode_count += 1\n        \n        if episode_count % 100000 == 0:\n            print(\"Episode number: {}\".format(episode_count))\n", "Episode number: 100000\nEpisode number: 200000\nEpisode number: 300000\nEpisode number: 400000\nEpisode number: 500000\nEpisode number: 600000\nEpisode number: 700000\nEpisode number: 800000\nEpisode number: 900000\nEpisode number: 1000000\nEpisode number: 1100000\nEpisode number: 1200000\nEpisode number: 1300000\nEpisode number: 1400000\nEpisode number: 1500000\nEpisode number: 1600000\nEpisode number: 1700000\nEpisode number: 1800000\nEpisode number: 1900000\nEpisode number: 2000000\n" ] ], [ [ "# Discussion of results", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(ncols= 2,figsize=(16,8)) \nsns.heatmap(Q[:,:,0,0],cmap = sns.light_palette((210, 90, 60), input=\"husl\"), ax = ax[0], \n            xticklabels=np.arange(1,11),yticklabels=np.arange(4,33))\nax[0].set_title(\"Usable Ace = False, Action = HIT\")\nax[0].set_xlabel(\"Dealer's Showing Card\")\nax[0].set_ylabel(\"Current Player's 
Sum\")\n\n\nsns.heatmap(Q[:,:,0,1],cmap = sns.light_palette((210, 90, 60), input=\"husl\"), ax = ax[1],\n xticklabels=np.arange(1,11),yticklabels=np.arange(4,33))\nax[1].set_title(\"Usable Ace = False, Action = STAND\")\nax[1].set_xlabel(\"Dealer's Showing Card\")\nax[1].set_ylabel(\"Current Player's Sum\")\n", "_____no_output_____" ] ], [ [ "Na osnovu gornjih heatmapa možemo uočiti koje je to akcije dobro izvršiti u kojem stanju.\n\n**Zaključak sa lijeve heatmape**: kada je ukupna suma igrača manja od 12, 13 onda je najbolje da se izvršava akcija \"HIT\".\n\n**Zaključak sa desne heatmape**: Za veće vrijednosti otkrivene karte djelitelja i veće vrijednosti ukupne sume igrača bolje je izvršiti akciju \"STAND\".", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(ncols = 2, figsize=(16,8)) \nsns.heatmap(Q[:,:,1,0],cmap = sns.light_palette((210, 90, 60), input=\"husl\"), ax = ax[0],\n xticklabels=np.arange(1,11),yticklabels=np.arange(4,33))\nax[0].set_title(\"Usable Ace = True, Action = HIT\")\nax[0].set_xlabel(\"Dealer's Showing Card\")\nax[0].set_ylabel(\"Current Player's Sum\")\n\n\nsns.heatmap(Q[:,:,1,1],cmap = sns.light_palette((210, 90, 60), input=\"husl\"), ax = ax[1],\n xticklabels=np.arange(1,11),yticklabels=np.arange(4,33))\nax[1].set_title(\"Usable Ace = True, Action = STAND\")\nax[1].set_xlabel(\"Dealer's Showing Card\")\nax[1].set_ylabel(\"Current Player's Sum\")", "_____no_output_____" ] ], [ [ "U slučaju kad imamo iskoristiv kec, broj semplova je znatno manji, tako da paterni Q-vrijednosti nisu baš potpuno jasni, ali može se zaključiti da je najbolje izvršiti akciju **\"HIT\" u slučajevima kad je suma igrača oko 12**, dok se akcija **\"STAND\" izvršava u slučaju kada je igra pri kraju po pitanju sume igrača**.", "_____no_output_____" ], [ "Sada ćemo pogledati naučene politike (za slučaj pohlepne politike, jer želimo da naš igrač uvijek bira onako da najbolje igra).\n\n**Sa crnim blokovima označeno je kada treba izvršiti akciju \"HIT\"**, a imamo 2 heatmape za slučaj kad nemamo i imamo iskoristiv kec.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(ncols= 1,figsize=(8,6)) \nsns.heatmap(np.argmax(Q[:17,:,0,:],axis=2),cmap = sns.cubehelix_palette(8, start=2, rot=0, dark=0, light=.95, reverse=True)\\\n ,linewidths=1,xticklabels=np.arange(1,11),yticklabels=np.arange(4,33))\nax.set_title(\"Usable Ace = False\")\nax.set_xlabel(\"Dealer's Showing Card\")\nax.set_ylabel(\"Current Player's Sum\")", "_____no_output_____" ], [ "fig, ax = plt.subplots(ncols= 1,figsize=(8,6)) \nsns.heatmap(np.argmax(Q[:17,:,1,:],axis=2),cmap = sns.cubehelix_palette(8, start=2, rot=0, dark=0, light=.95, reverse=True)\\\n ,linewidths=1,xticklabels=np.arange(1,11),yticklabels=np.arange(4,33))\nax.set_title(\"Usable Ace = True\")\nax.set_xlabel(\"Dealer's Showing Card\")\nax.set_ylabel(\"Current Player's Sum\")\n", "_____no_output_____" ] ], [ [ "# Na kraju, nakon 2 miliona iteracija treniranja, testiraćemo algoritam na 10 000 partija.", "_____no_output_____" ] ], [ [ "player_wins = 0\ndealer_wins = 0\nNUMBER_OF_GAMES = 10000\n\nfor i in range(NUMBER_OF_GAMES):\n\n environment.reset()\n\n while True:\n current_state = environment.get_current_state()\n current_action = get_max_action(Q, current_state)\n\n # Take action\n step_result = environment.step(current_action)\n #environment.render()\n next_state = environment.get_current_state()\n current_state = next_state\n\n if step_result['is_done']:\n break\n\n if step_result['winner'] == 'player':\n player_wins += 1\n elif step_result['winner'] == 
'dealer':\n dealer_wins += 1\n\nprint(\"Player wins: \" + str(player_wins))\nprint(\"Dealer wins: \" + str(dealer_wins))\nprint(\"Player wins percentage = \" + str(round(100 * (player_wins / (player_wins + dealer_wins)), 2)) + \"%\")", "Player wins: 3681\nDealer wins: 5706\nPlayer wins percentage = 39.21%\n" ] ] ]
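One note on the training loop above: actions are always chosen greedily from the Q table, which is part of why the usable-ace region stays under-sampled. A common remedy is epsilon-greedy action selection. The sketch below is an illustrative variant, not part of the original homework; it reuses the notebook's `get_state_q_indices` helper, and `epsilon=0.1` is an arbitrary example value, not tuned for this environment.

```python
import numpy as np

def epsilon_greedy_action(Q_sa, current_state, epsilon=0.1, n_actions=2):
    """Pick a random action with probability epsilon, otherwise the greedy one.

    Relies on get_state_q_indices() defined above; n_actions=2 matches
    the {HIT, STAND} action set of this environment.
    """
    if np.random.rand() < epsilon:
        return np.random.randint(n_actions)  # explore: HIT (0) or STAND (1)
    idx = get_state_q_indices(current_state)
    return Q_sa[idx[0], idx[1], idx[2], :].argmax()  # exploit the Q table

# Hypothetical usage inside the training loop, replacing get_max_action:
# current_action = epsilon_greedy_action(Q, current_state, epsilon=0.1)
```

Exploration during training (with greedy play kept for evaluation) would give more reliable Q-value estimates in the rarely visited states.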
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
cb9803a4f9d79ca2f981fd5d7d34c7b5e3c88ac9
15,272
ipynb
Jupyter Notebook
tftrt/examples/presentations/GTC-April2021-Dynamic-shape-BERT.ipynb
jhalakp-nvidia/tensorrt
65ed25ccd5d538d6a42223cb69c2fb9858f39805
[ "Apache-2.0" ]
662
2018-11-16T05:15:26.000Z
2022-03-28T12:32:50.000Z
tftrt/examples/presentations/GTC-April2021-Dynamic-shape-BERT.ipynb
jhalakp-nvidia/tensorrt
65ed25ccd5d538d6a42223cb69c2fb9858f39805
[ "Apache-2.0" ]
234
2018-12-02T15:44:08.000Z
2022-03-31T20:23:28.000Z
tftrt/examples/presentations/GTC-April2021-Dynamic-shape-BERT.ipynb
jhalakp-nvidia/tensorrt
65ed25ccd5d538d6a42223cb69c2fb9858f39805
[ "Apache-2.0" ]
239
2018-11-14T22:00:57.000Z
2022-03-31T20:01:33.000Z
30.72837
469
0.590492
[ [ [ "# Copyright 2021 NVIDIA Corporation. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Accelerate BERT encoder with TF-TRT\n\n\n## Introduction\n\nThe NVIDIA TensorRT is a C++ library that facilitates high performance inference on NVIDIA graphics processing units (GPUs). TensorFlow™ integration with TensorRT™ (TF-TRT) optimizes TensorRT compatible parts of your computation graph, allowing TensorFlow to execute the remaining graph. While you can use TensorFlow's wide and flexible feature set, TensorRT will produce a highly optimized runtime engine for the TensorRT compatible subgraphs of your network.\n\nIn this notebook, we demonstrate accelerating BERT inference using TF-TRT. We focus on the encoder.\n\n## Requirements\nThis notebook requires at least TF 2.5 and TRT 7.1.3.", "_____no_output_____" ], [ "## 1. Download the model\nWe will download a bert base model from [TF-Hub](https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3).", "_____no_output_____" ] ], [ [ "!pip install -q tf-models-official", "\u001b[33mWARNING: You are using pip version 21.0; however, version 21.0.1 is available.\r\nYou should consider upgrading via the '/usr/bin/python -m pip install --upgrade pip' command.\u001b[0m\r\n" ], [ "import tensorflow as tf\nimport tensorflow_hub as hub", "INFO:tensorflow:Enabling eager execution\nINFO:tensorflow:Enabling v2 tensorshape\nINFO:tensorflow:Enabling resource variables\nINFO:tensorflow:Enabling tensor equality\nINFO:tensorflow:Enabling control flow v2\n" ], [ "tfhub_handle_encoder = 'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3'\nbert_saved_model_path = 'bert_base'", "_____no_output_____" ], [ "bert_model = hub.load(tfhub_handle_encoder)\ntf.saved_model.save(bert_model, bert_saved_model_path)", "WARNING:absl:Found untraced functions such as restored_function_body, restored_function_body, restored_function_body, restored_function_body, restored_function_body while saving (showing 5 of 910). These functions will not be directly callable after loading.\n" ] ], [ [ "## 2. Inference\nIn this section we will convert the model using TF-TRT and run inference. 
", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\nimport numpy as np\n\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.python.saved_model import tag_constants\nfrom tensorflow.python.compiler.tensorrt import trt_convert as trt\nfrom timeit import default_timer as timer\n\ntf.get_logger().setLevel('ERROR')", "_____no_output_____" ] ], [ [ "### 2.1 Helper functions", "_____no_output_____" ] ], [ [ "def get_func_from_saved_model(saved_model_dir):\n saved_model_loaded = tf.saved_model.load(\n saved_model_dir, tags=[tag_constants.SERVING])\n graph_func = saved_model_loaded.signatures[\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]\n return graph_func, saved_model_loaded", "_____no_output_____" ], [ "def predict_and_benchmark_throughput(input_dict, model, N_warmup_run=50, N_run=500,\n result_key='predictions', batch_size=None):\n elapsed_time = []\n \n for val in input_dict.values():\n input_batch_size = val.shape[0]\n break\n if batch_size is None or batch_size > input_batch_size:\n batch_size = input_batch_size\n \n print('Benchmarking with batch size', batch_size)\n \n elapsed_time = np.zeros(N_run)\n for i in range(N_warmup_run): \n preds = model(**input_dict)\n \n # Force device synchronization with .numpy()\n tmp = preds[result_key][0].numpy() \n \n for i in range(N_run):\n start_time = timer()\n preds = model(**input_dict)\n # Synchronize\n tmp += preds[result_key][0].numpy() \n end_time = timer()\n elapsed_time[i] = end_time - start_time\n\n if i>=50 and i % 50 == 0:\n print('Steps {}-{} average: {:4.1f}ms'.format(i-50, i, (elapsed_time[i-50:i].mean()) * 1000))\n\n latency = elapsed_time.mean() * 1000\n print('Latency: {:5.2f}+/-{:4.2f}ms'.format(latency, elapsed_time.std() * 1000))\n print('Throughput: {:.0f} samples/s'.format(N_run * batch_size / elapsed_time.sum()))\n return latency", "_____no_output_____" ], [ "def trt_convert(input_path, output_path, input_shapes, explicit_batch=False,\n dtype=np.float32, precision='FP32', prof_strategy='Optimal'):\n conv_params=trt.TrtConversionParams(\n precision_mode=precision, minimum_segment_size=50,\n max_workspace_size_bytes=12*1<<30, maximum_cached_engines=1)\n converter = trt.TrtGraphConverterV2(\n input_saved_model_dir=input_path, conversion_params=conv_params,\n use_dynamic_shape=explicit_batch,\n dynamic_shape_profile_strategy=prof_strategy)\n\n converter.convert()\n\n def input_fn():\n for shapes in input_shapes:\n # return a list of input tensors\n yield [np.ones(shape=x).astype(dtype) for x in shapes]\n\n converter.build(input_fn)\n converter.save(output_path)\n ", "_____no_output_____" ], [ "def random_input(batch_size, seq_length):\n # Generate random input data\n mask = tf.convert_to_tensor(np.ones((batch_size, seq_length), dtype=np.int32))\n type_id = tf.convert_to_tensor(np.zeros((batch_size, seq_length), dtype=np.int32))\n word_id = tf.convert_to_tensor(np.random.randint(0, 1000, size=[batch_size, seq_length], dtype=np.int32))\n return {'input_mask':mask, 'input_type_ids': type_id, 'input_word_ids':word_id}", "_____no_output_____" ] ], [ [ "### 2.2 Convert the model with TF-TRT", "_____no_output_____" ] ], [ [ "bert_trt_path = bert_saved_model_path + '_trt'\ninput_shapes = [[(1, 128), (1, 128), (1, 128)]] \ntrt_convert(bert_saved_model_path, bert_trt_path, input_shapes, True, np.int32, precision='FP16')", "WARNING:absl:Found untraced functions such as restored_function_body, restored_function_body, restored_function_body, restored_function_body, restored_function_body 
while saving (showing 5 of 910). These functions will not be directly callable after loading.\n" ] ], [ [ "### 2.3 Run inference with converted model", "_____no_output_____" ] ], [ [ "trt_func, _ = get_func_from_saved_model(bert_trt_path)", "_____no_output_____" ], [ "input_dict = random_input(1, 128)\nresult_key = 'bert_encoder_1' # 'classifier'\nres = predict_and_benchmark_throughput(input_dict, trt_func, result_key=result_key)", "Benchmarking with batch size 1\nSteps 0-50 average:  4.6ms\nSteps 50-100 average:  4.6ms\nSteps 100-150 average:  4.6ms\nSteps 150-200 average:  4.6ms\nSteps 200-250 average:  4.5ms\nSteps 250-300 average:  4.5ms\nSteps 300-350 average:  4.5ms\nSteps 350-400 average:  4.5ms\nSteps 400-450 average:  4.5ms\nLatency:  4.54+/-0.24ms\nThroughput: 220 samples/s\n" ] ], [ [ "### Compare to the original function", "_____no_output_____" ] ], [ [ "func, model = get_func_from_saved_model(bert_saved_model_path)\nres = predict_and_benchmark_throughput(input_dict, func, result_key=result_key)", "Benchmarking with batch size 1\nSteps 0-50 average:  8.5ms\nSteps 50-100 average:  9.0ms\nSteps 100-150 average:  8.5ms\nSteps 150-200 average:  8.6ms\nSteps 200-250 average:  8.7ms\nSteps 250-300 average: 10.1ms\nSteps 300-350 average:  8.6ms\nSteps 350-400 average:  9.2ms\nSteps 400-450 average:  8.5ms\nLatency:  8.84+/-0.86ms\nThroughput: 113 samples/s\n" ] ], [ [ "## 3. Dynamic sequence length\nThe sequence length for the encoder is dynamic, so we can use different input sequence lengths. Here we call the original model for two sequences.", "_____no_output_____" ] ], [ [ "seq1 = random_input(1, 128)\nres1 = func(**seq1)", "_____no_output_____" ], [ "seq2 = random_input(1, 180)\nres2 = func(**seq2)", "_____no_output_____" ] ], [ [ "The converted model is optimized for a sequence length of 128 (and batch size 1). If we infer the converted model using a different sequence length, then two things can happen:\n1. If `TrtConversionParams.allow_build_at_runtime` == False: the native TF model is used for inference\n2. If `TrtConversionParams.allow_build_at_runtime` == True: a new TRT engine is created which is optimized for the new sequence length. \n\nThe first option does not provide TRT acceleration, while the second one incurs a large overhead while the new engine is constructed. In the next section we convert the model to handle multiple sequence lengths.\n\n### 3.1 TRT Conversion with dynamic sequence length", "_____no_output_____" ] ], [ [ "bert_trt_path = bert_saved_model_path + '_trt2'\ninput_shapes = [[(1, 128), (1, 128), (1, 128)], [(1, 180), (1, 180), (1, 180)]] \ntrt_convert(bert_saved_model_path, bert_trt_path, input_shapes, True, np.int32, precision='FP16',\n            prof_strategy='Range')", "WARNING:absl:Found untraced functions such as restored_function_body, restored_function_body, restored_function_body, restored_function_body, restored_function_body while saving (showing 5 of 910). These functions will not be directly callable after loading.\n" ], [ "trt_func_dynamic, _ = get_func_from_saved_model(bert_trt_path)", "_____no_output_____" ], [ "trt_res = trt_func_dynamic(**seq1)", "_____no_output_____" ], [ "result_key = 'bert_encoder_1' # 'classifier'\nres = predict_and_benchmark_throughput(seq1, trt_func_dynamic, result_key=result_key)", "_____no_output_____" ], [ "res = predict_and_benchmark_throughput(seq2, trt_func_dynamic, result_key=result_key)", "_____no_output_____" ] ] ]
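With the 'Range' profile strategy, the engine built above should cover sequence lengths between the two profile shapes (128 and 180). The sketch below is an illustrative addition, reusing the notebook's `random_input` and `predict_and_benchmark_throughput` helpers, that sweeps a few lengths inside that range; the chosen lengths are arbitrary examples, and lengths outside [128, 180] would fall back per `allow_build_at_runtime` as described above.

```python
# Sweep a few sequence lengths inside the optimization-profile range and
# report latency/throughput for the dynamic-shape TF-TRT function.
for seq_len in (128, 144, 160, 180):
    batch = random_input(1, seq_len)           # random int32 BERT inputs
    _ = predict_and_benchmark_throughput(
        batch, trt_func_dynamic,
        N_warmup_run=10, N_run=50,             # shortened runs for a quick sweep
        result_key=result_key)
```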
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
cb98314757f074cd309af49d0cba2535cbc198a7
2,450
ipynb
Jupyter Notebook
nbs/index.ipynb
lgvaz/fastcore
fa39696dee5929e9dede5a69e4614ac9f0257ba2
[ "Apache-2.0" ]
null
null
null
nbs/index.ipynb
lgvaz/fastcore
fa39696dee5929e9dede5a69e4614ac9f0257ba2
[ "Apache-2.0" ]
null
null
null
nbs/index.ipynb
lgvaz/fastcore
fa39696dee5929e9dede5a69e4614ac9f0257ba2
[ "Apache-2.0" ]
null
null
null
23.557692
273
0.56898
[ [ [ "# Welcome to fastcore\n\n> Python supercharged for the fastai library", "_____no_output_____" ], [ "## Installing", "_____no_output_____" ], [ "fastcore is on PyPI so you can just run:\n``` \npip install fastcore\n```\n\nFor an [editable install](https://stackoverflow.com/questions/35064426/when-would-the-e-editable-option-be-useful-with-pip-install), use the following:\n```\ngit clone https://github.com/fastai/fastcore\ncd fastcore\npip install -e \".[dev]\"\n```", "_____no_output_____" ], [ "## Tests", "_____no_output_____" ], [ "To run the tests in parallel, launch:\n\n```bash\nnbdev_test_nbs\n```\nor \n```bash\nmake test\n```", "_____no_output_____" ], [ "## Contributing", "_____no_output_____" ], [ "After you clone this repository, please run `nbdev_install_git_hooks` in your terminal. This sets up git hooks, which clean up the notebooks to remove the extraneous stuff stored in the notebooks (e.g. which cells you ran) which causes unnecessary merge conflicts.\n\nBefore submitting a PR, check that the local library and notebooks match. The script `nbdev_diff_nbs` can let you know if there is a difference between the local library and the notebooks.\n* If you made a change to the notebooks in one of the exported cells, you can export it to the library with `nbdev_build_lib` or `make fastcore`.\n* If you made a change to the library, you can export it back to the notebooks with `nbdev_update_lib`.", "_____no_output_____" ], [ "TODO: Write this page", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cb98365e9b1c31cc3fa7f827859ab7a9f19372bb
141,417
ipynb
Jupyter Notebook
notebooks/example_model_rank.ipynb
alphasea-dapp/alphasea-example-model
9d14072425a8e38cbef49de752fb4bd8bab0ad18
[ "CC0-1.0" ]
8
2022-01-25T14:28:09.000Z
2022-03-31T04:35:27.000Z
notebooks/example_model_rank.ipynb
yuzi-ziyu/alphasea-example-model
9d14072425a8e38cbef49de752fb4bd8bab0ad18
[ "CC0-1.0" ]
null
null
null
notebooks/example_model_rank.ipynb
yuzi-ziyu/alphasea-example-model
9d14072425a8e38cbef49de752fb4bd8bab0ad18
[ "CC0-1.0" ]
8
2022-01-26T14:31:26.000Z
2022-03-23T16:11:06.000Z
318.506757
79,072
0.904403
[ [ [ "import sys\nsys.path.append('..') # for import src\n\nimport os\nimport cloudpickle\nimport lzma\nimport pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import Ridge\nfrom sklearn.model_selection import cross_val_predict\nfrom scipy.stats import pearsonr\nimport matplotlib.pyplot as plt\nimport lightgbm as lgb\nimport talib\n\nimport src\nfrom src.ml_utils import (\n fetch_ohlcv, \n visualize_result, \n normalize_position, \n calc_position_cv,\n get_feature_columns,\n get_symbols,\n unbiased_rank,\n ewm_finite,\n)\ncloudpickle.register_pickle_by_value(src) # for model portability", "_____no_output_____" ], [ "# symbols = 'BTC,ETH'.split(',')\nsymbols = os.getenv('ALPHASEA_SYMBOLS').split(',') # 売買代金が多く、古いもの\ndf = fetch_ohlcv(symbols=symbols, with_target=True)\ndf.to_pickle('/tmp/df.pkl')\ndisplay(df)", "_____no_output_____" ], [ "class ExampleModelRank:\n def __init__(self):\n self._model = Ridge(fit_intercept=False, alpha=1e5)\n self.max_data_sec = 7 * 24 * 60 * 60 # for predict script\n\n def fit(self, df):\n df = self._calc_features(df)\n features = get_feature_columns(df)\n df['ret_rank'] = unbiased_rank(df.groupby('timestamp')['ret']) - 0.5\n df = df.dropna()\n self.symbols = get_symbols(df) # for predict script \n return self._model.fit(df[features], df['ret_rank'])\n \n def predict(self, df):\n df = self._calc_features(df)\n features = get_feature_columns(df)\n y_pred = self._model.predict(df[features])\n df['position'] = np.sign(y_pred)\n normalize_position(df)\n return df['position']\n \n def _calc_features(self, df): \n df = df.copy()\n \n for i in [2, 4, 8, 24, 48, 72]:\n df['feature_momentum_{}'.format(i)] = (df['cl'] / df.groupby('symbol')['cl'].shift(i) - 1).fillna(0)\n for i in [2, 4, 8, 24, 48, 72]:\n df['feature_rsi_{}'.format(i)] = df.groupby('symbol')['cl'].transform(lambda x: talib.RSI(x, timeperiod=i).fillna(50))\n\n for col in get_feature_columns(df):\n df[col] = unbiased_rank(df.groupby('timestamp')[col]) - 0.5\n \n return df", "_____no_output_____" ], [ "df = pd.read_pickle('/tmp/df.pkl')\n\nmodel = ExampleModelRank()\n\n# cv\ncalc_position_cv(model, df)\nvisualize_result(df.dropna())\n\n# fit and save model as portable format\nmodel.fit(df)\ndata = cloudpickle.dumps(model)\ndata = lzma.compress(data)\nwith open('/home/jovyan/data/example_model_rank.xz', 'wb') as f:\n f.write(data)", "return without cost statistics\nmean 0.0022600867999316068\nstd 0.01639809969473679\nsharpe 0.13782614095565077\nmax drawdown 0.23284295745540864\nreturn with cost statistics\nmean 0.0016258083771372833\nstd 0.01640696577183881\nsharpe 0.0990925683484906\nmax drawdown 0.23324295745540863\n" ], [ "# model validation (Just run this cell in the new kernel to make sure you saved it in a portable format.)\n\nimport os\nimport joblib\nimport pandas as pd\n\nmodel = joblib.load('/home/jovyan/data/example_model_rank.xz')\ndf = pd.read_pickle('/tmp/df.pkl')\ndf = df[['op', 'hi', 'lo', 'cl']]\nmax_timestamp = df.index.get_level_values('timestamp').max()\ndf = df.loc[max_timestamp - pd.to_timedelta(model.max_data_sec, unit='S') <= df.index.get_level_values('timestamp')]\nprint(model.predict(df))\nprint(model.symbols)", "timestamp symbol\n2022-01-18 17:00:00+00:00 ADA 0.0\n ATOM 0.0\n BNB 0.0\n BTC 0.0\n DOT 0.0\n ... \n2022-01-25 17:00:00+00:00 ETH -0.1\n LINK -0.1\n MATIC -0.1\n SOL -0.1\n XRP -0.1\nName: position, Length: 1690, dtype: float64\n['ADA', 'ATOM', 'BNB', 'BTC', 'DOT', 'ETH', 'LINK', 'MATIC', 'SOL', 'XRP']\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
cb9837da15aa5de5199502200243fab318eb3901
578,021
ipynb
Jupyter Notebook
data-science/scikit-learn/05/05-Eigenface.ipynb
le3t/ko-repo
50eb0b4cadb9db9bf608a9e5d36376f38ff5cce5
[ "Apache-2.0" ]
4
2019-10-26T01:25:30.000Z
2020-01-12T08:10:25.000Z
data-science/scikit-learn/05/05-Eigenface.ipynb
le3t/ko-repo
50eb0b4cadb9db9bf608a9e5d36376f38ff5cce5
[ "Apache-2.0" ]
3
2019-08-26T13:41:57.000Z
2019-08-26T13:44:21.000Z
data-science/scikit-learn/05/05-Eigenface.ipynb
le3t/ko-repo
50eb0b4cadb9db9bf608a9e5d36376f38ff5cce5
[ "Apache-2.0" ]
1
2018-12-07T10:06:42.000Z
2018-12-07T10:06:42.000Z
1,149.147117
315,812
0.954635
[ [ [ "# 特征脸", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "from sklearn.datasets import fetch_lfw_people", "_____no_output_____" ], [ "faces = fetch_lfw_people()", "_____no_output_____" ], [ "faces", "_____no_output_____" ], [ "faces.keys()", "_____no_output_____" ], [ "faces.data.shape", "_____no_output_____" ], [ "faces.images.shape", "_____no_output_____" ], [ "random_indexes = np.random.permutation(len(faces.data))\nX = faces.data[random_indexes]", "_____no_output_____" ], [ "example_faces = X[:36, :]\nexample_faces.shape", "_____no_output_____" ], [ "def plot_faces(faces):\n fig, axes = plt.subplots(6, 6, figsize=(10, 10),\n subplot_kw = {'xticks':[], 'yticks':[]},\n gridspec_kw=dict(hspace=0.1, wspace=0.1))\n for i, ax in enumerate(axes.flat):\n ax.imshow(faces[i].reshape(62, 47), cmap='bone')\n plt.show()", "_____no_output_____" ], [ "plot_faces(example_faces)", "_____no_output_____" ], [ "faces.target_names", "_____no_output_____" ], [ "len(faces.target_names)", "_____no_output_____" ] ], [ [ "## 特征脸", "_____no_output_____" ] ], [ [ "%%time\nfrom sklearn.decomposition import PCA\npca = PCA(svd_solver='randomized')\npca.fit(X)", "Wall time: 29.5 s\n" ], [ "pca.components_.shape", "_____no_output_____" ], [ "plot_faces(pca.components_[:36, :])", "_____no_output_____" ], [ "faces2 = fetch_lfw_people(min_faces_per_person=60)", "_____no_output_____" ], [ "faces2.data.shape", "_____no_output_____" ], [ "faces2.target_names", "_____no_output_____" ], [ "len(faces2.target_names)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb983d775d97881cf7b367cae342ebdd73e27045
9,597
ipynb
Jupyter Notebook
ipython/05. Seasonal Changes in Diet and Size Selectivity.ipynb
rah/optimal-size-selection
491b9c6900974e1e04b0fba8ee3d5d488f37b5a6
[ "MIT" ]
null
null
null
ipython/05. Seasonal Changes in Diet and Size Selectivity.ipynb
rah/optimal-size-selection
491b9c6900974e1e04b0fba8ee3d5d488f37b5a6
[ "MIT" ]
null
null
null
ipython/05. Seasonal Changes in Diet and Size Selectivity.ipynb
rah/optimal-size-selection
491b9c6900974e1e04b0fba8ee3d5d488f37b5a6
[ "MIT" ]
null
null
null
80.647059
1,084
0.739815
[ [ [ "# Seasonal Changes in Diet and Size Selectivity", "_____no_output_____" ], [ "## Introduction", "_____no_output_____" ], [ "The relationship between numbers of different types of prey items eaten by fish and the availability of those items in the environment is not clear. In some cases a particular type of prey may show a high frequency in stomach contents and yet not be the most abundant item in the resource. _Pomatoschistus microps_ was shown to consume _Corophium_ more than any other prey even when it was not the most abundant prey species, although in laboratory experiements this fish always took the closest moving prey regardless of species and size (Magnhagen and Wiederholm, 1982). Observation of the diet of pinfish, _Lagadon rhomboides_, indicates that the predation of amphipods in seagrass is directly affected by the relative susceptibility of different species to predation than their abundance (Nelson, 1979). Other studies have also indicated similar findings that predation is affected more by other factors than abundance of prey (Ware, 1973; Moore and Moore, 1976).\n\nThe problem of availability of prey items in assessing selectivity of prey is difficult to overcome. If relative availability between prey species is assumed to be constant, this problem may be avoided by investigating the effects of abundance over time in a comparative sense.\n\nChanges in the abundance of prey items in both stomach contents of _F. melanobranchus_ and benthic samples are examined for each sampling session to determine if any trends exist between prey abundance and number eaten.", "_____no_output_____" ], [ "## Methods", "_____no_output_____" ], [ "Bimonthly samples of fish and benthos were collected on 26th February, 23-26th April, 25-26th June, and 25-26th August 1983, during afternoon low tides.\n\nTo obtain a larger sample size of fish for February, specimens collected during January were included for analysis. Only fish of 20mm and above have been used in the analysis. In the benthic samples analysis has been restricted to amphipods retained by a 0.5mm sieve. Methods for sampling and data collection are as previously described.", "_____no_output_____" ], [ "## Results", "_____no_output_____" ], [ "The diet of gobies in terms of frequency of prey items shows a marked variation between bimonthly samples (Fig. 11). Overall there is a reduction in the total numbers of prey items eaten (Fig. 12a), and an increase in the number of prey categories. Harpacticoids and calanoids are the major cause of this drop in frequency of prey items, with a very marked change in the numbers of calanoids. Amphipod A shows a decrease through to June and remaining prey categories increase over this period. In August the trends are reversed, amphipod A increasing in frequency and other categories decreasing.\n\nCalculation of Shannon-Wiener diversity index summarises these changes, and shows increasing diversity through to June and then falling in August (Fig. 12b). To determine if these changes reflect significant differences in the diet a t-test as proposed by Hutchenson (in Zar, 1974) was employed to test for significant differences in dietary diversity between adjacent months. The results of this test indicate that the diet of fish in April and June is not significantly different, but the months of January/February and August show significant differences to this period (Table 4).\n\nThe abundance of amphipods throughout the total sampling period drops considerable during June (Fig. 13a). 
This decrease is due to changes in the abundance of amphipod A, whereas the combined abundance of all other amphipods gradually increases throughout the season. Observation of the average size of amphipods (Fig. 13b) indicates little change in the size frequency distribution. This is confirmed by the observation of size frequency distributions of amphipods collected from benthic samples (Figs. 14,15). Shown alongside the size frequency distribution of amphipods in the environment is the size frequency of amphipods eaten by fish. A very marked shift to smaller amphipods in the diet of fish occurs in April and June (Fig. 14). This appears to be related to the abundance of amphipods available, although the observed total abundance of amphipods in benthic samples is not greatly different in February and April. For amphipod A alone (Fig. 15) the shift to smaller amphipods occurs in June and is more clearly related to the drop in abundance of these amphipods.\n\nThese changes are summarised in Figure 16. The number of fish eating amphipods and the number of amphipods eaten per fish show an increase in April and then a steady decrease to August (Fig. 16a,b). Amphipod A follows the above trend with the exception of June, which shows a low frequency of occurrence and average number eaten. This corresponds with the low abundance of amphipods in June. The average size of amphipods eaten shows a decrease through to June and then returns to prior levels in August (Fig. 16c). These changes appear to be closely related to the changes in abundance of amphipod A (Fig. 13a), whilst all other amphipods show a steady increase throughout the sampling period. The fish appear to be switching from larger sizes of amphipod A to smaller sizes of other amphipods.\n\nThe relationship between prey abundance and numbers of each prey type eaten as shown in Figure 16 is not completely clear. When the abundance of amphipod A is compared with the proportion of amphipod A eaten, a much clearer trend is apparent (Fig. 17).", "_____no_output_____", "## Discussion", "_____no_output_____", "If the assumption of constant relative availability between different prey items is valid, the results obtained indicate that the abundance of prey items has a considerable effect on the diet of _F. melanobranchus_. The relationship is not a simple one involving the abundance of all prey items. Although the abundance of amphipods other than amphipod A shows an increase throughout the sampling period, predation by gobies appears to be more keyed to the abundance of amphipod A. This may indicate that this amphipod is a preferred prey item.\n\nIn terms of optimal foraging theory, diversity of diet is predicted to increase as the abundance of preferred food items decreases. This prediction appears to have support from the data presented here. As the abundance of amphipod A declines, dietary diversity increases. The size of prey items shows a similar trend. As the abundance of amphipod A decreases, so does the average size of amphipod eaten. This is a result of the fish eating greater numbers of amphipods other than amphipod A. As the other amphipods are generally smaller in size, the diet reflects this switch. Similar results have been obtained with planktivorous freshwater fish, where an active preference is shown for larger items. 
When the abundance of these falls, smaller items will be pursued (Bartell, 1982; Eggers, 1982).\n\nAs the size frequency of amphipods does not markedly change whilst abundance changes, the behaviour exhibited by _F. melanobranchus_ indicates a density dependent aspect to predation for larger prey items. Evidence for a density dependent relationship is presented in Chapter 6, and will be further discussed in that section.\n\nThe habits of the various prey items may also be of relevance. Only one amphipod (AMA) has been identified as epifaunal. All other amphipods appear to be epibenthic or domicolous (see Appendix A). As amphipod A is epifaunal, its availability to fish may be greater than that of the other amphipods (Nelson, 1979). This may be one possible causal factor in the preference shown by the fish.\n\nAlthough the results appear to be consistent with theoretical predictions with respect to amphipods, the trends observed for harpacticoid and calanoid copepods do not appear to fit. These are relatively small prey items which, if the above discussion is applicable, should vary in dietary abundance in a similar manner to the small amphipods. Without knowledge of the changes in benthic abundance of these items it is difficult to propose any explanation. Bartell (1982) also notes that one prey item did not follow the trends as evidenced by other prey items.", "_____no_output_____" ] ] ]
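The dietary-diversity comparison above relies on the Shannon-Wiener index and Hutcheson's t-test as cited from Zar (1974). A minimal sketch of that calculation is given below; the prey-count vectors are hypothetical stand-ins for the stomach-content data, so this illustrates the method rather than reproducing the thesis results.

```python
import numpy as np

def shannon_diversity(counts):
    """Shannon-Wiener index H' = -sum(p_i * ln p_i) from raw prey counts."""
    counts = np.asarray(counts, dtype=float)
    p = counts / counts.sum()
    p = p[p > 0]  # ignore empty prey categories
    return -np.sum(p * np.log(p))

def hutcheson_t(counts1, counts2):
    """Hutcheson's t statistic and degrees of freedom for comparing two H' values."""
    c1, c2 = np.asarray(counts1, dtype=float), np.asarray(counts2, dtype=float)
    n1, n2 = c1.sum(), c2.sum()

    def var_h(c, n):
        # First-order approximation of the variance of H'
        p = c[c > 0] / n
        return (np.sum(p * np.log(p) ** 2) - np.sum(p * np.log(p)) ** 2) / n

    v1, v2 = var_h(c1, n1), var_h(c2, n2)
    t = (shannon_diversity(c1) - shannon_diversity(c2)) / np.sqrt(v1 + v2)
    df = (v1 + v2) ** 2 / (v1 ** 2 / n1 + v2 ** 2 / n2)
    return t, df

# Hypothetical counts for four prey categories in two adjacent sampling months
t, df = hutcheson_t([120, 45, 10, 5], [60, 50, 30, 20])
```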
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
cb9849369590dd96b7f886d191f634fdc42c59ab
4,384
ipynb
Jupyter Notebook
Day_11_Lecture_2_Assignment.ipynb
Lucnalie/Thinkful
1e451134e9d5b1d151bc1de4c3f14b7cbe5cab81
[ "MIT" ]
null
null
null
Day_11_Lecture_2_Assignment.ipynb
Lucnalie/Thinkful
1e451134e9d5b1d151bc1de4c3f14b7cbe5cab81
[ "MIT" ]
null
null
null
Day_11_Lecture_2_Assignment.ipynb
Lucnalie/Thinkful
1e451134e9d5b1d151bc1de4c3f14b7cbe5cab81
[ "MIT" ]
null
null
null
37.470085
346
0.57208
[ [ [ "<a href=\"https://colab.research.google.com/github/Lucnalie/Thinkful/blob/master/Day_11_Lecture_2_Assignment.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# Assignment\n\nFor this assignment, you are going to apply statistical inference to the [Titanic dataset](https://tf-assets-prod.s3.amazonaws.com/tf-curric/data-science/titanic.csv).\n\n\nTry to answer the following questions. Include a hypothesis and the test assumptions. **If the assumptions are valid**, try to conduct the t-test and additionally calculate by hand the 95% confidence interval derived from the hypothesis that you are evaluating. Also try to come up with some valid findings from the data.\n\n\n\n1. Is it true that the younger a passenger was, the greater their chance of survival? Is this difference statistically significant? \n2. Is there a correlation between fare price and survival? Again, is this difference statistically significant?\n\nAs you are conducting the analysis, consider what other variables may be accounting for any significant differences you find. ", "_____no_output_____" ], [ "##### More specifically, the tasks that you need to complete in your deliverables are:\n\n1. Familiarize yourself with the dataset. Review its structure and focus on the fields necessary to answer the above-mentioned questions.\n\n2. State the null and the alternative hypotheses that address the questions.\n\n3. Manipulate the dataset accordingly in order to prepare it for the statistical tests.\n\n4. Perform an exploratory analysis with summary statistics and corresponding visualizations, in order to gain a clearer view of the distributions of the samples. Focus on their differences.\n\n5. Perform the necessary normality checks in order to make sure that you can conduct the statistical tests.\n\n6. Apply the t-test statistic if you successfully pass the normality checks.\n\n7. For this exercise, you will also need to write a function to calculate the confidence interval using the standard error for comparing two means (used in the t-statistic):\n\n$$\n \\bar{X_1} - \\bar{X_2} \\pm z*\\sqrt{\\dfrac{s_1^2}{n_1} + \\dfrac{s_2^2}{n_2}}\n$$\n\n- $z$ is the critical value\n- $s_1$ is the sample 1 standard deviation\n- $s_2$ is the sample 2 standard deviation\n\n- Let $z=1.96$ for a 95% confidence level\n\n- Use the following method signature:\n\n `def get_95_ci(data1, data2)`\n \n\n8. Elaborate on the results of your analyses.", "_____no_output_____" ] ], [ [ "", "_____no_output_____" ] ] ]
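Task 7's formula translates almost directly into code. One possible sketch of the requested helper is shown below; it follows the method signature given in the assignment, uses the sample standard deviations $s_1$ and $s_2$, and assumes `data1`/`data2` are array-like numeric samples (the actual implementation is left to the assignment).

```python
import numpy as np

def get_95_ci(data1, data2):
    """95% confidence interval for the difference in means, using z = 1.96."""
    data1, data2 = np.asarray(data1, dtype=float), np.asarray(data2, dtype=float)
    diff = data1.mean() - data2.mean()
    # Standard error for comparing two means: sqrt(s1^2/n1 + s2^2/n2)
    se = np.sqrt(data1.var(ddof=1) / len(data1) + data2.var(ddof=1) / len(data2))
    return diff - 1.96 * se, diff + 1.96 * se
```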
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ] ]
cb98495860f09a9b252e36880d5d8f7851cbba16
835
ipynb
Jupyter Notebook
report/header.ipynb
OSU-CS-325/Project_Two_Coin_Change
266b415c876614ec83f4b9d7c6e157cb57ccc42a
[ "MIT" ]
null
null
null
report/header.ipynb
OSU-CS-325/Project_Two_Coin_Change
266b415c876614ec83f4b9d7c6e157cb57ccc42a
[ "MIT" ]
1
2016-10-25T18:23:53.000Z
2016-10-26T01:43:56.000Z
report/header.ipynb
OSU-CS-325/Project_Two_Coin_Change
266b415c876614ec83f4b9d7c6e157cb57ccc42a
[ "MIT" ]
null
null
null
16.372549
34
0.493413
[ [ [ "## Project Two: Coin Change", "_____no_output_____" ], [ "### Group 12", "_____no_output_____" ], [ "#### Group Members\n\n* Kyle Guthrie\n* Michael C. Stramel\n* Alex Miranda", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown" ] ]
cb984a4e6d750c501e348c91b26400b643db275c
108,251
ipynb
Jupyter Notebook
segment_analytics_tool.ipynb
braze-inc/segment_analytics_tool
fecf4813b52f3f5f4d83ea74d3b27fb600d2e7ba
[ "MIT" ]
2
2022-03-29T13:26:46.000Z
2022-03-29T15:58:40.000Z
segment_analytics_tool.ipynb
braze-inc/segment_analytics_tool
fecf4813b52f3f5f4d83ea74d3b27fb600d2e7ba
[ "MIT" ]
null
null
null
segment_analytics_tool.ipynb
braze-inc/segment_analytics_tool
fecf4813b52f3f5f4d83ea74d3b27fb600d2e7ba
[ "MIT" ]
null
null
null
52.44719
678
0.611994
[ [ [ "# Packages", "_____no_output_____" ] ], [ [ "#!/usr/bin/env python\n# coding: utf-8\nimport requests\nimport numpy as np\nimport json\nimport os\nimport time as tm\nimport pandas as pd\nimport http.client\nimport io\nimport boto3\nimport zipfile\nfrom threading import Thread\nimport logging\nfrom datetime import datetime\nimport time\nfrom operator import itemgetter\nimport xlsxwriter", "_____no_output_____" ] ], [ [ "# Define API Call Variables\n\nIn the following codeblock you'll need to input data for the following variables:\n\n***app_group_api_key*** - This is the string that allows you to access the API calls for an App Group. The App Group API Key can be found in the Braze Dashboard under Settings -> Developer Console -> Rest API Keys. An App Group API key that does not have sufficient access grants may result in an error.\n\n***API_URL*** - This is the URL used to make the Rest API call. The current value of 'https://rest.iad-01.braze.com' may need to be updated to match the Cluster of your Braze instance. For example, the cluster of your Braze instance may be 02, and you would update the URL to 'https://rest.iad-02.braze.com'. You can find the integer value for the API URL by checking the value next to \"dashboard-0\" in the URL you use to access the Braze Dashboard.\n\n***EXPORT_DATE*** - This field is optional, and is only needed if you have already run the segment export to the same S3 bucket on a prior date. It can be left blank and will export the most recent user profile data for the selected SEGMENT_ID. Otherwise, enter the date when the export was previously run in the following format: 'YYYY-MM-DD'. All other date formats will fail to return results.\n\n***SEGMENT_ID*** - This is the Segment API Identifier used to return user data from the segment for the API call. This script can only return results for one segment at a time, and it is recommended that the segment have no more than 200k users due to hardware limitations that were verified during testing. The Segment API Identifier can be found in the Braze Dashboard under Settings -> Developer Console -> Additional API Identifiers. Under the dropdown menu select 'Segments' and then click the 'Search for a value' dropdown to see a list of segments. 
Select the segment name that you wish to return results for and copy the value listed under \"API Identifier\".\n\n***The App Group API Key and Segment ID should never be shared outside of your organization, or be saved in a publicly accessible workspace.***", "_____no_output_____" ] ], [ [ "app_group_api_key = \nnow = datetime.now().strftime(\"%Y-%m-%d\")\nAPI_URL = \"https://rest.iad-01.braze.com\"\nEXPORT_DATE = []\nSEGMENT_ID =\n\nREQUEST_HEADERS = {\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer ' + app_group_api_key\n}\n\nFIELDS_TO_EXPORT = [\"braze_id\", \"custom_attributes\", \"country\", \"total_revenue\", \"push_subscribe\",\n \"email_subscribe\", \"custom_events\", \"purchases\", \"devices\", \"created_at\", \"apps\",\n \"campaigns_received\", \"canvases_received\", \"cards_clicked\", \"push_tokens\"]\n", "_____no_output_____" ] ], [ [ "# Define S3 Client Variables & Initialize the S3 Client\n\nThe codeblock below will initialize the client for Amazon S3 once values for the following variables have been added:\n\n***access_key*** - Listed under \"AWS Access ID\"\n\n***secret_key*** - Listed under \"AWS Secret Access Key\"\n\n***region_name*** - The region that your S3 bucket is listed under\n\n***user_export_bucket_name*** - The name of the S3 storage bucket that you would like to store the User Profile Export in. \n\nAll of these values, with the exception of the user_export_bucket_name, can be found in the Braze Dashboard under \"Integrations\" -> \"Technology Partners\" -> \"AWS Cloud Storage\" -> \"Data Export Using AWS Credentials\".\n\nIf there are no values currently listed in this section of the Braze Dashboard, you will need to work with your System Admin to either create them for the first time, or access them. In the event that you are using MFA for AWS S3, you will need to create an account that does not require the use of MFA, as otherwise the export will fail.\n\n***This script will not function without the proper integration between Braze and Amazon S3. While it is possible to modify the script so that the files are returned to your local machine, that functionality requires additional development.*** \n\n*You can test your credentials by entering the proper values under 'AWS Access Key', 'AWS Secret Access Key' and 'AWS S3 Bucket Name' and then pressing 'Test Credentials'. If you see a success message, press Save. If you do not see the success message, you'll need to work with your System Admin to create an account and S3 bucket with the correct access controls.*\n\n**Necessary to point out**: Keep in mind the costs related to a high number of `GET` requests for the user profiles. 
While these costs are minimal, S3 storage is not free, so keep that in mind before making a high volume of API requests.\n\nOnce the S3 credentials have been tested and verified via the Braze Dashboard, you should be all set to store files from the `POST` request for the [User Profiles by Segment endpoint](https://www.braze.com/docs/api/endpoints/export/user_data/post_users_segment/).\n\nAfter the variables have been entered, the S3 client will be initialized, and functions will be created so that the ZIP files returned from the API request to the S3 bucket can be processed and transformed into a pandas dataframe.", "_____no_output_____" ] ], [ [ "access_key = \nsecret_key = \nregion_name = \nuser_export_bucket_name = \n\ns3 = boto3.resource(\n    service_name='s3',\n    aws_access_key_id=access_key,\n    aws_secret_access_key=secret_key,\n    region_name=region_name\n)\nuser_export_bucket = s3.Bucket(user_export_bucket_name)", "_____no_output_____" ] ], [ [ "# Segment List Endpoint\n\nHere we'll call the [Segment List API Endpoint](https://www.braze.com/docs/api/endpoints/export/segments/get_segment/) in order to return some data needed to build the dataframe and later to return user data from that segment.", "_____no_output_____" ] ], [ [ "page = 0\nbraze_segments = []\nwhile True:\n    endpoint = f\"{API_URL}/segments/list?page={page}&sort_direction=desc\"\n    results = requests.get(endpoint, headers=REQUEST_HEADERS).json()['segments']\n    if not results:\n        break\n    braze_segments.extend(results)\n    page += 1\n\nbraze_segments_df = pd.DataFrame(braze_segments)\nbraze_segments_df.columns = ['segment_id', 'segment_name',\n                             'segment_analytics_tracking_enabled', 'segment_tags']\n\nbraze_segments_df = braze_segments_df[braze_segments_df['segment_id'] == SEGMENT_ID]", "_____no_output_____" ] ], [ [ "# Defining Functions to Process User Profiles Stored in S3", "_____no_output_____" ] ], [ [ "def process_s3_profiles_to_dataframe(objects):\n    \"\"\"Build a DataFrame chunk by chunk and return it.\n\n    Temporary function for testing the efficiency of building a DataFrame as we go.\n    There are a number of great hosting solutions available, but most come with a memory limit;\n    this function is left in for troubleshooting potential memory issues there.\n\n    Parameters\n    ----------\n    objects: s3.ObjectSummary\n        S3 object iterator returned from `bucket.objects.filter` method\n\n    Returns\n    -------\n    pd.DataFrame\n        New dataframe with the exported user profiles from the selected objects\n    \"\"\"\n\n    frame_chunks = []\n    for obj in objects:\n        segment_id = obj.key.split('/')[1]\n        with io.BytesIO(obj.get()[\"Body\"].read()) as zip_bytes:\n            user_chunk = process_s3_zip_object(zip_bytes, segment_id)\n            frame_chunk = pd.DataFrame(user_chunk)\n            frame_chunks.append(frame_chunk)\n    return pd.concat(frame_chunks, ignore_index=True)\n\n\ndef process_s3_profiles(objects, user_data):\n    \"\"\"Extract and process zip user profiles obtained from the user segment export.\n\n    Parameters\n    ----------\n    objects : s3.ObjectSummary\n        S3 object iterator returned from `bucket.objects.filter` method\n    user_data : list\n        Store for the extracted profile objects\n    \"\"\"\n\n    for obj in objects:\n        segment_id = obj.key.split('/')[1]\n        with io.BytesIO(obj.get()[\"Body\"].read()) as zip_bytes:\n            user_chunk = process_s3_zip_object(zip_bytes, segment_id)\n            user_data.extend(user_chunk)\n\n\ndef process_s3_zip_object(zip_bytes, segment_id):\n    \"\"\"Extract the zip file contents and process each text file within that zip file.\n\n    Text files extracted contain user data JSONs, one JSON object per line.\n\n    Parameters\n    ----------\n    zip_bytes : io.BytesIO\n    segment_id : string\n\n    Returns\n    -------\n    list\n        Extracted user profile dictionaries from the zip file\n    \"\"\"\n\n    profiles = []\n    with zipfile.ZipFile(zip_bytes) as open_zip:\n        for user_file in open_zip.namelist():\n            with open_zip.open(user_file) as users:\n                for line in users:\n                    user_profile = json.loads(line.decode('utf-8'))\n                    user_profile['segment_id'] = segment_id\n                    profiles.append(user_profile)\n    return profiles", "_____no_output_____" ] ], [ [ "# Define Functions for Processing Campaign Data\n\nThe below codeblock defines functions to enable `GET` requests from the [Campaign Details Endpoint](https://www.braze.com/docs/api/endpoints/export/campaigns/get_campaign_details/) for one or many campaign_ids. It also creates functions to enable the creation of the Channel Combo and the Custom Events used in Campaigns.\n\nThe URL may need to be updated in the same manner as above following 'iad-0', depending on the cluster of your Braze Instance. For example, you may need to update the string \"https://rest.iad-01.braze.com/campaigns/\" to \"https://rest.iad-02.braze.com/campaigns/\" if the Cluster for your Braze instance is 02.\n\nThe MAX_RETRIES variable is the number of times that the script will attempt to make a request to the API Endpoint. If the number is increased, the script will take longer to return results from the Campaign Details Endpoint.", "_____no_output_____" ] ], [ [ "MAX_RETRIES = 3\n\ndef process_campaign_id(campaign_id, endpoint):\n    requests_made = 0\n    while requests_made < MAX_RETRIES:\n        try:\n            response = requests.get(\"https://rest.iad-01.braze.com/campaigns/\"+endpoint+\"?campaign_id=\"+campaign_id, headers=REQUEST_HEADERS\n            )\n            return response.json()\n        except requests.exceptions.HTTPError:\n            requests_made += 1\n            tm.sleep(0.5)\n            if requests_made >= MAX_RETRIES:\n                raise\n\n\n\n### Processes a range of campaign ids\ndef process_campaign_id_range(campaign_id_range, endpoint, store=None):\n    \"\"\"Process a number of ids, storing the results in a dict\"\"\"\n    if store is None:\n        store = {}\n    for campaign_id in campaign_id_range:\n        store[campaign_id] = process_campaign_id(campaign_id, endpoint)\n    return store\n\n\n\ndef threaded_process_campaigns(nthreads, campaign_id_range, endpoint):\n    \"\"\"Process the id range in a specified number of threads\"\"\"\n    try:\n        store = {}\n        threads = []\n        for i in range(nthreads):\n            campaign_ids = campaign_id_range[i::nthreads]\n            t = Thread(target=process_campaign_id_range,\n                       args=(campaign_ids, endpoint, store))\n            threads.append(t)\n\n        for t in threads:\n            t.start()\n\n        for t in threads:\n            t.join()\n\n        return store\n\n    except Exception as e:\n        logging.error(\"Threading exception: \"+str(e))\n        tm.sleep(30)\n\n\ndef get_campaign_id(df_column):\n    try:\n        return df_column.get('api_campaign_id')\n    except AttributeError:\n        return float('NaN')\n\n\ndef get_message_variation_id(df_column):\n    try:\n        return df_column.get('variation_api_id')\n    except AttributeError:\n        return float('NaN')\n\n\ndef parse_channel(row):\n    if row.num_channels > 0:\n        return row.channel\n    elif type(row.campaigns_received) != dict:\n        return \"No Messages Received\"\n    else:\n        return \"Unable to Retrieve Campaign Details\"\n\ndef parse_channel_combo(row):\n    if type(row.channel_combo) != float:\n        return row.channel_combo\n    elif row.channel == \"No Messages Received\":\n        return \"No Messages Received\"\n    else:\n        return \"Unable to Retrieve Campaign Details\"
\n\n\ndef get_campaign_custom_event(df_column):\n    try:\n        return df_column.get('custom_event_name')\n    except AttributeError:\n        return float('NaN')", "_____no_output_____" ] ], [ [ "# Define Field Getters to Enable Segment Analytics\n\nThe functions defined in the codeblocks below will get the corresponding fields from nested dictionaries stored in dataframe columns that are returned from the User Profiles Endpoint.", "_____no_output_____" ] ], [ [ "def get_email_open_engagement(df_column):\n    try:\n        return df_column.get('opened_email')\n    except AttributeError:\n        return False\n\n\ndef get_email_click_engagement(df_column):\n    try:\n        return df_column.get('clicked_email')\n    except AttributeError:\n        return False\n\n\ndef get_push_engagement(df_column):\n    try:\n        return df_column.get('opened_push')\n    except AttributeError:\n        return False\n\n\ndef get_iam_engagement(df_column):\n    try:\n        return df_column.get('clicked_in_app_message')\n    except AttributeError:\n        return False\n\n\ndef get_conversions(df_column):\n    try:\n        return df_column.get('converted')\n    except AttributeError:\n        return False\n\n### Calculate an engagement rate as the share of engaged rows in a series\n\n\ndef calc_engagement(series):\n    return series.sum()/series.count()\n\n\ndef get_cards_clicked(row):\n    if row.channel == 'No Messages Received':\n        return 0\n    else:\n        return len(row.cards_clicked)", "_____no_output_____" ], [ "def days_between(d1, d2):\n    d1 = datetime.strptime(str(d1), '%Y-%m-%dT%H:%M:%S.%f%z')\n    d2 = datetime.strptime(str(d2), '%Y-%m-%dT%H:%M:%S.%f%z')\n    return (d2 - d1).days\n\n\ndef get_custom_event_name(df_column):\n    try:\n        return df_column.get('name')\n    except AttributeError:\n        return float('NaN')\n\n\ndef get_custom_event_count(df_column):\n    try:\n        return df_column.get('count')\n    except AttributeError:\n        return float('NaN')\n\n\ndef get_custom_event_first_date(df_column):\n    try:\n        return df_column.get('first')\n    except AttributeError:\n        return float('NaN')\n\n\ndef get_custom_event_last_date(df_column):\n    try:\n        return df_column.get('last')\n    except AttributeError:\n        return float('NaN')", "_____no_output_____" ], [ "def get_notifications_enabled(df_column):\n    try:\n        return df_column.get('notifications_enabled')\n    except AttributeError:\n        return False\n\n\ndef get_token(df_column):\n    try:\n        return df_column.get('token')\n    except AttributeError:\n        return 'None'\n\n\ndef get_platform(df_column):\n    try:\n        return df_column.get('platform')\n    except AttributeError:\n        return 'No platform token'", "_____no_output_____" ] ], [ [ "# Export Data from S3 for either Today or a Prior Date\n\nThe below codeblock will do the following depending on the value entered above for the EXPORT_DATE variable:\n\n***If the EXPORT_DATE is left blank***:\n\n- Make a request to the [Users by Segment Endpoint](https://www.braze.com/docs/api/endpoints/export/user_data/post_users_segment/)\n\n- Process the user profile data returned to S3 following the successful request for the selected SEGMENT_ID\n\n- Display the number of user profiles that have been returned.\n\nIf the value returned is 0, it is likely that some of the above variables were not configured properly. You'll need to double-check and try again.\n\nIf the number of user profiles exported is low, it could be because of latency between the Braze API and S3. 
Try running the code block again and see if the number of users returned increases.\n\n***If the EXPORT_DATE is a properly formatted date from a prior export***\n\n- Process the user profile data returned to S3 following the successful request for the selected SEGMENT_ID\n\n- Display the number of user profiles that have been returned.\n\nIf the EXPORT_DATE is not formatted 'YYYY-MM-DD', the below codeblock will fail and you will be asked to try again.\n\nIf completed successfully, the segment_df variable will contain a dataframe of all user profiles from the segment, along with data from the fields listed out in the *FIELDS_TO_EXPORT* variable. Each row in the dataframe corresponds to one user profile within the selected segment.", "_____no_output_____" ] ], [ [ "if len(EXPORT_DATE) == 0:\n    object_prefix_by_segment_id = []\n    payload = {\n        \"segment_id\": SEGMENT_ID,\n        \"fields_to_export\": FIELDS_TO_EXPORT\n    }\n    res = requests.post(f\"{API_URL}/users/export/segment\",\n                        headers=REQUEST_HEADERS, json=payload)\n    res_data = res.json()\n    print(res_data)\n    EXPORT_DATE = datetime.today().strftime('%Y-%m-%d')\n    objects = user_export_bucket.objects.filter(\n        Prefix=f\"segment-export/{SEGMENT_ID}/{EXPORT_DATE}\")\n    print(\"Waiting for data to be returned from the Users by Segment Endpoint.\")\n    tm.sleep(300)\n    start = time.time()\n    user_data = []\n    print(\"Reading exported user data from S3\")\n    process_s3_profiles(objects, user_data)\n    print(f\"Took {(time.time() - start):.2f}s\")\n    print(len(user_data))\nelif len(EXPORT_DATE) == 10 and EXPORT_DATE.count('-') == 2:\n    year, month, day = EXPORT_DATE.split('-')\n    isValidDate = True\n    try:\n        datetime(int(year), int(month), int(day))\n    except ValueError:\n        print(\"Input date is not in the valid YYYY-MM-DD format. Please return to the Define Variables cell and try again with a properly formatted date.\")\n        isValidDate = False\n    if isValidDate:\n        objects = user_export_bucket.objects.filter(\n            Prefix=f\"segment-export/{SEGMENT_ID}/{EXPORT_DATE}\")\n        start = time.time()\n        user_data = []\n        print(\"Reading exported user data from S3\")\n        process_s3_profiles(objects, user_data)\n        print(f\"Took {(time.time() - start):.2f}s\")\n        print(len(user_data))\nelse:\n    print(\"EXPORT_DATE is neither blank nor properly formatted. Please return to the Define Variables cell and enter a date as 'YYYY-MM-DD'.\")", "_____no_output_____" ], [ "segment_df_raw = pd.DataFrame(user_data)\\\n    .dropna(subset=['braze_id'])\n\n\nsegment_df = pd.merge(segment_df_raw, braze_segments_df,\n                      how='left',\n                      left_on=['segment_id'],\n                      right_on=['segment_id'],\n                      suffixes=['_from_user_segment_endpoint', '_from_segment_list'])", "_____no_output_____" ] ], [ [ "# Creating Separate Dataframes for Each KPI\n\nThe below codeblock will split the segment_df into the appropriate dataframes so that the following analytical outputs can be viewed for the selected Segment:\n\n1. Rolling Retention\n2. Purchasing Rates\n3. Purchase Retention\n4. Session Engagement Metrics\n5. Custom Event Metrics\n6. Message Engagement Rates\n7. Custom Events used in Campaigns\n8. Opt-In Rates for Push and Email\n\n1-5 will also be crossed by the following dimensions from Message Engagement so that the impact of different messaging strategies can be viewed at the segment level:\n\n- Channel\n- Channel Combo\n- Campaign Tag\n\nIn the event that a segment fails one of the checks below, you can skip those sections in the script. For example, say you are tracking session data, but not purchasing data. 
Skip the purchasing codeblocks and comment out the final outputs associated with those metrics.", "_____no_output_____" ] ], [ [ "rolling_retention_columns = ['braze_id', 'segment_id',\n 'apps', 'segment_name', 'segment_tags']\npurchasing_stats_columns = ['braze_id',\n 'segment_id', 'apps', 'segment_name', 'segment_tags', 'purchases', 'total_revenue']\nsessions_stats_columns = ['braze_id', 'segment_id',\n 'apps', 'segment_name', 'segment_tags']\ncustom_events_stats_columns = ['braze_id', 'segment_id', 'apps',\n 'segment_name', 'segment_tags', 'custom_events']\nengagement_stats_columns_all = ['braze_id', 'segment_id', 'country', 'apps', 'segment_name',\n 'segment_tags', 'campaigns_received', 'canvases_received', 'cards_clicked']\nengagement_stats_columns_canvas = ['braze_id', 'segment_id', 'country', 'apps',\n 'segment_name', 'segment_tags', 'canvases_received', 'cards_clicked']\nengagement_stats_columns_campaigns = ['braze_id', 'segment_id', 'country', 'apps',\n 'segment_name', 'segment_tags', 'campaigns_received', 'cards_clicked']\nopt_ins_columns_all = ['braze_id', 'segment_id', 'segment_name', 'apps', 'push_tokens', 'email_subscribe', 'email_opted_in_at', 'push_subscribe',\n 'push_opted_in_at', 'email_unsubscribed_at', 'push_unsubscribed_at']\nopt_ins_columns_email = ['braze_id', 'segment_id', 'segment_name', 'apps', 'push_tokens',\n 'email_subscribe', 'email_opted_in_at', 'email_unsubscribed_at']\nopt_ins_columns_push = ['braze_id', 'segment_id', 'segment_name', 'apps', 'push_tokens',\n 'push_subscribe', 'push_opted_in_at', 'push_unsubscribed_at']\n\nusers_have_sessions = \"apps\" in segment_df\nusers_have_purchases = \"purchases\" in segment_df\nusers_have_custom_events = \"custom_events\" in segment_df\nusers_received_campaigns = \"campaigns_received\" in segment_df\nusers_received_canvas = \"canvases_received\" in segment_df\nusers_subscribed_email = \"email_subscribe\" in segment_df\nusers_subscribed_push = \"push_subscribe\" in segment_df\n\n\nif users_have_sessions == True:\n segment_rolling_retention_pre_apps = segment_df[rolling_retention_columns]\n segment_rolling_retention_pre_apps = segment_rolling_retention_pre_apps.reset_index()\nelse:\n print(\"Users in these Segments do not have Retention Data\")\n\nif users_have_purchases == True:\n segment_purchasing_stats_pre_apps = segment_df[purchasing_stats_columns]\n segment_purchasing_stats_pre_apps = segment_purchasing_stats_pre_apps.reset_index()\nelse:\n print(\"Users in these Segments do not have Purchasing Data\")\n\nif users_have_sessions == True:\n segment_sessions_stats_pre_apps = segment_df[sessions_stats_columns]\n segment_sessions_stats_pre_apps = segment_sessions_stats_pre_apps.reset_index()\nelse:\n print(\"Users in these Segments do not have Session Data\")\n\nif users_have_custom_events == True:\n segment_custom_event_stats_pre_custom_event = segment_df[custom_events_stats_columns]\n segment_custom_event_stats_pre_custom_event = segment_custom_event_stats_pre_custom_event.reset_index()\nelse:\n print(\"Users in these Segments do not have Custom Event Data\")\n\nif (users_received_campaigns == True and users_received_canvas == True):\n segment_engagement_stats_pre_apps = segment_df[engagement_stats_columns_all]\nelif (users_received_campaigns == False and users_received_canvas == True):\n segment_engagement_stats_pre_apps = segment_df[engagement_stats_columns_canvas]\nelif (users_received_campaigns == True and users_received_canvas == False):\n segment_engagement_stats_pre_apps = 
segment_df[engagement_stats_columns_campaigns]\nelif (users_received_campaigns == False and users_received_canvas == False):\n    print(\"Users in these Segments do not have Engagement Data\")\n\nif (users_subscribed_email == True and users_subscribed_push == True):\n    segment_opt_in_stats_pre_apps = segment_df[opt_ins_columns_all]\nelif (users_subscribed_email == False and users_subscribed_push == True):\n    segment_opt_in_stats_pre_apps = segment_df[opt_ins_columns_push]\nelif (users_subscribed_email == True and users_subscribed_push == False):\n    segment_opt_in_stats_pre_apps = segment_df[opt_ins_columns_email]\nelif (users_subscribed_email == False and users_subscribed_push == False):\n    print(\"Users in these Segments do not have Opt-In Data\")", "_____no_output_____" ] ], [ [ "# Campaign & Engagement Data\n\nThe below codeblocks will complete the following tasks:\n\n- Return all of the campaign ids received by the exported Segment\n\n- Send `GET` requests to the [Campaign Details API](https://www.braze.com/docs/api/endpoints/export/campaigns/get_campaign_details/#campaign-details-endpoint-api-response) and process the data that is returned.\n\n  - Users that received messages from campaign_ids that do not have details returned will be assigned the 'Unable to Retrieve Campaign Details' value for both Channel and Channel Combo.\n\n- Create the Channel_Combo dimension. Please note that the Channel Combo is being created at the Campaign Level and not the User Level. \n\n- Remove Users in the control_group for multivariate campaigns\n\n- Clean the Channel names and Channel Combo names\n\n- Create the dataframe used to calculate Message Engagement Metrics\n\n- Create the dataframes used to cross other metrics with Channel, Channel Combo, and Campaign Tag\n", "_____no_output_____" ] ], [ [ "segment_engagement_temp = segment_engagement_stats_pre_apps.explode(\n    'campaigns_received')\nsegment_engagement_temp['campaign_id'] = list(\n    map(get_campaign_id, segment_engagement_temp['campaigns_received']))\n\nbraze_campaigns = segment_engagement_temp[segment_engagement_temp['campaign_id'].isnull(\n) == False]['campaign_id']\nbraze_campaigns = list(set(braze_campaigns))", "_____no_output_____" ], [ "campaign_dict = threaded_process_campaigns(\n    10, braze_campaigns, 'details')", "_____no_output_____" ], [ "campaign_details_df = pd.DataFrame.from_dict(campaign_dict, orient='index')\ncampaign_details_df = campaign_details_df.reset_index()\ncampaign_details_df.rename(columns={\"index\": \"campaign_id\"},\n                           inplace=True)\ncampaign_details_df = campaign_details_df[campaign_details_df['message'] == 'success']\ncampaign_details_df['num_channels'] = campaign_details_df.channels.apply(len)\ncampaign_details_df = campaign_details_df[campaign_details_df['num_channels'] > 0]", "_____no_output_____" ], [ "joined_campaign = pd.merge(segment_engagement_temp, campaign_details_df,\n                           how='left',\n                           left_on=['campaign_id'],\n                           right_on=['campaign_id'],\n                           suffixes=['_from_segments', '_from_campaigns'])\nsegment_data_engagement_stats_temp = joined_campaign\nsegment_data_engagement_stats_temp.rename(columns={\"channels\": \"channel\"},\n                                          inplace=True)\nsegment_data_engagement_stats_temp['in_control'] = segment_data_engagement_stats_temp.campaigns_received.apply(\n    lambda x: x.get('in_control') if type(x) != float else x)\nsegment_data_engagement_stats_temp = segment_data_engagement_stats_temp[segment_data_engagement_stats_temp['in_control'] != True]\n\nsegment_data_engagement_stats_temp.loc[:, 'channel'] = 
segment_data_engagement_stats_temp.apply(\n    parse_channel, axis=1)\nsegment_data_engagement_stats_temp = segment_data_engagement_stats_temp.explode(\n    'channel')", "_____no_output_____" ], [ "segment_data_engagement_stats_temp['channel'] = segment_data_engagement_stats_temp.channel.apply(\n    lambda x: 'mobile_push' if x == 'android_push' or x == 'ios_push' else x)\nsegment_data_engagement_stats_temp['channel'] = segment_data_engagement_stats_temp.channel.apply(\n    lambda x: 'in_app_message' if x == 'legacy_in_app_message' or x == 'trigger_in_app_message ' or x == 'trigger_in_app_message' else x)\nsegment_data_engagement_stats_temp['channel'] = segment_data_engagement_stats_temp.channel.apply(\n    lambda x: x.replace(\"_\", \" \"))\nsegment_data_engagement_stats_temp['channel'] = segment_data_engagement_stats_temp.channel.apply(\n    lambda x: x.title())", "_____no_output_____" ], [ "segment_data_channel_combo = segment_data_engagement_stats_temp[(segment_data_engagement_stats_temp['channel'] != 'No Messages Received')]\nsegment_data_channel_combo = segment_data_channel_combo[segment_data_channel_combo['channel'] != 'Unable To Retrieve Campaign Details']\nsegment_data_channel_combo = segment_data_channel_combo[[\n    'braze_id', 'channel']].drop_duplicates()\nsegment_data_channel_combo = segment_data_channel_combo.dropna(subset=[\n    'channel'])\nsegment_data_channel_combo = segment_data_channel_combo.groupby('braze_id')\nsegment_data_channel_combo = segment_data_channel_combo.apply(\n    lambda x: x['channel'].unique()).reset_index()\nsegment_data_channel_combo.columns = ['braze_id', 'channel_combo']\nsegment_data_channel_combo['channel_combo'] = segment_data_channel_combo.channel_combo.apply(\n    lambda x: np.ndarray.tolist(x))\nsegment_data_channel_combo['channel_combo'] = segment_data_channel_combo['channel_combo'].apply(\n    lambda x: list(set(x)))\nsegment_data_channel_combo['channel_combo'] = segment_data_channel_combo.channel_combo.apply(\n    sorted)\nsegment_data_channel_combo['channel_combo'] = [\n    ', '.join(map(str, l)) for l in segment_data_channel_combo['channel_combo']]\nsegment_data_channel_combo = segment_data_channel_combo.drop_duplicates()", "_____no_output_____" ], [ "segment_data_engagement_stats = pd.merge(segment_data_engagement_stats_temp, segment_data_channel_combo,\n                                         how='left',\n                                         left_on=['braze_id'],\n                                         right_on=['braze_id'],\n                                         suffixes=['_from_engagement', '_from_channel_combo'])", "_____no_output_____" ], [ "segment_data_engagement_stats.loc[:, 'channel_combo'] = segment_data_engagement_stats.apply(\n    parse_channel_combo, axis=1)\n\nusers_per_channel_df = segment_data_engagement_stats.groupby(\n    ['segment_name', 'segment_id', 'channel']).agg(num_users=('braze_id', 'nunique'))\nusers_per_channel_df = users_per_channel_df.reset_index(level=[0, 1, 2])\nusers_per_channel_combo_df = segment_data_engagement_stats.groupby(\n    ['segment_name', 'segment_id', 'channel_combo']).agg(num_users=('braze_id', 'nunique'))\nusers_per_channel_combo_df = users_per_channel_combo_df.reset_index(level=[\n    0, 1, 2])\nusers_per_campaign_tags_df = segment_data_engagement_stats.explode('tags')\nusers_per_campaign_tags_df['tags'] = users_per_campaign_tags_df.tags.fillna(\n    'No Messages')\nusers_per_campaign_tags_df = users_per_campaign_tags_df.groupby(\n    ['segment_name', 'segment_id', 'tags']).agg(num_users=('braze_id', 'nunique'))\nusers_per_campaign_tags_df = users_per_campaign_tags_df.reset_index(level=[\n    0, 1, 2])", "_____no_output_____" ] ], [ [ "# Calculate Engagement\n\nThe below codeblocks 
will return Message Engagement rates for all channels. If the segment did not receive a channel, it will simply return a value of zero under the engagement metric.\n\nThe following Message Engagement Rates will be returned:\n\n- Number of Users\n\n- Email Open Rate\n\n- Email Click Rate\n\n- Push Open Rate\n\n- In-App Message Click Rate\n\n- Message Conversion Rates (of all Conversion Criteria)\n\n- Content Card Click Rate\n\nMessage Engagement Rates will be returned by:\n\n- Segment\n\n- Channel\n\n- Channel Combo\n\n- Campaign Tag", "_____no_output_____" ] ], [ [ "segment_data_engagement_stats['campaign_engaged'] = segment_data_engagement_stats.campaigns_received.apply(\n    lambda x: x.get('engaged') if type(x) != float else x)\nsegment_data_engagement_stats['opened_email'] = list(\n    map(get_email_open_engagement, segment_data_engagement_stats['campaign_engaged']))\nsegment_data_engagement_stats['clicked_email'] = list(map(\n    get_email_click_engagement, segment_data_engagement_stats['campaign_engaged']))\nsegment_data_engagement_stats['opened_push'] = list(\n    map(get_push_engagement, segment_data_engagement_stats['campaign_engaged']))\nsegment_data_engagement_stats['clicked_iam'] = list(\n    map(get_iam_engagement, segment_data_engagement_stats['campaign_engaged']))\nsegment_data_engagement_stats['converted'] = list(\n    map(get_conversions, segment_data_engagement_stats['campaigns_received']))\nsegment_data_engagement_stats['converted'] = segment_data_engagement_stats.converted.fillna(\n    value=False)\nsegment_data_engagement_stats['cards_clicked'] = segment_data_engagement_stats.cards_clicked.fillna(\n    value='')\nsegment_data_engagement_stats.loc[:, 'cards_clicked'] = segment_data_engagement_stats.apply(\n    get_cards_clicked, axis=1)", "_____no_output_____" ], [ "engagement_by_segment_preagg = segment_data_engagement_stats.groupby(\n    ['segment_name', 'segment_id'])\nengagement_by_segment = engagement_by_segment_preagg.agg(\n    num_users=('braze_id', 'nunique'), email_open_rate=('opened_email', calc_engagement), email_click_rate=('clicked_email', calc_engagement),\n    push_open_rate=('opened_push', calc_engagement), iam_click_rate=('clicked_iam', calc_engagement), conversion_rate=('converted', calc_engagement), content_card_click_rate=('cards_clicked', calc_engagement))", "_____no_output_____" ], [ "engagement_by_segment_and_channel_preagg = segment_data_engagement_stats.groupby(\n    ['segment_name', 'segment_id', 'channel'])\nengagement_by_segment_and_channel = engagement_by_segment_and_channel_preagg.agg(\n    num_users=('braze_id', 'nunique'), email_open_rate=('opened_email', calc_engagement), email_click_rate=('clicked_email', calc_engagement),\n    push_open_rate=('opened_push', calc_engagement), iam_click_rate=('clicked_iam', calc_engagement), conversion_rate=('converted', calc_engagement), content_card_click_rate=('cards_clicked', calc_engagement))", "_____no_output_____" ], [ "engagement_by_segment_and_channel_combo_preagg = segment_data_engagement_stats.groupby(\n    ['segment_name', 'segment_id', 'channel_combo'])\nengagement_by_segment_and_channel_combo = engagement_by_segment_and_channel_combo_preagg.agg(\n    num_users=('braze_id', 'nunique'), email_open_rate=('opened_email', calc_engagement), email_click_rate=('clicked_email', calc_engagement),\n    push_open_rate=('opened_push', calc_engagement), iam_click_rate=('clicked_iam', calc_engagement), conversion_rate=('converted', calc_engagement), content_card_click_rate=('cards_clicked', calc_engagement))
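\n\n# Quick sanity check (hypothetical values, not part of the export): calc_engagement\n# returns series.sum() / series.count(), so for a boolean engagement column it is the\n# share of engaged rows, e.g. calc_engagement(pd.Series([True, False, True, False])) == 0.5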
", "_____no_output_____" ], [ "segment_data_engagement_stats_by_campaign_tags = segment_data_engagement_stats.explode(\n    'tags')\nengagement_by_segment_and_campaign_tag_preagg = segment_data_engagement_stats_by_campaign_tags.groupby([\n    'segment_name', 'segment_id', 'tags'])\nengagement_by_segment_and_campaign_tag = engagement_by_segment_and_campaign_tag_preagg.agg(\n    num_users=('braze_id', 'nunique'), email_open_rate=('opened_email', calc_engagement), email_click_rate=('clicked_email', calc_engagement),\n    push_open_rate=('opened_push', calc_engagement), iam_click_rate=('clicked_iam', calc_engagement), conversion_rate=('converted', calc_engagement), content_card_click_rate=('cards_clicked', calc_engagement))", "_____no_output_____" ] ], [ [ "# Rolling Retention\n\nThe below codeblocks will return Rolling Retention Rates. You can view the Rolling Retention Methodology [here](https://www.braze.com/resources/articles/calculate-retention-rate). \n\nRolling Retention Rates will be returned by:\n\n- Segment\n\n- Channel\n\n- Channel Combo\n\n- Campaign Tag", "_____no_output_____" ] ], [ [ "segment_rolling_retention_temp = segment_rolling_retention_pre_apps.explode(\n    'apps')\nsegment_rolling_retention_temp = segment_rolling_retention_temp.dropna(subset=[\n    'apps'])", "_____no_output_____" ], [ "segment_rolling_retention_temp['first_used'] = segment_rolling_retention_temp['apps'].apply(\n    lambda x: x.get('first_used'))\nsegment_rolling_retention_temp['last_used'] = segment_rolling_retention_temp['apps'].apply(\n    lambda x: x.get('last_used'))\nsegment_rolling_retention_temp['platform'] = segment_rolling_retention_temp['apps'].apply(\n    lambda x: x.get('platform'))", "_____no_output_____" ], [ "segment_rolling_retention_temp[['first_used', 'last_used']] = segment_rolling_retention_temp[[\n    'first_used', 'last_used']].apply(pd.to_datetime)\nsegment_rolling_retention_temp['day_num'] = (\n    segment_rolling_retention_temp['last_used'] - segment_rolling_retention_temp['first_used']).dt.days", "_____no_output_____" ], [ "segment_rolling_retention_temp['day_num'] = segment_rolling_retention_temp['day_num'].astype(\n    'int')\nsegment_rolling_retention_raw = pd.pivot_table(segment_rolling_retention_temp,\n    values=(\"braze_id\"),\n    index=(\"segment_name\", 'segment_id',\n    \"platform\"),\n    columns=\"day_num\",\n    aggfunc='nunique')\\\n    .fillna(0)\n\nsegment_rolling_retention_raw = segment_rolling_retention_raw[segment_rolling_retention_raw\n    .columns[::-1]].cumsum(axis=1)\nsegment_rolling_retention_raw = segment_rolling_retention_raw[\n    segment_rolling_retention_raw.columns[::-1]]\nsegment_rolling_retention_raw[\"num_users\"] = segment_rolling_retention_raw[0]\nsegment_rolling_retention_raw = segment_rolling_retention_raw.groupby(\n    ['segment_name', 'segment_id', 'platform']).sum()\nsegment_rolling_retention = pd.concat([segment_rolling_retention_raw[\"num_users\"],\n    segment_rolling_retention_raw\n    .drop([\"num_users\"], axis=1)\n    .div(segment_rolling_retention_raw[\"num_users\"], axis=0)],\n    axis=1).fillna(0)\n\nsegment_rolling_retention = segment_rolling_retention.drop(0, axis=1)", "_____no_output_____" ], [ "segment_engagement_user_data = segment_data_engagement_stats[[\n    'braze_id', 'segment_id', 'segment_name', 'apps', 'channel', 'tags', 'channel_combo']]\nsegment_engagement_data_for_retention = segment_engagement_user_data.explode(\n    'apps')\nsegment_engagement_data_for_retention = segment_engagement_data_for_retention.dropna(subset=[\n
'apps'])\nsegment_engagement_data_for_retention['platform'] = segment_engagement_data_for_retention['apps'].apply(\n lambda x: x.get('platform'))", "_____no_output_____" ], [ "segment_rolling_retention_by_engagement_temp = pd.merge(segment_rolling_retention_temp.reset_index(), segment_engagement_data_for_retention.reset_index(),\n how='left',\n left_on=[\n 'braze_id', 'platform', 'segment_id', 'segment_name'],\n right_on=[\n 'braze_id', 'platform', 'segment_id', 'segment_name'],\n suffixes=['_from_retention', '_from_engagement'])", "_____no_output_____" ], [ "segment_rolling_retention_by_engagement_temp['day_num'] = segment_rolling_retention_by_engagement_temp['day_num'].astype(\n 'int')\nsegment_rolling_retention_by_engagement_raw = pd.pivot_table(segment_rolling_retention_by_engagement_temp,\n values=(\n \"braze_id\"),\n index=(\n \"segment_name\", \"segment_id\", \"platform\", \"channel\"),\n columns=\"day_num\",\n aggfunc='nunique')\\\n .fillna(0)\n\nsegment_rolling_retention_by_engagement_raw = segment_rolling_retention_by_engagement_raw[segment_rolling_retention_by_engagement_raw\n .columns[::-1]].cumsum(axis=1)\nsegment_rolling_retention_by_engagement_raw = segment_rolling_retention_by_engagement_raw[\n segment_rolling_retention_by_engagement_raw.columns[::-1]]\nsegment_rolling_retention_by_engagement_raw[\"num_users\"] = segment_rolling_retention_by_engagement_raw[0]\nsegment_rolling_retention_by_engagement_raw = segment_rolling_retention_by_engagement_raw.groupby(\n ['segment_name', 'segment_id', 'platform', \"channel\"]).sum()\nsegment_rolling_retention_by_engagement = pd.concat([segment_rolling_retention_by_engagement_raw[\"num_users\"],\n segment_rolling_retention_by_engagement_raw\n .drop([\"num_users\"], axis=1)\n .div(segment_rolling_retention_by_engagement_raw[\"num_users\"], axis=0)],\n axis=1).fillna(0)\n\nsegment_rolling_retention_by_engagement=segment_rolling_retention_by_engagement.drop(0,axis=1)", "_____no_output_____" ], [ "segment_rolling_retention_by_engagement_temp['day_num'] = segment_rolling_retention_by_engagement_temp['day_num'].astype(\n 'int')\nsegment_campaign_tag_data_for_retention_temp = segment_rolling_retention_by_engagement_temp.explode(\n 'tags')\nsegment_campaign_tag_data_for_retention_temp = segment_campaign_tag_data_for_retention_temp.dropna(subset=[\n 'tags'])\nsegment_rolling_retention_by_campaign_tag_raw = pd.pivot_table(segment_campaign_tag_data_for_retention_temp,\n values=(\n \"braze_id\"),\n index=(\n \"segment_name\", \"segment_id\", \"platform\", \"tags\"),\n columns=\"day_num\",\n aggfunc='nunique')\\\n .fillna(0)\n\nsegment_rolling_retention_by_campaign_tag_raw = segment_rolling_retention_by_campaign_tag_raw[segment_rolling_retention_by_campaign_tag_raw\n .columns[::-1]].cumsum(axis=1)\nsegment_rolling_retention_by_campaign_tag_raw = segment_rolling_retention_by_campaign_tag_raw[\n segment_rolling_retention_by_campaign_tag_raw.columns[::-1]]\nsegment_rolling_retention_by_campaign_tag_raw[\"num_users\"] = segment_rolling_retention_by_campaign_tag_raw[0]\nsegment_rolling_retention_by_campaign_tag_raw = segment_rolling_retention_by_campaign_tag_raw.groupby(\n ['segment_name', 'segment_id', 'platform', \"tags\"]).sum()\nsegment_rolling_retention_by_campaign_tag = pd.concat([segment_rolling_retention_by_campaign_tag_raw[\"num_users\"],\n segment_rolling_retention_by_campaign_tag_raw\n .drop([\"num_users\"], axis=1)\n .div(segment_rolling_retention_by_campaign_tag_raw[\"num_users\"], axis=0)],\n 
axis=1).fillna(0)\nsegment_rolling_retention_by_campaign_tag = segment_rolling_retention_by_campaign_tag.drop(0, axis=1)\n", "_____no_output_____" ], [ "segment_rolling_retention_by_engagement_temp['day_num'] = segment_rolling_retention_by_engagement_temp['day_num'].astype(\n    'int')\nsegment_rolling_retention_by_channel_combo_raw = pd.pivot_table(segment_rolling_retention_by_engagement_temp,\n    values=(\n    \"braze_id\"),\n    index=(\n    \"segment_name\", \"segment_id\", \"platform\", \"channel_combo\"),\n    columns=\"day_num\",\n    aggfunc='nunique')\\\n    .fillna(0)\n\nsegment_rolling_retention_by_channel_combo_raw = segment_rolling_retention_by_channel_combo_raw[segment_rolling_retention_by_channel_combo_raw\n    .columns[::-1]].cumsum(axis=1)\nsegment_rolling_retention_by_channel_combo_raw = segment_rolling_retention_by_channel_combo_raw[\n    segment_rolling_retention_by_channel_combo_raw.columns[::-1]]\nsegment_rolling_retention_by_channel_combo_raw[\"num_users\"] = segment_rolling_retention_by_channel_combo_raw[0]\nsegment_rolling_retention_by_channel_combo_raw = segment_rolling_retention_by_channel_combo_raw.groupby(\n    ['segment_name', 'segment_id', 'platform', \"channel_combo\"]).sum()\nsegment_rolling_retention_by_channel_combo = pd.concat([segment_rolling_retention_by_channel_combo_raw[\"num_users\"],\n    segment_rolling_retention_by_channel_combo_raw\n    .drop([\"num_users\"], axis=1)\n    .div(segment_rolling_retention_by_channel_combo_raw[\"num_users\"], axis=0)],\n    axis=1).fillna(0)\n\nsegment_rolling_retention_by_channel_combo = segment_rolling_retention_by_channel_combo.drop(0, axis=1)\n\n", "_____no_output_____" ] ], [ [ "# Purchasing Stats\n\nThe following purchasing metrics will be returned in the first purchasing stats dataframe:\n\n- Number of Buyers\n\n- Number of Repeat Buyers\n\n- % Buyers\n\n- % Repeat Buyers\n\n- Number of Purchases\n\n- Total Revenue\n\n- Average Revenue per Buyer\n\n- Average Time to Purchase\n\n- Purchases per Buyer\n\nThe second purchasing stats dataframe will return purchase retention rates.\n\nBoth purchasing stats dataframes will be returned by:\n\n- Segment\n\n- Channel\n\n- Channel Combo\n\n- Campaign Tag", "_____no_output_____" ] ], [ [ "num_users = segment_df.braze_id.nunique()\nsegment_purchasing_stats_temp = segment_purchasing_stats_pre_apps.dropna(\n    subset=['apps', 'purchases'])\nsegment_purchasing_dates = segment_purchasing_stats_pre_apps.dropna(\n    subset=['apps', 'purchases'])\nsegment_purchasing_dates = segment_purchasing_dates.explode(\n    'purchases')\nsegment_purchasing_dates = segment_purchasing_dates.explode(\n    'apps')\n\nsegment_purchasing_stats_temp['num_purchases'] = segment_purchasing_stats_temp['purchases'].apply(\n    lambda x: sum(map(itemgetter('count'), x)))\nsegment_purchasing_dates['first_purchase'] = segment_purchasing_dates['purchases'].apply(\n    lambda x: x.get('first'))\nsegment_purchasing_dates['last_purchase'] = segment_purchasing_dates['purchases'].apply(\n    lambda x: x.get('last'))\nsegment_purchasing_dates['first_session'] = segment_purchasing_dates['apps'].apply(\n    lambda x: x.get('first_used'))\nsegment_purchasing_dates['first_purchase'] = pd.to_datetime(\n    segment_purchasing_dates['first_purchase'])\nsegment_purchasing_dates['last_purchase'] = pd.to_datetime(\n    segment_purchasing_dates['last_purchase'])\nsegment_purchasing_dates['first_session'] = pd.to_datetime(\n    segment_purchasing_dates['first_session'])\n\n\nsegment_purchasing_dates_temp = segment_purchasing_dates.groupby(['segment_name', 'segment_id', 
'braze_id']).agg(first_purchase_date=(\n    'first_purchase', 'min'), last_purchase_date=('last_purchase', 'max'), first_session_date=('first_session', 'min'))\nsegment_purchasing_dates_temp = segment_purchasing_dates_temp.reset_index(level=[\n    0, 1, 2])\nsegment_purchasing_stats_temp = pd.merge(segment_purchasing_stats_temp, segment_purchasing_dates_temp,\n    how='left',\n    left_on=[\n    'braze_id', 'segment_id', 'segment_name'],\n    right_on=[\n    'braze_id', 'segment_id', 'segment_name'])\n\nsegment_purchasing_stats_temp['repeat_buyer'] = segment_purchasing_stats_temp[\n    'first_purchase_date'] != segment_purchasing_stats_temp['last_purchase_date']\nsegment_purchasing_stats_temp['repeat_buyer_id'] = segment_purchasing_stats_temp.apply(\n    lambda row: row.braze_id if row.repeat_buyer == True else np.nan, axis=1)\nsegment_purchasing_stats_temp['days_to_purchase'] = (\n    segment_purchasing_stats_temp['first_purchase_date'] - segment_purchasing_stats_temp['first_session_date']).dt.total_seconds()\nsegment_purchasing_stats_temp['days_to_purchase'] = segment_purchasing_stats_temp['days_to_purchase']/86400\nsegment_purchase_retention_temp = segment_purchasing_stats_temp\nsegment_purchase_data = segment_purchasing_stats_temp\n\nsegment_purchasing_stats_temp = segment_purchasing_stats_temp.groupby(['segment_name', 'segment_id']).agg(buyers=('braze_id', 'nunique'), repeat_buyers=('repeat_buyer_id', 'nunique'), num_purchases=(\n    'num_purchases', 'sum'), total_revenue=('total_revenue', 'sum'), avg_revenue_per_buyer=('total_revenue', 'mean'), avg_time_to_purchase=('days_to_purchase', 'mean'))\nsegment_purchasing_stats_temp['pct_repeat_buyers'] = round(\n    segment_purchasing_stats_temp.repeat_buyers/segment_purchasing_stats_temp.buyers, 2)\nsegment_purchasing_stats_temp['purchases_per_buyer'] = round(\n    segment_purchasing_stats_temp.num_purchases/segment_purchasing_stats_temp.buyers, 2)\nsegment_purchasing_stats_temp['revenue_per_item_purchased'] = round(\n    segment_purchasing_stats_temp.total_revenue/segment_purchasing_stats_temp.num_purchases, 2)\nsegment_purchasing_stats_temp['purchases_per_user'] = round(\n    segment_purchasing_stats_temp.num_purchases/num_users, 2)\nsegment_purchasing_stats_temp['pct_buyer'] = round(\n    segment_purchasing_stats_temp.buyers/num_users, 2)\n\nsegment_purchasing_stats = segment_purchasing_stats_temp", "_____no_output_____" ], [ "segment_purchase_retention_temp['day_num'] = (\n    segment_purchase_retention_temp['last_purchase_date'] - segment_purchase_retention_temp['first_purchase_date']).dt.days\n\nsegment_purchase_retention_temp['day_num'] = segment_purchase_retention_temp['day_num'].astype(\n    'int')\nsegment_purchase_retention_raw = pd.pivot_table(segment_purchase_retention_temp,\n    values=(\"braze_id\"),\n    index=(\"segment_name\",\n    \"segment_id\"),\n    columns=\"day_num\",\n    aggfunc='nunique')\\\n    .fillna(0)\n\n\nsegment_purchase_retention_raw = segment_purchase_retention_raw[segment_purchase_retention_raw\n    .columns[::-1]].cumsum(axis=1)\nsegment_purchase_retention_raw = segment_purchase_retention_raw[\n    segment_purchase_retention_raw.columns[::-1]]\nsegment_purchase_retention_raw[\"num_users\"] = segment_purchase_retention_raw[0]\nsegment_purchase_retention_raw = segment_purchase_retention_raw.groupby(\n    ['segment_name', 'segment_id']).sum()\nsegment_purchase_retention = pd.concat([segment_purchase_retention_raw[\"num_users\"],\n    segment_purchase_retention_raw\n    .drop([\"num_users\"], axis=1)\n    .div(segment_purchase_retention_raw[\"num_users\"], axis=0)],\n
axis=1).fillna(0)\nsegment_purchase_retention=segment_purchase_retention.drop(0,axis=1)", "_____no_output_____" ], [ "segment_purchase_stats_by_engagement_temp = pd.merge(segment_purchase_data, segment_engagement_user_data,\n how='left',\n left_on=[\n 'braze_id', 'segment_id', 'segment_name'],\n right_on=[\n 'braze_id', 'segment_id', 'segment_name'],\n suffixes=['_from_retention', '_from_engagement'])\n\nsegment_purchase_stats_by_engagement_temp['day_num'] = (\n segment_purchase_stats_by_engagement_temp['last_purchase_date'] - segment_purchase_stats_by_engagement_temp['first_purchase_date']).dt.days\nsegment_purchase_stats_by_engagement_temp['channel'] = segment_purchase_stats_by_engagement_temp.channel.fillna(\n 'No Messages')\nsegment_purchase_stats_by_engagement_temp['channel_combo'] = segment_purchase_stats_by_engagement_temp.channel_combo.fillna(\n 'No Messages')", "_____no_output_____" ], [ "segment_purchase_stats_by_channel_temp = segment_purchase_stats_by_engagement_temp.groupby(['segment_name', 'segment_id', 'channel']).agg(buyers=('braze_id', 'nunique'), repeat_buyers=('repeat_buyer_id', 'nunique'), num_purchases=(\n 'num_purchases', 'sum'), total_revenue=('total_revenue', 'sum'), avg_revenue_per_buyer=('total_revenue', 'mean'), avg_time_to_purchase=('days_to_purchase', 'mean'), total_buyers=('braze_id', 'count'), total_repeat_buyers=('repeat_buyer_id', 'count'))\nsegment_purchase_stats_by_channel_temp['pct_repeat_buyers'] = round(\n segment_purchase_stats_by_channel_temp.repeat_buyers/segment_purchase_stats_by_channel_temp.buyers, 2)\nsegment_purchase_stats_by_channel_temp['purchases_per_buyer'] = round(\n segment_purchase_stats_by_channel_temp.num_purchases/segment_purchase_stats_by_channel_temp.total_buyers, 2)\nsegment_purchase_stats_by_channel_temp['revenue_per_item_purchased'] = round(\n segment_purchase_stats_by_channel_temp.total_revenue/segment_purchase_stats_by_channel_temp.num_purchases, 2)\nsegment_purchase_stats_by_channel = pd.merge(segment_purchase_stats_by_channel_temp, users_per_channel_df,\n how='left',\n left_on=[\n 'segment_name', 'segment_id','channel'],\n right_on=['segment_name', 'segment_id','channel'])\n\nsegment_purchase_stats_by_channel['pct_buyers'] = round(\n segment_purchase_stats_by_channel.buyers/segment_purchase_stats_by_channel.num_users, 2)\n\nsegment_purchase_stats_by_channel = segment_purchase_stats_by_channel[['segment_name', 'segment_id', 'channel', 'buyers', 'repeat_buyers', 'num_users', 'pct_buyers',\n 'pct_repeat_buyers', 'purchases_per_buyer', 'avg_revenue_per_buyer', 'avg_time_to_purchase', 'revenue_per_item_purchased']].set_index(['segment_name', 'segment_id', 'channel'])\n", "_____no_output_____" ], [ "segment_purchase_stats_by_channel_combo_temp = segment_purchase_stats_by_engagement_temp.groupby(['segment_name', 'segment_id', 'channel_combo']).agg(buyers=('braze_id', 'nunique'), repeat_buyers=('repeat_buyer_id', 'nunique'), num_purchases=(\n 'num_purchases', 'sum'), total_revenue=('total_revenue', 'sum'), avg_revenue_per_buyer=('total_revenue', 'mean'), avg_time_to_purchase=('days_to_purchase', 'mean'), total_buyers=('braze_id', 'count'), total_repeat_buyers=('repeat_buyer_id', 'count'))\n\nsegment_purchase_stats_by_channel_combo_temp['pct_repeat_buyers'] = round(\n segment_purchase_stats_by_channel_combo_temp.repeat_buyers/segment_purchase_stats_by_channel_combo_temp.buyers, 2)\nsegment_purchase_stats_by_channel_combo_temp['purchases_per_buyer'] = round(\n 
segment_purchase_stats_by_channel_combo_temp.num_purchases/segment_purchase_stats_by_channel_combo_temp.total_buyers, 2)\nsegment_purchase_stats_by_channel_combo_temp['revenue_per_item_purchased'] = round(\n    segment_purchase_stats_by_channel_combo_temp.total_revenue/segment_purchase_stats_by_channel_combo_temp.num_purchases, 2)\nsegment_purchase_stats_by_channel_combo = pd.merge(segment_purchase_stats_by_channel_combo_temp, users_per_channel_combo_df,\n    how='left',\n    left_on=[\n    'segment_name', 'segment_id', 'channel_combo'],\n    right_on=['segment_name', 'segment_id', 'channel_combo'])\n\nsegment_purchase_stats_by_channel_combo['pct_buyers'] = round(\n    segment_purchase_stats_by_channel_combo.buyers/segment_purchase_stats_by_channel_combo.num_users, 2)\n\nsegment_purchase_stats_by_channel_combo = segment_purchase_stats_by_channel_combo[['segment_name', 'segment_id', 'channel_combo', 'buyers', 'repeat_buyers', 'num_users', 'pct_buyers',\n    'pct_repeat_buyers', 'purchases_per_buyer', 'avg_revenue_per_buyer', 'avg_time_to_purchase', 'revenue_per_item_purchased']].set_index(['segment_name', 'segment_id', 'channel_combo'])", "_____no_output_____" ], [ "segment_purchase_stats_by_campaign_tag_temp = segment_purchase_stats_by_engagement_temp.explode(\n    'tags')\nsegment_purchase_stats_by_campaign_tag_temp = segment_purchase_stats_by_campaign_tag_temp.groupby(['segment_name', 'segment_id', 'tags']).agg(buyers=('braze_id', 'nunique'), repeat_buyers=('repeat_buyer_id', 'nunique'), num_purchases=(\n    'num_purchases', 'sum'), total_revenue=('total_revenue', 'sum'), avg_revenue_per_buyer=('total_revenue', 'mean'), avg_time_to_purchase=('days_to_purchase', 'mean'), total_buyers=('braze_id', 'count'), total_repeat_buyers=('repeat_buyer_id', 'count'))\nsegment_purchase_stats_by_campaign_tag_temp['pct_repeat_buyers'] = round(\n    segment_purchase_stats_by_campaign_tag_temp.repeat_buyers/segment_purchase_stats_by_campaign_tag_temp.buyers, 2)\nsegment_purchase_stats_by_campaign_tag_temp['purchases_per_buyer'] = round(\n    segment_purchase_stats_by_campaign_tag_temp.num_purchases/segment_purchase_stats_by_campaign_tag_temp.total_buyers, 2)\nsegment_purchase_stats_by_campaign_tag_temp['revenue_per_item_purchased'] = round(\n    segment_purchase_stats_by_campaign_tag_temp.total_revenue/segment_purchase_stats_by_campaign_tag_temp.num_purchases, 2)\nsegment_purchase_stats_by_campaign_tag = pd.merge(segment_purchase_stats_by_campaign_tag_temp, users_per_campaign_tags_df,\n    how='left',\n    left_on=[\n    'segment_name', 'tags'],\n    right_on=['segment_name', 'tags'])\n\n\nsegment_purchase_stats_by_campaign_tag['pct_buyers'] = round(\n    segment_purchase_stats_by_campaign_tag.buyers/segment_purchase_stats_by_campaign_tag.num_users, 2)\n\nsegment_purchase_stats_by_campaign_tag = segment_purchase_stats_by_campaign_tag[['segment_name', 'segment_id', 'tags', 'buyers', 'repeat_buyers', 'num_users', 'pct_buyers',\n    'pct_repeat_buyers', 'purchases_per_buyer', 'avg_revenue_per_buyer', 'avg_time_to_purchase', 'revenue_per_item_purchased']].set_index(['segment_name', 'segment_id', 'tags'])", "_____no_output_____" ], [ "segment_purchase_stats_by_engagement_temp['day_num'] = segment_purchase_stats_by_engagement_temp['day_num'].astype(\n    'int')\nsegment_purchase_retention_by_channel_raw = pd.pivot_table(segment_purchase_stats_by_engagement_temp,\n    values=(\"braze_id\"),\n    index=(\n    \"segment_name\", \"segment_id\", \"channel\"),\n    columns=\"day_num\",\n    aggfunc='nunique')\\\n    .fillna(0)\n\nsegment_purchase_retention_by_channel_raw = 
segment_purchase_retention_by_channel_raw[segment_purchase_retention_by_channel_raw\n    .columns[::-1]].cumsum(axis=1)\nsegment_purchase_retention_by_channel_raw = segment_purchase_retention_by_channel_raw[\n    segment_purchase_retention_by_channel_raw.columns[::-1]]\nsegment_purchase_retention_by_channel_raw[\"num_users\"] = segment_purchase_retention_by_channel_raw[0]\nsegment_purchase_retention_by_channel_raw = segment_purchase_retention_by_channel_raw.groupby(\n    ['segment_name', 'segment_id', \"channel\"]).sum()\nsegment_purchase_retention_by_channel = pd.concat([segment_purchase_retention_by_channel_raw[\"num_users\"],\n                  segment_purchase_retention_by_channel_raw\n                  .drop([\"num_users\"], axis=1)\n                  .div(segment_purchase_retention_by_channel_raw[\"num_users\"], axis=0)],\n                  axis=1).fillna(0)\n\nsegment_purchase_retention_by_channel=segment_purchase_retention_by_channel.drop(0, axis=1)\n", "_____no_output_____" ], [ "segment_purchase_stats_by_engagement_temp['day_num'] = segment_purchase_stats_by_engagement_temp['day_num'].astype(\n    'int')\nsegment_purchase_retention_by_channel_combo_raw = pd.pivot_table(segment_purchase_stats_by_engagement_temp,\n                  values=(\"braze_id\"),\n                  index=(\"segment_name\", \"segment_id\", \"channel_combo\"),\n                  columns=\"day_num\",\n                  aggfunc='nunique')\\\n    .fillna(0)\n\n\nsegment_purchase_retention_by_channel_combo_raw = segment_purchase_retention_by_channel_combo_raw[segment_purchase_retention_by_channel_combo_raw\n    .columns[::-1]].cumsum(axis=1)\nsegment_purchase_retention_by_channel_combo_raw = segment_purchase_retention_by_channel_combo_raw[\n    segment_purchase_retention_by_channel_combo_raw.columns[::-1]]\nsegment_purchase_retention_by_channel_combo_raw[\n    \"num_users\"] = segment_purchase_retention_by_channel_combo_raw[0]\nsegment_purchase_retention_by_channel_combo_raw = segment_purchase_retention_by_channel_combo_raw.groupby(\n    ['segment_name', 'segment_id', \"channel_combo\"]).sum()\nsegment_purchase_retention_by_channel_combo = pd.concat([segment_purchase_retention_by_channel_combo_raw[\"num_users\"],\n                  segment_purchase_retention_by_channel_combo_raw\n                  .drop([\"num_users\"], axis=1)\n                  .div(segment_purchase_retention_by_channel_combo_raw[\"num_users\"], axis=0)],\n                  axis=1).fillna(0)\n\nsegment_purchase_retention_by_channel_combo=segment_purchase_retention_by_channel_combo.drop(0,axis=1)\n", "_____no_output_____" ], [ "segment_purchase_stats_by_engagement_temp['day_num'] = segment_purchase_stats_by_engagement_temp['day_num'].astype(\n    'int')\nsegment_purchase_stats_by_campaign_tag_temp = segment_purchase_stats_by_engagement_temp.explode(\n    'tags')\nsegment_purchase_retention_by_campaign_tags_raw = pd.pivot_table(segment_purchase_stats_by_campaign_tag_temp,\n                  values=(\"braze_id\"),\n                  index=(\"segment_name\", \"segment_id\", \"tags\"),\n                  columns=\"day_num\",\n                  aggfunc='nunique')\\\n    .fillna(0)\n\n### Get the cumulative sum of users based on \"last day\"\nsegment_purchase_retention_by_campaign_tags_raw = segment_purchase_retention_by_campaign_tags_raw[segment_purchase_retention_by_campaign_tags_raw\n    .columns[::-1]].cumsum(axis=1)\nsegment_purchase_retention_by_campaign_tags_raw = segment_purchase_retention_by_campaign_tags_raw[\n    segment_purchase_retention_by_campaign_tags_raw.columns[::-1]]\nsegment_purchase_retention_by_campaign_tags_raw[\n    \"num_users\"] = segment_purchase_retention_by_campaign_tags_raw[0]\nsegment_purchase_retention_by_campaign_tags_raw = segment_purchase_retention_by_campaign_tags_raw.groupby(\n    ['segment_name', 'segment_id', \"tags\"]).sum()\nsegment_purchase_retention_by_campaign_tags = pd.concat([segment_purchase_retention_by_campaign_tags_raw[\"num_users\"],\n                  segment_purchase_retention_by_campaign_tags_raw\n                  .drop([\"num_users\"], axis=1)\n                  .div(segment_purchase_retention_by_campaign_tags_raw[\"num_users\"], axis=0)],\n                  axis=1).fillna(0)\n\nsegment_purchase_retention_by_campaign_tags=segment_purchase_retention_by_campaign_tags.drop(0,axis=1)\n", "_____no_output_____" ] ], [ [ "# Session Stats\n\nThe following Session Engagement Metrics will be returned by the codeblocks below:\n\n- Number of Users\n- Sessions per User\n\nSession Engagement Metrics will be returned by:\n\n- Segment\n\n- Channel\n\n- Channel Combo\n\n- Campaign Tag", "_____no_output_____" ] ], [ [ "segment_sessions_stats_temp = segment_sessions_stats_pre_apps.explode('apps')\nsegment_sessions_stats_temp = segment_sessions_stats_temp.dropna(subset=[\n    'apps'])\nsegment_sessions_stats_temp['sessions'] = segment_sessions_stats_temp['apps'].apply(\n    lambda x: x.get('sessions'))\nsegment_sessions_stats_temp['platform'] = segment_sessions_stats_temp['apps'].apply(\n    lambda x: x.get('platform'))", "_____no_output_____" ], [ "segment_sessions_stats_temp = segment_sessions_stats_temp.groupby(['segment_name', 'segment_id']).agg(\n    num_users=(\"braze_id\", 'nunique'), total_sessions=('sessions', 'sum'))\nsegment_sessions_stats_temp['sessions_per_user'] = segment_sessions_stats_temp.total_sessions / \\\n    segment_sessions_stats_temp.num_users\n\nsegment_sessions_stats = segment_sessions_stats_temp", "_____no_output_____" ], [ "segment_sessions_stats_by_engagement_temp = segment_engagement_user_data.explode(\n    'apps')\nsegment_sessions_stats_by_engagement_temp = segment_sessions_stats_by_engagement_temp.dropna(subset=[\n    'apps'])\nsegment_sessions_stats_by_engagement_temp['sessions'] = segment_sessions_stats_by_engagement_temp['apps'].apply(\n    lambda x: x.get('sessions'))\nsegment_sessions_stats_by_engagement_temp['platform'] = segment_sessions_stats_by_engagement_temp['apps'].apply(\n    lambda x: x.get('platform'))\n\nsegment_sessions_stats_by_channel_temp = segment_sessions_stats_by_engagement_temp.groupby(['segment_name', 'segment_id', 'channel']).agg(\n    total_users=(\"braze_id\", 'count'), total_sessions=('sessions', 'sum'), num_users=(\"braze_id\", 'nunique'))\nsegment_sessions_stats_by_channel_temp = segment_sessions_stats_by_channel_temp.reset_index()\nsegment_sessions_stats_by_channel_temp['sessions_per_user'] = segment_sessions_stats_by_channel_temp.total_sessions / \\\n    segment_sessions_stats_by_channel_temp.total_users\n\nsegment_sessions_stats_by_channel = segment_sessions_stats_by_channel_temp[[\n    'segment_name', 'segment_id', 'channel', 'num_users', 'sessions_per_user']].set_index(['segment_name', 'segment_id', 'channel'])", "_____no_output_____" ], [ "segment_sessions_stats_by_channel_combo_temp = segment_sessions_stats_by_engagement_temp.groupby(['segment_name', 'segment_id', 'channel_combo']).agg(\n    total_users=(\"braze_id\", 'count'), total_sessions=('sessions', 'sum'), num_users=(\"braze_id\", 'nunique'))\nsegment_sessions_stats_by_channel_combo_temp = segment_sessions_stats_by_channel_combo_temp.reset_index()\nsegment_sessions_stats_by_channel_combo_temp['sessions_per_user'] = segment_sessions_stats_by_channel_combo_temp.total_sessions / \\\n    segment_sessions_stats_by_channel_combo_temp.total_users\n\nsegment_sessions_stats_by_channel_combo = segment_sessions_stats_by_channel_combo_temp[[\n    'segment_name', 'segment_id', 'channel_combo', 'num_users', 'sessions_per_user']].set_index(['segment_name', 'segment_id', 'channel_combo'])", "_____no_output_____" ], [ "segment_sessions_stats_by_campaign_tag_temp = segment_sessions_stats_by_engagement_temp.explode(\n    'tags')\nsegment_sessions_stats_by_campaign_tag_temp = segment_sessions_stats_by_campaign_tag_temp.groupby(['segment_name', 'segment_id', 'tags']).agg(\n    total_users=(\"braze_id\", 'count'), total_sessions=('sessions', 'sum'), num_users=(\"braze_id\", 'nunique'))\nsegment_sessions_stats_by_campaign_tag_temp = segment_sessions_stats_by_campaign_tag_temp.reset_index()\nsegment_sessions_stats_by_campaign_tag_temp['sessions_per_user'] = segment_sessions_stats_by_campaign_tag_temp.total_sessions / \\\n    segment_sessions_stats_by_campaign_tag_temp.total_users\n\nsegment_sessions_stats_by_campaign_tag = segment_sessions_stats_by_campaign_tag_temp[[\n    'segment_name', 'segment_id', 'tags', 'num_users', 'sessions_per_user']].set_index(['segment_name', 'segment_id', 'tags'])", "_____no_output_____" ] ], [ [ "# Custom Event Stats\n\nThe following Custom Event Stats will be calculated:\n\n- Number of Users Completing the Custom Event\n\n- Number of Users\n\n- Total Count of Custom Event\n\n- % of Users Completing Custom Events\n\n- Custom Events per User\n\n- Avg. Days between each occurrence of a Custom Event\n\n- Avg. Custom Event Completion per Day\n\nCustom Event stats dataframes will be returned by:\n\n- Segment\n\n- Channel\n\n- Channel Combo\n\n- Campaign Tag", "_____no_output_____" ] ], [ [ "segment_custom_event_stats_temp = segment_custom_event_stats_pre_custom_event.explode(\n    'custom_events')", "_____no_output_____" ], [ "segment_custom_event_stats_temp['custom_event_name'] = list(\n    map(get_custom_event_name, segment_custom_event_stats_temp['custom_events']))\nsegment_custom_event_stats_temp['custom_event_count'] = list(\n    map(get_custom_event_count, segment_custom_event_stats_temp['custom_events']))\nsegment_custom_event_stats_temp['custom_event_first_date'] = list(map(\n    get_custom_event_first_date, segment_custom_event_stats_temp['custom_events']))\nsegment_custom_event_stats_temp['custom_event_last_date'] = list(\n    map(get_custom_event_last_date, segment_custom_event_stats_temp['custom_events']))", "_____no_output_____" ], [ "segment_custom_event_stats_temp[['custom_event_first_date', 'custom_event_last_date']] = segment_custom_event_stats_temp[[\n    'custom_event_first_date', 'custom_event_last_date']].apply(pd.to_datetime)\nsegment_custom_event_stats_temp['days_between_events'] = (\n    segment_custom_event_stats_temp['custom_event_last_date'] - segment_custom_event_stats_temp['custom_event_first_date']).dt.days\nsegment_custom_event_stats_temp['custom_event_per_day'] = np.round(np.where(segment_custom_event_stats_temp['days_between_events'] > 0,\n    segment_custom_event_stats_temp.custom_event_count/segment_custom_event_stats_temp.days_between_events, segment_custom_event_stats_temp.custom_event_count), 1)", "_____no_output_____" ], [ "total_segment_users_custom_event = segment_custom_event_stats_temp.braze_id.nunique()\nsegment_custom_event_stats_by_segment = segment_custom_event_stats_temp.groupby(\n    ['segment_name', 'segment_id', 'custom_event_name']).agg(num_users_completing_custom_event=(\n    'braze_id', 'nunique'), total_custom_events=('custom_event_count', 'sum'), avg_days_between_events=('days_between_events', 'mean'), avg_custom_event_per_day=('custom_event_per_day', 'mean'))\nsegment_custom_event_stats_by_segment['custom_event_per_user'] = 
segment_custom_event_stats_by_segment.total_custom_events / \\\n total_segment_users_custom_event\nsegment_custom_event_stats_by_segment['pct_users_completing_custom_event'] = segment_custom_event_stats_by_segment.num_users_completing_custom_event / \\\n total_segment_users_custom_event\nsegment_custom_event_stats_by_segment['num_users'] = total_segment_users_custom_event\nsegment_custom_event_stats_by_segment = segment_custom_event_stats_by_segment[[\n 'num_users_completing_custom_event', 'num_users', 'total_custom_events', 'pct_users_completing_custom_event', 'custom_event_per_user', 'avg_days_between_events', 'avg_custom_event_per_day']]", "_____no_output_____" ], [ "segment_custom_event_stats_by_engagement_temp = pd.merge(segment_custom_event_stats_temp, segment_engagement_user_data,\n how='left',\n left_on=[\n 'braze_id', 'segment_id', 'segment_name'],\n right_on=[\n 'braze_id', 'segment_id', 'segment_name'],\n suffixes=['_from_custom_events', '_from_engagement'])\nsegment_custom_event_stats_by_engagement_temp['channel'] = segment_custom_event_stats_by_engagement_temp.channel.fillna(\n 'No Messages')\nsegment_custom_event_stats_by_engagement_temp['channel_combo'] = segment_custom_event_stats_by_engagement_temp.channel_combo.fillna(\n 'No Messages')\nsegment_custom_event_stats_by_engagement_temp['tags'] = segment_custom_event_stats_by_engagement_temp.tags.fillna(\n 'No Messages')", "_____no_output_____" ], [ "segment_custom_event_stats_by_segment_and_channel_temp = segment_custom_event_stats_by_engagement_temp.groupby(['segment_name', 'segment_id', 'channel', 'custom_event_name']).agg(num_users_completing_custom_event=(\n 'braze_id', 'nunique'), total_custom_events=('custom_event_count', 'sum'), avg_days_between_events=('days_between_events', 'mean'), avg_custom_event_per_day=('custom_event_per_day', 'mean'))\n\nsegment_custom_event_stats_by_segment_and_channel_temp = segment_custom_event_stats_by_segment_and_channel_temp.reset_index()\n\nsegment_custom_event_stats_by_segment_and_channel = pd.merge(segment_custom_event_stats_by_segment_and_channel_temp, users_per_channel_df,\n how='left',\n left_on=[\n 'segment_name', 'segment_id','channel'],\n right_on=['segment_name', 'segment_id','channel'])\nsegment_custom_event_stats_by_segment_and_channel['custom_event_per_user'] = segment_custom_event_stats_by_segment_and_channel.total_custom_events / \\\n segment_custom_event_stats_by_segment_and_channel.num_users\nsegment_custom_event_stats_by_segment_and_channel['pct_users_completing_custom_event'] = segment_custom_event_stats_by_segment_and_channel.num_users_completing_custom_event / \\\n segment_custom_event_stats_by_segment_and_channel.num_users\n\nsegment_custom_event_stats_by_segment_and_channel = segment_custom_event_stats_by_segment_and_channel[['segment_name', 'segment_id', 'channel','custom_event_name', 'num_users_completing_custom_event', 'num_users',\n 'total_custom_events', 'pct_users_completing_custom_event', 'custom_event_per_user', 'avg_days_between_events', 'avg_custom_event_per_day']].set_index(['segment_name', 'segment_id', 'channel'])\n", "_____no_output_____" ], [ "segment_custom_event_stats_by_segment_and_channel_combo_temp = segment_custom_event_stats_by_engagement_temp.groupby(['segment_name', 'segment_id', 'channel_combo', 'custom_event_name']).agg(num_users_completing_custom_event=(\n 'braze_id', 'nunique'), total_custom_events=('custom_event_count', 'sum'), avg_days_between_events=('days_between_events', 'mean'), avg_custom_event_per_day=('custom_event_per_day', 
'mean'))\n\nsegment_custom_event_stats_by_segment_and_channel_combo_temp = segment_custom_event_stats_by_segment_and_channel_combo_temp.reset_index()\nsegment_custom_event_stats_by_segment_and_channel_combo = pd.merge(segment_custom_event_stats_by_segment_and_channel_combo_temp, users_per_channel_combo_df,\n                  how='left',\n                  left_on=['segment_name', 'segment_id', 'channel_combo'],\n                  right_on=['segment_name', 'segment_id', 'channel_combo'])\nsegment_custom_event_stats_by_segment_and_channel_combo['custom_event_per_user'] = segment_custom_event_stats_by_segment_and_channel_combo.total_custom_events / \\\n    segment_custom_event_stats_by_segment_and_channel_combo.num_users\nsegment_custom_event_stats_by_segment_and_channel_combo['pct_users_completing_custom_event'] = segment_custom_event_stats_by_segment_and_channel_combo.num_users_completing_custom_event / \\\n    segment_custom_event_stats_by_segment_and_channel_combo.num_users\n\nsegment_custom_event_stats_by_segment_and_channel_combo = segment_custom_event_stats_by_segment_and_channel_combo[['segment_name', 'segment_id', 'channel_combo', 'custom_event_name','num_users_completing_custom_event', 'num_users',\n    'total_custom_events', 'pct_users_completing_custom_event', 'custom_event_per_user', 'avg_days_between_events', 'avg_custom_event_per_day']].set_index(['segment_name', 'segment_id', 'channel_combo'])\n", "_____no_output_____" ], [ "segment_custom_event_stats_by_segment_and_campaign_tags_df = segment_custom_event_stats_by_engagement_temp.explode(\n    'tags')\nsegment_custom_event_stats_by_segment_and_campaign_tags_temp = segment_custom_event_stats_by_segment_and_campaign_tags_df.groupby(['segment_name', 'segment_id', 'tags', 'custom_event_name']).agg(num_users_completing_custom_event=(\n    'braze_id', 'nunique'), total_custom_events=('custom_event_count', 'sum'), avg_days_between_events=('days_between_events', 'mean'), avg_custom_event_per_day=('custom_event_per_day', 'mean'))\n\nsegment_custom_event_stats_by_segment_and_campaign_tags_temp = segment_custom_event_stats_by_segment_and_campaign_tags_temp.reset_index()\nsegment_custom_event_stats_by_segment_and_campaign_tags = pd.merge(segment_custom_event_stats_by_segment_and_campaign_tags_temp, users_per_campaign_tags_df,\n                  how='left',\n                  left_on=['segment_name', 'segment_id', 'tags'],\n                  right_on=['segment_name', 'segment_id', 'tags'])\nsegment_custom_event_stats_by_segment_and_campaign_tags['custom_event_per_user'] = segment_custom_event_stats_by_segment_and_campaign_tags.total_custom_events / \\\n    segment_custom_event_stats_by_segment_and_campaign_tags.num_users\nsegment_custom_event_stats_by_segment_and_campaign_tags['pct_users_completing_custom_event'] = segment_custom_event_stats_by_segment_and_campaign_tags.num_users_completing_custom_event / \\\n    segment_custom_event_stats_by_segment_and_campaign_tags.num_users\n\nsegment_custom_event_stats_by_segment_and_campaign_tags = segment_custom_event_stats_by_segment_and_campaign_tags[[\n    'segment_name', 'segment_id', 'tags', 'custom_event_name','num_users_completing_custom_event', 'num_users', 'total_custom_events', 'pct_users_completing_custom_event', 'custom_event_per_user', 'avg_days_between_events', 'avg_custom_event_per_day']].set_index(['segment_name', 'segment_id', 'tags'])\n", "_____no_output_____" ] ], [ [ "## Custom Events Used in Campaigns\n\nThe codeblock below will return all custom events that are used in campaigns received by the selected segment.", "_____no_output_____" ] ], [ [ "campaign_details_custom_event_temp = 
campaign_details_df[[\n 'campaign_id', 'conversion_behaviors']]\ncampaign_details_custom_event_temp = campaign_details_custom_event_temp.dropna(\n subset=['conversion_behaviors'])\ncampaign_details_custom_event_temp = campaign_details_custom_event_temp.explode(\n 'conversion_behaviors')\n\ncampaign_details_custom_event_temp['custom_event_conversion_behavior'] = list(map(\n get_campaign_custom_event, campaign_details_custom_event_temp['conversion_behaviors']))\ncampaign_details_custom_event_temp = campaign_details_custom_event_temp.dropna(\n subset=['custom_event_conversion_behavior'])\ncampaign_details_custom_event = campaign_details_custom_event_temp[[\n 'campaign_id', 'custom_event_conversion_behavior']].drop_duplicates()\ncampaign_details_custom_event = campaign_details_custom_event.set_index(\n 'campaign_id')", "_____no_output_____" ] ], [ [ "# Segment Opt-In Rates\n\nThe codeblock below will return the opt-in rates for Push and Email for all users across the following platforms:\n\n- iOS\n- Android\n- Web", "_____no_output_____" ] ], [ [ "segment_opt_ins_temp = segment_opt_in_stats_pre_apps.explode('apps')\nsegment_opt_ins_temp = segment_opt_ins_temp.dropna(subset=['apps'])\nsegment_opt_ins_temp = segment_opt_ins_temp.explode('push_tokens')", "_____no_output_____" ], [ "segment_opt_ins_temp['notifications_enabled'] = list(\n map(get_notifications_enabled, segment_opt_ins_temp['push_tokens']))\nsegment_opt_ins_temp['token'] = list(\n map(get_token, segment_opt_ins_temp['push_tokens']))\nsegment_opt_ins_temp['push_token_platform'] = list(\n map(get_platform, segment_opt_ins_temp['push_tokens']))\nsegment_opt_ins_temp['app_platform'] = segment_opt_ins_temp['apps'].apply(\n lambda x: x.get('platform'))", "_____no_output_____" ], [ "segment_opt_ins_temp_android = segment_opt_ins_temp[segment_opt_ins_temp['app_platform'] == 'Android'].copy()\nsegment_opt_ins_temp_android['push_opted_in'] = segment_opt_ins_temp.apply(lambda x: True\n if x['notifications_enabled'] == True and x['token'] != \"None\" else False, axis=1)\nsegment_opt_ins_temp_android['email_opted_in'] = segment_opt_ins_temp.apply(lambda x: True\n if x['email_subscribe'] == 'opted_in' else False, axis=1)\n\n\nsegment_opt_ins_temp_ios = segment_opt_ins_temp[segment_opt_ins_temp['app_platform'] == 'iOS'].copy()\nsegment_opt_ins_temp_ios['push_opted_in'] = segment_opt_ins_temp.apply(lambda x: True\n if x['notifications_enabled'] == True and x['token'] != \"None\" else False, axis=1)\nsegment_opt_ins_temp_ios['email_opted_in'] = segment_opt_ins_temp.apply(lambda x: True\n if x['email_subscribe'] == 'opted_in' else False, axis=1)\n\nsegment_opt_ins_temp_web = segment_opt_ins_temp[segment_opt_ins_temp['app_platform'] == 'Web'].copy()\nsegment_opt_ins_temp_web['push_opted_in'] = segment_opt_ins_temp.apply(lambda x: True\n if x['notifications_enabled'] == True and x['token'] != \"None\" else False, axis=1)\nsegment_opt_ins_temp_web['email_opted_in'] = segment_opt_ins_temp.apply(lambda x: True\n if x['email_subscribe'] == 'opted_in' else False, axis=1)", "_____no_output_____" ], [ "segment_opt_ins_android_pre_agg = segment_opt_ins_temp_android.groupby(\n ['segment_id', 'segment_name', 'app_platform'])\nopt_ins_aggregator = {'push_opted_in': calc_engagement,\n 'email_opted_in': calc_engagement}\nsegment_opt_ins_android = segment_opt_ins_android_pre_agg.agg(\n opt_ins_aggregator)\nsegment_opt_ins_ios_pre_agg = segment_opt_ins_temp_ios.groupby(\n ['segment_id', 'segment_name', 'app_platform'])\nsegment_opt_ins_ios = 
segment_opt_ins_ios_pre_agg.agg(opt_ins_aggregator)\nsegment_opt_ins_web_pre_agg = segment_opt_ins_temp_web.groupby(\n ['segment_id', 'segment_name', 'app_platform'])\nsegment_opt_ins_web = segment_opt_ins_web_pre_agg.agg(opt_ins_aggregator)\nsegment_opt_ins = pd.concat(\n [segment_opt_ins_android, segment_opt_ins_ios, segment_opt_ins_web])", "_____no_output_____" ] ], [ [ "## Exporting Outputs to Excel\n\nPlease note that attempting to export dataframes that were not created will result in an error.", "_____no_output_____" ] ], [ [ "file_name = \"Segment Analytics {date}.xlsx\".format(date = datetime.now().date())\nwriter = pd.ExcelWriter(file_name, engine='xlsxwriter')\n\nengagement_by_segment.to_excel(writer, sheet_name='Eng. by Segment')\nengagement_by_segment_and_channel.to_excel(\n writer, sheet_name='Eng. by Channel')\nengagement_by_segment_and_channel_combo.to_excel(\n writer, sheet_name='Eng. by Channel Combo')\nengagement_by_segment_and_campaign_tag.to_excel(\n writer, sheet_name='Eng. by Campaign Tag')\n\nsegment_rolling_retention.to_excel(writer, sheet_name='Ret. by Segment')\nsegment_rolling_retention_by_engagement.to_excel(\n writer, sheet_name='Ret. by Channel')\nsegment_rolling_retention_by_channel_combo.to_excel(\n writer, sheet_name='Ret. by Channel Combo')\nsegment_rolling_retention_by_campaign_tag.to_excel(\n writer, sheet_name='Ret. by Campaign Tag')\n\n\nsegment_purchasing_stats.to_excel(writer, sheet_name='Purch. Stats by Segment')\nsegment_purchase_stats_by_channel.to_excel(\n writer, sheet_name='Purch. Stats by Channel')\nsegment_purchase_stats_by_channel_combo.to_excel(\n writer, sheet_name='Purch. Stats by Combo')\nsegment_purchase_stats_by_campaign_tag.to_excel(\n writer, sheet_name='Purch. Stats by Campaign Tag')\nsegment_purchase_retention.to_excel(writer, sheet_name='Purch. Ret by Segment')\nsegment_purchase_retention_by_channel.to_excel(\n writer, sheet_name='Purch. Ret by Channel')\nsegment_purchase_retention_by_channel_combo.to_excel(\n writer, sheet_name='Purch. Ret by Combo')\nsegment_purchase_retention_by_campaign_tags.to_excel(\n writer, sheet_name='Purch. Ret by Campaign Tag')\n\nsegment_sessions_stats.to_excel(writer, sheet_name='Sess. Stats by Segment')\nsegment_sessions_stats_by_channel.to_excel(\n writer, sheet_name='Sess. Stats by Channel')\nsegment_sessions_stats_by_channel_combo.to_excel(\n writer, sheet_name='Sess. Stats by Combo')\nsegment_sessions_stats_by_campaign_tag.to_excel(\n writer, sheet_name='Sess. Stats by Campaign Tag')\n\nsegment_custom_event_stats_by_segment.to_excel(\n writer, sheet_name='CE Stats by Segment')\nsegment_custom_event_stats_by_segment_and_channel.to_excel(\n writer, sheet_name='CE Stats by Channel')\nsegment_custom_event_stats_by_segment_and_channel_combo.to_excel(\n writer, sheet_name='CE Stats by Combo')\nsegment_custom_event_stats_by_segment_and_campaign_tags.to_excel(\n writer, sheet_name='CE Stats by Campaign Tag')\ncampaign_details_custom_event.to_excel(\n writer, sheet_name='CE Used in Campaigns')\n\nsegment_opt_ins.to_excel(writer, sheet_name='Opt-Ins by Segment')\n\nwriter.save()", "_____no_output_____" ] ] ]
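The rolling-retention tables built above all rely on the same trick: pivot users by the last day they were active, reverse the columns, take a cumulative sum, and reverse back, so each day holds the count of users still active on that day or later. Below is a minimal, self-contained sketch of that trick on toy data; every name and value in it is illustrative only and not drawn from the dataset above.

```python
import pandas as pd

# Toy input: one row per user, with the last day (relative to day 0) on which they were active.
toy = pd.DataFrame({"braze_id": ["a", "b", "c", "d"], "day_num": [0, 2, 2, 5]})

# Count distinct users by their last active day.
pivot = pd.pivot_table(toy, values="braze_id", columns="day_num", aggfunc="nunique").fillna(0)

# Reverse the columns, cumulative-sum, and reverse back: column D now holds the
# number of users whose last activity was on day D or later (i.e. retained through day D).
retained = pivot[pivot.columns[::-1]].cumsum(axis=1)
retained = retained[retained.columns[::-1]]

# Divide by the day-0 count to turn the counts into retention rates.
rates = retained.div(retained[0].iloc[0])
print(rates)  # day 0: 1.0, day 2: 0.75, day 5: 0.25
```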
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
cb984dc2957d3a1984d8188193e944f79e95b47d
19,265
ipynb
Jupyter Notebook
Random Forest Algo.ipynb
chinmoyee-A/ML-Basics
11ee4aa4086008b0191c851783c767cc7b1e0fd9
[ "CC0-1.0" ]
1
2020-09-09T06:19:09.000Z
2020-09-09T06:19:09.000Z
Random Forest Algo.ipynb
chinmoyee-A/ML-Basics
11ee4aa4086008b0191c851783c767cc7b1e0fd9
[ "CC0-1.0" ]
null
null
null
Random Forest Algo.ipynb
chinmoyee-A/ML-Basics
11ee4aa4086008b0191c851783c767cc7b1e0fd9
[ "CC0-1.0" ]
null
null
null
71.884328
12,588
0.775863
[ [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import RandomForestRegressor", "_____no_output_____" ], [ "data = pd.read_csv(r'C:\\Users\\KIIT\\Desktop\\ML\\.pyb\\Position_Salaries.csv')", "_____no_output_____" ], [ "data.head(10)", "_____no_output_____" ], [ "# feature: position level; target: salary\nreal_x = data.iloc[:, 1:2].values\nreal_y = data.iloc[:, 2].values", "_____no_output_____" ], [ "reg = RandomForestRegressor(n_estimators=400, random_state=0)\nreg.fit(real_x, real_y)", "_____no_output_____" ], [ "y_pred = reg.predict([[6.5]])", "_____no_output_____" ], [ "y_pred", "_____no_output_____" ], [ "x_grid = np.arange(min(real_x), max(real_x), 0.01)  # fine grid for plotting the piecewise-constant predictions\nx_grid = x_grid.reshape((len(x_grid), 1))\nplt.scatter(real_x, real_y, color='blue')\nplt.plot(x_grid, reg.predict(x_grid), color='green')\nplt.title('Random Forest Regressor')\nplt.xlabel('Position level')\nplt.ylabel('Salary')\nplt.show()", "_____no_output_____" ] ] ]
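The staircase shape of the plot above follows from each tree predicting a constant value within the regions it learned. A hedged sketch of the same idea on synthetic stand-in data (the Position_Salaries.csv used above is not available here, so the numbers below are made up):

```python
import numpy as np
from sklearn.ensemble import RandomForestRegressor

# Synthetic stand-in for the position/salary data: 10 levels, roughly exponential pay.
x = np.arange(1, 11).reshape(-1, 1)
y = 45000 * 1.8 ** (x.ravel() - 1)

reg = RandomForestRegressor(n_estimators=400, random_state=0)
reg.fit(x, y)

# A forest of piecewise-constant trees is itself piecewise-constant,
# which is why the green prediction curve above looks like a staircase.
print(reg.predict([[6.5]]))  # roughly between the level-6 and level-7 salaries
print(reg.score(x, y))       # training R^2: optimistic, since nothing is held out
```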
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb98503392d5b4d0d1d90a771d6ddea96f625a7f
21,997
ipynb
Jupyter Notebook
Models/Transformer_Pytorch.ipynb
Raian-Rahman/Pytorch-Practice
a57ffdcbfc4fa2fb71aa7dd6c8b602b8960f94ea
[ "MIT" ]
null
null
null
Models/Transformer_Pytorch.ipynb
Raian-Rahman/Pytorch-Practice
a57ffdcbfc4fa2fb71aa7dd6c8b602b8960f94ea
[ "MIT" ]
null
null
null
Models/Transformer_Pytorch.ipynb
Raian-Rahman/Pytorch-Practice
a57ffdcbfc4fa2fb71aa7dd6c8b602b8960f94ea
[ "MIT" ]
null
null
null
60.597796
1,579
0.561531
[ [ [ "### Importing the libraries\n", "_____no_output_____" ] ], [ [ "import torch\nimport torch.nn as nn\n\nclass SelfAttention(nn.Module):\n    def __init__(self, embed_size, heads):\n        \"\"\"\n        We split the embedding size across the heads: with a 256-dimensional embedding and 8 heads, each head works on a 32-dimensional slice.\n        \"\"\"\n        super(SelfAttention, self).__init__()\n        self.embed_size = embed_size\n        self.heads = heads\n\n        assert self.embed_size % self.heads == 0, \"Embedding size must be divisible by the number of heads\"\n\n        self.head_dim = embed_size//heads\n\n        # Define the value, key and query projections as Linear layers.\n        # bias=False because no bias is needed for these projections.\n        self.values = nn.Linear(self.head_dim, self.head_dim, bias=False)\n        self.keys = nn.Linear(self.head_dim, self.head_dim, bias=False)\n        self.queries = nn.Linear(self.head_dim, self.head_dim, bias=False)\n        self.fc_out = nn.Linear(heads*self.head_dim, embed_size)\n\n    def forward(self, values, keys, queries, mask):\n        N = queries.shape[0]\n        value_len, key_len, query_len = values.shape[1], keys.shape[1], queries.shape[1]\n\n        # split the embedding into self.heads pieces\n        values = values.reshape(N, value_len, self.heads, self.head_dim)\n        keys = keys.reshape(N, key_len, self.heads, self.head_dim)\n        queries = queries.reshape(N, query_len, self.heads, self.head_dim)\n\n        values = self.values(values)\n        keys = self.keys(keys)\n        queries = self.queries(queries)\n\n        # step 1: multiply queries and keys\n        # queries shape: (N, query_len, heads, head_dim)\n        # keys shape: (N, key_len, heads, head_dim)\n        # energy shape: (N, heads, query_len, key_len), i.e. target length by source length\n        # einsum is handy here because it expresses this batched matrix multiplication across the extra dimensions\n        energy = torch.einsum(\"nqhd,nkhd->nhqk\", [queries, keys])\n\n        if mask is not None:\n            energy = energy.masked_fill(mask == 0, float(\"-1e20\"))\n\n        attention = torch.softmax(energy/(self.embed_size**(1/2)), dim=3)\n\n        # attention shape: (N, heads, query_len, key_len)\n        # values shape: (N, value_len, heads, head_dim)\n        # out shape: (N, query_len, heads, head_dim)\n        out = torch.einsum(\"nhql,nlhd->nqhd\", [attention, values])\n\n        # concatenation: merge the heads back into a single embedding\n        out = out.reshape(N, query_len, self.heads*self.head_dim)\n\n        out = self.fc_out(out)\n        return out", "_____no_output_____" ] ], [ [ "### Transformer Block", "_____no_output_____" ] ], [ [ "class TransformerBlock(nn.Module):\n    \"\"\"\n    embedding -> multiheaded_attention -> add&norm -> feed forward -> add&norm\n    \"\"\"\n    def __init__(self, embed_size, heads, dropout, forward_expansion):\n        super(TransformerBlock, self).__init__()\n        self.attention = SelfAttention(embed_size=embed_size, heads=heads)\n        self.norm1 = nn.LayerNorm(embed_size)  # LayerNorm is similar to BatchNorm but normalises per example (with a bit more computation)\n        self.norm2 = nn.LayerNorm(embed_size)\n\n        self.feed_forward = nn.Sequential(\n            nn.Linear(embed_size, forward_expansion*embed_size),\n            nn.ReLU(),\n            nn.Linear(forward_expansion*embed_size, embed_size)\n        )\n        self.dropout = nn.Dropout(dropout)\n\n    def forward(self, value, key, query, mask):\n        attention = self.attention(value, key, query, mask)\n\n        # skip connection: query serves as the residual input\n        x = self.dropout(self.norm1(attention + query))\n        forward = self.feed_forward(x)\n        out = self.dropout(self.norm2(forward + x))\n        return out", "_____no_output_____" ], [ "class Encoder(nn.Module):\n    def __init__(self, src_vocab_size, embed_size, num_layers, heads, device, forward_expansion, dropout, max_length):\n        \"\"\"\n        The Encoder takes many hyperparameters. They are explained below:\n        ---------------------------------------------------------------------------------------------------\n        src_vocab_size = size of the source vocabulary\n        embed_size = dimension of the embedding\n        num_layers = number of transformer layers in the encoder\n        heads = number of attention heads\n        device = the device on which we want to train\n        forward_expansion = the ratio by which the feed-forward layer expands the embedding size\n        dropout = dropout probability\n        max_length = maximum sentence length, needed for the positional embedding.\n        The positional embedding is required because the transformer has no recurrent units:\n        it injects order information while still letting the model process the whole sequence in parallel.\n        \"\"\"\n        super(Encoder, self).__init__()\n        self.embed_size = embed_size\n        self.device = device\n        self.word_embedding = nn.Embedding(src_vocab_size, embed_size)\n        self.positional_embedding = nn.Embedding(max_length, embed_size)\n\n        self.layers = nn.ModuleList(\n            [\n                TransformerBlock(embed_size=embed_size, heads=heads, dropout=dropout, forward_expansion=forward_expansion) for _ in range(num_layers)\n            ]\n        )\n        self.dropout = nn.Dropout(dropout)\n\n    def forward(self, x, mask):\n        N, seq_length = x.shape\n\n        positions = torch.arange(0, seq_length).expand(N, seq_length).to(self.device)\n\n        out = self.dropout(self.word_embedding(x) + self.positional_embedding(positions))\n\n        for layer in self.layers:\n            out = layer(out, out, out, mask)\n\n        return out", "_____no_output_____" ], [ "class DecoderBlock(nn.Module):\n    def __init__(self, embed_size, heads, forward_expansion, dropout, device):\n        super(DecoderBlock, self).__init__()\n        self.attention = SelfAttention(embed_size, heads)\n        self.norm = nn.LayerNorm(embed_size)\n        self.transformer_block = TransformerBlock(embed_size, heads, dropout, forward_expansion)\n        self.dropout = nn.Dropout(dropout)\n\n    def forward(self, x, value, key, src_mask, trg_mask):\n        \"\"\"\n        The DecoderBlock takes several parameters. They are explained below:\n        ----------------------------------------------------------------------------\n        x : input\n        value, key : encoder outputs, used for the cross-attention in the transformer block\n        src_mask: source mask. With more than one example in a batch, sentences are padded to equal length; the mask also lets us skip unnecessary computation on the padded positions.\n        trg_mask: target (look-ahead) mask, which ensures each position can only attend to earlier positions.\n        \"\"\"\n        attention = self.attention(x, x, x, trg_mask)\n        query = self.dropout(self.norm(attention + x))\n        out = self.transformer_block(value, key, query, src_mask)\n\n        return out\n", "_____no_output_____" ], [ "class Decoder(nn.Module):\n    def __init__(self, trg_vocab_size, embed_size, num_layers, heads, forward_expansion, dropout, device, max_length):\n        super(Decoder, self).__init__()\n        self.device = device\n        self.word_embedding = nn.Embedding(trg_vocab_size, embed_size)\n        self.positional_embedding = nn.Embedding(max_length, embed_size)\n\n        self.layers = nn.ModuleList(\n            [DecoderBlock(embed_size, heads, forward_expansion, dropout, device) for _ in range(num_layers)]\n        )\n\n        self.fc_out = nn.Linear(embed_size, trg_vocab_size)\n        self.dropout = nn.Dropout(dropout)\n\n    def forward(self, x, enc_out, src_mask, trg_mask):\n        N, seq_length = x.shape\n        positions = torch.arange(0, seq_length).expand(N, seq_length).to(self.device)\n        x = self.dropout(self.word_embedding(x) + self.positional_embedding(positions))\n\n        for layer in self.layers:\n            x = layer(x, enc_out, enc_out, src_mask, trg_mask)\n\n        out = self.fc_out(x)\n\n        return out\n", "_____no_output_____" ], [ "class Transformer(nn.Module):\n    def __init__(self, src_vocab_size, trg_vocab_size, src_pad_idx, trg_pad_idx, embed_size=256, num_layers=6, forward_expansion=4, heads=8, dropout=0, device=\"cuda\", max_length=100):\n        super(Transformer, self).__init__()\n        self.encoder = Encoder(src_vocab_size, embed_size, num_layers, heads, device, forward_expansion, dropout, max_length)\n        self.decoder = Decoder(trg_vocab_size, embed_size, num_layers, heads, forward_expansion, dropout, device, max_length)\n        self.src_pad_idx = src_pad_idx\n        self.trg_pad_idx = trg_pad_idx\n\n        self.device = device\n\n    def make_src_mask(self, src):\n        # (N, src_len) -> (N, 1, 1, src_len): hide the padding positions\n        src_mask = (src != self.src_pad_idx).unsqueeze(1).unsqueeze(2)\n        return src_mask.to(self.device)\n\n    def make_trg_mask(self, trg):\n        N, trg_len = trg.shape\n        # lower-triangular look-ahead mask so position i only sees positions <= i\n        trg_mask = torch.tril(torch.ones((trg_len, trg_len))).expand(N, 1, trg_len, trg_len)\n\n        return trg_mask.to(self.device)\n\n    def forward(self, src, trg):\n        src_mask = self.make_src_mask(src)\n        trg_mask = self.make_trg_mask(trg)\n\n        enc_src = self.encoder(src, src_mask)\n        out = self.decoder(trg, enc_src, src_mask, trg_mask)\n\n        return out\n", "_____no_output_____" ], [ "if __name__ == '__main__':\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n    x = torch.tensor([[1, 5, 6, 4, 3, 9, 5, 2, 0], [1, 8, 7, 3, 4, 5, 6, 7, 2]]).to(device)\n    trg = torch.tensor([[1, 7, 4, 3, 5, 9, 2, 0], [1, 8, 7, 3, 4, 5, 6, 2]]).to(device)\n\n    src_pad_idx = 0\n    trg_pad_idx = 0\n    src_vocab_size = 10\n    trg_vocab_size = 10\n\n    model = Transformer(src_vocab_size, trg_vocab_size, src_pad_idx, trg_pad_idx, device=device).to(device)\n\n    out = model(x, trg[:, :-1])\n    print(out.shape)", "_____no_output_____" ] ] ]
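The two einsum contractions in SelfAttention are the only non-obvious steps. A small shape check (the sizes below are arbitrary, chosen only for illustration) confirming that the score einsum matches an explicit permute-and-matmul:

```python
import torch

# Arbitrary small sizes: batch N=2, query/key lengths 5/6, 4 heads of dimension 8.
N, q_len, k_len, heads, head_dim = 2, 5, 6, 4, 8
queries = torch.randn(N, q_len, heads, head_dim)
keys = torch.randn(N, k_len, heads, head_dim)

# energy[n, h, q, k] = sum_d queries[n, q, h, d] * keys[n, k, h, d]
energy = torch.einsum("nqhd,nkhd->nhqk", [queries, keys])
print(energy.shape)  # torch.Size([2, 4, 5, 6])

# The same contraction spelled out with permute + matmul, as a cross-check.
alt = torch.matmul(queries.permute(0, 2, 1, 3), keys.permute(0, 2, 3, 1))
print(torch.allclose(energy, alt, atol=1e-6))  # expected: True
```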
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cb986c41c53d7463cc21f22a3e894fef01c03dea
19,464
ipynb
Jupyter Notebook
jupyter/BERTQA.ipynb
tspannhw/djl
fadf64824036d9c570c7a98a559f92989639f47a
[ "Apache-2.0" ]
1
2019-12-02T12:59:24.000Z
2019-12-02T12:59:24.000Z
jupyter/BERTQA.ipynb
vrakesh/djl
f59460ceddea68bcf947e24e7af88e927547f38d
[ "Apache-2.0" ]
null
null
null
jupyter/BERTQA.ipynb
vrakesh/djl
f59460ceddea68bcf947e24e7af88e927547f38d
[ "Apache-2.0" ]
null
null
null
39.40081
755
0.605734
[ [ [ "# DJL BERT Inference Demo\n\n## Introduction\n\nIn this tutorial, you walk through running inference using DJL on a [BERT](https://towardsdatascience.com/bert-explained-state-of-the-art-language-model-for-nlp-f8b21a9b6270) QA model trained with MXNet. \nYou can provide a question and a paragraph containing the answer to the model. The model is then able to find the best answer from the answer paragraph.\n\nExample:\n```text\nQ: When did BBC Japan start broadcasting?\n```\n\nAnswer paragraph:\n```text\nBBC Japan was a general entertainment channel, which operated between December 2004 and April 2006.\nIt ceased operations after its Japanese distributor folded.\n```\nAnd it picked the right answer:\n```text\nA: December 2004\n```\n", "_____no_output_____" ], [ "## Preparation\n\nThis tutorial requires the installation of the Java Kernel. To install the Java Kernel, see the [README](https://github.com/awslabs/djl/blob/master/jupyter/README.md).", "_____no_output_____" ] ], [ [ "%maven ai.djl:api:0.2.0\n%maven ai.djl.mxnet:mxnet-engine:0.2.0\n%maven ai.djl:repository:0.2.0\n%maven ai.djl.mxnet:mxnet-model-zoo:0.2.0\n%maven org.slf4j:slf4j-api:1.7.26\n%maven org.slf4j:slf4j-simple:1.7.26\n%maven net.java.dev.jna:jna:5.3.0", "_____no_output_____" ] ], [ [ "### Include MXNet engine dependency\n\nThis tutorial uses the MXNet engine as its backend. MXNet has different [build flavors](https://mxnet.apache.org/get_started?version=v1.5.1&platform=linux&language=python&environ=pip&processor=cpu), and they are platform specific.\nPlease read [here](https://github.com/awslabs/djl/blob/master/examples/README.md#engine-selection) for how to select the MXNet engine flavor.", "_____no_output_____" ] ], [ [ "String classifier = System.getProperty(\"os.name\").startsWith(\"Mac\") ? \"osx-x86_64\" : \"linux-x86_64\";\n\n%maven ai.djl.mxnet:mxnet-native-mkl:jar:${classifier}:1.6.0-a", "_____no_output_____" ] ], [ [ "### Import Java packages by running the following:", "_____no_output_____" ] ], [ [ "import java.io.*;\nimport java.nio.charset.*;\nimport java.nio.file.*;\nimport java.util.*;\nimport com.google.gson.*;\nimport com.google.gson.annotations.*;\nimport ai.djl.*;\nimport ai.djl.inference.*;\nimport ai.djl.metric.*;\nimport ai.djl.mxnet.zoo.*;\nimport ai.djl.mxnet.zoo.nlp.qa.*;\nimport ai.djl.repository.zoo.*;\nimport ai.djl.ndarray.*;\nimport ai.djl.ndarray.types.*;\nimport ai.djl.training.util.*;\nimport ai.djl.translate.*;\nimport ai.djl.util.*;\n", "_____no_output_____" ] ], [ [ "Now that all of the prerequisites are complete, start writing code to run inference with this example.\n\n## Load the model and input\n\nThe model requires four inputs:\n\n- word indices: The index of each word in a sentence\n- word types: The type index of the word. All Questions will be labelled with 0 and all Answers will be labelled with 1.\n- sequence length: You need to limit the length of the input. In this case, the length is 384\n- valid length: The actual length of the question and answer tokens\n\n**First, load the input**\n", "_____no_output_____" ] ], [ [ "var question = \"When did BBC Japan start broadcasting?\";\nvar resourceDocument = \"BBC Japan was a general entertainment Channel.\\n\" +\n    \"Which operated between December 2004 and April 2006.\\n\" +\n    \"It ceased operations after its Japanese distributor folded.\";\n\nQAInput input = new QAInput(question, resourceDocument, 384);", "_____no_output_____" ] ], [ [ "Then load the model and vocabulary. 
Create a variable `model` by using the `ModelZoo` as shown in the following code. ", "_____no_output_____" ] ], [ [ "Map<String, String> criteria = new ConcurrentHashMap<>();\ncriteria.put(\"backbone\", \"bert\");\ncriteria.put(\"dataset\", \"book_corpus_wiki_en_uncased\");\nZooModel<QAInput, String> model = MxModelZoo.BERT_QA.loadModel(criteria, new ProgressBar());", "_____no_output_____" ] ], [ [ "## Run inference\nOnce the model is loaded, you can call `Predictor` and run inference as follows", "_____no_output_____" ] ], [ [ "Predictor<QAInput, String> predictor = model.newPredictor();\nString answer = predictor.predict(input);\nanswer", "_____no_output_____" ] ], [ [ "Running inference on DJL is that easy. In the example, you use a model from the `ModelZoo`. However, you can also load the model on your own and use custom classes as the input and output. The process for that is illustrated in greater detail later in this tutorial. ", "_____no_output_____" ], [ "## Dive deep into Translator\n\nInference in deep learning is the process of predicting the output for a given input based on a pre-defined model. \nDJL abstracts away the whole process for ease of use. It can load the model, perform inference on the input, and provide \noutput. DJL also allows you to provide user-defined inputs. The workflow looks like the following:\n\n![image](https://github.com/awslabs/djl/blob/master/examples/docs/img/workFlow.png?raw=true)\n\nThe red block (\"Images\") in the workflow is the input that DJL expects from you. The green block (\"Images \nbounding box\") is the output that you expect. Because DJL does not know which input to expect and which output format you prefer, DJL provides the `Translator` interface so you can define your own \ninput and output. \n\nThe `Translator` interface encompasses the two white blocks: Pre-processing and Post-processing. The pre-processing \ncomponent converts the user-defined input objects into an NDList, so that the `Predictor` in DJL can understand the \ninput and make its prediction. Similarly, the post-processing block receives an NDList as the output from the \n`Predictor`. The post-processing block allows you to convert the output from the `Predictor` to the desired output \nformat. \n\n### Pre-processing\n\nNow, you need to convert the sentences into tokens. You can use `BertDataParser.tokenizer` to convert questions and answers into tokens. Then, use `BertDataParser.formTokens` to create Bert-formatted tokens. Once you have properly formatted tokens, use `parser.token2idx` to create the indices. \n\nThe following code block converts the question and answer defined earlier into Bert-formatted tokens and creates word types for the tokens. ", "_____no_output_____" ] ], [ [ "// Create token lists for question and answer\nList<String> tokenQ = BertDataParser.tokenizer(question.toLowerCase());\nList<String> tokenA = BertDataParser.tokenizer(resourceDocument.toLowerCase());\nint validLength = tokenQ.size() + tokenA.size();\nSystem.out.println(\"Question Token: \" + tokenQ);\nSystem.out.println(\"Answer Token: \" + tokenA);\nSystem.out.println(\"Valid length: \" + validLength);", "_____no_output_____" ] ], [ [ "Normally, words/sentences are represented as indices instead of Strings for training. They typically work like a vector in an n-dimensional space. In this case, you need to map them into indices. 
The form tokens also pad the sentence to the required length.", "_____no_output_____" ] ], [ [ "// Create Bert-formatted tokens\nList<String> tokens = BertDataParser.formTokens(tokenQ, tokenA, 384);\n// Convert tokens into indices in the vocabulary\nBertDataParser parser = model.getArtifact(\"vocab.json\", BertDataParser::parse);\nList<Integer> indices = parser.token2idx(tokens);", "_____no_output_____" ] ], [ [ "Finally, the model needs to understand which part is the Question and which part is the Answer. Mask the tokens as follows:\n```\n[Question tokens...AnswerTokens...padding tokens] => [000000...11111....0000]\n```", "_____no_output_____" ] ], [ [ "// Get token types\nList<Float> tokenTypes = BertDataParser.getTokenTypes(tokenQ, tokenA, 384);", "_____no_output_____" ] ], [ [ "To properly convert them into `float[]` for `NDArray` creation, here is the helper function:", "_____no_output_____" ] ], [ [ "/**\n * Convert a List of Number to float array.\n *\n * @param list the list to be converted\n * @return float array\n */\npublic static float[] toFloatArray(List<? extends Number> list) {\n    float[] ret = new float[list.size()];\n    int idx = 0;\n    for (Number n : list) {\n        ret[idx++] = n.floatValue();\n    }\n    return ret;\n}\n\nfloat[] indicesFloat = toFloatArray(indices);\nfloat[] types = toFloatArray(tokenTypes);", "_____no_output_____" ] ], [ [ "Now that you have everything you need, you can create an NDList and populate all of the inputs you formatted earlier. You're done with pre-processing! \n\n#### Construct `Translator`\n\nYou need to do this processing within an implementation of the `Translator` interface. `Translator` is designed to do pre-processing and post-processing. You must define the input and output objects. It requires you to override the following two methods:\n- `public NDList processInput(TranslatorContext ctx, I)`\n- `public String processOutput(TranslatorContext ctx, O)`\n\nEvery translator takes in input and returns output in the form of generic objects. In this case, the translator takes input in the form of `QAInput` (I) and returns output as a `String` (O). `QAInput` is just an object that holds the question and answer; we have prepared the Input class for you.", "_____no_output_____" ], [ "Armed with the needed knowledge, you can write an implementation of the `Translator` interface. `BertTranslator` uses the code snippets explained previously to implement the `processInput` method. For more information, see [`NDManager`](https://javadoc.djl.ai/api/0.2.0/ai/djl/ndarray/NDManager.html).\n\n```\nmanager.create(Number[] data, Shape)\nmanager.create(Number[] data)\n```\n\nThe `Shape` for `data0` and `data1` is (num_of_batches, sequence_length). 
For `data2`, it is just 1.", "_____no_output_____" ] ], [ [ "\npublic class BertTranslator implements Translator<QAInput, String> {\n    private BertDataParser parser;\n    private List<String> tokens;\n    private int seqLength;\n\n    BertTranslator(BertDataParser parser) {\n        this.parser = parser;\n        this.seqLength = 384;\n    }\n\n    @Override\n    public Batchifier getBatchifier() {\n        return null;\n    }\n\n    @Override\n    public NDList processInput(TranslatorContext ctx, QAInput input) throws IOException {\n        BertDataParser parser = ctx.getModel().getArtifact(\"vocab.json\", BertDataParser::parse);\n        // Pre-processing - tokenize sentence\n        // Create token lists for question and answer\n        // (note: this reuses the notebook-level question/resourceDocument variables defined earlier)\n        List<String> tokenQ = BertDataParser.tokenizer(question.toLowerCase());\n        List<String> tokenA = BertDataParser.tokenizer(resourceDocument.toLowerCase());\n\n        // Calculate valid length (length(Question tokens) + length(resourceDocument tokens))\n        var validLength = tokenQ.size() + tokenA.size();\n\n        // Create Bert-formatted tokens\n        tokens = BertDataParser.formTokens(tokenQ, tokenA, 384);\n\n        if (tokens == null) {\n            throw new IllegalStateException(\"tokens is not defined\");\n        }\n\n        // Convert tokens into indices in the vocabulary\n        List<Integer> indices = parser.token2idx(tokens);\n        // Get token types\n        List<Float> tokenTypes = BertDataParser.getTokenTypes(tokenQ, tokenA, 384);\n\n        NDManager manager = ctx.getNDManager();\n\n        // Using the manager created, create NDArrays for the indices, types, and valid length,\n        // in that order. The type of the NDArrays should all be float\n        NDArray indicesNd = manager.create(toFloatArray(indices), new Shape(1, 384));\n        indicesNd.setName(\"data0\");\n        NDArray typesNd = manager.create(toFloatArray(tokenTypes), new Shape(1, 384));\n        typesNd.setName(\"data1\");\n        NDArray validLengthNd = manager.create(new float[]{validLength});\n        validLengthNd.setName(\"data2\");\n\n        NDList list = new NDList(3);\n        list.add(indicesNd);\n        list.add(typesNd);\n        list.add(validLengthNd);\n\n        return list;\n    }\n\n    @Override\n    public String processOutput(TranslatorContext ctx, NDList list) {\n        NDArray array = list.singletonOrThrow();\n        NDList output = array.split(2, 2);\n        // Get the formatted logits result\n        NDArray startLogits = output.get(0).reshape(new Shape(1, -1));\n        NDArray endLogits = output.get(1).reshape(new Shape(1, -1));\n        // Get probability distributions\n        NDArray startProb = startLogits.softmax(-1);\n        NDArray endProb = endLogits.softmax(-1);\n        int startIdx = (int) startProb.argMax(1).getFloat();\n        int endIdx = (int) endProb.argMax(1).getFloat();\n        return tokens.subList(startIdx, endIdx + 1).toString();\n    }\n}", "_____no_output_____" ] ], [ [ "Congrats! You have created your first Translator! We have pre-filled the `processOutput()` that will process the `NDList` and return it in a desired format. `processInput()` and `processOutput()` offer the flexibility to get the predictions from the model in any format you desire. \n\n\nWith the Translator implemented, you need to bring up the predictor that uses your `Translator` to start making predictions. You can find the usage for `Predictor` in the [Predictor Javadoc](https://javadoc.djl.ai/api/0.2.0/ai/djl/inference/Predictor.html). 
Create a translator and use the `question` and `resourceDocument` provided previously.", "_____no_output_____" ] ], [ [ "String predictResult = null;\n\nQAInput input = new QAInput(question, resourceDocument, 384);\nBertTranslator translator = new BertTranslator(parser);\n\n// Create a Predictor and use it to predict the output\ntry (Predictor<QAInput, String> predictor = model.newPredictor(translator)) {\n predictResult = predictor.predict(input);\n}\n\nSystem.out.println(question);\nSystem.out.println(predictResult);", "_____no_output_____" ] ], [ [ "Based on the input, the following result will be shown:\n```\n[december, 2004]\n```\nThat's it! \n\nYou can try with more questions and answers. Here are the samples:\n\n**Answer Material**\n\nThe Normans (Norman: Nourmands; French: Normands; Latin: Normanni) were the people who in the 10th and 11th centuries gave their name to Normandy, a region in France. They were descended from Norse (\"Norman\" comes from \"Norseman\") raiders and pirates from Denmark, Iceland and Norway who, under their leader Rollo, agreed to swear fealty to King Charles III of West Francia. Through generations of assimilation and mixing with the native Frankish and Roman-Gaulish populations, their descendants would gradually merge with the Carolingian-based cultures of West Francia. The distinct cultural and ethnic identity of the Normans emerged initially in the first half of the 10th century, and it continued to evolve over the succeeding centuries.\n\n\n**Question**\n\nQ: When were the Normans in Normandy?\nA: 10th and 11th centuries\n\nQ: In what country is Normandy located?\nA: france", "_____no_output_____" ] ] ]
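The arithmetic inside `processOutput` is easy to check outside Java. Below is a language-neutral NumPy sketch of that post-processing step; the token list and logit values are made up purely for illustration and are not the model's real outputs.

```python
import numpy as np

tokens = ["when", "did", "bbc", "japan", "start", "broadcasting", "december", "2004"]
start_logits = np.array([0.1, 0.0, 0.2, 0.1, 0.0, 0.1, 4.0, 0.5])
end_logits = np.array([0.0, 0.1, 0.1, 0.2, 0.1, 0.0, 0.5, 4.0])

def softmax(v):
    e = np.exp(v - v.max())
    return e / e.sum()

# argmax of the softmax equals argmax of the raw logits; the softmax is kept
# only to mirror the probability step in processOutput above.
start_idx = int(np.argmax(softmax(start_logits)))
end_idx = int(np.argmax(softmax(end_logits)))
print(tokens[start_idx:end_idx + 1])  # ['december', '2004']
```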
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb989278ad23e070009d9174016aac2ff5a7e3a5
31,016
ipynb
Jupyter Notebook
bubble/notebooks/Run-Bubble-Train.DQN.ipynb
cp105/ai611_project
2d0bbd8052a6425eefc7301e18ddf9ad4404a8fb
[ "Apache-2.0" ]
1
2020-05-18T03:18:11.000Z
2020-05-18T03:18:11.000Z
bubble/notebooks/Run-Bubble-Train.DQN.ipynb
cp105/ai611_project
2d0bbd8052a6425eefc7301e18ddf9ad4404a8fb
[ "Apache-2.0" ]
null
null
null
bubble/notebooks/Run-Bubble-Train.DQN.ipynb
cp105/ai611_project
2d0bbd8052a6425eefc7301e18ddf9ad4404a8fb
[ "Apache-2.0" ]
1
2020-11-02T08:46:32.000Z
2020-11-02T08:46:32.000Z
43.318436
1,666
0.602463
[ [ [ "# Run Train of Bubble-Agent (DQN)\n\n- Team: TToBoT\n- Member: { Sejun, Steve, Victor } @kaist\n\n## Objective\n\n- run training simultaneously w/ notebook\n- to compare the performance of training\n\n\n## For Competition\n\n1. prepare the final trained IQN Model (checkpoint w/ 100 iteration)\n2. need to customize env.step()\n    - it should work with only 1 life. (later, we can use 3 lives)\n    - need to enumerate all stage (1~99) level w/ at least 10 (250,000 x 10) (8hr x 100 = 800/33d) iteration. (model should be same)\n    - using origin step(). do train w/ random level, do loop (iteration) forever! (final training)\n3. in final competition, it will load the latest checkpoint for initial Model parameters.\n4. win the competition!!\n", "_____no_output_____" ] ], [ [ "import os, sys, gin\n\n# use parent folder as shared lib path..\nif \"../\" not in sys.path:\n    sys.path.append(\"../\")\n\n# major libraries\nimport gin.tf\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom absl import flags\nimport numpy as np\nimport tensorflow as tf\n\n# show tf version.\nprint('! tf.ver = {}'.format(tf.__version__))", "! tf.ver = 1.15.2\n" ], [ "# Globals\n# BASE_PATH = './!experimental_results_bubble/run3'\n\n# let Dopamine .py files be imported as modules in the Jupyter notebook\nmodule_path = os.path.abspath(os.path.join('../dopamine'))\nif module_path not in sys.path:\n    sys.path.append(module_path)\n    print(module_path)", "/tf/dopamine\n" ], [ "# try to load `Dopamine` libraries\nimport bubble\nfrom dopamine.colab import utils as colab_utils", "_____no_output_____" ] ], [ [ "## Train Bubble w/ DQN", "_____no_output_____" ] ], [ [ "# @title Load the configuration for DQN.\n\n# DQN_PATH = os.path.join(BASE_PATH, 'rainbow')\n# Modified from dopamine/agents/dqn/config/dqn_cartpole.gin\n\n# CONFIG FOR DQN (see @bubble/dqn_nature.gin)\ngin_config = '''\n# run_experiment\n# python -um dopamine.discrete_domains.train --base_dir=/tmp/bubble --gin_files='bubble/dqn_nature.gin'\n# python -um dopamine.discrete_domains.train --base_dir=/tmp/bubble --gin_files='bubble/dqn_nature.gin' --gin_bindings='DQNAgent.tf_device=\"/cpu:*\"'\n \n\n# Hyperparameters used in Mnih et al. 
(2015).\nimport dopamine.discrete_domains.atari_lib\nimport dopamine.discrete_domains.run_experiment\nimport dopamine.agents.dqn.dqn_agent\nimport dopamine.replay_memory.circular_replay_buffer\nimport gin.tf.external_configurables\n\nimport bubble.retro_lib\nimport bubble.bubble_agent\n\nretro_lib.create_retro_environment.game_name = 'BubbleBobble'\nretro_lib.create_retro_environment.level = 1\nRunner.create_environment_fn = @retro_lib.create_retro_environment\ncreate_agent.agent_name = 'dqn'\nRetroPreprocessing.wall_offset = 0\n\nDQNAgent.gamma = 0.99\nDQNAgent.update_horizon = 1\nDQNAgent.min_replay_history = 50000 # agent steps\nDQNAgent.update_period = 4\nDQNAgent.target_update_period = 10000 # agent steps\nDQNAgent.epsilon_train = 0.1\nDQNAgent.epsilon_eval = 0.05\nDQNAgent.epsilon_decay_period = 1000000 # agent steps\nDQNAgent.tf_device = '/gpu:1' # use '/cpu:*' for non-GPU version\nDQNAgent.optimizer = @tf.train.RMSPropOptimizer()\n\ntf.train.RMSPropOptimizer.learning_rate = 0.00025\ntf.train.RMSPropOptimizer.decay = 0.95\ntf.train.RMSPropOptimizer.momentum = 0.0\ntf.train.RMSPropOptimizer.epsilon = 0.00001\ntf.train.RMSPropOptimizer.centered = True\n\n# atari_lib.create_atari_environment.game_name = 'Pong'\n# Deterministic ALE version used in the DQN Nature paper (Mnih et al., 2015).\n# atari_lib.create_atari_environment.sticky_actions = False\n# create_agent.agent_name = 'dqn'\nRunner.num_iterations = 200\nRunner.training_steps = 250000 # agent steps\nRunner.evaluation_steps = 125000 # agent steps\nRunner.max_steps_per_episode = 27000 # agent steps\n\nAtariPreprocessing.terminal_on_life_loss = True\n\nWrappedReplayBuffer.replay_capacity = 1000000\nWrappedReplayBuffer.batch_size = 32\n'''\n\n# parse this config\ngin.parse_config(gin_config, skip_unknown=False)\n", "pygame 1.9.6\nHello from the pygame community. https://www.pygame.org/contribute.html\n" ], [ "# Train DQN on Cartpole\n#dqn_runner = create_runner(DQN_PATH, schedule='continuous_train')\n#print('\\n\\n\\nStart Training...\\n\\n\\n')\n#dqn_runner.run_experiment()\n#print('\\n\\n\\nDone training\\n\\n\\n')\n#dqn4 (5/28) - reward := -0.01 + 1*K - 3*D + log(S,100) + 5*L\n#dqn5 (6/02) - same reward, but wall_offset = 0\n#dqn7 (6/04) - final reward\nDQN_PATH = '/tmp/bubble_dqn7'\n\n# import main run()\nfrom dopamine.discrete_domains import run_experiment\n\n# config main file\ngin_files = []\n# bindings.....\ngin_bindings = ['Runner.evaluation_steps=0']\n\n# # code from train.main()\n# tf.logging.set_verbosity(tf.logging.INFO)\n# run_experiment.load_gin_configs(gin_files, gin_bindings)\n# runner = run_experiment.create_runner(DQN_PATH)\n\n# # start run\n# runner.run_experiment()\n", "_____no_output_____" ] ], [ [ "## Thread for updating status", "_____no_output_____" ] ], [ [ "# Thread for update canvas\nimport threading, time\ndef get_ioloop():\n import IPython, zmq\n ipython = IPython.get_ipython()\n if ipython and hasattr(ipython, 'kernel'):\n return zmq.eventloop.ioloop.IOLoop.instance()\n# The IOloop is shared\nioloop = get_ioloop()\n# Main Thread\nclass MyThread(threading.Thread):\n '''Thread for drawing into canvas in live'''\n def __init__(self, sleep = 0.5, name = 'my'):\n super().__init__()\n self._quit = threading.Event()\n self.sleep = 0.5\n self.name = name\n self.start() \n def run(self):\n while not self._quit.isSet():\n def update_progress():\n if self._quit.isSet():\n return\n self.display()\n time.sleep(self.sleep)\n ioloop.add_callback(update_progress)\n print(\"! 
T[{}].Quit()\".format(self.name))\n def quit(self):\n self._quit.set()\n def display(self):\n pass\n\n# display basic \nfrom ipycanvas import Canvas\ncanvas = Canvas(width=640, height=480)\nif canvas:\n canvas.stroke_text('hello canvas! -------------', 0, 10)\n# show canvas in here.\ncanvas", "_____no_output_____" ], [ "# Helper for Canvas\n#canvas.fill_style = 'green'\n#canvas.fill_rect(25, 25, 100, 100)\n#canvas.clear_rect(45, 45, 60, 60)\ndef drawPlot2Canvas(fig = None, x=0, y=0):\n '''draw current plt to canvas at (x,y)'''\n fig = plt.gcf() if fig is None else fig\n plt.close() # not to update on screen.\n fig.canvas.draw() # draw fig to canvas\n arr = np.array(fig.canvas.renderer._renderer)\n print('! arr = {}'.format(np.shape(arr)))\n h, w, d = np.shape(arr)\n print('! w,h,d = {}'.format(w))\n cv = Canvas(width=w, height=h)\n cv.put_image_data(arr, 0, 0)\n cv.stroke_rect(x, y, x+w-1, y+h-1)\n canvas.clear_rect(x,y,x+w,y+h)\n canvas.draw_image(cv, x, y)\ndef drawText2Canvas(txt='msg!', x=10, y=10):\n w,h,o = 200,10,10\n #canvas.fill_style = 'green'\n #canvas.fill_rect(x, y-o, x+w, y+h-o)\n canvas.clear_rect(x, y-o, x+w, y+h-o)\n canvas.stroke_text(txt, x, y)\n# draw plot....\nfig = plt.figure(1)\nplt.plot([[1,3],[3,3],[7,1]])\n# draw plot-to-canvas\ndrawPlot2Canvas(fig, x=0)\ndrawText2Canvas('hello world')", "! arr = (288, 432, 4)\n! w,h,d = 432\n" ], [ "#drawText2Canvas('......................')", "_____no_output_____" ] ], [ [ "### support Multi-Processing", "_____no_output_____" ] ], [ [ "from multiprocessing import Process, Queue\n# process list\nproc_list = []\nproc_queue = None\n\n# train function\ndef processTrain(name = 'train', Q = None):\n global gin_files, gin_bindings, DQN_PATH\n from dopamine.discrete_domains import run_experiment\n Q.put('init!') if Q else None\n tf.logging.set_verbosity(tf.logging.INFO)\n run_experiment.load_gin_configs(gin_files, gin_bindings)\n runner = run_experiment.create_runner(DQN_PATH)\n # access to env\n env = runner._environment\n o = env.reset()\n Q.put('! o({}) = {}'.format(type(o), o[0:10,0,]))\n Q.put('start!') if Q else None\n runner.run_experiment()\n Q.put('! P[{}].stop()'.format(name))\n\n# train thread\ndef startProcessTrain(target = None):\n global proc_queue, proc_list\n target = target if target is not None else processTrain\n proc_queue = Queue() if proc_queue is None else proc_queue\n proc = Process(target = target, args = ('T0', proc_queue))\n proc_list.append(proc)\n proc.start()\n return proc\n\n# stop(or kill) processes\ndef stopProcess():\n global proc_list\n for proc in proc_list:\n proc.terminate()\n proc.join()", "_____no_output_____" ], [ "# trainer = startProcessTrain()", "_____no_output_____" ], [ "# stop\n# stopProcess()\n# show process\n# !ps -ax | grep python\n# proc_queue", "_____no_output_____" ] ], [ [ "### MyTrainer and MyThread", "_____no_output_____" ] ], [ [ "from dopamine.discrete_domains import run_experiment\n# MyRunner for Train\n# - report every episode status.\nclass MyRunner(run_experiment.Runner):\n def __init__(self, base_dir, create_agent_fn):\n '''initialize runner'''\n super(MyRunner, self).__init__(base_dir, create_agent_fn)\n self._load_logger()\n def _run_one_episode(self):\n '''override to post episode status'''\n global proc_queue\n episode_length, episode_return = super(MyRunner, self)._run_one_episode()\n data = {'episode':{'length': episode_length, 'return': episode_return }}\n #proc_queue.put('! 
epsode[len,ret] = {},{}'.format(episode_length, episode_return))\n proc_queue.put(data)\n return episode_length, episode_return\n def _load_logger(self):\n '''load logger to save into file'''\n import logging, os\n # get TF logger\n log = logging.getLogger('tensorflow')\n log.setLevel(logging.DEBUG) \n # create file handler which logs even debug messages\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n fh = logging.FileHandler(os.path.join(DQN_PATH, 'tensorflow.log'))\n fh.setLevel(logging.INFO)\n fh.setFormatter(formatter)\n log.addHandler(fh)\n\n\n#! start runner\ndef startMyRunner(name = 'train', Q = None):\n global gin_files, gin_bindings, DQN_PATH\n from dopamine.discrete_domains import run_experiment\n Q.put('! start: my-runner') if Q else None\n tf.logging.set_verbosity(tf.logging.INFO)\n run_experiment.load_gin_configs(gin_files, gin_bindings)\n runner = MyRunner(DQN_PATH, run_experiment.create_agent)\n runner.run_experiment()\n Q.put('! P[{}].stop()'.format(name)) if Q else None\n#! start process of runner\nstartProcessTrain(target = startMyRunner)", "_____no_output_____" ] ], [ [ "#### Train Results (01/Jun/2020) \n```pre\nINFO:tensorflow:Starting iteration 87\nINFO:tensorflow:Average undiscounted return per training episode: 19.29\nINFO:tensorflow:Average training steps per second: 98.36\nINFO:tensorflow:Starting iteration 88\nINFO:tensorflow:Average undiscounted return per training episode: 17.34\nINFO:tensorflow:Starting iteration 89\nINFO:tensorflow:Average undiscounted return per training episode: 18.19\nINFO:tensorflow:Starting iteration 90\nINFO:tensorflow:Average undiscounted return per training episode: 16.46\nINFO:tensorflow:Starting iteration 91\nINFO:tensorflow:Average undiscounted return per training episode: 18.53\nINFO:tensorflow:Starting iteration 92\nINFO:tensorflow:Average undiscounted return per training episode: 18.22\nINFO:tensorflow:Starting iteration 99\nINFO:tensorflow:Average undiscounted return per training episode: 17.893\nINFO:tensorflow:Starting iteration 100\nINFO:tensorflow:Average undiscounted return per training episode: 18.24\nINFO:tensorflow:Starting iteration 101\nINFO:tensorflow:Average undiscounted return per training episode: 19.01\nINFO:tensorflow:Starting iteration 102\nINFO:tensorflow:Average undiscounted return per training episode: 19.94\nINFO:tensorflow:Starting iteration 103\nINFO:tensorflow:Average undiscounted return per training episode: 17.44\nINFO:tensorflow:Starting iteration 104\nINFO:tensorflow:Average undiscounted return per training episode: 17.876\nINFO:tensorflow:Starting iteration 105\nINFO:tensorflow:Average undiscounted return per training episode: 17.42\nINFO:tensorflow:Starting iteration 106\nINFO:tensorflow:Average undiscounted return per training episode: 17.595\nINFO:tensorflow:Starting iteration 107\nINFO:tensorflow:Average undiscounted return per training episode: 17.779\n```", "_____no_output_____" ] ], [ [ "# MyThread for status display\nclass MyTrainStatus(MyThread):\n def __init__(self):\n super().__init__(name='status')\n self.episodes = np.array([[0,0]])\n print('! 
MyTrainStatus({})'.format(self.name))\n def display(self):\n global canvas, proc_queue, plt\n episodes = []\n # pop all queue...\n while not proc_queue.empty():\n msg = proc_queue.get()\n if msg and 'episode' in msg:\n E = msg['episode']\n episodes.append([E['length'], E['return']])\n # print('>> episodes = {}'.format(episodes))\n # draw plot if len > 0\n if len(episodes) > 0:\n arr = np.array(episodes)\n print('>> arr = {}'.format(arr))\n # draw plot...\n if 1>0:\n self.episodes = np.vstack((self.episodes, arr))\n #print('>> self.episodes = {}'.format(self.episodes)) \n #fig = plt.figure(1)\n #plt.plot(self.episodes)\n fig, ax1 = plt.subplots()\n ax2 = ax1.twinx()\n ax1.plot(self.episodes[:,0], 'g-')\n ax2.plot(self.episodes[:,1], 'b-')\n ax1.set_xlabel('episode count')\n ax1.set_ylabel('length', color='g')\n ax2.set_ylabel('return', color='b')\n drawPlot2Canvas(fig)\n \n#! start thread for status\ntstatus = MyTrainStatus()", "! MyTrainStatus(status)\n" ], [ "episode_length, episode_return = 1,3\nmsg = {'episode':{'length': episode_length, 'return': episode_return }}\nproc_queue.put(msg)\nprint('> msg.org = {}'.format(msg))", "_____no_output_____" ], [ "# stop - thread of status\ntstatus.quit() if tstatus else None", "_____no_output_____" ], [ "# stop - process of train\nstopProcess()", "_____no_output_____" ] ] ]
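[ [ "#### Added: summarizing the collected episode statistics\n\nA minimal sketch (not part of the original run) for smoothing the episode returns gathered by the status thread, similar to the averaged returns in the training log above. It assumes `tstatus.episodes` is the `[length, return]` array built by `MyTrainStatus`; `moving_average` is a helper defined here, and only NumPy/matplotlib are used.", "_____no_output_____" ] ], [ [ "# Sketch: trailing moving average of the episode returns collected above.\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef moving_average(values, window=20):\n    '''Trailing moving average over a 1-D array (short arrays pass through).'''\n    values = np.asarray(values, dtype=float)\n    if len(values) < window:\n        return values\n    kernel = np.ones(window) / window\n    return np.convolve(values, kernel, mode='valid')\n\nreturns = tstatus.episodes[:, 1]  # column 1 holds episode returns (see MyTrainStatus)\nplt.plot(returns, alpha=0.3, label='return')\nplt.plot(moving_average(returns), label='moving average')\nplt.xlabel('episode')\nplt.legend()\nplt.show()", "_____no_output_____" ] ]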
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
cb989a472e560d75baa16e3e5fcd7f5c149f6306
18,558
ipynb
Jupyter Notebook
Bitcoin using Tweets 1.ipynb
smit2k14/Bitcoin-Analysis
9a8dfa525152b359795539fc465b860de7a1e3e4
[ "MIT" ]
null
null
null
Bitcoin using Tweets 1.ipynb
smit2k14/Bitcoin-Analysis
9a8dfa525152b359795539fc465b860de7a1e3e4
[ "MIT" ]
null
null
null
Bitcoin using Tweets 1.ipynb
smit2k14/Bitcoin-Analysis
9a8dfa525152b359795539fc465b860de7a1e3e4
[ "MIT" ]
null
null
null
32.9627
2,156
0.475428
[ [ [ "from keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, LSTM, Conv1D, MaxPooling1D, Dropout, Activation\nfrom keras.layers.embeddings import Embedding\n\nimport nltk\nimport time\nimport string\nimport numpy as np\nimport pandas as pd\nimport html,re\nfrom nltk.corpus import stopwords\nfrom nltk import word_tokenize\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.stem.porter import *\nfrom sklearn.model_selection import train_test_split\n\nimport plotly.offline as py\nimport plotly.graph_objs as go\npy.init_notebook_mode(connected=True)\n\nfrom google.colab import drive\ndrive.mount('/content/drive/')", "Using TensorFlow backend.\n" ], [ "nltk.download('stopwords')\nstopwords = nltk.corpus.stopwords.words(\"english\")\nstopwords.extend('rt')", "[nltk_data] Downloading package stopwords to /root/nltk_data...\n[nltk_data] Package stopwords is already up-to-date!\n" ], [ "#Sorting out the comments\n\nTwComments = pd.read_csv ('/content/drive/My Drive/Extras/tweetsbitcoin.csv',delimiter=\",\", index_col=None)\nTwComments = TwComments.dropna() \nTwComments=TwComments.drop_duplicates()\nTwComments=TwComments.sort_values(['date','time'], ascending=[True,True])", "/usr/local/lib/python3.6/dist-packages/IPython/core/interactiveshell.py:2718: DtypeWarning:\n\nColumns (4) have mixed types. Specify dtype option on import or set low_memory=False.\n\n" ], [ "i = TwComments[(TwComments.date == '2017-09-30')].index\nTwComments = TwComments.drop(i)\nTwComments[\"DateTime\"]=pd.to_datetime(TwComments['date'] + ' ' + TwComments['time'])\nTwComments[\"hour\"] = TwComments.DateTime.dt.hour\nTwComments[\"day\"] = TwComments.DateTime.dt.weekday_name\nTwComments[\"DateTime\"] = TwComments.DateTime.values.astype(np.int64) // 10 ** 9\nTwComments[\"TimeDiff\"]= TwComments[\"DateTime\"] - (TwComments[\"DateTime\"] % 86400)\nTwComments = TwComments[TwComments.TimeDiff > 0]", "_____no_output_____" ], [ "startTime = int(round(time.time()*60))\ntcomm=pd.DataFrame()\ngrouped_terms = TwComments.groupby([\"TimeDiff\"])", "_____no_output_____" ], [ "#This is done to combine the tweets in that hour into a list\ni = 0\ntweets = []\nfor name, group in grouped_terms:\n t = []\n for row, data in group.iterrows():\n t.append(data['tweet'])\n tweets.append(''.join(t))", "_____no_output_____" ], [ "Price = pd.read_csv ('/content/drive/My Drive/Extras/1Daybinanceprices.csv',delimiter=\"\\t\", index_col=None)", "_____no_output_____" ], [ "Price[\"Date\"] = pd.to_datetime(Price['ClTime'], unit = 'ms')\nPrice = Price.sort_values(['OpTime'], ascending=True)", "_____no_output_____" ], [ "def preprocess(tweets):\n t = html.unescape(tweets)\n z = lambda x: re.compile('\\#').sub('', re.compile('RT @').sub('@', x).strip())\n t = z(t)\n tweet = ' '.join(re.sub(\"(@[_A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\",\" \",t).split())\n return tweet\n\n#Tokenizing the tweets\n\ndef tokenizer(tweets):\n tweet = \" \".join(re.split(\"[^a-zA-Z.,!?]*\", tweets.lower())).strip()\n stemmer = PorterStemmer()\n tweets = [stemmer.stem(tweet.lower()) for tweet in tweets.split()]\n return tweets\n\ndef basic_tokenizer(tweet):\n #Same as tokenize but without the stemmer\n tweet = \" \".join(re.split(\"[^a-zA-Z.,!?]*\", tweet.lower())).strip()\n return tweet.split()", "_____no_output_____" ], [ "for i,t in enumerate(tweets):\n tweets[i] = tokenizer(preprocess(t))", "/usr/lib/python3.6/re.py:212: 
FutureWarning:\n\nsplit() requires a non-empty pattern match.\n\n" ], [ "vocabulary_size = 1000000\ntokenizer = Tokenizer(num_words= vocabulary_size)\ntokenizer.fit_on_texts(tweets)\n\nsequences = tokenizer.texts_to_sequences(tweets)\ndata = pad_sequences(sequences, maxlen=15000)", "_____no_output_____" ], [ "Price['PriceDiff'] = Price['Close']-Price['Open']", "_____no_output_____" ], [ "price_diff = []\nfor p in Price['PriceDiff']:\n if p >= 0:\n price_diff.append(1)\n else:\n price_diff.append(0)", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(data, price_diff, test_size=0.1, shuffle=False)", "_____no_output_____" ], [ "model = Sequential()\nmodel.add(Embedding(1000000, 100, input_length=15000))\nmodel.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])", "_____no_output_____" ], [ "model.fit(X_train, y_train, epochs=10)", "Epoch 1/10\n328/328 [==============================] - 485s 1s/step - loss: 0.0047 - acc: 0.9970\nEpoch 2/10\n328/328 [==============================] - 488s 1s/step - loss: 0.0037 - acc: 1.0000\nEpoch 3/10\n328/328 [==============================] - 480s 1s/step - loss: 0.0030 - acc: 1.0000\nEpoch 4/10\n328/328 [==============================] - 492s 1s/step - loss: 0.0022 - acc: 1.0000\nEpoch 5/10\n328/328 [==============================] - 489s 1s/step - loss: 0.0021 - acc: 1.0000\nEpoch 6/10\n328/328 [==============================] - 478s 1s/step - loss: 0.0029 - acc: 1.0000\nEpoch 7/10\n328/328 [==============================] - 480s 1s/step - loss: 0.0014 - acc: 1.0000\nEpoch 8/10\n328/328 [==============================] - 491s 1s/step - loss: 0.0013 - acc: 1.0000\nEpoch 9/10\n328/328 [==============================] - 490s 1s/step - loss: 0.0016 - acc: 1.0000\nEpoch 10/10\n328/328 [==============================] - 478s 1s/step - loss: 0.0011 - acc: 1.0000\n" ], [ "y = model.predict(X_test)", "_____no_output_____" ], [ "t = y\nfor i,z in enumerate(y):\n if z>0.4:\n t[i]=1\n else:\n t[i]=0", "_____no_output_____" ], [ "t = list(map(int, t))\nprint(t)\nsu = 0", "[1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0]\n" ], [ "print(23/37)", "0.6216216216216216\n" ], [ "print(y_test)", "[1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1]\n" ], [ " ", "_____no_output_____" ] ] ]
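[ [ "#### Added: computing the test accuracy programmatically\n\nA small sketch (not in the original notebook) replacing the hand-computed `23/37` above. It assumes `t` (the thresholded predictions) and `y_test` from the earlier cells, and uses scikit-learn's `accuracy_score` helper.", "_____no_output_____" ] ], [ [ "# Sketch: score the thresholded predictions against the held-out labels.\nfrom sklearn.metrics import accuracy_score\n\nacc = accuracy_score(y_test, t)\nprint('test accuracy: {:.4f}'.format(acc))", "_____no_output_____" ] ]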
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb98a47188d0c8bb04f56d8931699b690c49452c
702,096
ipynb
Jupyter Notebook
notebooks/demo.ipynb
Lukez-pi/mushi_lz
a60d9d44834de56b764940c83562451b2db9e245
[ "MIT" ]
null
null
null
notebooks/demo.ipynb
Lukez-pi/mushi_lz
a60d9d44834de56b764940c83562451b2db9e245
[ "MIT" ]
null
null
null
notebooks/demo.ipynb
Lukez-pi/mushi_lz
a60d9d44834de56b764940c83562451b2db9e245
[ "MIT" ]
null
null
null
1,126.959872
302,308
0.9581
[ [ [ "import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np", "_____no_output_____" ] ], [ [ "`mushi` demo\n==\n\nThis notebook demonstrates how to use the Python module [`mushi`](https://github.com/harrispopgen/mushi/blob/master/mushi.py) for...\n\n## Inferring mutation spectrum history (and demography)\n\n>*the thing I came for:*\\\n*the wreck and not the story of the wreck*\\\n*the thing itself and not the myth*\n>\n> – Adrienne Rich, Diving Into the Wreck", "_____no_output_____" ], [ "### Canon ball metaphor stolen from Erick Matsen\n\n![](canon.svg)\n\nIn this metaphor, the pile of canonballs represents the mutation spectra we compute from SNPs, and the canon represents the mutational process.\nJust as the history of the canon's firing rate and direction explains where we find the piles, the history of the mutation process explains the SNP mutation spectra we find in modern genomes.", "_____no_output_____" ], [ "We will use `mushi` to infer history of the mutation process, which we can think of as the mutation rate function over time for each triplet mutation type.\nIn `mushi`, we use coalescent theory and optimization techniques to learn about this history from the $k$-SFS.", "_____no_output_____" ], [ "### $3$-SFS from the 1000 Genomes Finnish population (previously computed with [`mutyper ksfs`](https://github.com/harrispopgen/mutyper))\nLoad the $k$-SFS", "_____no_output_____" ] ], [ [ "from mushi.ksfs import kSFS\nksfs = kSFS(file='../example_data/3-SFS.EUR.FIN.tsv')", "_____no_output_____" ] ], [ [ "Plot the population variant spectrum (summing the $k$-SFS over sample frequency)", "_____no_output_____" ] ], [ [ "ksfs.as_df().sum(0).plot.bar(figsize=(17, 3))\nplt.xticks(family='monospace')\nplt.ylabel('number of variants')\nplt.show()", "_____no_output_____" ] ], [ [ "Plot the total SFS (summing the $k$-SFS over mutation types)", "_____no_output_____" ] ], [ [ "ksfs.plot_total()\nplt.yscale('log')", "_____no_output_____" ] ], [ [ "plot k-SFS composition as a scatter (a color for each mutation type)", "_____no_output_____" ] ], [ [ "ksfs.plot(clr=True)\nplt.show()", "_____no_output_____" ] ], [ [ "...and as a heatmap (a column for each mutation type)", "_____no_output_____" ] ], [ [ "g = ksfs.clustermap(figsize=(17, 7), col_cluster=False, xticklabels=True, cmap='RdBu_r', rasterized=True, robust=True)\ng.ax_heatmap.set_xticklabels(g.ax_heatmap.get_xmajorticklabels(), fontsize = 9, family='monospace')\nplt.show()", "_____no_output_____" ] ], [ [ "We will also need the masked genome size for each mutation type, which we've also previously computed with `mutyper targets`. This defines mutational target sizes.", "_____no_output_____" ] ], [ [ "masked_genome_size = pd.read_csv(f'../example_data/masked_size.tsv', sep='\\t', header=None, index_col=0)\nmasked_genome_size.index.name='mutation type'\n\nmasked_genome_size.plot.bar(figsize=(6, 3), legend=False)\nplt.xticks(family='monospace')\nplt.ylabel('mutational target size (sites)')\nplt.show()", "_____no_output_____" ] ], [ [ "With this we can compute the number of SNPs per target in each mutation type. 
Notice the enrichment of C>T transitions at CpG sites.", "_____no_output_____" ] ], [ [ "normalized_hit_rates = ksfs.as_df().sum(0).to_frame(name='variant count')\nnormalized_hit_rates['target size'] = [int(masked_genome_size.loc[context])\n for context, _ in normalized_hit_rates['variant count'].index.str.split('>')]\n\n(normalized_hit_rates['variant count'] /\n normalized_hit_rates['target size']).plot.bar(figsize=(17, 3), legend=False)\nplt.xticks(family='monospace')\nplt.ylabel('variants per target')\nplt.show()", "_____no_output_____" ] ], [ [ "To compute the total mutation rate in units of mutations per masked genome per generation, we multiply an estimate of the site-wise rate by the target size", "_____no_output_____" ] ], [ [ "μ0 = 1.25e-8 * masked_genome_size[1].sum()\nμ0", "_____no_output_____" ] ], [ [ "To render time in years rather than generations, we use an estimate of the generation time", "_____no_output_____" ] ], [ [ "t_gen = 29", "_____no_output_____" ] ], [ [ "### Joint coalescent inference of demography and mutation spectrum history\n\nTo access time-calibrated mutation spectrum histories, we first need to estimate the demographic history, since this defines the diffusion timescale of the coalescent process.", "_____no_output_____" ], [ "We first define a grid of times will represent history on, measured retrospectively from the present in units of Wright-Fisher generations.", "_____no_output_____" ] ], [ [ "t = np.logspace(np.log10(1), np.log10(200000), 200)", "_____no_output_____" ] ], [ [ "We now run the optimization, setting a few parameters to control how complicated we let the histories look.", "_____no_output_____" ] ], [ [ "ksfs.infer_history(t, μ0, alpha_tv=1e2, alpha_spline=3e3, alpha_ridge=1e-10,\n beta_rank=1e1, beta_tv=7e1, beta_spline=1e1, beta_ridge=1e-10,\n tol=1e-11)", "inferring η(t)\ninitial objective -8.183982e+07\niteration 433, objective -8.193e+07, relative change 3.134e-12 \nrelative change in objective function 3.1e-12 is within tolerance 1e-11 after 433 iterations\ninferring μ(t) conditioned on η(t)\ninitial objective -4.937803e+07\niteration 318, objective -4.939e+07, relative change 8.697e-12 \nrelative change in objective function 8.7e-12 is within tolerance 1e-11 after 318 iterations\n" ] ], [ [ "Hopefully you agree that was fast 🏎", "_____no_output_____" ], [ "We'll now check that the demography has a few features we expect in the Finnish population: the out-of-Africa bottleneck shared by all Eurasians, a later bottleneck associated with northward migration, and exponential population growth toward the present.", "_____no_output_____" ], [ "- The plot on the left will show fit to the SFS\n- The plot on the right will show the inferred haploid effective population size history.", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(10, 5))\nplt.subplot(121)\nksfs.plot_total()\nplt.yscale('log')\nplt.subplot(122)\nksfs.eta.plot(t_gen=t_gen)\nplt.xlim([1e3, 1e6])\nplt.show()", "_____no_output_____" ] ], [ [ "Now let's take a look at the inferred mutation spectrum history (MuSH).\n- The plot on the left will show the measured $k$-SFS composition (points) and the fit from `mushi` (lines)\n- The plot on the right will show the inferred MuSH", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(16, 5))\nplt.subplot(121) \nksfs.plot(clr=True) \nplt.subplot(122)\nksfs.μ.plot(t_gen=t_gen, clr=True, alpha=0.75)\nksfs.μ.plot(('TCC>TTC',), t_gen=t_gen, clr=True, lw=5)\nplt.xscale('log')\nplt.xlim([1e3, 1e6]) \nplt.show()", "_____no_output_____" ] ], 
[ [ "We can also plot the MuSH as a heatmap with the y axis representing time.", "_____no_output_____" ] ], [ [ "g = ksfs.μ.clustermap(t_gen=t_gen, figsize=(17, 7), col_cluster=True, xticklabels=True, robust=False, cmap='RdBu_r')\ng.ax_heatmap.set_xticklabels(g.ax_heatmap.get_xmajorticklabels(), fontsize = 9, family='monospace')\ng.ax_heatmap.set_ylim([172, 58])\nplt.show()", "_____no_output_____" ] ], [ [ "Now that you have a MuSH, you can start answering questions about mutation spectrum history!🤸‍", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb98abe5c3a58b2379b8ebbda870045e11585c2f
6,890
ipynb
Jupyter Notebook
ex_data_structures_BRADY.ipynb
bradynordstrom/ComputationalThinking_Gov_1
6d41807fed8dfa52fe76915e8cf7c32a54631432
[ "MIT" ]
null
null
null
ex_data_structures_BRADY.ipynb
bradynordstrom/ComputationalThinking_Gov_1
6d41807fed8dfa52fe76915e8cf7c32a54631432
[ "MIT" ]
null
null
null
ex_data_structures_BRADY.ipynb
bradynordstrom/ComputationalThinking_Gov_1
6d41807fed8dfa52fe76915e8cf7c32a54631432
[ "MIT" ]
null
null
null
24.519573
212
0.422932
[ [ [ "names=[\"Tomás\", \"Pauline\", \"Pablo\", \"Bjork\",\"Alan\",\"Juana\"]\nwoman=[False,True,False,False,False,True]\nages=[32,33,28,30,32,27]\ncountry=[\"Chile\", \"Senegal\", \"Spain\", \"Norway\",\"Peru\",\"Peru\"]\neducation=[\"Bach\", \"Bach\", \"Master\", \"PhD\",\"Bach\",\"Master\"]", "_____no_output_____" ], [ "import pandas as pd\ndata={'name':names,'woman':woman,'ages':ages,'country':country,'education':education}\ndata", "_____no_output_____" ], [ "friends=pd.DataFrame.from_dict(data)\nfriends", "_____no_output_____" ], [ "# Who is the oldest person in this group of friends\nfriends[friends.ages==max(friends.ages)].name", "_____no_output_____" ], [ "# How many people are 32?\nlen(friends[friends.ages==32])", "_____no_output_____" ], [ "# How many are not Peruvian? (use two different codes) <-Not sure if you wanted two unique codes or not but I only know how to use len to count so I used two lines of code and hope that is what you meant\nPeru1=[\"Peru\"]\nlen(friends[~friends.country.isin(Peru1)])", "_____no_output_____" ], [ "# Who is the person with the highest level of education?\nfriends[(friends.education==max(friends.education))].name", "_____no_output_____" ], [ "# what is the sex of the oldest person in the group\nfriends[friends.ages==max(friends.ages)].woman.name\n", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb98c32980ebb4ae768c7c2ec9c85d6b06dcda69
4,942
ipynb
Jupyter Notebook
neural_network.ipynb
freekgj/Recognizing_a_lie
27fa86d6e5bb94db829da8180b5050d84e1d8e1c
[ "MIT" ]
1
2021-11-19T13:56:49.000Z
2021-11-19T13:56:49.000Z
neural_network.ipynb
freekgj/Recognizing_a_lie
27fa86d6e5bb94db829da8180b5050d84e1d8e1c
[ "MIT" ]
null
null
null
neural_network.ipynb
freekgj/Recognizing_a_lie
27fa86d6e5bb94db829da8180b5050d84e1d8e1c
[ "MIT" ]
null
null
null
24.834171
101
0.561918
[ [ [ "import cv2\nimport glob\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\n\nfrom keras import optimizers\nfrom tensorflow.keras import layers, models\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator", "_____no_output_____" ], [ "# Get info of image\nimage_path = glob.glob('./Data/Picture_data/PT/' + '*jpg')[0]\nimage = cv2.imread(image_path)\nimg_height, img_weight, _ = image.shape\nbatch_size = 16", "_____no_output_____" ], [ "# Turn trainingsdataset into tensorflowdataset\ntrain_images = tf.keras.preprocessing.image_dataset_from_directory(\ndirectory='Data/Picture_data/', \nlabels='inferred', \nlabel_mode = 'categorical',\ncolor_mode='rgb',\nbatch_size=batch_size, \nimage_size = (img_height, img_weight),\nshuffle=True, \nvalidation_split=0.2,\nseed = 123,\nsubset=\"training\"\n)", "_____no_output_____" ], [ "# Turn validationdataset into tensorflowdataset\nvalidation_images = tf.keras.preprocessing.image_dataset_from_directory(\ndirectory='Data/Picture_data/', \nlabels='inferred', \nlabel_mode = 'categorical',\ncolor_mode='rgb',\nbatch_size=2, \nimage_size = (img_height, img_weight),\nshuffle=True, \nvalidation_split=0.1,\nseed = 123,\nsubset=\"validation\"\n)", "_____no_output_____" ], [ "# Normalize all data\ndef normalize(image, label):\n image = tf.cast(image/255, tf.float32)\n return image, label\n\ntrain_images = train_images.map(normalize)\nvalidation_images = validation_images.map(normalize)", "_____no_output_____" ], [ "# Define model\n\nmodel = models.Sequential()\nmodel.add(layers.Conv2D(filters=64, kernel_size=3, input_shape=(img_height, img_weight, 3)))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Conv2D(64, (3,3), activation='relu'))\nmodel.add(layers.MaxPool2D((2, 2)))\nmodel.add(layers.Conv2D(64, (3,3), activation='relu'))\nmodel.add(layers.Flatten())\nmodel.add(layers.Dense(64, activation='relu'))\nmodel.add(layers.Dense(4, activation='softmax'))", "_____no_output_____" ], [ "# Summary of model\nmodel.summary()", "_____no_output_____" ], [ "model.compile(optimizer = 'adam',\n loss = 'categorical_crossentropy',\n metrics=['accuracy'])", "_____no_output_____" ], [ "# Fit the model\nhistory = model.fit(train_images, validation_data=validation_images, epochs=1)", "_____no_output_____" ], [ "# Presenting the results\nplt.plot(history.history['accuracy'], label='accuracy')\n#plt.plot(history.history['validation_accuracy'], label='validation_accuracy')\nplt.xlabel('Epoch')\nplt.ylabel('Accuracy')\nplt.ylim([0, 1])\nplt.legend(loc='upper right')\ntest_loss, test_acc = model.evaluate(validation_images)", "_____no_output_____" ], [ "print('Accuracy', test_acc)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb98c96810a2ecb20151e1953cec8aefcbd36c24
976,341
ipynb
Jupyter Notebook
scratchpads/Q-uarantine.ipynb
paultirlisan/pyzx
e737fbb5bcca2f25060bc12c0b9c887798b1b6b0
[ "Apache-2.0" ]
219
2018-07-04T09:50:27.000Z
2022-03-30T11:39:53.000Z
scratchpads/Q-uarantine.ipynb
paultirlisan/pyzx
e737fbb5bcca2f25060bc12c0b9c887798b1b6b0
[ "Apache-2.0" ]
74
2018-07-17T13:56:49.000Z
2022-03-30T16:11:56.000Z
scratchpads/Q-uarantine.ipynb
paultirlisan/pyzx
e737fbb5bcca2f25060bc12c0b9c887798b1b6b0
[ "Apache-2.0" ]
73
2018-07-10T13:58:38.000Z
2022-02-15T14:59:45.000Z
185.334282
162,831
0.351015
[ [ [ "import sys, os; sys.path.append('..')\nimport pyzx as zx\nimport random\nimport math\nfrom fractions import Fraction\n%config InlineBackend.figure_format = 'svg'", "_____no_output_____" ], [ "c = zx.qasm(\"\"\"\nqreg q[3];\ncx q[0], q[1];\n\"\"\")\nzx.d3.draw(c)", "_____no_output_____" ], [ "c = zx.qasm(\"\"\"\nqreg q[2];\nrx(0.5*pi) q[1];\nt q[0];\ncx q[0], q[1];\ncx q[1], q[0];\ncx q[0], q[1];\ntdg q[1];\nrx(-0.5*pi) q[0];\n\"\"\")\nzx.d3.draw(c)", "_____no_output_____" ], [ "c.gates", "_____no_output_____" ], [ "g = c.to_graph()\ng", "_____no_output_____" ], [ "zx.d3.draw(g)", "_____no_output_____" ], [ "zx.simplify.spider_simp(g)\nzx.d3.draw(g)", "spider_simp: 1. 1. 2 iterations\n" ], [ "zx.full_reduce(g)\nzx.d3.draw(g)", "_____no_output_____" ], [ "g = zx.sqasm(\"\"\"\nqreg S[1];\nqreg q[3];\nt S[0];\ncx q[0], S[0];\ncx q[1], S[0];\ncx q[2], S[0];\n\"\"\")\nzx.d3.draw(g)", "_____no_output_____" ], [ "g = zx.sqasm(\"\"\"\nqreg S[2];\nqreg q[3];\nt S[0];\ncx q[0], S[0];\ncx q[1], S[0];\ncx q[2], S[0];\ntdg S[1];\ncx q[0], S[1];\ncx q[1], S[1];\ncx q[2], S[1];\n\"\"\")\nzx.d3.draw(g)", "_____no_output_____" ], [ "zx.clifford_simp(g)\nzx.d3.draw(g)", "_____no_output_____" ], [ "zx.full_reduce(g)\nzx.d3.draw(g)", "_____no_output_____" ], [ "g = zx.Circuit.load(\"test.qsim\").to_graph()\nzx.d3.draw(g)", "_____no_output_____" ], [ "g1 = g.copy()\ng1.map_qubits([\n (0,0), (1, 0), (2, 0), (3, 0),\n (0,1), (1, 1), (2, 1), (3, 1),\n (0,2), (1, 2), (2, 2), (3, 2),\n (0,3), (1, 3), (2, 3), (3, 3)\n])\nzx.d3.draw(g1)", "_____no_output_____" ], [ "zx.full_reduce(g1)\nzx.d3.draw(g1)", "_____no_output_____" ], [ "def t_optimiser(c):\n g = c.to_graph()\n g = zx.simplify.teleport_reduce(g)\n c_opt = zx.Circuit.from_graph(g).split_phase_gates().to_basic_gates()\n return zx.optimize.basic_optimization(c_opt).to_basic_gates()", "_____no_output_____" ], [ "c = zx.Circuit.load('../circuits/Fast/grover_5.qc')\nzx.d3.draw(c.to_graph())\nprint(zx.tcount(c))", "_____no_output_____" ], [ "c1 = t_optimiser(c)\nzx.d3.draw(c1.to_graph())\nprint(zx.tcount(c1))", "_____no_output_____" ], [ "c.verify_equality(c1)", "_____no_output_____" ], [ "c2 = c1.copy()\nc2.add_circuit(c.adjoint())\ng = c2.to_graph()\nzx.simplify.full_reduce(g)\nzx.d3.draw(g)", "_____no_output_____" ], [ "c1.gates[10] = zx.gates.T(6, adjoint=True)", "_____no_output_____" ], [ "c.verify_equality(c1)", "_____no_output_____" ], [ "c2 = c1.copy()\nc2.add_circuit(c.adjoint())\ng = c2.to_graph()\nzx.simplify.full_reduce(g)\nzx.d3.draw(g)", "_____no_output_____" ], [ "g = zx.Circuit.load('../circuits/Fast/hwb6.qc').to_graph()\nzx.d3.draw(g)\nprint(zx.tcount(g))", "_____no_output_____" ], [ "zx.simplify.full_reduce(g)\nzx.d3.draw(g)\nprint(zx.tcount(g))", "_____no_output_____" ], [ "g.apply_state(\"++---+-\")\ng.apply_effect(\"+011-1-\")\nzx.simplify.full_reduce(g)\nprint(zx.tcount(g))\nzx.drawing.arrange_scalar_diagram(g)\nzx.d3.draw(g)", "33\n" ], [ "def compute_decomp(g):\n if zx.tcount(g) >= 6:\n gsum = zx.simulate.replace_magic_states(g)\n gsum.reduce_scalar()\n terms = 0\n vals = 0\n for g1 in gsum.graphs:\n t,v = compute_decomp(g1)\n terms += t\n vals += v\n return (terms, vals)\n else:\n return (2 ** math.ceil(zx.tcount(g)/2), g.to_matrix())", "_____no_output_____" ], [ "math.ceil(2**(0.468 * zx.tcount(g)))", "_____no_output_____" ], [ "compute_decomp(g)", "_____no_output_____" ], [ "zx.simulate.calculate_path_sum(g)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb98c9b7e12ba30bbeb326a7449ade50a3d26d64
54,283
ipynb
Jupyter Notebook
0003-Aqua_0.7_operator_redesign.ipynb
dongreenberg/rfcs
2ae94fbdd42ef1a7701b3830194f5de84e3f2db4
[ "Apache-2.0" ]
null
null
null
0003-Aqua_0.7_operator_redesign.ipynb
dongreenberg/rfcs
2ae94fbdd42ef1a7701b3830194f5de84e3f2db4
[ "Apache-2.0" ]
null
null
null
0003-Aqua_0.7_operator_redesign.ipynb
dongreenberg/rfcs
2ae94fbdd42ef1a7701b3830194f5de84e3f2db4
[ "Apache-2.0" ]
1
2020-03-25T19:17:52.000Z
2020-03-25T19:17:52.000Z
43.4264
833
0.638156
[ [ [ "# Aqua 0.7 Operator Redesign\n_17-Jan-19, donny@_", "_____no_output_____" ], [ "| **Status** | **Accepted** |\n|:------------------|:----------------------------------------------|\n| **RFC #** | 0003 |\n| **Authors** | Donny Greenberg ([email protected]) |\n| **Deprecates** | NA |\n| **Submitted** | 2020-01-17 |\n| **Updated** | 2020-01-23 |\n\n## Purpose\nTo improve the transparency, ease of understanding, and programming power of Aqua’s operator logic and usage. Specifically, to reconcile with the Terra operator hierarchy and make the Aqua algorithmic flow more visible, explicit, and extensible.\n\nThroughout this doc, we rely on definitions of Operators roughly derived from the first chapter of John Watrous's \"The Theory of Quantum Information,\" with a focus on Square Operators over binary alphabets.", "_____no_output_____" ], [ "## Background: Motivation and Opportunities\nThe representation of matrices sparsely as linear combinations of Pauli operators is critical in many quantum algorithms. As such, the Operator classes are the workhorses of Aqua today (0.6.2), containing both the expectation value and evolution logic used by most of its algorithms.\n\nHowever, there are several opportunities for improvement:\n* **Basic Construction & Rapid Protoyping:** Aqua's Operators were initially built as procedural infrastructure rather than first-class programming primitives. Improvements to syntax and interfaces can enable the succinctness and power typical of mathematical Operator language\n* **Separation of Operator Math and Operator Algorithms**\n * Ease of understanding: The \"Operator algorithm\" logic - the ExpectationValue, Evolution, grouping, and symmetry analysis - is mostly spread across the 3000-line operator hierarchy, and is very branchy for different modes of execution\n * Ease of extension: Modification to the expectation value, evolution, grouping, and symmetry logic is a core use case (e.g. the [CVaR expectation](https://arxiv.org/abs/1907.04769), [linear combination evolution](https://arxiv.org/abs/1202.5822), or the many recent papers on [Pauli grouping](https://www.nature.com/articles/nature23879)), but not explicitly supported today\n* **Smooth Borders with Broader Qiskit**\n * Terra's `quantum_info` module also supports operator math, but is mostly matrix-based\n * **Remote Operator Algorithms:** Aer's fast ExpectationValue is not transparently or cleanly interchangeable with Aqua's local ExpectationValue today. 
The concept of an Algorithm not provided by Aqua is not yet defined to support this type of interchangeability cleanly", "_____no_output_____" ], [ "### Present State of Operators in Qiskit\n\nBoth Aqua and Terra include suites of modules to support Operator math, but do so very differently.\n\n* Aqua\n * Operators are focused primarily on the procedural requirements of algorithmic execution\n * Modules are very large and include hundreds of lines of procedural algorithm code\n * Interfaces were not initial built for end-user usage as a programming primitive, and are therefore wordy and difficult for users to understand\n * Syntax is not built for rapid prototyping and lacks syntactic power of mathematical Operator language\n * Primarily focused on Pauli-basis Operators\n * WeightedPauli - $2^n\\times 2^n$ Operators sparsely represented as complex combination of Paulis\n * MatrixOperator in the standard basis with $2^n\\times 2^n$ elements was initially built for performance improvements which are no longer relevant\n * Only dependency on Terra is through Pauli module, but this is largely symbolic (not an inexorable component)\n* Terra\n * Operator math is mostly built around QCVV (Quantum Characterization Verification & Validation) and open Quantum systems modelling use cases\n * Support for Channel, Choi, Superoperator, Kraus, etc.\n * Operators are largely matrix-based and therefore do not support the Pauli-basis operations necessary to non-exponentially execute quantum algorithms\n * Used by: \n * Aqua, 29 dependencies - Only Pauli module\n * Aer, 10 dependencies\n * Ignis, 2 dependencies\n* Ignis includes a `clifford.py` module somewhat specific to characterization needs.", "_____no_output_____" ], [ "### Aqua Present Usage (0.6.2)\n\nWithin Aqua, the primary uses of Operators are:\n* Qubit Observable (Hamiltonian, Cost Function, etc.) Construction\n * Used as sparse representations of large observables when constructing problems in Chemistry, Physics, Optimization, and Finance today\n * Also often a translation step between domain-specific problems and Quantum hardware-addressable equivalents\n* ExpectationValues\n * Primarily used in VQE (and derivatives QAOA, UCCSD, etc.) as a device-executable cost function of the ansatz state\n * Expectation values can only be taken of Operators in the Pauli basis on Quantum hardware\n * Also present in the \"Evolution of Hamiltonian\" algorithm, which is simply state evolution by one operator followed by an expectation value by another operator\n* State Evolution\n * Used in QPE (and derivatives HHL, iQPE, etc.) as a Quantum circuit-representable matrix exponentiation\n * Used in UCCSD and QAOA ansatze and EOH algorithm as representation of system dynamics to simulate time evolution of a system on quantum hardware\n * Evolution can only be taken by Operators in the Pauli basis on Quantum hardware", "_____no_output_____" ], [ "#### Other Important Aqua Operator Features\n\n* __Grouping__ - Grouping is a technique to reduce the number of circuit evaluations required to compute an ExpectationValue based on mutually commuting Paulis in the Operator decomposition.\n* __Tapering__ - Tapering is a technique to remove qubits from a Hamiltonian of interest by identifying Z2 symmetries in the Hamiltonian.\n* __Gradients__ - Many variational algorithms are improved dramatically when exact gradients of gate parameters with respect to the cost function observable are computed analytically rather than numerically. 
Aqua can compute these gradients and provide them to the optimizer directly.", "_____no_output_____" ], [ "### Aqua Present (0.6.2) Operator Object Model and Hierarchy\n\nAqua's Operators are organized as follows:\n* `qiskit.aqua.operators`\n * base_operator.py: `BaseOperator(ABC)`\n * matrix_operator.py: `MatrixOperator(BaseOperator)`\n * weighted_pauli_operator.py: `WeightedPauliOperator(BaseOperator)`, __and__ `Z2Symmetries`\n * tpb_grouped_weighted_pauli_operator.py: `TPBGroupedWeightedPauliOperator(WeightedPauliOperator)`, essentially a wrapper around `WeightedPauliOperator` for backward compatibility.\n * pauli_graph: `PauliGraph`\n * op_converter.py: `to_weighted_pauli_operator(operator)`, `to_matrix_operator(operator)`, `to_tpb_grouped_weighted_pauli_operator(operator, grouping_func, **kwargs)`\n * common.py: Utility functions, inc. `evolution_instruction`, `pauli_measurement(circuit, pauli, qr, cr, barrier=False)`, `measure_pauli_z(data, pauli)`, `covariance(data, pauli_1, pauli_2, avg_1, avg_2)`, etc.\n* `qiskit.chemistry` __- OUT OF SCOPE OF THIS DOC__\n * fermionic_operator.py: `FermionicOperator`, contains `jordan_wigner`, `parity`, `bravyi_kitaev` Fermion-to-qubit operator mappings.\n * bksf.py: Another mapping\n * `.core`\n * chemistry_operator.py: `ChemistryOperator(ABC)`\n * hamiltonian.py: `Hamiltonian(ChemistryOperator)`", "_____no_output_____" ], [ "### Terra Present (0.11.0) Operator Object Model and Hierarchy\n\nTerra's Operators are organized as follows:\n* `qiskit.quantum_info`\n * `.operators`\n * base_operator.py, pauli.py, operator.py (matrix operator), measures.py (`process_fidelity`), predicates.py (`is_unitary_matrix`, `is_hermitian_matrix`, `matrix_equal`, etc.), quaternion.py\n * `.channel`\n * quantum_channel.py (base), chi.py, choi.py, kraus.py, ptm.py, stinespring.py, superop.py, transformations.py\n * `.states`\n * quantum_state.py (base), densitymatrix.py, statevector.py, measures.py (`state_fidelity`), states.py (`basis_state`, `projector`, `purity`)\n * `.analysis`\n * average.py - ExpectationValue of diagonal operator\n * make_observable.py - Convert an observable in matrix form to dictionary form\n \n#### WeightedPauliOperator Not Available in Terra\n\nTerra does not contain any of the logic for working in the Pauli-basis implemented in Aqua today, and is not interoptable with Aqua's operator algorithms. As such, these utilities are only accessible to Aqua users.", "_____no_output_____" ], [ "### Operator Construction and Manipulation Present State\n\nThe center of Qiskit's algorithmic Operator logic is the WeightedPauli, being the only non-exponential scaling operator basis available today (the only other being the standard basis).\n\nQiskit supports several methods of WeightedPauli operator construction, none of which are self explanatory to a new user:", "_____no_output_____" ] ], [ [ "# from qiskit.quantum_info.operators import WeightedPauliOperator\nfrom qiskit.aqua.operators import WeightedPauliOperator, MatrixOperator, op_converter\nfrom qiskit.quantum_info.operators import Pauli", "_____no_output_____" ], [ "pauli_op = WeightedPauliOperator([\n [.5, Pauli.from_label('IX')],\n [.2, Pauli.from_label('ZY')],\n [.1j, Pauli.from_label('ZZ')],\n])", "_____no_output_____" ], [ "pauli_op = WeightedPauliOperator.from_list(\n paulis=[Pauli.from_label('IX'),\n Pauli.from_label('ZY'),\n Pauli.from_label('ZZ')],\n weights=[.5, .2, .1j])", "_____no_output_____" ], [ "mat = [[0. +0.1j, 0.5-0.2j, 0. +0.j , 0. +0.j ],\n [0.5+0.2j, 0. -0.1j, 0. +0.j , 0. 
+0.j ],\n [0. +0.j , 0. +0.j , 0. -0.1j, 0.5+0.2j],\n [0. +0.j , 0. +0.j , 0.5-0.2j, 0. +0.1j]]\nmat_op = MatrixOperator(mat)\npauli_op_from_mat = op_converter.to_weighted_pauli_operator(mat_op)\npauli_op == pauli_op_from_mat", "_____no_output_____" ] ], [ [ "Classical matrices can be exported for classical usage, again if the user already knows the Operator hierarchy somewhat well:", "_____no_output_____" ] ], [ [ "op_converter.to_matrix_operator(pauli_op).matrix.toarray()", "_____no_output_____" ] ], [ [ "Composition uses the `*` operator, while Terra's operators and Python use `@`.", "_____no_output_____" ] ], [ [ "3*pauli_op + .2j*pauli_op == (3+.2j)*pauli_op", "_____no_output_____" ], [ "print((pauli_op * pauli_op).print_details())", "II\t(0.28+0j)\nZZ\t0j\nZY\t0j\nIX\t0j\n\n" ] ], [ [ "### Aqua's ExpectationValue is Procedural and Inextensible\n\nAqua's ExpectationValue is not contained within a single function or module, but rather split into several functions without a clear interface or flow for user usage. This is due to structural constraints in Aqua which are no longer present, where the algorithm requiring the expectation value held the backend object and could run circuits, but the operator could not. We encourage the reader to scan lines [361-395 of Aqua 6.1 VQE’s](https://github.com/Qiskit/qiskit-aqua/blob/stable/qiskit/aqua/algorithms/adaptive/vqe/vqe.py#L361) ExpectationValue calculation to try to understand where and how the expectation is computed. We’ve been asked by numerous Aqua users to explain how this code works, and most do not attempt to use it on their own.\n\nThe following is the shortest possible way to write an expectation value in Aqua. Note that it fundamentally requires the user to understand a certain execution flow, the correct functions to use to do this, and how those functions work with their execution mode. This takes a few hours to understand at least, often days. Further, there are no hints that a change from the Z basis for each Pauli is being performed here, or matrix multiplication if the system chooses to do that instead.", "_____no_output_____" ] ], [ [ "from qiskit.aqua.operators import WeightedPauliOperator\nfrom qiskit.aqua.components.variational_forms import RY\nfrom qiskit.quantum_info import Pauli\nfrom qiskit import BasicAer, execute, QuantumCircuit\nfrom qiskit.circuit import Parameter\nqasm_sim = BasicAer.get_backend('qasm_simulator')", "_____no_output_____" ], [ "op = WeightedPauliOperator([\n [.5, Pauli.from_label('IX')],\n [.2j, Pauli.from_label('ZY')],\n])\ncircuit = QuantumCircuit(2)\ncircuit.h([0,1])\n\nevaluation_circuits = op.construct_evaluation_circuit(wave_function=circuit, statevector_mode=False)\nresult = execute(evaluation_circuits, qasm_sim).result()\nexpect, std = op.evaluate_with_result(result=result, statevector_mode=False)\nexpect", "_____no_output_____" ] ], [ [ "#### Alternative Expectation Values and the Aer Expectation Value\n\nBecause the ExpectationValue logic is embedded directly in the Operator, modifications to the ExpectationValue (e.g. CVaR) are impossible without editing the Operator directly with heavy branching or duplicating the entire Operator. This branching is already in effect within Aqua, automatically choosing between several execution modes mostly opaquely to the user. 
This is also the case for grouping, evolution, and symmetry logic.\n\nThe most dramatic example of this is the Aer-provided fast ExpectationValue simulation, which is so buried into the Operator it is effectively a super-superuser feature today. It was introduced quickly to achieve critical performance gains, but must be formalized to become a true first-class feature.\n* In Aqua, there is no simple way to specify which ExpectationValue algorithm the user wants, Aer or otherwise, and most users do not know that the Aer Expectation Exists\n* Aer's ExpectationValue is woven throughout the core operator code in a way that is branchy, inexorable, and difficult for users to understand and control\n* A new ExpectationValue, such as one provided by BasicAer or IBMQProvider, would simply introduce additional branches following the existing style", "_____no_output_____" ], [ "### Aqua's State Evolution is Inextensible and Difficult to Navigate\n\nEvolution is somewhat more succinct, but more difficult to navigate in code. The logic for evolution is distributed over several branchy static modules, and the evolution is pre-compiled as a CNOT-chain circuit, which is often not the ideal evaluation format (e.g. matrix multiplication if simulating, or Swap Networks).", "_____no_output_____" ] ], [ [ "from qiskit.circuit import Parameter\n\nop = WeightedPauliOperator([\n [.5, Pauli.from_label('IX')],\n [.2, Pauli.from_label('ZY')],\n])\ncircuit = QuantumCircuit(2)\n\nθ = Parameter('θ')\ninstr = op.evolve_instruction(evo_time=θ)\ncircuit.append(instr, [0,1])\nprint(circuit.draw(fold=4000))\nprint('Decomposed:')\ncircuit.decompose().draw(fold=4000)", " ┌─────────────────┐\nq_0: |0>┤0 ├\n │ Evolution^1(θ) │\nq_1: |0>┤1 ├\n └─────────────────┘\nDecomposed:\n" ] ], [ [ "## Requirements and Design\n\n1. Location and Ownership\n 1. Operators\n 1. Provider-specific Algorithms\n1. Object Model\n 1. Operator Definition - Primitives and Composites\n 1. Algorithms Definition - Primitives and Composite Operations\n 1. Parameterization and Eagerness\n1. Changes to Terra\n1. Changes to Aqua\n 1. Algorithms as composite Operations\n 1. Circuit Execution Algorithms\n 1. Expectation Algorithms\n 1. Evolution Algorithms\n 1. Other Primitive Algorithms", "_____no_output_____" ], [ "### Location and Ownership in Qiskit\n\nGiven the presence of Operator logic in both Aqua and Terra, there are several options for their placement within Qiskit. The primary considerations here relate to which master branch tests them, who owns what in the case of breakage, and who owns what in the case of design.\n\nIn addition, some remote Operator algorithms are being discussed, with one already in production - the Aer Expectation Value. 
The location of these algorithms is also an important question.", "_____no_output_____" ], [ "#### Operator Location Considerations\n\n* The Operator's centrality to Aqua means relying on an external library is a big overhead\n * Reliance on Terra has created frequent firedrills because behavior and interfaces change without integration testing\n * Firedrills are very difficult to troubleshoot because presently there is no integration testing between Terra and Aqua or design review to check whether a change will have downstream implications\n * Operator is so central to Aqua that it will require strong ownership by the Aqua team, constant maintenance and changes\n* Centralized Operator primitives can simplify interfaces across Qiskit\n * By accepting a common Operator format derived from Terra, methods in different areas of Qiskit can communicate in a consistent format without dependencies\n * For example, Aer's expectation value can take a circuit and an Operator, rather than depend on Aqua to define its interface, or rely on an informal interface (e.g. lists) which must be validated\n* Terra and Aqua's respective Operators can be delineated somewhat cleanly\n * Aqua and Terra's operators are seemingly used by completely different users for very different tasks (QA&A vs. QCVV or circuit analysis)\n * Terra's Operators are primarily matrix-based, while Aqua's are primarily composites of sparse representations (e.g. sums of Paulis or Circuits)\n * Though some are definitely shared, such as Pauli\n* Operators and Gates may need to be reconciled at some point\n * The X, Y, and Z Paulis are not different from the X, Y, and Z Gates\n * Both the gate and operator models include functionality for converting unitary matrices to circuit operations", "_____no_output_____" ], [ "#### Operator Location Options\n\n**A.** Move Aqua Operators into Terra, with:\n1. Joint ownership by Aqua team\n2. Aqua integration tests run on Terra's master branch (e.g. pulling in Aqua's master branch to execute tests). _Unit tests alone are not sufficient, as they are usually modified along with breaking changes to pass._\n3. Aligned release cycles so Aqua does not need to scramble to release when Terra does\n\n**Big-A.** Combine Aqua and Terra into a single repo and jointly own Operators\n\n**B.** Move all operators and states into Aqua, jointly owned by Terra team\n\n**C.** Leave Operators split between Aqua and Terra, with dependency on Terra for some primitives (QuantumCircuit, Pauli), with joint ownership and Aqua integration testing\n\n##### **Decision:** Following a discussion in Aqua Design Review, option **A** will be pursued for the remainder of this doc.", "_____no_output_____" ], [ "#### Provider-Specific Algorithm Location Options (Decision)\n\n**A.** Remote algorithms live in provider repo, and are tested and released at provider’s discretion\n\n**B.** Remote algorithms live in Aqua, with Aqua integration testing of functionality in provider repo\n\n**C.** Remote algorithms live in Aqua, with agreed upon interface to enforce consistency, and data interchange (e.g. an Operator format defined in Terra) tested in provider repo", "_____no_output_____" ], [ "### Object Model and Hierarchy \n\nWhat is an Operator _to a QA&A (Quantum Algorithms & Applications) programmer?_\n\nIgnoring the Physical definition of an Operator for a moment, as a _Quantum programming primitive,_ the Operator is:\n\n* __Recursively defined__ - Operators can be one of several _primitives_ - e.g. 
Matrix, Pauli, Clifford, QuantumCircuit, or an arbitrary combination of these primitives, e.g. Addition, Tensor, Composition. \n * It makes complete mathematical sense to add two primitives together, e.g. `(my_matrix+my_circuit)@my_pauli`. In classical programming, this would be like `5.7 + \"pickle\"`.\n* __Both code and data__ - The Operator encodes both data (e.g. a matrix for eigensolution or a wavefunction being prepared) and computation (measure my wavefunction in this basis). There is little distinction between the two in Quantum programming.\n* __Linear__ - The Operator is a recursively powerful construct, allowing algorithmic rearrangement not typically allowed in classical computation. \n * `op1(op2(A,B)) == op1(op2(A)), op2(B))` in many cases, e.g. Expectation(A+B). \n * The idea that `program(a*circuita + b*circuitb)` gives a mathematically valid result is highly surprising.\n* __Algorithmically ubiquitous__ - Every quantum algorithm uses Operators. Algorithms are nearly always defined in literature by Operator operations. This language is rigorous, accepted, and compact. \n* __Eagerly Computable__ - In most cases, Operator computation can be partially compiled as parameters become available, allowing improved performance, functional modularity (e.g. passing a ready-to-run algorithm), and inspection transparency. For example:\n * A circuit can be compiled to a Qobj with parameters missing, to be filled in later\n * The full list of circuits necessary to execute an algorithm can be prepared pending some operator coefficients\n * A full algorithm can be prepared and passed to a user pending the insertion of some subcomponent (a choice of ExpectationValue algorithm) or parameters", "_____no_output_____" ], [ "#### Operator Definition: Primitives and Combinations\n\nOperators can be _primitives_ or _combinations._ Primitives are base-level Operator representations which are not defined in terms of other primitives, but can be converted into one another with some computational work. Combinations are Operators which are constructed from functions of multiple primitives, such as sums and tensors. Combinations store the primitives from which they are constructed. Note that many Gates are present in other classes of primitives, and this must be reconciled as a follow-on to this redesign. The following should all be modules in the Operator hierarchy:\n\n* Primitives\n * Matrix\n * Pauli - X, Y, Z, I\n * QuantumCircuit, Gate\n * Clifford\n * Projector - Ze, O, P, M\n * Stabilizer\n * Graph State - Stored as a graph\n * QuantumCircuit - Implicitly starts from |0⟩⟨0|\n * Others (follow-on): ZX, MPS, Dirac Matrix, Gell-Mann matrix\n* Combinations\n * OpSum - Generalization of WeightedPauli. 
Stores a list of Operators of equal dimension and complex weights\n * OpComposition - Stores a list of Operators which are all of equal dimension\n * OpKron - Stores a list of Operators of any size\n * OpVec - Stores a list of Operators of any size\n * OpExp - Stores a single Operator, acting as a placeholder for some Evolution algorithm to replace later\n * OpCombo - custom, user-defined recombination function\n", "_____no_output_____" ] ], [ [ "from qiskit.aqua.operators.pauli import X, Y, Z, I\nop_new = .5*(I^X) + .2*(Z^Y) + .1j*(Z^Z)\nop_new == pauli_op", "_____no_output_____" ] ], [ [ "Note that to support the above, the existing Pauli in Terra would need to support Tensor, sum, and scalar multiplication which can return an OpSum and OpKron.\n\nThe following overload operations are also desirable:\n* Operator composition using `@` overload\n * __Decision:__ deprecate the `*` overload for composition?\n* Power (`**3`), kronpower (`^3`)", "_____no_output_____" ] ], [ [ "(pauli_op^2)**2 == (pauli_op^pauli_op)@(pauli_op^pauli_op)", "_____no_output_____" ], [ "from qiskit.aqua.ansatz import Ry\nfrom qiskit.aqua.operators.projectors import Ze, O, P\n\nansatz = Ry(qubits=2, depth=3) @ (P^(-.1*O + 3*Ze))\n # This is an OpSum of two circuits!", "_____no_output_____" ] ], [ [ "#### Algorithms Definition: Primitives and Composites\n\nOperations on Operators also can be described as primitives or combinations of such. Primitives are computations which can be performed directly on some available computation engine, such as Numpy or Quantum Hardware, while composites are constructed from piping primitives together. Algorithms accept only _specific primitives,_ so an algorithm taking a Pauli vs. one taking a matrix are fundamentally different, but are also defined over certain combinations of their input primitives. For example, a Change-of-Basis Expectation Value is defined to accept a Pauli and a Projector (or QuantumCircuit acting as one from Zero implicitly), but can also accept sums, tensors, and vectorizations of Paulis and Projectors. If an unsupported primitive, such as Matrix or OpComposition were passed in, an exception would be thrown.\n\n* Primitives\n * Classical sum, product, tensor, trace, etc.\n * Z-Basis QuantumCircuit measurement / Trace (traditional QASM backend)\n * Primitive Conversion - Pauli to matrix, matrix to Pauli, etc.\n * Evolution Conversion - Trotter, Suzuki, etc.\n * Pauli Sum, Composition, Tensor\n * Change of Basis - Pauli, Fourier\n * Optimizers\n * External functions, such as Drivers or imports\n* Composites\n * ExpectationValue\n * Existing Aqua Algorithms: VQE, QPE, HHL, etc.\n * Gradients\n \nOver time, we have found that it is easiest to describe the behavior of Algorithms in terms of the flow of Operators through various components and subroutines. 
This description is naturally recursive, and considerably easier to understand than the current presentation of algorithmic flow in Aqua.", "_____no_output_____" ], [ "To demonstrate this, consider the following VQE coded from scratch in this model:", "_____no_output_____" ] ], [ [ "ansatz = Ry(qubits=2, depth=3) @ (P^P)\n    # Ansatz state = Ry(θ)|++⟩\nhamiltonian = 3*(I^Z) + .4j*(X^Z)\nexpectation = PauliExpectation(ansatz, hamiltonian, backend)\nprint(expectation.run({ansatz.params: np.zeros(len(ansatz.params))}))\n    # Print starting expectation\n\ngradient = ParamShiftGradient(expectation)\nmy_vqe = AQGD(initial_point=np.zeros(len(ansatz.params)),\n              cost_fn=expectation.run, grad_fn=gradient.run)\n    # Here the optimizer itself plays the role of the VQE\nmin_eig = my_vqe.run()", "_____no_output_____" ] ], [ [ "#### Parameterization and Eagerness\n\nOperators and algorithms can be _parameterized,_ i.e. missing some key information needed in order to execute. For Operators these may be sum coefficients, evolution times, QuantumCircuit parameters, and more. For Algorithms these may be input operators, execution parameters, or instances of algorithms used in computation which cannot be inferred by default (e.g. the backend on which to execute, the optimizer, etc.).\n\n##### Eager Parameterization+Execution Interface Options:\n\nAn algorithm should execute as soon as it has filled the parameters necessary to do so. This is called **Eager Execution.** In a similar vein, an OpSum can be seen as eagerly waiting for the contained operators to be summable, e.g. replaced with scalars by an expectation value. (**Decision**) Some interface options for eagerness:\n\n**Option A**: Algorithms should be **callable** with a parameter dictionary, triggering a breadth-first search to parameterize any sub-objects with the parameter dictionary. This may be too much hocus pocus and difficult for implementers of algorithms to understand. A user may want to parameterize without executing, so an `execute` parameter should be available in the parameterization function.", "_____no_output_____" ] ], [ [ "my_op = Parameter('t1')*(Z^Z) + .6*(X^I)\nmy_vqe = VQE(backend=Parameter('backend'), \n             operator=my_op, \n             ansatz=Ry(qubits=2, reps=3), \n             optimizer=SLSQP(initial_point=Parameter('initial_rotations')))\nmy_vqe({'t1': .2j, 'backend': Aer.get_backend('qasm_simulator')})\n    # Didn't return anything yet\nrots = np.zeros(len(my_vqe.ansatz.params))\nmin_eig = my_vqe({'initial_rotations': rots})\n    # Now a value is returned, and other execution information can be found inside the object", "_____no_output_____" ], [ "# Alternatively\nmy_vqe({'initial_rotations': rots}, execute=False)\nmin_eig = my_vqe()", "_____no_output_____" ] ], [ [ "**Option B:** Algorithms should have a `.run(param_dict)` method which accepts parameters and performs the breadth-first parameterization. The form factor of this would be similar to the above, but with `run()` instead of direct function calls. This has the benefit of some backward compatibility.\n\n**Option C:** Algorithms should support separate parameterization and execution functions. This is the most explicit, but is clunky in an eager execution regime, where execution is automatic if the algorithm is sufficiently parameterized.\n\nAll of an Algorithm or Operator's pending Parameters should be recursively returned by a `.params` function, e.g. as in the sketch below. 
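", "_____no_output_____" ], [ "A minimal sketch of such recursive parameter collection (pure Python, hypothetical names):", "_____no_output_____" ] ], [ [ "# Hypothetical sketch of recursive parameter discovery -- not the proposed API.\nclass Parameter:\n    def __init__(self, name):\n        self.name = name\n\nclass BoundPauli:\n    params = frozenset()  # a fully bound primitive has nothing left to bind\n\nclass OpSum:\n    def __init__(self, coeffs, ops):\n        self.coeffs = coeffs  # may contain Parameter placeholders\n        self.ops = ops\n\n    @property\n    def params(self):\n        # Gather unbound Parameters from coefficients and from sub-operators\n        found = {c.name for c in self.coeffs if isinstance(c, Parameter)}\n        for op in self.ops:\n            found |= set(op.params)\n        return found\n\nop = OpSum([Parameter('t1'), 0.6], [BoundPauli(), BoundPauli()])\nprint(op.params)  # {'t1'}", "_____no_output_____" ] ], [ [ "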
_(Tentative)_ A `deepcopy` option should be available to return a deep copy of the algorithm with the desired parameterization, rather than parameterizing the algorithm in-place (this is evaluated with `execute=False` by default).", "_____no_output_____" ], [ "##### Eager Partial Computation\n\nAqua should be **eager** in partial computation while some parameters necessary for execution are not yet available, to allow for inspection transparency and performance. For example, once backend information is available, circuits should be transpiled for the backend or otherwise prepared for execution. This can avoid many transpilations or preparations later if the circuits are duplicated for Operator composition, as in Change-of-Basis expectation values or gradients.\n\nThe choice of which partial computation to perform is left to the algorithm, so only worthwhile partial computations are performed. If parameters change, re-preparing the partial computation can be expensive, so a `lazy` parameter should be available in the callable function.", "_____no_output_____" ], [ "### Changes to Terra\n\nThe `quantum_info` directory should be organized as follows:\n\n * channel\n * ...\n * matrix.py **- Decision: Rename operator.py to matrix.py or matrix_op.py?**\n * pauli.py\n * clifford.py **- Decision: Use Ignis's Clifford?**\n * projector.py\n * stabilizer.py\n * Composites\n     * op_sum.py, op_composite.py, op_kron.py, op_vec.py, op_exp.py\n\nIn addition to the functionality detailed in [Object Model and Hierarchy](#Object-Model-and-Hierarchy) above, Terra should support the following for all of the above non-matrix-based operators:\n* `to_matrix()` - Method to allow quick access to unscalable classical tools, e.g. numpy eigensolution\n* `to_quantum_circuits()` - Returns a single quantum circuit, or a list of quantum circuits and coefficients, representing the full Operator, including any distributive composition, tensors, etc.\n* Trace, Partial Trace, Determinant, Norms, Adjoints - Where possible, linear algebra should be easily accessible", "_____no_output_____" ], [ "##### Follow-on: Terra Reconciliation Between Operators and Gates\n\nTerra's Operators and Gates are currently fully distinct from one another. The X, Y, Z, and Clifford Gates, Evolution by a matrix-specified Unitary (UnitaryGate), and more are direct overlaps between the two, but are not interoperable. At some point, Terra should address this difference to allow Operators to be inserted onto a circuit, maintain only a single set of primitive unitaries, allow Gates to be composed with Operators, etc.", "_____no_output_____" ], [ "### Changes to Aqua\n\nThe changes to Aqua are basically just to\n* deprecate the Operators after moving their logic into Terra,\n* change the Aqua algorithms to rely on the new Terra operators,\n* break up the Expectation, Evolution, circuit execution, and gradient code into first-class algorithms users can extend and understand,\n* and change the existing Aqua algorithms to rely on these new algorithms.", "_____no_output_____" ], [ "##### Change Algorithms to rely on Terra operators and new Operator algorithms\n\nIn particular, algorithms should be accessible with only Terra-defined inputs (meaning inputs constructed using Terra alone) to provide a seamless experience between Terra and Aqua usage, and extensible interfaces. For example, a VQE should be runnable by passing only a parameterized QuantumCircuit and a Terra-defined Operator, allowing a provider or collaborator to share a custom VQE without an unnecessary dependency on Aqua, e.g. via an interface like the sketch below. 
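", "_____no_output_____" ], [ "As a sketch of what such an Aqua-free extension point might look like (names are hypothetical; only plain Python is used here, with the circuit and operator arguments standing in for Terra types):", "_____no_output_____" ] ], [ [ "from abc import ABC, abstractmethod\n\n# Hypothetical interface sketch -- not an existing Aqua or Terra class.\nclass ExpectationValue(ABC):\n    # Constructed from plain Terra inputs: a (parameterized) QuantumCircuit\n    # and a Terra-defined Operator; no Aqua import is required to implement it.\n    def __init__(self, state_circuit, observable, backend=None):\n        self.state_circuit = state_circuit\n        self.observable = observable\n        self.backend = backend\n\n    @abstractmethod\n    def run(self, param_bindings):\n        # Return an estimate of <psi(params)| observable |psi(params)>\n        ...\n\nclass ProviderExpectation(ExpectationValue):\n    # A trivial stand-in a provider could ship without depending on Aqua\n    def run(self, param_bindings):\n        return 0.0\n\nprint(ProviderExpectation(state_circuit=None, observable=None).run({}))  # 0.0", "_____no_output_____" ] ], [ [ "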
In particular, this allows the Aer Expectation Value to be defined with the same interface as Aqua's Pauli Expectation, without a dependency on Aqua.", "_____no_output_____" ], [ "##### Circuit Execution Algorithms - **Decision: Name - CircuitExecution? QCExecute? QuantumMeasureZ? RunCircuit?**\n\nCircuit execution is a utility in Aqua today, mediated by the QuantumInstance, which most users do not understand, and it is growing increasingly branchy to accommodate more and more execution variants. Measurement error mitigation, noisy simulation setup, hardware API fault handling, and more all fall into the same execution flow in various branches.\n\nCircuit execution is an algorithm for sampling a circuit's expectation in exponentially many $\{Z, I\}^{\otimes n}$ bases, but it is not reflected as an algorithm today. It should be promoted to a first-class algorithm to be more transparent and compartmental, wherein, for example, code for simulation and code for execution on hardware can be kept distinct. A CircuitExecution Algorithm accepts a backend and interacts with it in some well-defined way - in effect breaking up and organizing the functionality of the QuantumInstance. Some examples of CircuitExecution algorithms are:\n1. QuantumHardware - An Execution algorithm tailored for execution on remote hardware, including fault handling, slicing to limit job sizes, etc. Can stack up a queue of circuits for batch execution, or accept a list of jobids to use as the first n results objects, allowing the user to reuse results from a terminated execution.\n1. IdealSimulator - An algorithm tailored for execution in ideal simulation.\n1. NoisySimulator - A utility for querying a Hardware backend's properties and providing a noisy simulator using Aer's \"noise config from device\" functionality.\n1. ErrorMitigatedExecutor - OUT OF SCOPE, BEING COVERED IN ANOTHER DOC.\n\nIf none is explicitly specified, Aqua should aggressively guess the preferred execution algorithm for the user given the backend and other execution parameters.", "_____no_output_____" ], [ "##### Expectation Algorithms\n\nAqua should support the following ExpectationValue algorithms. An `ExpectationBase` class should allow automatic selection of an expectation algorithm by default if none is specified - e.g. if the user has Aer installed, VQE will use the AerExpectation by default instead of QASM execution. Other possible expectation values include:\n1. PauliExpectation (Change-of-Basis)\n1. CVaRExpectation\n1. AerExpectation - relies on Aer's fast expectation feature\n1. MatrixExpectation\n1. (Tentative) BasicAerExpectation\n1. RichardsonExpectation - OUT OF SCOPE, BEING COVERED IN ANOTHER DOC.\n\n##### Grouping\n\nGrouping is an important feature within the PauliExpectation in Aqua today, but it is not used by default, and it has an interface which is not obvious. Grouping should be moved into the PauliExpectation, with a simple interface for the user to specify whether to group the Paulis, or how aggressively to do so. By default, the PauliExpectation should group Paulis as aggressively as is performant on the given execution backend.", "_____no_output_____" ], [ "##### Circuit Evolution Algorithms\n\nSimilarly for Evolution, a variety of algorithms should be available for converting an OpExp composite operator into a sum, composition, etc. More specifically, circuit evolution algorithms take an OpExp placeholder and return operators which approximate the value of the exponentiation, as in the sketch below. 
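", "_____no_output_____" ], [ "A rough sketch of one such conversion, first-order Trotterization of an OpExp over a Pauli sum (toy types and hypothetical names; a real implementation would return circuits or an OpComposition):", "_____no_output_____" ] ], [ [ "# Toy first-order Trotter conversion -- hypothetical types, not the proposed API.\nclass OpExp:\n    def __init__(self, terms, time):\n        self.terms = terms  # list of (coefficient, pauli_label) pairs\n        self.time = time    # evolution time t in exp(-i * t * H)\n\ndef trotterize(op_exp, reps):\n    # Approximate exp(-i*t*sum_j c_j P_j) by reps ordered slices of\n    # single-term exponentials exp(-i*(t/reps)*c_j P_j).\n    dt = op_exp.time / reps\n    product = []\n    for _ in range(reps):\n        for coeff, label in op_exp.terms:\n            product.append(('exp', coeff * dt, label))\n    return product  # stands in for an OpComposition of single-Pauli evolutions\n\nhamiltonian = [(0.5, 'ZZ'), (0.2, 'XI')]\nprint(trotterize(OpExp(hamiltonian, time=1.0), reps=2))", "_____no_output_____" ] ], [ [ "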
For example, the PauliEvolution accepts a Pauli and returns a QuantumCircuit representing the unitary evolution of that Pauli. An `EvolutionBase` class should allow automatic selection of an evolution algorithm by default if none is specified.\n\n1. PauliEvolution (Change-of-Basis)\n1. SumEvolution\n    1. Trotter\n    1. Suzuki\n1. MatrixEvolution\n1. (Tentative) [LinCombEvolution](https://arxiv.org/abs/1202.5822)\n1. (Tentative) AerEvolution\n1. (Tentative) BasicAerEvolution\n\n##### Other algorithms to build out into first-class Algorithm groups\n1. Converters - convert lazily between Operator types\n1. Gradient\n1. Optimization", "_____no_output_____" ], [ "## Timeline and Gameplan\n\nStage 1: Implement new Operators in Terra with thorough unit and integration tests.\n\nStage 2: Implement Operator algorithms in Aqua, relying on the Terra Operators.\n\nStage 3: Migrate Aqua algorithms to rely on the new Operator algorithms and new Terra Operators.\n\nStage 4: Deprecate present Aqua Operators (0.7 release).\n\nStage 5: Delete present Aqua Operators (0.8 release).", "_____no_output_____" ], [ "## ⚰️⚰️⚰️⚰️⚰️⚰️ Graveyard ⚰️⚰️⚰️⚰️⚰️⚰️", "_____no_output_____" ], [ "### Other Benefits of OperatorFlow\n\n* Obedient Eager Evaluation - Best of Eager and Lazy evaluation:\n    * Partially evaluate whatever you can with the parameters you have\n    * Allows transparency, inspection, and rapid prototyping (e.g. users couldn't find circuits or operators when working through JSON dictionaries)\n    * Performance - partially compiled algorithms save massive amounts of compilation and deepcopy time\n    * But not too early, not compiling preemptively for a possible parameter value\n    * Objects are not prematurely constructed in a form that is wrong for the next step or engine (e.g. building massive CNOT chains for UCCSD simulations)\n    * Intractable but possible computations (e.g. convert to matrix and solve) are avoided\n* Natural, Powerful, and Self-defining Programming Interfaces\n    * __An algorithm's behavior is simply defined by the operator primitives it accepts and returns__\n    * Nesting of algorithms is identical to user algorithm execution\n* Ubiquitous parameters, and an obvious interface for Optimization\n    * OpCombo coefficients, primitive parameters, and algorithm parameters can all be parameterized\n    * Algorithms of any level of completeness can be returned\n    * Optimization is universal - simply pass a nearly-complete algorithm to an optimizer, and the callable interface executes when the optimizer provides the parameters", "_____no_output_____" ], [ "#### Grouping\n\nAqua's grouping functionality is only relevant to ExpectationValues today.", "_____no_output_____" ] ], [ [ "qaoa_cost_op = WeightedPauliOperator([\n    [.5, Pauli.from_label('ZIZ')],\n    [.2, Pauli.from_label('ZZI')],\n    [.1j, Pauli.from_label('IZZ')],\n])\n\ngrouped_cost_op = TPBGroupedWeightedPauliOperator.sorted_grouping(qaoa_cost_op)\ngrouped_cost_op._basis", "_____no_output_____" ], [ "class VQE(QuantumAlgorithm):\n    def __init__(self, operator, var_form, optimizer,\n                 initial_point=None, backend=None, callback=None, ...):\n        ...\n        self._expectation_value = ExpectationValue(self._operator, self._backend)\n\n    def _energy_evaluation(self, params):\n        circuits = self._var_form.construct_circuit(params)\n        energy, stdev = self._expectation_value.run(circuits)\n        return energy", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code" ] ]
cb98caa2c12a6f7f55cc9c39f912e6f538f65746
157,026
ipynb
Jupyter Notebook
mflix-python/notebooks/basic_aggregation.ipynb
ricardodemauro/MongoUniversity_M220P
378534614990b3b28a446a63977425a372bf2fe4
[ "MIT" ]
null
null
null
mflix-python/notebooks/basic_aggregation.ipynb
ricardodemauro/MongoUniversity_M220P
378534614990b3b28a446a63977425a372bf2fe4
[ "MIT" ]
null
null
null
mflix-python/notebooks/basic_aggregation.ipynb
ricardodemauro/MongoUniversity_M220P
378534614990b3b28a446a63977425a372bf2fe4
[ "MIT" ]
null
null
null
64.75299
986
0.541025
[ [ [ "import pymongo\nfrom bson.json_util import dumps\nuri = \"mongodb+srv://m220student2:[email protected]/test\"\nclient = pymongo.MongoClient(uri)\nmflix = client.mflix\nmovies = mflix.movies", "_____no_output_____" ], [ "sam_raimi_cursor = movies.find( { \"directors\": \"Sam Raimi\" } )", "_____no_output_____" ], [ "sam_raimi_cursor", "_____no_output_____" ], [ "list(sam_raimi_cursor)", "_____no_output_____" ], [ "match_stage = {\"$match\": { \"directors\": \"Sam Raimi\" } }\npipeline = [\n match_stage\n]\nsam_raimi_aggregation = movies.aggregate( pipeline )", "_____no_output_____" ], [ "sam_raimi_aggregation", "_____no_output_____" ], [ "list(sam_raimi_aggregation)", "_____no_output_____" ], [ "sam_raimi_cursor = movies.find(\n { \"directors\": \"Sam Raimi\" },\n { \"_id\": 0, \"title\": 1, \"cast\": 1 }\n)\n\nprint(dumps(sam_raimi_cursor, indent=2))", "[\n {\n \"title\": \"Within the Woods\",\n \"cast\": [\n \"Bruce Campbell\",\n \"Ellen Sandweiss\",\n \"Mary Valenti\",\n \"Scott Spiegel\"\n ]\n },\n {\n \"title\": \"The Evil Dead\",\n \"cast\": [\n \"Bruce Campbell\",\n \"Ellen Sandweiss\",\n \"Richard DeManincor\",\n \"Betsy Baker\"\n ]\n },\n {\n \"title\": \"Crimewave\",\n \"cast\": [\n \"Louise Lasser\",\n \"Paul L. Smith\",\n \"Brion James\",\n \"Sheree J. Wilson\"\n ]\n },\n {\n \"title\": \"Evil Dead II\",\n \"cast\": [\n \"Bruce Campbell\",\n \"Sarah Berry\",\n \"Dan Hicks\",\n \"Kassie Wesley DePaiva\"\n ]\n },\n {\n \"title\": \"Darkman\",\n \"cast\": [\n \"Liam Neeson\",\n \"Frances McDormand\",\n \"Colin Friels\",\n \"Larry Drake\"\n ]\n },\n {\n \"title\": \"Army of Darkness\",\n \"cast\": [\n \"Bruce Campbell\",\n \"Embeth Davidtz\",\n \"Marcus Gilbert\",\n \"Ian Abercrombie\"\n ]\n },\n {\n \"title\": \"The Quick and the Dead\",\n \"cast\": [\n \"Sharon Stone\",\n \"Gene Hackman\",\n \"Russell Crowe\",\n \"Leonardo DiCaprio\"\n ]\n },\n {\n \"title\": \"A Simple Plan\",\n \"cast\": [\n \"Bill Paxton\",\n \"Bridget Fonda\",\n \"Billy Bob Thornton\",\n \"Brent Briscoe\"\n ]\n },\n {\n \"title\": \"For Love of the Game\",\n \"cast\": [\n \"Kevin Costner\",\n \"Kelly Preston\",\n \"John C. 
Reilly\",\n \"Jena Malone\"\n ]\n },\n {\n \"title\": \"Spider-Man\",\n \"cast\": [\n \"Tobey Maguire\",\n \"Willem Dafoe\",\n \"Kirsten Dunst\",\n \"James Franco\"\n ]\n },\n {\n \"title\": \"The Gift\",\n \"cast\": [\n \"Cate Blanchett\",\n \"Giovanni Ribisi\",\n \"Keanu Reeves\",\n \"Katie Holmes\"\n ]\n },\n {\n \"title\": \"Spider-Man 2\",\n \"cast\": [\n \"Tobey Maguire\",\n \"Kirsten Dunst\",\n \"James Franco\",\n \"Alfred Molina\"\n ]\n },\n {\n \"title\": \"Spider-Man 3\",\n \"cast\": [\n \"Tobey Maguire\",\n \"Kirsten Dunst\",\n \"James Franco\",\n \"Thomas Haden Church\"\n ]\n },\n {\n \"title\": \"Drag Me to Hell\",\n \"cast\": [\n \"Alison Lohman\",\n \"Justin Long\",\n \"Lorna Raver\",\n \"Dileep Rao\"\n ]\n },\n {\n \"title\": \"Oz the Great and Powerful\",\n \"cast\": [\n \"James Franco\",\n \"Mila Kunis\",\n \"Rachel Weisz\",\n \"Michelle Williams\"\n ]\n }\n]\n" ], [ "match_stage = { \"$match\": { \"directors\": \"Sam Raimi\" } }\nproject_stage = { \"$project\": { \"_id\": 0, \"title\": 1, \"cast\": 1 } }\n\npipeline = [\n match_stage,\n project_stage\n]\n\nsam_raimi_aggregation = movies.aggregate( pipeline )\n\nprint(dumps(sam_raimi_aggregation, indent=2))", "[\n {\n \"title\": \"Within the Woods\",\n \"cast\": [\n \"Bruce Campbell\",\n \"Ellen Sandweiss\",\n \"Mary Valenti\",\n \"Scott Spiegel\"\n ]\n },\n {\n \"title\": \"The Evil Dead\",\n \"cast\": [\n \"Bruce Campbell\",\n \"Ellen Sandweiss\",\n \"Richard DeManincor\",\n \"Betsy Baker\"\n ]\n },\n {\n \"title\": \"Crimewave\",\n \"cast\": [\n \"Louise Lasser\",\n \"Paul L. Smith\",\n \"Brion James\",\n \"Sheree J. Wilson\"\n ]\n },\n {\n \"title\": \"Evil Dead II\",\n \"cast\": [\n \"Bruce Campbell\",\n \"Sarah Berry\",\n \"Dan Hicks\",\n \"Kassie Wesley DePaiva\"\n ]\n },\n {\n \"title\": \"Darkman\",\n \"cast\": [\n \"Liam Neeson\",\n \"Frances McDormand\",\n \"Colin Friels\",\n \"Larry Drake\"\n ]\n },\n {\n \"title\": \"Army of Darkness\",\n \"cast\": [\n \"Bruce Campbell\",\n \"Embeth Davidtz\",\n \"Marcus Gilbert\",\n \"Ian Abercrombie\"\n ]\n },\n {\n \"title\": \"The Quick and the Dead\",\n \"cast\": [\n \"Sharon Stone\",\n \"Gene Hackman\",\n \"Russell Crowe\",\n \"Leonardo DiCaprio\"\n ]\n },\n {\n \"title\": \"A Simple Plan\",\n \"cast\": [\n \"Bill Paxton\",\n \"Bridget Fonda\",\n \"Billy Bob Thornton\",\n \"Brent Briscoe\"\n ]\n },\n {\n \"title\": \"For Love of the Game\",\n \"cast\": [\n \"Kevin Costner\",\n \"Kelly Preston\",\n \"John C. 
Reilly\",\n \"Jena Malone\"\n ]\n },\n {\n \"title\": \"Spider-Man\",\n \"cast\": [\n \"Tobey Maguire\",\n \"Willem Dafoe\",\n \"Kirsten Dunst\",\n \"James Franco\"\n ]\n },\n {\n \"title\": \"The Gift\",\n \"cast\": [\n \"Cate Blanchett\",\n \"Giovanni Ribisi\",\n \"Keanu Reeves\",\n \"Katie Holmes\"\n ]\n },\n {\n \"title\": \"Spider-Man 2\",\n \"cast\": [\n \"Tobey Maguire\",\n \"Kirsten Dunst\",\n \"James Franco\",\n \"Alfred Molina\"\n ]\n },\n {\n \"title\": \"Spider-Man 3\",\n \"cast\": [\n \"Tobey Maguire\",\n \"Kirsten Dunst\",\n \"James Franco\",\n \"Thomas Haden Church\"\n ]\n },\n {\n \"title\": \"Drag Me to Hell\",\n \"cast\": [\n \"Alison Lohman\",\n \"Justin Long\",\n \"Lorna Raver\",\n \"Dileep Rao\"\n ]\n },\n {\n \"title\": \"Oz the Great and Powerful\",\n \"cast\": [\n \"James Franco\",\n \"Mila Kunis\",\n \"Rachel Weisz\",\n \"Michelle Williams\"\n ]\n }\n]\n" ], [ "unwind_stage = { \"$unwind\": \"$directors\" }\n\ngroup_stage = {\n \"$group\": {\n \"_id\": {\n \"director\": \"$directors\"\n },\n \"average_rating\": { \"$avg\": \"$imdb.rating\" }\n }\n}\n\nsort_stage = {\n \"$sort\": { \"average_rating\": -1 }\n}\n\n# create pipeline from four different stages\npipeline = [\n unwind_stage,\n group_stage,\n sort_stage\n]\n\n# aggregate using pipeline\ndirector_ratings = movies.aggregate(pipeline)\n\n# iterate through the resulting cursor\nlist(director_ratings)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb98cee7382e700d8c67110dfa60dcd5deedd8da
9,634
ipynb
Jupyter Notebook
workflow/pdos_bader_analysis/pdos_anal.ipynb
raulf2012/PROJ_IrOx_OER
56883d6f5b62e67703fe40899e2e68b3f5de143b
[ "MIT" ]
1
2022-03-21T04:43:47.000Z
2022-03-21T04:43:47.000Z
workflow/pdos_bader_analysis/pdos_anal.ipynb
raulf2012/PROJ_IrOx_OER
56883d6f5b62e67703fe40899e2e68b3f5de143b
[ "MIT" ]
null
null
null
workflow/pdos_bader_analysis/pdos_anal.ipynb
raulf2012/PROJ_IrOx_OER
56883d6f5b62e67703fe40899e2e68b3f5de143b
[ "MIT" ]
1
2021-02-13T12:55:02.000Z
2021-02-13T12:55:02.000Z
20.898048
68
0.421528
[ [ [ "# PDOS data analysis and plotting\n---", "_____no_output_____" ], [ "### Import Modules", "_____no_output_____" ] ], [ [ "import os\nprint(os.getcwd())\nimport sys\n\nimport plotly.graph_objs as go\n\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n\n# #########################################################\nfrom methods import get_df_features_targets\n\nfrom proj_data import scatter_marker_props, layout_shared", "_____no_output_____" ] ], [ [ "### Read Data", "_____no_output_____" ] ], [ [ "df_features_targets = get_df_features_targets()", "_____no_output_____" ] ], [ [ "\n", "_____no_output_____" ] ], [ [ "# df_features_targets.columns.tolist()\n\npband_indices = df_features_targets[[\n (\n 'features',\n # 'oh',\n 'o',\n 'p_band_center',\n )\n ]].dropna().index.tolist()\n\n\ndf_i = df_features_targets.loc[\n pband_indices\n ][[\n (\"targets\", \"g_oh\", \"\"),\n (\"targets\", \"g_o\", \"\"),\n (\"targets\", \"g_o_m_oh\", \"\"),\n\n (\"targets\", \"e_oh\", \"\"),\n (\"targets\", \"e_o\", \"\"),\n (\"targets\", \"e_o_m_oh\", \"\"),\n\n (\"features\", \"o\", \"p_band_center\"),\n ]]", "_____no_output_____" ], [ "# pband_indices = \ndf_features_targets[[\n (\n 'features',\n # 'oh',\n 'o',\n 'p_band_center',\n )\n ]]\n\n # ]].dropna().index.tolist()", "_____no_output_____" ], [ "# assert False", "_____no_output_____" ], [ "# df_features_targets.shape", "_____no_output_____" ], [ "# (288, 7)\n# (311, 7)\n# (312, 7)\n# (316, 7)\n\n# df_i.shape", "_____no_output_____" ], [ "# assert False", "_____no_output_____" ], [ "# df_i[\"\"]\n\ndf = df_i\ndf = df[\n (df[\"features\", \"o\", \"p_band_center\"] > -3.5) &\n (df[\"features\", \"o\", \"p_band_center\"] < -2.) &\n # (df[\"\"] == \"\") &\n # (df[\"\"] == \"\") &\n [True for i in range(len(df))]\n ]\ndf_i = df", "_____no_output_____" ], [ "x = df_i[\"features\", \"o\", \"p_band_center\"]\n# y = df_i[\"targets\", \"g_oh\", \"\"]\n# y = df_i[\"targets\", \"g_o\", \"\"]\ny = df_i[\"targets\", \"g_o_m_oh\", \"\"]\n# y = df_i[\"targets\", \"e_o_m_oh\", \"\"]\n\n\n\n\nres = stats.linregress(x, y)\ny_new_fit = res.intercept + res.slope * x\n\n\ndef colin_fit(p_i):\n g_o_m_oh_i = 0.94 * p_i + 3.58\n return(g_o_m_oh_i)\n\ntrace_colin_fit = go.Scatter(\n x=[-6, 0],\n y=[colin_fit(-6), colin_fit(0)],\n mode=\"lines\",\n name=\"Colin fit (G_OmOH = 0.94 * p_i + 3.58)\",\n )\n\ntrace_my_fit = go.Scatter(\n x=x,\n y=y_new_fit,\n mode=\"lines\",\n name=\"Colin fit (G_OmOH = 0.94 * p_i + 3.58)\",\n )\n\ny_new_fit\n\ntrace = go.Scatter(\n x=x, y=y,\n mode=\"markers\",\n name=\"My DFT data\",\n )", "_____no_output_____" ], [ "x_i = x.to_numpy()\nX = x_i.reshape(-1, 1)", "_____no_output_____" ], [ "import numpy as np\nfrom sklearn.linear_model import LinearRegression\n# X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])\n# y = 1 * x_0 + 2 * x_1 + 3\n# y = np.dot(X, np.array([1, 2])) + 3\nreg = LinearRegression().fit(X, y)\nreg.score(X, y)\n\nprint(\nreg.coef_,\nreg.intercept_,\n)\n\n# reg.predict(np.array([[3, 5]]))\ny_pred_mine = reg.predict(\n [[-6], [2]],\n )", "_____no_output_____" ], [ "trace_my_fit = go.Scatter(\n x=[-6, 2],\n y=y_pred_mine,\n mode=\"lines\",\n name=\"My fit (G_OmOH = 0.75 * p_i + 3.55)\",\n )", "_____no_output_____" ], [ "data = [trace, trace_colin_fit, trace_my_fit]\n\n# data = [trace, trace_colin_fit, trace_my_fit]", "_____no_output_____" ], [ "layout_mine = go.Layout(\n\n showlegend=False,\n\n xaxis=go.layout.XAxis(\n title=go.layout.xaxis.Title(\n text=\"ε<sub>2p</sub>\",\n ),\n range=[-6, 0, ]\n ),\n\n 
yaxis=go.layout.YAxis(\n title=go.layout.yaxis.Title(\n text=\"ΔE<sub>O-OH</sub>\",\n ),\n range=[-3, 4, ]\n ),\n\n )\n\n\n# #########################################################\nlayout_shared_i = layout_shared.update(layout_mine)", "_____no_output_____" ], [ "fig = go.Figure(data=data, layout=layout_shared_i)\nfig.show()", "_____no_output_____" ] ], [ [ "\n\n", "_____no_output_____" ] ], [ [ "# df_i", "_____no_output_____" ], [ "# df_features_targets", "_____no_output_____" ], [ "# (0.94 * 0 + 3.58) - (0.94 * 3 + 3.58)", "_____no_output_____" ], [ "# 0.94 * 0.3", "_____no_output_____" ], [ "# res.intercept", "_____no_output_____" ], [ "# res.slope", "_____no_output_____" ], [ "# layout = go.Layout(\n\n# xaxis=go.layout.XAxis(\n# title=go.layout.xaxis.Title(\n# text=\"ε<sub>2p</sub>\",\n# ),\n# ),\n\n# yaxis=go.layout.YAxis(\n# title=go.layout.yaxis.Title(\n# text=\"ΔE<sub>O-OH</sub>\",\n# ),\n# ),\n\n# )", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "raw", "code", "raw", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "raw" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "raw" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
cb98d3d35873047ecf69147ca589c3fd128f420d
1,045,674
ipynb
Jupyter Notebook
tutorial/tutorial.ipynb
ycopin/probfit
50d45633e90af7ba564d9026b24914b042d1e0cd
[ "MIT" ]
22
2018-10-31T16:03:36.000Z
2022-02-17T11:22:51.000Z
tutorial/tutorial.ipynb
ycopin/probfit
50d45633e90af7ba564d9026b24914b042d1e0cd
[ "MIT" ]
46
2018-10-24T10:13:06.000Z
2022-03-22T11:15:31.000Z
tutorial/tutorial.ipynb
ycopin/probfit
50d45633e90af7ba564d9026b24914b042d1e0cd
[ "MIT" ]
18
2015-04-07T00:31:20.000Z
2018-09-14T12:35:04.000Z
222.294643
52,589
0.848107
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
cb98e6a500ad6b201f13ca5476b5e7bbce67f468
120,880
ipynb
Jupyter Notebook
metrics/CORTX_Metrics_Explore.ipynb
sarthakarora1208/cortx
bcd87c79b8743167b14af27b1bf8ff5cc48e99a3
[ "Apache-2.0" ]
552
2020-09-24T18:16:09.000Z
2022-03-25T06:21:55.000Z
metrics/CORTX_Metrics_Explore.ipynb
sarthakarora1208/cortx
bcd87c79b8743167b14af27b1bf8ff5cc48e99a3
[ "Apache-2.0" ]
722
2020-09-24T19:48:44.000Z
2022-03-31T17:42:41.000Z
metrics/CORTX_Metrics_Explore.ipynb
sarthakarora1208/cortx
bcd87c79b8743167b14af27b1bf8ff5cc48e99a3
[ "Apache-2.0" ]
442
2020-09-24T14:24:21.000Z
2022-03-25T10:40:16.000Z
130.964247
15,767
0.665511
[ [ [ "### Basic Functions for Interactively Exploring the CORTX Metrics Stored in Pickles", "_____no_output_____" ] ], [ [ "%cd /home/johnbent/cortx/metrics\nimport cortx_community\nimport cortx_graphing\nimport os\nfrom github import Github\ngh = Github(os.environ.get('GH_OATH'))\nstx = gh.get_organization('Seagate')\nrepos = cortx_community.get_repos()\nps = cortx_community.PersistentStats()", "/home/johnbent/cortx/metrics\n" ], [ "# a function which can test the progress of a running scrape_metrics.py process\ndef check_scan_progress(date,ps):\n done=0\n for repo in ps.get_repos():\n (a,b)=ps.get_latest(repo)\n if b == date:\n done+=1\n #print(\"Last report for %s is %s\" % (repo,b))\n print(\"%d out of %d repos have been scanned\" % (done,len(ps.get_repos())))", "_____no_output_____" ], [ "# a function for comparing a field in a repo over time\n# for example, if you want to see the change in innersource_committers over time, you can use this function\ndef compare_fields(ps,repo,field,verbose=False):\n last = None\n first = None\n for date in sorted(ps.stats[repo].keys()):\n try:\n last = ps.stats[repo][date][field]\n except KeyError:\n pass # many fields didn't always exist so might not appear in old stats\n if first is None:\n first = last\n if verbose:\n print(\"%s -> %s\" % (date, last))\n print(\"Difference between first and last is: %s\" % (first-last))\n print(\"Difference between last and first is: %s\" % (last-first))", "_____no_output_____" ], [ "compare_fields(ps,'GLOBAL','external_committers',verbose=True)\n#print(ps.get_values('GLOBAL','external_committers'))", "2020-05-19 -> None\n2020-07-06 -> set()\n2020-10-07 -> {'cotigao', 'jan--f', 'daniarherikurniawan'}\n2020-11-03 -> {'raikrahul', 'cotigao', 'jan--f', 'daniarherikurniawan'}\n2020-12-20 -> {'raikrahul', 'cotigao', 'jan--f', 'daniarherikurniawan'}\n2020-12-21 -> {'raikrahul', 'cotigao', 'jan--f', 'daniarherikurniawan'}\n2020-12-23 -> {'raikrahul', 'cotigao', 'jan--f', 'daniarherikurniawan'}\n2020-12-24 -> {'raikrahul', 'cotigao', 'jan--f', 'daniarherikurniawan'}\n2020-12-26 -> {'raikrahul', 'cotigao', 'jan--f', 'daniarherikurniawan'}\n2020-12-29 -> {'raikrahul', 'cotigao', 'jan--f', 'daniarherikurniawan'}\n2020-12-30 -> {'raikrahul', 'cotigao', 'jan--f', 'daniarherikurniawan'}\n2021-01-02 -> {'raikrahul', 'cotigao', 'jan--f', 'daniarherikurniawan'}\n2021-01-06 -> {'raikrahul', 'cotigao', 'jan--f', 'daniarherikurniawan'}\n2021-01-09 -> {'raikrahul', 'cotigao', 'jan--f', 'daniarherikurniawan'}\n2021-01-10 -> {'raikrahul', 'cotigao', 'jan--f', 'daniarherikurniawan'}\n2021-01-14 -> {'steven-chien', 'raikrahul', 'cotigao', 'jan--f', 'daniarherikurniawan'}\n2021-01-16 -> {'steven-chien', 'raikrahul', 'cotigao', 'jan--f', 'daniarherikurniawan'}\n2021-01-19 -> {'steven-chien', 'raikrahul', 'cotigao', 'jan--f', 'daniarherikurniawan'}\n2021-01-23 -> {'steven-chien', 'raikrahul', 'cotigao', 'jan--f', 'daniarherikurniawan'}\n2021-01-30 -> {'xuning97', 'raikrahul', 'cotigao', 'jan--f', 'daniarherikurniawan'}\n2021-02-06 -> {'ashayshirwadkar', 'raikrahul', 'cotigao', 'daniarherikurniawan', 'xuning97', 'jan--f', 'selsayed'}\n2021-02-13 -> {'ashayshirwadkar', 'raikrahul', 'cotigao', 'daniarherikurniawan', 'xuning97', 'jan--f', 'selsayed'}\n2021-02-18 -> {'ashayshirwadkar', 'raikrahul', 'cotigao', 'daniarherikurniawan', 'xuning97', 'jan--f', 'selsayed'}\n2021-02-20 -> {'mmukul', 'ashayshirwadkar', 'raikrahul', 'cotigao', 'daniarherikurniawan', 'xuning97', 'jan--f', 'selsayed'}\n2021-02-22 -> {'ashayshirwadkar', 'raikrahul', 
'cotigao', 'daniarherikurniawan', 'xuning97', 'jan--f', 'selsayed'}\n2021-03-01 -> {'ashayshirwadkar', 'raikrahul', 'cotigao', 'daniarherikurniawan', 'xuning97', 'jan--f', 'selsayed'}\n2021-03-06 -> {'ashayshirwadkar', 'raikrahul', 'cotigao', 'daniarherikurniawan', 'xuning97', 'jan--f', 'selsayed'}\n2021-03-25 -> {'ashayshirwadkar', 'raikrahul', 'cotigao', 'rohitkolapkar', 'daniarherikurniawan', 'xuning97', 'jan--f', 'selsayed'}\nDifference between first and last is: set()\nDifference between last and first is: {'ashayshirwadkar', 'raikrahul', 'cotigao', 'rohitkolapkar', 'daniarherikurniawan', 'xuning97', 'jan--f', 'selsayed'}\n" ], [ "# manually add some values so they can be in executive report\nrepo='GLOBAL'\nslack_members_key='slack_total_members'\nslack_wau_key='slack_weekly_ave_active'\nnewsletter_key='newsletter_subscribers'\nwebinar_key='webinar_attendees'\nintegrations_key='integrations'\ndates=ps.get_dates(repo)\nec_key='external_committers'\nep_key='external_participants'\nhc_key='hackathon_committers'\nhp_key='hackathon_participants'\neup_key='eu_r&d_participants'\n\n# the helper function to load them\ndef add_stats(List,Key):\n for item in List:\n date=item[0]\n value=item[1]\n print(\"Repo\",repo,\"Date\",date,\"Stat\",Key,\"Value\",value)\n ps.add_stat(date=date,repo=repo,stat=Key,value=value)\n\n# the date strings to use\njun_date='2020-07-06'\nsep_date='2020-10-07'\noct_date='2020-11-03'\nnov_date='2020-12-21'\ndec_date='2020-12-30'\nlast_date='2021-01-06' # must add metrics for the latest date or else they don't show up in plots . . . \n\ndef update_slack():\n # adding the slack metrics for executive report\n slack_members =[(jun_date,97), (sep_date,212), (nov_date,329), (dec_date,355),(last_date,355)]\n slack_weekly_average_active=[(jun_date,11.3),(sep_date,53.0),(nov_date,69.9),(dec_date,59.9),(last_date,59.9)]\n add_stats(slack_members,slack_members_key)\n add_stats(slack_weekly_average_active,slack_wau_key)\n print(ps.get_values(repo,slack_members_key))\n print(ps.get_values(repo,slack_wau_key))\ndef update_newsletter():\n # adding the newsletter metrics for executive report\n dec_date='2020-12-30'\n feb_date='2021-02-13'\n newsletter_feb=set([\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email 
protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email 
protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email 
protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\" [email protected]\",\" [email protected]\",\" [email protected]\",\" [email protected]\",\" [email protected]\",\" [email protected]\",\" [email protected]\",\" [email protected]\",\" [email protected]\",\" [email protected]\",\" [email protected]\",\" [email protected]\",\" [email protected]\",\" [email protected]\",\" [email protected]\",\" [email protected]\",\" [email protected]\",\" [email protected]\",\" [email protected]\",\" [email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\"])\n newsletter_members_dec=set([\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email 
protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email 
protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email 
protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\",\"[email protected]\"])\n print(len(newsletter_feb))\n newsletter = [(sep_date,429),(oct_date,459),(nov_date,477),(dec_date,newsletter_members_dec),(last_date,newsletter_members_dec)]\n newsletter = 
[(feb_date,newsletter_feb)]\n add_stats(newsletter,newsletter_key)\n print(ps.get_values_as_numbers(repo,newsletter_key))\n\ndef update_webinar():\n # adding the webinar metrics for executive report\n webinar_nov=set([\"Andrei Zheregelia\",\"Andrew List\",\"Andriy Tkachuk\",\"Anitoliy Bilenko\",\"Anthony Toccco\",\"Ben Wason\",\"Charles Kunkel\",\"Chetan Deshmukh\",\"Chetan Kumar\",\"Clay Curry\",\"Daniar Kurniawan\",\"Dima.c\",\"Dmitri Sandler\",\"Dmytro Podgornyi\",\"Eduard Aleksandrov\",\"Gary Phillips\",\"Guy Carbonneau\",\"Hanesan Umanesan\",\"Igor Pivovarov\",\"Iman Anvari\",\"Ivan Alekhin\",\"Jason Sliger-Sparks\",\"Jjohn Carrier\",\"Julia Rubtsov\",\"Kalpesh Chhajed\",\"Kaustubh Suresh Deorukhkar\",\"Ken Haugen\",\"Ketan Anil Arlulkar\",\"Konstatin Nekrasov\",\"Lance Blumberg\",\"Madhavrao Vemuri\",\"Mark Jedraszek\",\"Maxim Malezhin\",\"Max Medved\",\"Mehul Joshi\",\"Nicholas Krauter\",\"Nigel Hart\",\"Nikita Danilov\",\"Patrick Raaf\",\"Paul Woods\",\"Philippe Nicolas\",\"Phil Ruff\",\"Raydon Gordon\",\"Ricardo Alvarez-Miranda\",\"Sachin Punadikar\",\"Sailesh Manjrekar\",\"Sai Narasimhamurthy\",\"Sarang Sawant\",\"Serkay Olmez\",\"Shankar More\",\"Shiji Zhang\",\"Swapnil Khandare\",\"Taro Iwata\",\"Ujjwal Lanjewar\"])\n webinar_dec=set([\"Andriy Tkachuk\",\"Anthony Tocco\",\"Charles Kunkel\",\"Daniar Kurniawan\",\"Dan Olster\",\"Ganesan Umanesan\",\"Gary Phillips\",\"Guy Carbonneau\",\"Ivan Poddubnyy\",\"Justin Rackowski\",\"Nicolas Krauter\",\"Nigel Hart\",\"Paul Benn\",\"Praveen Viraraghavan\",\"Rajesh Nambiar\",\"Ricardo Alvarez-miranda\",\"Sachin Punadikar\",\"Sarang Sawant\",\"Shankar More\",\"Shiji Zhang\",\"Swapnil Khandare\",\"Trend Geerdes\",\"Ujjwal Lanjewar\",\"Walter Lopatka\",])\n webinar_jan=set([\"Unknown1\",\"Anatoliy Bilenko\",\"Andrea Chamorro\",\"Andriy Tkachuk\",\"Anthony Tocco\",\"Unknown2\",\"Charles Kunkel\",\"Chetan Kumar\",\"Chirs Cramer\",\"Erin Foley\",\"Gabe Wham\",\"Gary Grider\",\"Gregory Touretsky\",\"Iman Anvari\",\"Joseph Rebovich\",\"Justin Rackowski\",\"Keith Pine\",\"Ken Haugen\",\"Ketan Anil Arlulkar\",\"Madhavrao Vemuri\",\"Amandar Sawant\",\"Mark Jedraszek\",\"Mark Sprouse\",\"Matthew Halcomb\",\"Matthew L Curry Sandia\",\"Max\",\"Mehul Joshi\",\"Meng Wang\",\"Mike Sevilla\",\"Muhul Malhotra\",\"Nedko Amaudov\",\"Oded Kellner\",\"Paul Kusbel\",\"Pedro Fernandez\",\"Pritesh Pawar\",\"Priyanka Borawake\",\"Quinn D Mitchell\",\"Rajesh Bhalerao\",\"Ricardo Alvarez-Miranda\",\"Robert Pechman\",\"Rohan Puri\",\"Sachin Punadikar\",\"Sai Narasimhamurthy\",\"Sarang Sawant\",\"Shailesh\",\"Shankar More\",\"Sharad Mehrotra\",\"Shlomi Avihou\",\"Shreya Karmakar\",\"Shrihari Waskar\",\"Unknown\",\"Sridbar Dubhaka\",\"Stephane Thiell\",\"Swapril Khandare\",\"Tong Shi\",\"Ujjwal Lanjewar\",\"Venky P\",\"Vijay Nanjunda Swamy\",\"Vikram\",\"Vojtech Juranek\",\"Walkter Lopatka\",\"Ziv\",\"Theodore Omtzigt\",\"Rajkumar Patel\",\"Anjinkya Deshpande\",\"Anatoliy Bilenko\",\"Chetan Deshmukh\",\"Henry Newman\",\"Paul Benn\",\"Paul Woods\",\"Kyle Lamb\"])\n webinar_feb=set([\"Ashwin Agrawal\",\"Jean Luca Bez\",\"Rex Tanakit\",\"Samuel Spencer\",\"Shailesh Vaidya\",\"Tripti Srivastava\",\"Abraham Checkoway\",\"Abhijeet Dhumal\",\"Anatoliy Bilenko\",\"Anthony Tocco\",\"Antoine Le Bideau\",\"Basavaraj Kirunge\",\"BK Singh\",\"Branislav Radovanovic\",\"Charles Kunkel\",\"Chetan Deshmukh\",\"Carlos Thomaz\",\"Dan Olster\",\"Debashish Pal\",\"Geert Wenes\",\"Gary Grider\",\"Gary Lowell\",\"Jason Sliger-Sparks\",\"Jean-Thomas\",\"Justin Rackowski\",\"Justin 
Woo\",\"Kalpesh Chhajed\",\"Keith Pine\",\"Ken Haugen\",\"Ketan Anil Arlulkar\",\"Kiran Mangalore\",\"Liang Gan\",\"Madhavrao Vemuri\",\"Mandar Sawant\",\"Mark Jedraszek\",\"Mehul Joshi\",\"Mukul Malhotra\",\"Nicolau Manubens\",\"Nigel Hart\",\"Nilesh Navale\",\"Parag Joshi\",\"Parks Fields\",\"Paul Benn\",\"Paul Woods\",\"Peyton McNully\",\"Prudence Huang\",\"Philippe Nicolas\",\"Pranali Ramdas Tirkhunde\",\"Ryan Cassidy\",\"Rob Wilson\",\"Robert Read\",\"Rohan Puri\",\"Ryan Tyler\",\"Sarang Sawant\",\"Serkay Olmez\",\"Shankar More\",\"Seth Kindley\",\"Swarajya Pendharkar\",\"Sumedh Kulkarni\",\"Sven Breuner\",\"Sven Breuner\",\"Theodore Omtzigt\",\"Tim Coullter\",\"Ravi Tripathi\",\"Tushar Tarkas\",\"Ujjwal Lanjewar\",\"Venky P\",\"Walter Lopatka\",\"Earl Dodd\",\"Wendell Wenjen\",\"Weikuan Yu\",\"George Zhi Qiao\",])\n jan_date='2021-01-06'\n feb_date='2021-02-06'\n dec_date='2020-12-21'\n nov_date='2020-11-03'\n #print(len(webinar_nov),len(webinar_dec),len(webinar_jan))\n #webinar = [(nov_date,webinar_nov),(dec_date,webinar_dec),(jan_date,webinar_jan)]\n webinar = [(feb_date,webinar_feb),(dec_date,webinar_dec),(jan_date,webinar_jan),(nov_date,webinar_nov)]\n add_stats(webinar,webinar_key)\n print(ps.get_values_as_numbers(repo,webinar_key))\n \ndef update_integrations():\n # add the integrations metric for executive report\n integrations = [(sep_date,0),(oct_date,1),(nov_date,1),(dec_date,6),(last_date,6)]\n add_stats(integrations,integrations_key)\n print(ps.get_values_as_numbers(repo,integrations_key))\n\ndef update_external(repo):\n def update_external_single(repo,date,original,removals,key):\n if original is None:\n return\n for r in removals:\n try:\n original -= r\n except TypeError:\n pass # can't subtract None from a set\n ps.add_stat(repo=repo,date=date,stat=key,value=original)\n \n # so we used to double count hackathon folks as external folks\n # we fixed that but now the external counts suddenly unexpectedly dropped\n # let's fix that in the historical record\n # actually this should be easy\n # just iterate through every date and subtract hackathon folks from external folks and resave the difference as external\n #external participants\n for date in dates:\n ec = ps.get_values(repo, ec_key,[date])[0]\n hc = ps.get_values(repo, hc_key,[date])[0]\n ep = ps.get_values(repo, ep_key,[date])[0]\n hp = ps.get_values(repo, hp_key,[date])[0]\n eup = {'u-u-h', 'jayeshbadwaik'}\n update_external_single(repo=repo,date=date,original=ec,removals=[hc], key=ec_key)\n update_external_single(repo=repo,date=date,original=ep,removals=[hp,eup], key=ep_key)\n \ndef manually_add_historical_committers():\n # for external committers also go through and manually add them in the early months\n sep_committers=set(['cotigao','daniarherikurniawan','jan--f'])\n oct_committers=sep_committers | set(['raikrahul'])\n committers = [(jun_date,set()),(sep_date,sep_committers),(oct_date,oct_committers),('2020-12-20',oct_committers),('2020-12-21',oct_committers),('2020-12-23',oct_committers)]\n add_stats(committers,ec_key)\n #print(ps.get_values_as_numbers(repo, 'external_participants'))\n\ndef clean_external():\n print(dates) \n for r in ps.get_repos():\n print(\"Will clean external committers and participants for %s\" % r)\n update_external(r)\n print(ps.get_values_as_numbers(r, ec_key))\n print(ps.get_values_as_numbers(r, ep_key))\n print(ps.get_values(repo, 
ec_key))\n\nupdate_webinar()\n#print(ps.get_dates(repo))\nupdate_newsletter()\nps.persist()\n#manually_add_historical_committers()\n#update_slack()\n#update_webinar()\n#update_integrations()\n#for i in [3,4,5,10,11,12,20]:\n# print(ps.get_dates('GLOBAL')[i], '->', ps.get_values_as_numbers('GLOBAL',webinar_key)[i])", "Repo GLOBAL Date 2021-02-06 Stat webinar_attendees Value {'Parks Fields', 'Branislav Radovanovic', 'Kalpesh Chhajed', 'Venky P', 'Charles Kunkel', 'Mandar Sawant', 'Debashish Pal', 'Anthony Tocco', 'Antoine Le Bideau', 'Sarang Sawant', 'Justin Rackowski', 'Paul Woods', 'Jean-Thomas', 'Keith Pine', 'BK Singh', 'Jason Sliger-Sparks', 'Anatoliy Bilenko', 'Chetan Deshmukh', 'Philippe Nicolas', 'Shankar More', 'Swarajya Pendharkar', 'Mukul Malhotra', 'Prudence Huang', 'Ketan Anil Arlulkar', 'Ryan Tyler', 'Nigel Hart', 'Kiran Mangalore', 'Liang Gan', 'Serkay Olmez', 'Ken Haugen', 'Rob Wilson', 'Abraham Checkoway', 'Tim Coullter', 'Parag Joshi', 'Shailesh Vaidya', 'Geert Wenes', 'Ashwin Agrawal', 'Carlos Thomaz', 'Sven Breuner', 'Theodore Omtzigt', 'Dan Olster', 'Tushar Tarkas', 'Wendell Wenjen', 'Gary Grider', 'Nicolau Manubens', 'Paul Benn', 'Sumedh Kulkarni', 'Peyton McNully', 'Samuel Spencer', 'Abhijeet Dhumal', 'Robert Read', 'Tripti Srivastava', 'Mark Jedraszek', 'Walter Lopatka', 'Gary Lowell', 'Ryan Cassidy', 'Ravi Tripathi', 'Earl Dodd', 'Rohan Puri', 'Seth Kindley', 'Justin Woo', 'Jean Luca Bez', 'Pranali Ramdas Tirkhunde', 'George Zhi Qiao', 'Mehul Joshi', 'Nilesh Navale', 'Rex Tanakit', 'Ujjwal Lanjewar', 'Weikuan Yu', 'Basavaraj Kirunge', 'Madhavrao Vemuri'}\nSetting value to be {'Parks Fields', 'Branislav Radovanovic', 'Kalpesh Chhajed', 'Venky P', 'Charles Kunkel', 'Mandar Sawant', 'Debashish Pal', 'Anthony Tocco', 'Antoine Le Bideau', 'Sarang Sawant', 'Justin Rackowski', 'Paul Woods', 'Jean-Thomas', 'Keith Pine', 'BK Singh', 'Jason Sliger-Sparks', 'Anatoliy Bilenko', 'Chetan Deshmukh', 'Philippe Nicolas', 'Shankar More', 'Swarajya Pendharkar', 'Mukul Malhotra', 'Prudence Huang', 'Ketan Anil Arlulkar', 'Ryan Tyler', 'Nigel Hart', 'Kiran Mangalore', 'Liang Gan', 'Serkay Olmez', 'Ken Haugen', 'Rob Wilson', 'Abraham Checkoway', 'Tim Coullter', 'Parag Joshi', 'Shailesh Vaidya', 'Geert Wenes', 'Ashwin Agrawal', 'Carlos Thomaz', 'Sven Breuner', 'Theodore Omtzigt', 'Dan Olster', 'Tushar Tarkas', 'Wendell Wenjen', 'Gary Grider', 'Nicolau Manubens', 'Paul Benn', 'Sumedh Kulkarni', 'Peyton McNully', 'Samuel Spencer', 'Abhijeet Dhumal', 'Robert Read', 'Tripti Srivastava', 'Mark Jedraszek', 'Walter Lopatka', 'Gary Lowell', 'Ryan Cassidy', 'Ravi Tripathi', 'Earl Dodd', 'Rohan Puri', 'Seth Kindley', 'Justin Woo', 'Jean Luca Bez', 'Pranali Ramdas Tirkhunde', 'George Zhi Qiao', 'Mehul Joshi', 'Nilesh Navale', 'Rex Tanakit', 'Ujjwal Lanjewar', 'Weikuan Yu', 'Basavaraj Kirunge', 'Madhavrao Vemuri'}\nRepo GLOBAL Date 2020-12-21 Stat webinar_attendees Value {'Gary Phillips', 'Ganesan Umanesan', 'Charles Kunkel', 'Shiji Zhang', 'Trend Geerdes', 'Anthony Tocco', 'Rajesh Nambiar', 'Swapnil Khandare', 'Sachin Punadikar', 'Guy Carbonneau', 'Sarang Sawant', 'Justin Rackowski', 'Shankar More', 'Nicolas Krauter', 'Daniar Kurniawan', 'Nigel Hart', 'Andriy Tkachuk', 'Dan Olster', 'Ricardo Alvarez-miranda', 'Paul Benn', 'Walter Lopatka', 'Ujjwal Lanjewar', 'Praveen Viraraghavan', 'Ivan Poddubnyy'}\nSetting value to be {'Gary Phillips', 'Ganesan Umanesan', 'Charles Kunkel', 'Shiji Zhang', 'Trend Geerdes', 'Anthony Tocco', 'Rajesh Nambiar', 'Swapnil Khandare', 'Sachin Punadikar', 'Guy Carbonneau', 
'Sarang Sawant', 'Justin Rackowski', 'Shankar More', 'Nicolas Krauter', 'Daniar Kurniawan', 'Nigel Hart', 'Andriy Tkachuk', 'Dan Olster', 'Ricardo Alvarez-miranda', 'Paul Benn', 'Walter Lopatka', 'Ujjwal Lanjewar', 'Praveen Viraraghavan', 'Ivan Poddubnyy'}\nRepo GLOBAL Date 2021-01-06 Stat webinar_attendees Value {'Shreya Karmakar', 'Chirs Cramer', 'Oded Kellner', 'Venky P', 'Rajkumar Patel', 'Gregory Touretsky', 'Max', 'Kyle Lamb', 'Charles Kunkel', 'Chetan Kumar', 'Paul Kusbel', 'Anthony Tocco', 'Sridbar Dubhaka', 'Stephane Thiell', 'Sachin Punadikar', 'Sarang Sawant', 'Justin Rackowski', 'Shankar More', 'Amandar Sawant', 'Keith Pine', 'Quinn D Mitchell', 'Paul Woods', 'Anatoliy Bilenko', 'Chetan Deshmukh', 'Walkter Lopatka', 'Gabe Wham', 'Ketan Anil Arlulkar', 'Andrea Chamorro', 'Mark Sprouse', 'Priyanka Borawake', 'Sai Narasimhamurthy', 'Swapril Khandare', 'Vojtech Juranek', 'Shlomi Avihou', 'Iman Anvari', 'Ken Haugen', 'Andriy Tkachuk', 'Meng Wang', 'Henry Newman', 'Ricardo Alvarez-Miranda', 'Unknown1', 'Erin Foley', 'Theodore Omtzigt', 'Pritesh Pawar', 'Anjinkya Deshpande', 'Gary Grider', 'Joseph Rebovich', 'Vikram', 'Paul Benn', 'Mike Sevilla', 'Matthew Halcomb', 'Muhul Malhotra', 'Shailesh', 'Mark Jedraszek', 'Unknown', 'Shrihari Waskar', 'Rajesh Bhalerao', 'Sharad Mehrotra', 'Rohan Puri', 'Matthew L Curry Sandia', 'Nedko Amaudov', 'Ziv', 'Pedro Fernandez', 'Mehul Joshi', 'Tong Shi', 'Vijay Nanjunda Swamy', 'Ujjwal Lanjewar', 'Robert Pechman', 'Unknown2', 'Madhavrao Vemuri'}\nSetting value to be {'Shreya Karmakar', 'Chirs Cramer', 'Oded Kellner', 'Venky P', 'Rajkumar Patel', 'Gregory Touretsky', 'Max', 'Kyle Lamb', 'Charles Kunkel', 'Chetan Kumar', 'Paul Kusbel', 'Anthony Tocco', 'Sridbar Dubhaka', 'Stephane Thiell', 'Sachin Punadikar', 'Sarang Sawant', 'Justin Rackowski', 'Shankar More', 'Amandar Sawant', 'Keith Pine', 'Quinn D Mitchell', 'Paul Woods', 'Anatoliy Bilenko', 'Chetan Deshmukh', 'Walkter Lopatka', 'Gabe Wham', 'Ketan Anil Arlulkar', 'Andrea Chamorro', 'Mark Sprouse', 'Priyanka Borawake', 'Sai Narasimhamurthy', 'Swapril Khandare', 'Vojtech Juranek', 'Shlomi Avihou', 'Iman Anvari', 'Ken Haugen', 'Andriy Tkachuk', 'Meng Wang', 'Henry Newman', 'Ricardo Alvarez-Miranda', 'Unknown1', 'Erin Foley', 'Theodore Omtzigt', 'Pritesh Pawar', 'Anjinkya Deshpande', 'Gary Grider', 'Joseph Rebovich', 'Vikram', 'Paul Benn', 'Mike Sevilla', 'Matthew Halcomb', 'Muhul Malhotra', 'Shailesh', 'Mark Jedraszek', 'Unknown', 'Shrihari Waskar', 'Rajesh Bhalerao', 'Sharad Mehrotra', 'Rohan Puri', 'Matthew L Curry Sandia', 'Nedko Amaudov', 'Ziv', 'Pedro Fernandez', 'Mehul Joshi', 'Tong Shi', 'Vijay Nanjunda Swamy', 'Ujjwal Lanjewar', 'Robert Pechman', 'Unknown2', 'Madhavrao Vemuri'}\nRepo GLOBAL Date 2020-11-03 Stat webinar_attendees Value {'Gary Phillips', 'Clay Curry', 'Kalpesh Chhajed', 'Nicholas Krauter', 'Charles Kunkel', 'Julia Rubtsov', 'Shiji Zhang', 'Chetan Kumar', 'Anthony Toccco', 'Eduard Aleksandrov', 'Swapnil Khandare', 'Sachin Punadikar', 'Guy Carbonneau', 'Sarang Sawant', 'Paul Woods', 'Philippe Nicolas', 'Shankar More', 'Andrei Zheregelia', 'Jason Sliger-Sparks', 'Jjohn Carrier', 'Chetan Deshmukh', 'Daniar Kurniawan', 'Nikita Danilov', 'Ketan Anil Arlulkar', 'Dmytro Podgornyi', 'Nigel Hart', 'Sailesh Manjrekar', 'Sai Narasimhamurthy', 'Dmitri Sandler', 'Serkay Olmez', 'Ivan Alekhin', 'Lance Blumberg', 'Iman Anvari', 'Ken Haugen', 'Andriy Tkachuk', 'Patrick Raaf', 'Konstatin Nekrasov', 'Andrew List', 'Ricardo Alvarez-Miranda', 'Igor Pivovarov', 'Anitoliy Bilenko', 'Raydon Gordon', 
'Hanesan Umanesan', 'Ben Wason', 'Mark Jedraszek', 'Maxim Malezhin', 'Phil Ruff', 'Kaustubh Suresh Deorukhkar', 'Mehul Joshi', 'Dima.c', 'Max Medved', 'Ujjwal Lanjewar', 'Taro Iwata', 'Madhavrao Vemuri'}\nSetting value to be {'Gary Phillips', 'Clay Curry', 'Kalpesh Chhajed', 'Nicholas Krauter', 'Charles Kunkel', 'Julia Rubtsov', 'Shiji Zhang', 'Chetan Kumar', 'Anthony Toccco', 'Eduard Aleksandrov', 'Swapnil Khandare', 'Sachin Punadikar', 'Guy Carbonneau', 'Sarang Sawant', 'Paul Woods', 'Philippe Nicolas', 'Shankar More', 'Andrei Zheregelia', 'Jason Sliger-Sparks', 'Jjohn Carrier', 'Chetan Deshmukh', 'Daniar Kurniawan', 'Nikita Danilov', 'Ketan Anil Arlulkar', 'Dmytro Podgornyi', 'Nigel Hart', 'Sailesh Manjrekar', 'Sai Narasimhamurthy', 'Dmitri Sandler', 'Serkay Olmez', 'Ivan Alekhin', 'Lance Blumberg', 'Iman Anvari', 'Ken Haugen', 'Andriy Tkachuk', 'Patrick Raaf', 'Konstatin Nekrasov', 'Andrew List', 'Ricardo Alvarez-Miranda', 'Igor Pivovarov', 'Anitoliy Bilenko', 'Raydon Gordon', 'Hanesan Umanesan', 'Ben Wason', 'Mark Jedraszek', 'Maxim Malezhin', 'Phil Ruff', 'Kaustubh Suresh Deorukhkar', 'Mehul Joshi', 'Dima.c', 'Max Medved', 'Ujjwal Lanjewar', 'Taro Iwata', 'Madhavrao Vemuri'}\n[None, None, None, 54, None, 24, None, None, None, None, 24, 24, 70, None, None, None, None, None, None, None, 71, None, None, None, None, None, 55, None]\n527\nRepo GLOBAL Date 2021-02-13 Stat newsletter_subscribers Value {'[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', 
'[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email 
protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email 
protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]'}\nSetting value to be {'[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', 
'[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email 
protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email 
protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', ' [email protected]', ' [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]'}\n" ], [ "def clean_external_participants():\n ep1=ps.stats[repo][dates[4]][ep_key]\n ep2=ps.stats[repo][dates[5]][ep_key]\n print(len(ep1),len(ep2))\n print(dates[4],ep1)\n print(dates[5],ep2)\n print(ep1-ep2)\n ps.stats[repo][dates[5]][ep_key]=ep2|{'jan--f'}\n ps.persist()\n print(ps.get_values_as_numbers(repo, ep_key))\n ep3=ps.stats[repo][dates[-1]][ep_key]\n ep4=ps.stats[repo][dates[-2]][ep_key]\n print(ep4-ep3)\n#clean_external_participants()", "_____no_output_____" ], [ "def get_logins(Type):\n folks=set()\n people=cortx_community.CortxCommunity()\n for p in people.values():\n if p.type == Type:\n folks.add(p.login)\n return folks", "_____no_output_____" ], [ "# we once mistakenly characterized a few team folks as innersource folks\ndef clean_innersource(repo,Type,folks):\n key='innersource_%s'%Type\n for d in ps.get_dates(repo):\n try:\n values=ps.stats[repo][d][key]\n except KeyError:\n continue # some keys didn't always exist\n if isinstance(values,set):\n bad = [v for v in values if v not in folks]\n if len(bad)>0:\n print(\"Need to remove\",bad,\"from %s:%s on %s\" % (repo,key,d))\n new_values = values - set(bad)\n print(\"Will reduce from %d to %d\" % (len(values),len(new_values)))\n 
ps.stats[repo][d][key]=new_values\n\ndef print_values(repo,key,dates=None):\n for v in ps.get_values(repo,key,dates):\n try:\n print(len(v),sorted(v))\n except:\n print(v)\n\n\ndef bulk_clean():\n for r in ps.get_repos():\n for t in ('participants','committers'):\n clean_innersource(repo=r,Type=t,folks=folks)\n print(r,t,ps.get_values_as_numbers(r,'innersource_%s'%t))\n \nfolks=get_logins('Innersource')\nprint(folks)\n\n#bulk_clean()\n#ps.persist()\n#print(\"all cleaned?!?!\")\ndef clean_ip():\n repo='GLOBAL'\n key='innersource_participants'\n dates=ps.get_dates(repo)\n good_values=ps.get_values(repo=repo,key=key,dates=[dates[5]])[0]\n for d in [dates[2],dates[3],dates[4]]:\n ps.add_stat(date=d,repo=repo,stat=key,value=good_values)\n print_values(repo,key,dates)\n print(len(values),values)\n \ndef clean_ic():\n repo='GLOBAL'\n key='innersource_committers'\n clean_innersource(repo,'committers',folks)\n \n#clean_ip()\ndef print_innersource(Key):\n for r in ps.get_repos():\n print(r, ps.get_values_as_numbers(repo=r,key=Key))\n\nclean_ic()\nprint_innersource('innersource_committers')\nps.persist()", "{'GID_UNK_jin.m.li', 'jkan-cn', 'GID_UNK_lenin.jegatheesan', 'seagate-eds', 'fyqphd', 'ketanarlulkar', 'laike23', 'GID_UNK_chong.li', 'IxiaX', 'PSO-Dmitry', 'ninovsnino', 'GID_UNK_sukhontharat.wetthais', 'GID_UNK_subathra.sampath', 'christina-00', 'brian479974', 'GID_UNK_swathi.kumarvembu', 'GID_UNK_timothy.r.coulter', 'ManuelCabusas', 'GID_UNK_john.j.sweeney', 'namu3418-seagate', 'GID_UNK_vinoth.ramalingam', 'fyq', 'LumCheeWai', 'GID_UNK_sanjeevkumar.murali', 'FullMentalPanic', 'kachhwahadivya', 'WesleyChanWingHung', 'harrison-seow-seagate', 'nikhilsawake', 'stx-yazid', 'GID_UNK_xinxu.zheng', 'jay-stx', 'sbruce', 'AmberJH', 'ShrihariWaskar', 'GreenEggNHam', 'hemantvraut', 'yanqingfu', 'TProhofsky', 'rupasree-roy', 'suntins', 'kiwionly', 'suykerbuyk', 'kiwionly2', 'vaibhavparatwar', 'GID_UNK_nigel.hart', 'timothyCoulter', 'GID_UNK_logan.smith', 'GID_UNK_milind.naik', 'GID_UNK_mandar.sabhapatikar', 'kenjhaugen', 'starGate123', 'paulq11', 'Jeetandra'}\nGLOBAL [None, None, None, None, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 16, 16, 16, 17]\ncortx [11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 15, 15, 16]\ncortx-dsal [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\ncortx-experiments [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\ncortx-fs [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\ncortx-fs-ganesha [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\ncortx-ha [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\ncortx-hare [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\ncortx-images [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\ncortx-management-portal [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\ncortx-manager [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\ncortx-mio [0, 0, 0, 0, 0, 0, 0, 0]\ncortx-monitor [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\ncortx-motr [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4]\ncortx-motr-apps [0, 0, 0, 0, 0, 0, 0, 0]\ncortx-motr-galois [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\ncortx-nsal [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0]\ncortx-posix [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\ncortx-prvsnr [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\ncortx-s3server [2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\ncortx-utils [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2]\ncortx-videos [0, 0, 0, 0, 0, 0]\n" ], [ "compare_fields(ps=ps,repo='cortx',field='innersource_committers',verbose=False)", "Difference between first and last is: set()\nDifference between last and first is: {'timothyCoulter', 'vaibhavparatwar', 'kiwionly2', 'jkan-cn', 'fyq'}\n" ], [ "compare_fields(ps=ps,repo='cortx',field='external_email_addresses',verbose=False)", "Difference between first and last is: {'[email protected]', '[email protected]', '[email protected]'}\nDifference between last and first is: {'[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected], [email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]', '[email protected]'}\n" ], [ "targets=['issues_closed_ave_age_in_s','issues_closed']\nfor target in targets:\n for r in ['GLOBAL','cortx-ha','cortx-hare']:\n print(\"%s %s -> %d \" % (r, target, ps.stats[r]['2020-12-29'][target]))", "GLOBAL issues_closed_ave_age_in_s -> 4990879 
\ncortx-ha issues_closed_ave_age_in_s -> 3722564 \ncortx-hare issues_closed_ave_age_in_s -> 3960006 \nGLOBAL issues_closed -> 857 \ncortx-ha issues_closed -> 24 \ncortx-hare issues_closed -> 572 \n" ], [ "check_scan_progress('2021-01-06',ps)", "0 out of 22 repos have been scanned\n" ], [ "(a,b)=ps.get_latest('cortx-hare')\na['issues_open']\nb", "_____no_output_____" ], [ "ps.stats['GLOBAL']['2021-01-02']['stars']\n", "_____no_output_____" ], [ "# this block is a one-time thing to add historical data from before we automated the scraping\nd1={'innersource_participants' : 5, 'pull_requests_external' : 0, \n 'external_participants' : 0,\n 'watchers' : 34, 'stars' : 19, 'forks' : 13, 'views_unique_14_days' : 106,\n 'clones_count_14_days' : 38, 'clones_unique_14_days' : 4,\n 'seagate_blog_referrer_uniques' : 0, 'seagate_referrer_uniques' : 0,\n 'downloads_vms' : 0}\nd1_date='2020-05-19'\nd2={'innersource_participants' : 8, 'pull_requests_external' : 0, \n 'external_participants' : 0,\n 'watchers' : 69, 'stars' : 52, 'forks' : 42, \n 'views_unique_14_days' : 86,\n 'clones_count_14_days' : 15, 'clones_unique_14_days' : 6,\n 'seagate_blog_referrer_uniques' : 0, 'seagate_referrer_uniques' : 0,\n 'downloads_vms' : 0}\nd2_date='2020-07-06'\nd3={'innersource_participants' : 18, 'pull_requests_external' : 1, \n 'external_participants' : 0,\n 'watchers' : 62, 'stars' : 116, 'forks' : 31, \n 'views_unique_14_days' : 1817,\n 'clones_count_14_days' : 468, 'clones_unique_14_days' : 224,\n 'seagate_blog_referrer_uniques' : 0, 'seagate_referrer_uniques' : 0,\n 'downloads_vms' : 130}\nd3_date='2020-10-07'\nd4={'innersource_participants' : 18, 'pull_requests_external' : 4, \n 'external_participants' : 0,\n 'watchers' : 65, 'stars' : 159, 'forks' : 45, \n 'views_unique_14_days' : 817,\n 'clones_count_14_days' : 1851, 'clones_unique_14_days' : 259,\n 'seagate_blog_referrer_uniques' : 0, 'seagate_referrer_uniques' : 0,\n 'downloads_vms' : 363}\nd4_date='2020-11-03'\nprint(d1)\n#ps.add_stats(date=d1_date,repo='GLOBAL',stats=d1)\n#ps.add_stats(date=d2_date,repo='GLOBAL',stats=d2)\n#ps.add_stats(date=d3_date,repo='GLOBAL',stats=d3)\n#ps.add_stats(date=d4_date,repo='GLOBAL',stats=d4)", "{'downloads_vms': 0, 'seagate_blog_referrer_uniques': 0, 'watchers': 34, 'stars': 19, 'external_participants': 0, 'views_unique_14_days': 106, 'innersource_participants': 5, 'seagate_referrer_uniques': 0, 'clones_count_14_days': 38, 'pull_requests_external': 0, 'clones_unique_14_days': 4, 'forks': 13}\n" ], [ "ps.get_dates('GLOBAL')", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb9902f42c8dd63395a9a2b333e5485034f410cd
78,381
ipynb
Jupyter Notebook
project/.ipynb_checkpoints/CTC-checkpoint.ipynb
BlessingBassey/AMMI-Speech-Lig-Aikuma
db596b42cb993bcab48a0ebcc5f32caa26148c08
[ "MIT" ]
null
null
null
project/.ipynb_checkpoints/CTC-checkpoint.ipynb
BlessingBassey/AMMI-Speech-Lig-Aikuma
db596b42cb993bcab48a0ebcc5f32caa26148c08
[ "MIT" ]
null
null
null
project/.ipynb_checkpoints/CTC-checkpoint.ipynb
BlessingBassey/AMMI-Speech-Lig-Aikuma
db596b42cb993bcab48a0ebcc5f32caa26148c08
[ "MIT" ]
null
null
null
30.522196
393
0.531187
[ [ [ "from pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive\nfrom google.colab import auth\nfrom oauth2client.client import GoogleCredentials\nauth.authenticate_user()\ngauth = GoogleAuth()\ngauth.credentials = GoogleCredentials.get_application_default()\ndrive = GoogleDrive(gauth)", "_____no_output_____" ], [ "import torch\nimport torchaudio", "_____no_output_____" ] ], [ [ "# Part 1 : contrastive predictive coding\n\nContrastive Predictive Coding (CPC) is a method of unsupervised training for speech models. The idea behind it is pretty simple:\n\n\n1. The raw audio wave is passed through a convolutional network: the ```encoder```\n2. Then, the encoder's output is given to a recurrent network the ```context```\n3. A third party network, the ```prediction_network``` will try to predict the future embeddings of the encoder using the output of the context network.\n\nIn order to avoid a collapse to trivial solutions, the prediction_network doesn't try to reconstruct the future features. Instead, using the context output $c_t$ at time $t$ it is trained to discriminate the real encoder representatioin $g_{t+k}$ at time $t+k$ from several other features $(g_n)_n$ taken elsewhere in the batch. Thus the loss becomes:\n\n\\\\[ \\mathcal{L}_c = - \\frac{1}{K} \\sum_{k=1}^K \\text{Cross_entropy}(\\phi_k(c_t), g_{t+k}) \\\\]\n\nOr:\n\n\\\\[ \\mathcal{L}_c = - \\frac{1}{K} \\sum_{k=1}^K \\log \\frac{ \\exp\\left(\\phi_k(c_t)^\\top g_{t+k}\\right) }{ \\sum_{\\mathbf{n}\\in\\mathcal{N}_t} \\exp\\left(\\phi_k(c_t)^\\top g_n\\right)} \\\\]\n\nWhere:\n\n\n* $\\phi_k$ is the prediction network for the kth timestep\n* $\\mathcal{N}_t$ is the set of all negative examples sampled for timestep $t$\n\n\n", "_____no_output_____" ], [ "## Exercice 1 : Building the model\n\nIn this exercise, we will build and train a small CPC model using the repository CPC_audio.\n\nThe code below loads a context and an encoder newtorks.", "_____no_output_____" ] ], [ [ "%cd /content/CPC_audio\nfrom cpc.model import CPCEncoder, CPCAR\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nDIM_ENCODER=256\nDIM_CONTEXT=256\nKEEP_HIDDEN_VECTOR=False\nN_LEVELS_CONTEXT=1\nCONTEXT_RNN=\"LSTM\"\nN_PREDICTIONS=12\nLEARNING_RATE=2e-4\nN_NEGATIVE_SAMPLE =128", "/content/CPC_audio\n" ], [ "encoder = CPCEncoder(DIM_ENCODER).to(device)\ncontext = CPCAR(DIM_ENCODER, DIM_CONTEXT, KEEP_HIDDEN_VECTOR, 1, mode=CONTEXT_RNN).to(device)", "_____no_output_____" ], [ "# Several functions that will be necessary to load the data later\nfrom cpc.dataset import findAllSeqs, AudioBatchData, parseSeqLabels\nSIZE_WINDOW = 20480\nBATCH_SIZE=8\ndef load_dataset(path_dataset, file_extension='.flac', phone_label_dict=None):\n data_list, speakers = findAllSeqs(path_dataset, extension=file_extension)\n dataset = AudioBatchData(path_dataset, SIZE_WINDOW, data_list, phone_label_dict, len(speakers))\n return dataset", "_____no_output_____" ] ], [ [ "Now build a new class, ```CPCModel``` which will", "_____no_output_____" ] ], [ [ "class CPCModel(torch.nn.Module):\n\n def __init__(self,\n encoder,\n AR):\n\n super(CPCModel, self).__init__()\n self.gEncoder = encoder\n self.gAR = AR\n\n def forward(self, batch_data):\n \n\n encoder_output = self.gEncoder(batch_data)\n #print(encoder_output.shape)\n # The output of the encoder data does not have the good format \n # indeed it is Batch_size x Hidden_size x temp size\n # while the context requires Batch_size x temp size x Hidden_size\n # thus you need to permute\n context_input 
 context_input = encoder_output.permute(0, 2, 1)\n\n context_output = self.gAR(context_input)\n #print(context_output.shape)\n return context_output, encoder_output", "_____no_output_____" ] ], [ [ "Let's test your code !\n", "_____no_output_____" ] ], [ [ "audio = torchaudio.load(\"/content/train_data/831/130739/831-130739-0048.flac\")[0]\naudio = audio.view(1, 1, -1)\ncpc_model = CPCModel(encoder, context).to(device)\ncontext_output, encoder_output = cpc_model(audio.to(device))", "_____no_output_____" ] ], [ [ "## Exercise 2 : CPC loss\n\nWe will define a class ```CPCCriterion``` which will hold the prediction networks $\\phi_k$ defined above and perform the classification loss $\\mathcal{L}_c$.\n\na) In this exercise, the $\\phi_k$ will be a linear transform, i.e.:\n\n\\\\[ \\phi_k(c_t) = \\mathbf{A}_k c_t\\\\]\n\nUsing the class [torch.nn.Linear](https://pytorch.org/docs/stable/nn.html#torch.nn.Linear), define the transformations $\\phi_k$ in the code below and complete the function ```get_prediction_k``` which computes $\\phi_k(c_t)$ for a given batch of vectors $c_t$.\n\nb) Using both ```get_prediction_k``` and ```sample_negatives``` defined below, write the forward function which will take as input two batches of features $c_t$ and $g_t$ and output the classification loss $\\mathcal{L}_c$ and the average accuracy for all predictions. ", "_____no_output_____" ] ], [ [ "# Exercise 2: write the CPC loss\n# a) Write the negative sampling (with some help)\n# ERRATUM: it's really hard, the sampling will be provided\n\nclass CPCCriterion(torch.nn.Module):\n\n def __init__(self,\n K,\n dim_context,\n dim_encoder,\n n_negative):\n super(CPCCriterion, self).__init__()\n self.K_ = K\n self.dim_context = dim_context\n self.dim_encoder = dim_encoder\n self.n_negative = n_negative\n\n self.predictors = torch.nn.ModuleList() \n for k in range(self.K_):\n # TO COMPLETE !\n \n # An affine transformation in pytorch is equivalent to a nn.Linear layer\n # To get a linear transformation you must set bias=False\n # input dimension of the layer = dimension of the context\n # output dimension of the layer = dimension of the encoder\n self.predictors.append(torch.nn.Linear(dim_context, dim_encoder, bias=False))\n\n def get_prediction_k(self, context_data):\n\n #TO COMPLETE !\n output = [] \n # For each time step k\n for k in range(self.K_):\n\n # We need to compute phi_k = A_k * c_t\n phi_k = self.predictors[k](context_data)\n output.append(phi_k)\n\n return output\n\n\n def sample_negatives(self, encoded_data):\n r\"\"\"\n Sample some negative examples in the given encoded data.\n Input:\n - encoded_data size: B x T x H\n Returns\n - outputs of size B x (n_negative + 1) x (T - K_) x H\n outputs[:, 0, :, :] contains the positive example\n outputs[:, 1:, :, :] contains negative examples sampled in the batch\n - labels, long tensor of size B x (T - K_)\n Since the positive example is always at coordinates 0 for all sequences \n in the batch and all timesteps in the sequence, labels is just a tensor\n full of zeros !\n \"\"\"\n batch_size, time_size, dim_encoded = encoded_data.size()\n window_size = time_size - self.K_\n outputs = []\n\n neg_ext = encoded_data.contiguous().view(-1, dim_encoded)\n n_elem_sampled = self.n_negative * window_size * batch_size\n # Draw nNegativeExt * batchSize negative samples anywhere in the batch\n batch_idx = torch.randint(low=0, high=batch_size,\n size=(n_elem_sampled, ),\n device=encoded_data.device)\n\n
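 # Note (added): negatives are drawn uniformly over the whole batch, so a\n # negative can come from another speaker, another sequence, or even the\n # sequence being predicted; harder sampling schemes (e.g. same speaker\n # only) are possible variants\n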
 seq_idx = torch.randint(low=1, high=time_size,\n size=(n_elem_sampled, ),\n device=encoded_data.device)\n\n base_idx = torch.arange(0, window_size, device=encoded_data.device)\n base_idx = base_idx.view(1, 1, window_size)\n base_idx = base_idx.expand(1, self.n_negative, window_size)\n base_idx = base_idx.expand(batch_size, self.n_negative, window_size)\n seq_idx += base_idx.contiguous().view(-1)\n seq_idx = torch.remainder(seq_idx, time_size)\n\n ext_idx = seq_idx + batch_idx * time_size\n neg_ext = neg_ext[ext_idx].view(batch_size, self.n_negative,\n window_size, dim_encoded)\n label_loss = torch.zeros((batch_size, window_size),\n dtype=torch.long,\n device=encoded_data.device)\n\n for k in range(1, self.K_ + 1):\n\n # Positive samples\n if k < self.K_:\n pos_seq = encoded_data[:, k:-(self.K_-k)]\n else:\n pos_seq = encoded_data[:, k:]\n\n pos_seq = pos_seq.view(batch_size, 1, pos_seq.size(1), dim_encoded)\n full_seq = torch.cat((pos_seq, neg_ext), dim=1)\n outputs.append(full_seq)\n\n return outputs, label_loss\n\n def forward(self, encoded_data, context_data):\n\n # TO COMPLETE:\n # Perform the full cpc criterion\n # Returns 2 values:\n # - the average classification loss avg_loss\n # - the average classification accuracy avg_acc\n\n # Reminder : The permutation !\n encoded_data = encoded_data.permute(0, 2, 1)\n\n # First we need to sample the negative examples\n negative_samples, labels = self.sample_negatives(encoded_data)\n\n # Then we must compute phi_k\n phi_k = self.get_prediction_k(context_data)\n\n # Finally we must get the dot product between phi_k and negative_samples \n # for each k\n\n # The total loss is the average of all losses\n avg_loss = 0\n\n # Average accuracy\n avg_acc = 0\n\n for k in range(self.K_):\n B, N_sampled, S_small, H = negative_samples[k].size() \n B, S, H = phi_k[k].size()\n\n # As told before S = S_small + K. For segments too far in the sequence\n # there are no positive examples anyway, so we must shorten phi_k\n phi = phi_k[k][:, :S_small]\n\n # Now the dot product\n # You have several ways to do that, let's do the simple but non-optimal\n # one\n
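 # (added aside: an equivalent and much faster one-liner exists,\n # scores = torch.einsum(\"bnsh,bsh->bns\", negative_samples[k], phi),\n # but we keep the explicit bmm version below for pedagogy)\n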
 # pytorch has a matrix product function https://pytorch.org/docs/stable/torch.html#torch.bmm\n # But it takes only 3D tensors of the same batch size !\n # To begin negative_samples is a 4D tensor ! \n # We want to compute the dot product for each feature, of each sequence\n # of the batch. Thus we are trying to compute a dot product for all\n # B * N_sampled * S_small 1D vectors of negative_samples[k]\n # Or, a 1D tensor of size H is also a matrix of size 1 x H\n # Then, we must view it as a 3D tensor of size (B * N_sampled * S_small, 1, H)\n negative_sample_k = negative_samples[k].view(B * N_sampled * S_small, 1, H)\n\n # But now phi and negative_sample_k no longer have the same batch size !\n # No worries, we can expand phi so that each sequence of the batch\n # is repeated N_sampled times\n phi = phi.view(B, 1, S_small, H).expand(B, N_sampled, S_small, H)\n\n # And now we can view it as a 3D tensor \n phi = phi.contiguous().view(B * N_sampled * S_small, H, 1)\n\n # We can finally get the dot product !\n scores = torch.bmm(negative_sample_k, phi)\n\n # scores has size (B * N_sampled * S_small, 1, 1)\n # Let's reorder it a bit\n scores = scores.reshape(B, N_sampled, S_small)\n\n # For each element of the sequence, and each sampled candidate, it gives \n # a floating score stating the likelihood of this element being the \n # true one.\n # Now the classification loss, we need to use the Cross Entropy loss\n # https://pytorch.org/docs/master/generated/torch.nn.CrossEntropyLoss.html\n\n # For each time-step of each sequence of the batch \n # we have N_sampled possible predictions. \n # Looking at the documentation of torch.nn.CrossEntropyLoss\n # we can see that this loss expects a tensor of size M x C where \n # - M is the number of elements with a classification score\n # - C is the number of possible classes\n # There are N_sampled candidates for each prediction so\n # C = N_sampled \n # Each timestep of each sequence of the batch has a prediction so\n # M = B * S_small\n # Thus we need an input tensor of size (B * S_small, N_sampled)\n # To begin, we need to permute the axes\n scores = scores.permute(0, 2, 1) # Now it has size B, S_small, N_sampled\n\n # Then we can cast it into a 2D tensor\n scores = scores.reshape(B * S_small, N_sampled)\n\n # Same thing for the labels \n labels = labels.reshape(B * S_small)\n\n # Finally we can get the classification loss\n loss_criterion = torch.nn.CrossEntropyLoss()\n loss_k = loss_criterion(scores, labels)\n avg_loss += loss_k\n\n # And for the accuracy\n # The prediction for each element is the sample with the highest score\n # Thus the tensor of all predictions is the tensor of the indices of the \n # maximal score for each time-step of each sequence of the batch\n predictions = torch.argmax(scores, 1)\n # cast to float before averaging to avoid integer division\n acc_k = (labels == predictions).float().mean()\n avg_acc += acc_k\n\n # Normalization\n avg_loss = avg_loss / self.K_\n avg_acc = avg_acc / self.K_\n \n return avg_loss, avg_acc", "_____no_output_____" ] ], [ [ "Don't forget to test !", "_____no_output_____" ] ], [ [ "audio = torchaudio.load(\"/content/train_data/831/130739/831-130739-0048.flac\")[0]\naudio = audio.view(1, 1, -1)\ncpc_criterion = CPCCriterion(N_PREDICTIONS, DIM_CONTEXT, \n DIM_ENCODER, N_NEGATIVE_SAMPLE).to(device)\ncontext_output, encoder_output = cpc_model(audio.to(device))\nloss, avg = cpc_criterion(encoder_output, context_output)", "/pytorch/aten/src/ATen/native/BinaryOps.cpp:81: UserWarning: Integer division of tensors using div or / is deprecated, and in a future release div will perform true division as in Python 3. Use true_divide or floor_divide (// in Python) instead.\n" ] ], [ [ "## Exercise 3: Full training loop !\n\nYou have the model, you have the criterion.
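 Before wiring everything together, a quick smoke test on random data can catch shape bugs early (a minimal sketch reusing the objects defined above, not part of the exercise):\n\n```python\n# hypothetical smoke test: one forward/backward pass on random audio\ndummy_audio = torch.randn(2, 1, SIZE_WINDOW, device=device)\ncontext_out, encoder_out = cpc_model(dummy_audio)\nloss, acc = cpc_criterion(encoder_out, context_out)\nloss.backward()\n```\n\n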
All you need now are a data loader and an optimizer to run your training loop.\n\nWe will use an Adam optimizer:", "_____no_output_____" ] ], [ [ "parameters = list(cpc_criterion.parameters()) + list(cpc_model.parameters())\noptimizer = torch.optim.Adam(parameters, lr=LEARNING_RATE)", "_____no_output_____" ] ], [ [ "As far as the data loader is concerned, we will rely on the one provided by the CPC_audio library.", "_____no_output_____" ] ], [ [ "dataset_train = load_dataset('/content/train_data')\ndataset_val = load_dataset('/content/val_data')\ndata_loader_train = dataset_train.getDataLoader(BATCH_SIZE, \"speaker\", True)\ndata_loader_val = dataset_val.getDataLoader(BATCH_SIZE, \"sequence\", False)", "162it [00:00, 14774.14it/s]" ] ], [ [ "Now that everything is ready, complete and test the ```train_step``` function below, which trains the model for one epoch.", "_____no_output_____" ] ], [ [ "def train_step(data_loader,\n               cpc_model,\n               cpc_criterion,\n               optimizer):\n \n    avg_loss = 0\n    avg_acc = 0\n    n_items = 0\n\n    for step, data in enumerate(data_loader):\n        x, y = data\n        bs = len(x)\n        optimizer.zero_grad()\n        context_output, encoder_output = cpc_model(x.to(device))\n        loss, acc = cpc_criterion(encoder_output, context_output)\n        loss.backward()\n        optimizer.step()\n        n_items += bs\n        avg_loss += loss.item()*bs\n        avg_acc += acc.item()*bs\n \n    avg_loss /= n_items\n    avg_acc /= n_items\n    return avg_loss, avg_acc", "_____no_output_____" ] ], [ [ "## Exercise 4 : Validation loop\n\nNow complete the validation loop.", "_____no_output_____" ] ], [ [ "def validation_step(data_loader,\n                    cpc_model,\n                    cpc_criterion):\n \n    avg_loss = 0\n    avg_acc = 0\n    n_items = 0\n\n    for step, data in enumerate(data_loader):\n        x, y = data\n        bs = len(x)\n        context_output, encoder_output = cpc_model(x.to(device))\n        loss, acc = cpc_criterion(encoder_output, context_output)\n        n_items += bs\n        avg_loss += loss.item()*bs\n        avg_acc += acc.item()*bs\n \n    avg_loss /= n_items\n    avg_acc /= n_items\n    return avg_loss, avg_acc", "_____no_output_____" ] ], [ [ "## Exercise 5: Run everything", "_____no_output_____" ] ], [ [ "def run(train_loader,\n        val_loader,\n        cpc_model,\n        cpc_criterion,\n        optimizer,\n        n_epochs):\n \n    for epoch in range(n_epochs):\n\n        print(f\"Running epoch {epoch+1} / {n_epochs}\")\n        avg_loss_train, avg_acc_train = train_step(train_loader, cpc_model, cpc_criterion, optimizer)\n        print(\"----------------------\")\n        print(f\"Training dataset\")\n        print(f\"- average loss : {avg_loss_train}\")\n        print(f\"- average accuracy : {avg_acc_train}\")\n        print(\"----------------------\")\n        with torch.no_grad():\n            cpc_model.eval()\n            cpc_criterion.eval()\n            avg_loss_val, avg_acc_val = validation_step(val_loader, cpc_model, cpc_criterion)\n        print(f\"Validation dataset\")\n        print(f\"- average loss : {avg_loss_val}\")\n        print(f\"- average accuracy : {avg_acc_val}\")\n        print(\"----------------------\")\n        print()\n        cpc_model.train()\n        cpc_criterion.train()", "_____no_output_____" ], [ "run(data_loader_train, data_loader_val, cpc_model,cpc_criterion,optimizer,1)", "Running epoch 1 / 1\n----------------------\nTraining dataset\n- average loss : 4.878548990311724\n- average accuracy : 0.0\n----------------------\nValidation dataset\n- average loss : 4.878525506558247\n- average accuracy : 0.0\n----------------------\n\n" ] ], [ [ "Once everything is done, clear the memory.", "_____no_output_____" ] ], [ [ "del dataset_train\ndel dataset_val\ndel cpc_model\ndel context\ndel encoder", "_____no_output_____" ] ], [ [ "# Part 2 : Fine tuning", "_____no_output_____", "## 
Exercise 1 : Phone separability with aligned phonemes.\n\nOne option to evaluate the quality of the features trained with CPC is to check whether they can be used to recognize phonemes. \nTo do so, we can fine-tune a pre-trained model using a limited amount of labelled speech data.\nWe are going to start with a simple evaluation setting where we have the phone label for each timestep corresponding to a CPC feature.\n\nWe will work with a model already pre-trained on English data. As far as the fine-tuning dataset is concerned, we will use a 1h subset of [librispeech-100](http://www.openslr.org/12/). ", "_____no_output_____" ] ], [ [ "!mkdir checkpoint_data\n!wget https://dl.fbaipublicfiles.com/librilight/CPC_checkpoints/not_hub/2levels_6k_top_ctc/checkpoint_30.pt -P checkpoint_data\n!wget https://dl.fbaipublicfiles.com/librilight/CPC_checkpoints/not_hub/2levels_6k_top_ctc/checkpoint_logs.json -P checkpoint_data\n!wget https://dl.fbaipublicfiles.com/librilight/CPC_checkpoints/not_hub/2levels_6k_top_ctc/checkpoint_args.json -P checkpoint_data\n!ls checkpoint_data", "--2020-06-29 10:11:59--  https://dl.fbaipublicfiles.com/librilight/CPC_checkpoints/not_hub/2levels_6k_top_ctc/checkpoint_30.pt\nResolving dl.fbaipublicfiles.com (dl.fbaipublicfiles.com)... 104.22.74.142, 104.22.75.142, 172.67.9.4, ...\nConnecting to dl.fbaipublicfiles.com (dl.fbaipublicfiles.com)|104.22.74.142|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 113599715 (108M) [application/octet-stream]\nSaving to: ‘checkpoint_data/checkpoint_30.pt’\n\ncheckpoint_30.pt 100%[===================>] 108.34M 12.2MB/s in 10s \n\n2020-06-29 10:12:10 (10.5 MB/s) - ‘checkpoint_data/checkpoint_30.pt’ saved [113599715/113599715]\n\n--2020-06-29 10:12:12--  https://dl.fbaipublicfiles.com/librilight/CPC_checkpoints/not_hub/2levels_6k_top_ctc/checkpoint_logs.json\nResolving dl.fbaipublicfiles.com (dl.fbaipublicfiles.com)... 104.22.74.142, 104.22.75.142, 172.67.9.4, ...\nConnecting to dl.fbaipublicfiles.com (dl.fbaipublicfiles.com)|104.22.74.142|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 20786 (20K) [text/plain]\nSaving to: ‘checkpoint_data/checkpoint_logs.json’\n\ncheckpoint_logs.jso 100%[===================>] 20.30K 125KB/s in 0.2s \n\n2020-06-29 10:12:13 (125 KB/s) - ‘checkpoint_data/checkpoint_logs.json’ saved [20786/20786]\n\n--2020-06-29 10:12:14--  https://dl.fbaipublicfiles.com/librilight/CPC_checkpoints/not_hub/2levels_6k_top_ctc/checkpoint_args.json\nResolving dl.fbaipublicfiles.com (dl.fbaipublicfiles.com)... 104.22.74.142, 104.22.75.142, 172.67.9.4, ...\nConnecting to dl.fbaipublicfiles.com (dl.fbaipublicfiles.com)|104.22.74.142|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 2063 (2.0K) [text/plain]\nSaving to: ‘checkpoint_data/checkpoint_args.json’\n\ncheckpoint_args.jso 100%[===================>] 2.01K --.-KB/s in 0s \n\n2020-06-29 10:12:15 (35.3 MB/s) - ‘checkpoint_data/checkpoint_args.json’ saved [2063/2063]\n\ncheckpoint_30.pt checkpoint_args.json\tcheckpoint_logs.json\n" ], [ "%cd /content/CPC_audio\nfrom cpc.dataset import parseSeqLabels\nfrom cpc.feature_loader import loadModel\n\ncheckpoint_path = 'checkpoint_data/checkpoint_30.pt'\ncpc_model, HIDDEN_CONTEXT_MODEL, HIDDEN_ENCODER_MODEL = loadModel([checkpoint_path])\ncpc_model = cpc_model.cuda()\nlabel_dict, N_PHONES = parseSeqLabels('/content/converted_aligned_phones.txt')\ndataset_train = load_dataset('/content/train_data', file_extension='.flac', phone_label_dict=label_dict)\ndataset_val = load_dataset('/content/val_data', file_extension='.flac', phone_label_dict=label_dict)\ndata_loader_train = dataset_train.getDataLoader(BATCH_SIZE, \"speaker\", True)\ndata_loader_val = dataset_val.getDataLoader(BATCH_SIZE, \"sequence\", False)", "/content/CPC_audio\nLoading checkpoint checkpoint_data/checkpoint_30.pt\nLoading the state dict at checkpoint_data/checkpoint_30.pt\n" ], [ "??cpc_model", "_____no_output_____" ] ], [ [ "Then we will use a simple linear classifier to recognize the phonemes from the features produced by ```cpc_model```. \n\n### a) Build the phone classifier \n\nDesign a class of linear classifiers, ```PhoneClassifier```, that takes as input a batch of sequences of CPC features and outputs a score vector for each phoneme.", "_____no_output_____" ] ], [ [ "class PhoneClassifier(torch.nn.Module):\n\n    def __init__(self,\n                 input_dim : int,\n                 n_phones : int):\n        super(PhoneClassifier, self).__init__()\n        self.linear = torch.nn.Linear(input_dim, n_phones)\n\n    def forward(self, x):\n        return self.linear(x)", "_____no_output_____" ] ], [ [ "Our phone classifier will then be:", "_____no_output_____" ] ], [ [ "phone_classifier = PhoneClassifier(HIDDEN_CONTEXT_MODEL, N_PHONES).to(device)", "_____no_output_____" ] ], [ [ "### b - What would be the correct loss criterion for this task ?\n\n", "_____no_output_____" ] ], [ [ "loss_criterion = torch.nn.CrossEntropyLoss()", "_____no_output_____" ] ], [ [ "To perform the fine-tuning, we will also need an optimization function.\n\nWe will use an [Adam optimizer](https://pytorch.org/docs/stable/optim.html#torch.optim.Adam).", "_____no_output_____" ] ], [ [ "parameters = list(phone_classifier.parameters()) + list(cpc_model.parameters())\nLEARNING_RATE = 2e-4\noptimizer = torch.optim.Adam(parameters, lr=LEARNING_RATE)", "_____no_output_____" ] ], [ [ "You might also want to perform this training while freezing the weights of the ```cpc_model```. Indeed, if the pre-training was good enough, then the ```cpc_model``` phoneme representations should be linearly separable. In this case the optimizer should be defined like this:", "_____no_output_____" ] ], [ [ "optimizer_frozen = torch.optim.Adam(list(phone_classifier.parameters()), lr=LEARNING_RATE)", "_____no_output_____" ] ], [ [ "### c- Now let's build a training loop. 
\nComplete the function ```train_one_epoch``` below.\n\n", "_____no_output_____" ] ], [ [ "def train_one_epoch(cpc_model, \n                    phone_classifier, \n                    loss_criterion, \n                    data_loader, \n                    optimizer):\n\n    cpc_model.train()\n    loss_criterion.train()\n\n    avg_loss = 0\n    avg_accuracy = 0\n    n_items = 0\n    for step, full_data in enumerate(data_loader):\n        # Each batch is represented by a Tuple of vectors:\n        # sequence of size : N x 1 x T\n        # label of size : N x T\n        #\n        # With :\n        # - N the number of sequences in the batch\n        # - T the size of each sequence\n        sequence, label = full_data\n\n        bs = len(sequence)\n        seq_len = label.size(1)\n        optimizer.zero_grad()\n        context_out, enc_out, _ = cpc_model(sequence.to(device),label.to(device))\n\n        scores = phone_classifier(context_out)\n\n        scores = scores.permute(0,2,1)\n        loss = loss_criterion(scores,label.to(device))\n        loss.backward()\n        optimizer.step()\n        avg_loss+=loss.item()*bs\n        n_items+=bs\n        correct_labels = scores.argmax(1)\n        avg_accuracy += ((label==correct_labels.cpu()).float()).mean(1).sum().item()\n    avg_loss/=n_items\n    avg_accuracy/=n_items\n    return avg_loss, avg_accuracy\n", "_____no_output_____" ] ], [ [ "Don't forget to test it !", "_____no_output_____" ] ], [ [ "avg_loss, avg_accuracy = train_one_epoch(cpc_model, phone_classifier, loss_criterion, data_loader_train, optimizer_frozen)", "_____no_output_____" ], [ "avg_loss, avg_accuracy", "_____no_output_____" ] ], [ [ "### d- Build the validation loop", "_____no_output_____" ] ], [ [ "def validation_step(cpc_model, \n                    phone_classifier, \n                    loss_criterion, \n                    data_loader):\n \n    cpc_model.eval()\n    phone_classifier.eval()\n\n    avg_loss = 0\n    avg_accuracy = 0\n    n_items = 0\n    with torch.no_grad():\n        for step, full_data in enumerate(data_loader):\n            # Each batch is represented by a Tuple of vectors:\n            # sequence of size : N x 1 x T\n            # label of size : N x T\n            #\n            # With :\n            # - N the number of sequences in the batch\n            # - T the size of each sequence\n            sequence, label = full_data\n            bs = len(sequence)\n            seq_len = label.size(1)\n            context_out, enc_out, _ = cpc_model(sequence.to(device),label.to(device))\n            scores = phone_classifier(context_out)\n            scores = scores.permute(0,2,1)\n            loss = loss_criterion(scores,label.to(device))\n            avg_loss+=loss.item()*bs\n            n_items+=bs\n            correct_labels = scores.argmax(1)\n            avg_accuracy += ((label==correct_labels.cpu()).float()).mean(1).sum().item()\n    avg_loss/=n_items\n    avg_accuracy/=n_items\n    return avg_loss, avg_accuracy", "_____no_output_____" ] ], [ [ "### e- Run everything\n\nTest this function with both ```optimizer``` and ```optimizer_frozen```.", "_____no_output_____" ] ], [ [ "def run(cpc_model, \n        phone_classifier, \n        loss_criterion, \n        data_loader_train, \n        data_loader_val, \n        optimizer,\n        n_epoch):\n\n    for epoch in range(n_epoch):\n\n        print(f\"Running epoch {epoch + 1} / {n_epoch}\")\n        loss_train, acc_train = train_one_epoch(cpc_model, phone_classifier, loss_criterion, data_loader_train, optimizer)\n        print(\"-------------------\")\n        print(f\"Training dataset :\")\n        print(f\"Average loss : {loss_train}. Average accuracy {acc_train}\")\n\n        print(\"-------------------\")\n        print(\"Validation dataset\")\n        loss_val, acc_val = validation_step(cpc_model, phone_classifier, loss_criterion, data_loader_val)\n        print(f\"Average loss : {loss_val}. Average accuracy {acc_val}\")\n        print(\"-------------------\")\n        print()", "_____no_output_____" ], [ "run(cpc_model,phone_classifier,loss_criterion,data_loader_train,data_loader_val,optimizer_frozen,n_epoch=10)", "Running epoch 1 / 10\n-------------------\nTraining dataset :\nAverage loss : 1.0219527069604895. Average accuracy 0.7057498257933105\n-------------------\nValidation dataset\nAverage loss : 1.0688188383544701. Average accuracy 0.6966259057971015\n-------------------\n\nRunning epoch 2 / 10\n-------------------\nTraining dataset :\nAverage loss : 0.9813667901793229. Average accuracy 0.7150101397226987\n-------------------\nValidation dataset\nAverage loss : 1.0425163616304811. Average accuracy 0.7018257472826087\n-------------------\n\nRunning epoch 3 / 10\n-------------------\nTraining dataset :\nAverage loss : 0.9586697046657233. Average accuracy 0.7197957984919954\n-------------------\nValidation dataset\nAverage loss : 1.029444672577623. Average accuracy 0.7042572463768116\n-------------------\n\nRunning epoch 4 / 10\n-------------------\nTraining dataset :\nAverage loss : 0.9506696053679975. Average accuracy 0.7210777810534591\n-------------------\nValidation dataset\nAverage loss : 0.9972989958265553. Average accuracy 0.7106855751811594\n-------------------\n\nRunning epoch 5 / 10\n-------------------\nTraining dataset :\nAverage loss : 0.9448814161032387. Average accuracy 0.7218349101272156\n-------------------\nValidation dataset\nAverage loss : 0.9780038367147031. Average accuracy 0.7143144248188406\n-------------------\n\nRunning epoch 6 / 10\n-------------------\nTraining dataset :\nAverage loss : 0.929420669805397. Average accuracy 0.7249711888936535\n-------------------\nValidation dataset\nAverage loss : 0.9746506056923797. Average accuracy 0.715234375\n-------------------\n\nRunning epoch 7 / 10\n-------------------\nTraining dataset :\nAverage loss : 0.9157625487901743. Average accuracy 0.7281409689465409\n-------------------\nValidation dataset\nAverage loss : 0.9955525258313055. Average accuracy 0.710739356884058\n-------------------\n\nRunning epoch 8 / 10\n-------------------\nTraining dataset :\nAverage loss : 0.9135043791459587. Average accuracy 0.7293961504788451\n-------------------\nValidation dataset\nAverage loss : 0.9721649721048881. Average accuracy 0.7155910326086956\n-------------------\n\nRunning epoch 9 / 10\n-------------------\nTraining dataset :\nAverage loss : 0.9004762198940013. Average accuracy 0.7319076302530018\n-------------------\nValidation dataset\nAverage loss : 0.980453244154004. Average accuracy 0.713963428442029\n-------------------\n\nRunning epoch 10 / 10\n-------------------\nTraining dataset :\nAverage loss : 0.9172716077018834. Average accuracy 0.7282263972269869\n-------------------\nValidation dataset\nAverage loss : 0.9502466599146525. Average accuracy 0.7199048913043479\n-------------------\n\n" ] ], [ [ "## Exercise 2 : Phone separability without alignment (PER)\n\nAligned data are very practical, but in real life they are rarely available. That's why in this exercise we will consider fine-tuning with non-aligned phonemes.\n\nThe model, the optimizer and the phone classifier will stay the same. However, we will replace our phone criterion with a [CTC loss](https://pytorch.org/docs/master/generated/torch.nn.CTCLoss.html). 
", "_____no_output_____" ] ], [ [ "loss_ctc = torch.nn.CTCLoss()", "_____no_output_____" ] ], [ [ "Besides, we will use a siglthy different dataset class.", "_____no_output_____" ] ], [ [ "%cd /content/CPC_audio\nfrom cpc.eval.common_voices_eval import SingleSequenceDataset, parseSeqLabels, findAllSeqs\npath_train_data_per = '/content/per_data/pack_master/1h'\npath_val_data_per = '/content/per_data/pack_master/10min'\npath_phone_data_per = '/content/per_data/pack_master/10h_phones.txt'\nBATCH_SIZE=8\n\nphone_labels, N_PHONES = parseSeqLabels(path_phone_data_per)\ndata_train_per, _ = findAllSeqs(path_train_data_per, extension='.flac')\ndataset_train_non_aligned = SingleSequenceDataset(path_train_data_per, data_train_per, phone_labels)\ndata_loader_train = torch.utils.data.DataLoader(dataset_train_non_aligned, batch_size=BATCH_SIZE,\n shuffle=True)\n\ndata_val_per, _ = findAllSeqs(path_val_data_per, extension='.flac')\ndataset_val_non_aligned = SingleSequenceDataset(path_val_data_per, data_val_per, phone_labels)\ndata_loader_val = torch.utils.data.DataLoader(dataset_val_non_aligned, batch_size=BATCH_SIZE,\n shuffle=True)", "67it [00:00, 16115.29it/s]" ] ], [ [ "### a- Training\n\nSince the phonemes are not aligned, there is no simple direct way to get the classification acuracy of a model. Write and test the three functions ```train_one_epoch_ctc```, ```validation_step_ctc``` and ```run_ctc``` as before but without considering the average acuracy of the model. ", "_____no_output_____" ] ], [ [ "from cpc.feature_loader import loadModel\n\ncheckpoint_path = 'checkpoint_data/checkpoint_30.pt'\ncpc_model, HIDDEN_CONTEXT_MODEL, HIDDEN_ENCODER_MODEL = loadModel([checkpoint_path])\ncpc_model = cpc_model.cuda()\nphone_classifier = PhoneClassifier(HIDDEN_CONTEXT_MODEL, N_PHONES).to(device)", "Loading checkpoint checkpoint_data/checkpoint_30.pt\nLoading the state dict at checkpoint_data/checkpoint_30.pt\n" ], [ "parameters = list(phone_classifier.parameters()) + list(cpc_model.parameters())\nLEARNING_RATE = 2e-4\noptimizer = torch.optim.Adam(parameters, lr=LEARNING_RATE)\n\noptimizer_frozen = torch.optim.Adam(list(phone_classifier.parameters()), lr=LEARNING_RATE)", "_____no_output_____" ], [ "import torch.nn.functional as F\n\ndef train_one_epoch_ctc(cpc_model, \n phone_classifier, \n loss_criterion, \n data_loader, \n optimizer):\n \n cpc_model.train()\n loss_criterion.train()\n\n avg_loss = 0\n avg_accuracy = 0\n n_items = 0\n for step, full_data in enumerate(data_loader):\n\n x, x_len, y, y_len = full_data\n\n x_batch_len = x.shape[-1]\n x, y = x.to(device), y.to(device)\n\n bs=x.size(0)\n optimizer.zero_grad()\n context_out, enc_out, _ = cpc_model(x.to(device),y.to(device))\n \n scores = phone_classifier(context_out)\n scores = scores.permute(1,0,2)\n scores = F.log_softmax(scores,2)\n yhat_len = torch.tensor([int(scores.shape[0]*x_len[i]/x_batch_len) for i in range(scores.shape[1])]) # this is an approximation, should be good enough\n\n loss = loss_criterion(scores,y.to(device),yhat_len,y_len)\n loss.backward()\n optimizer.step()\n avg_loss+=loss.item()*bs\n n_items+=bs\n avg_loss/=n_items\n return avg_loss\n\ndef validation_step(cpc_model, \n phone_classifier, \n loss_criterion, \n data_loader):\n\n cpc_model.eval()\n phone_classifier.eval()\n avg_loss = 0\n avg_accuracy = 0\n n_items = 0\n with torch.no_grad():\n for step, full_data in enumerate(data_loader):\n\n x, x_len, y, y_len = full_data\n\n x_batch_len = x.shape[-1]\n x, y = x.to(device), y.to(device)\n\n bs=x.size(0)\n context_out, 
enc_out, _ = cpc_model(x.to(device),y.to(device))\n \n scores = phone_classifier(context_out)\n scores = scores.permute(1,0,2)\n scores = F.log_softmax(scores,2)\n yhat_len = torch.tensor([int(scores.shape[0]*x_len[i]/x_batch_len) for i in range(scores.shape[1])]) # this is an approximation, should be good enough\n\n loss = loss_criterion(scores,y.to(device),yhat_len,y_len)\n avg_loss+=loss.item()*bs\n n_items+=bs\n avg_loss/=n_items\n\n return avg_loss\n\ndef run_ctc(cpc_model, \n phone_classifier, \n loss_criterion, \n data_loader_train, \n data_loader_val, \n optimizer,\n n_epoch):\n for epoch in range(n_epoch):\n\n print(f\"Running epoch {epoch + 1} / {n_epoch}\")\n loss_train = train_one_epoch_ctc(cpc_model, phone_classifier, loss_criterion, data_loader_train, optimizer)\n print(\"-------------------\")\n print(f\"Training dataset :\")\n print(f\"Average loss : {loss_train}.\")\n\n print(\"-------------------\")\n print(\"Validation dataset\")\n loss_val = validation_step(cpc_model, phone_classifier, loss_criterion, data_loader_val)\n print(f\"Average loss : {loss_val}\")\n print(\"-------------------\")\n print()", "_____no_output_____" ], [ "run_ctc(cpc_model,phone_classifier,loss_ctc,data_loader_train,data_loader_val,optimizer_frozen,n_epoch=10)", "Running epoch 1 / 10\n-------------------\nTraining dataset :\nAverage loss : 32.44543953208657.\n-------------------\nValidation dataset\nAverage loss : 32.01081585093132\n-------------------\n\nRunning epoch 2 / 10\n-------------------\nTraining dataset :\nAverage loss : 30.99022026328774.\n-------------------\nValidation dataset\nAverage loss : 30.300324444522225\n-------------------\n\nRunning epoch 3 / 10\n-------------------\nTraining dataset :\nAverage loss : 29.319565432888645.\n-------------------\nValidation dataset\nAverage loss : 28.464903420181635\n-------------------\n\nRunning epoch 4 / 10\n-------------------\nTraining dataset :\nAverage loss : 27.567655403297262.\n-------------------\nValidation dataset\nAverage loss : 26.642856191119876\n-------------------\n\nRunning epoch 5 / 10\n-------------------\nTraining dataset :\nAverage loss : 25.832834390493538.\n-------------------\nValidation dataset\nAverage loss : 24.82515669546986\n-------------------\n\nRunning epoch 6 / 10\n-------------------\nTraining dataset :\nAverage loss : 24.127855487636754.\n-------------------\nValidation dataset\nAverage loss : 23.089440377402646\n-------------------\n\nRunning epoch 7 / 10\n-------------------\nTraining dataset :\nAverage loss : 22.487650171026484.\n-------------------\nValidation dataset\nAverage loss : 21.420966125777547\n-------------------\n\nRunning epoch 8 / 10\n-------------------\nTraining dataset :\nAverage loss : 20.911723703771205.\n-------------------\nValidation dataset\nAverage loss : 19.82448653813222\n-------------------\n\nRunning epoch 9 / 10\n-------------------\nTraining dataset :\nAverage loss : 19.407203647640202.\n-------------------\nValidation dataset\nAverage loss : 18.315616182806366\n-------------------\n\nRunning epoch 10 / 10\n-------------------\nTraining dataset :\nAverage loss : 17.984150039566146.\n-------------------\nValidation dataset\nAverage loss : 16.896408695745244\n-------------------\n\n" ] ], [ [ "### b- Evaluation: the Phone Error Rate (PER)\n\nIn order to compute the similarity between two sequences, we can use the [Levenshtein distance](https://en.wikipedia.org/wiki/Levenshtein_distance). 
This distance estimates the minimum number of insertions, deletions and substitutions needed to move from one sequence to another. If we normalize this distance by the number of characters in the reference sequence, we get the Phone Error Rate (PER).\n\nThis value can be interpreted as :\n\\\\[ PER = \\frac{S + D + I}{N} \\\\]\n\nWhere:\n\n\n* N is the number of characters in the reference\n* S is the number of substitutions\n* I is the number of insertions\n* D is the number of deletions\n\nFor the best possible alignment of the two sequences.\n\n", "_____no_output_____" ] ], [ [ "import numpy as np\n\ndef get_PER_sequence(ref_seq, target_seq):\n\n    n = len(ref_seq)\n    m = len(target_seq)\n\n    D = np.zeros((n+1,m+1))\n    for i in range(1,n+1):\n        D[i,0] = D[i-1,0]+1\n    for j in range(1,m+1):\n        D[0,j] = D[0,j-1]+1\n\n    # Fill the DP table: deletion, substitution, insertion, exact match\n    for i in range(1,n+1):\n        for j in range(1,m+1):\n            D[i,j] = min(\n                D[i-1,j]+1,\n                D[i-1,j-1]+1,\n                D[i,j-1]+1,\n                D[i-1,j-1] + 0 if ref_seq[i-1]==target_seq[j-1] else float(\"inf\")\n            )\n    return D[n,m]/len(ref_seq)", "_____no_output_____" ] ], [ [ "You can test your function below:", "_____no_output_____" ] ], [ [ "ref_seq = [0, 1, 1, 2, 0, 2, 2]\npred_seq = [1, 1, 2, 2, 0, 0]\n\nexpected_PER = 4. / 7.\nprint(get_PER_sequence(ref_seq, pred_seq) == expected_PER)", "True\n" ] ], [ [ "## c- Evaluating the PER of your model on the test dataset\n\nEvaluate the PER on the validation dataset. Please notice that you should usually use a separate dataset, called the dev dataset, to perform this operation. However, for the sake of simplicity we will work with validation data in this exercise.", "_____no_output_____" ] ], [ [ "import progressbar\nfrom multiprocessing import Pool\n\ndef cut_data(seq, sizeSeq):\n    maxSeq = sizeSeq.max()\n    return seq[:, :maxSeq]\n\n\ndef prepare_data(data):\n    seq, sizeSeq, phone, sizePhone = data\n    seq = seq.cuda()\n    phone = phone.cuda()\n    sizeSeq = sizeSeq.cuda().view(-1)\n    sizePhone = sizePhone.cuda().view(-1)\n\n    seq = cut_data(seq.permute(0, 2, 1), sizeSeq).permute(0, 2, 1)\n    return seq, sizeSeq, phone, sizePhone\n\n\ndef get_per(test_dataloader,\n            cpc_model,\n            phone_classifier):\n\n    downsampling_factor = 160\n    cpc_model.eval()\n    phone_classifier.eval()\n\n    avgPER = 0\n    nItems = 0\n\n    print(\"Starting the PER computation\")\n    bar = progressbar.ProgressBar(maxval=len(test_dataloader))\n    bar.start()\n\n    for index, data in enumerate(test_dataloader):\n\n        bar.update(index)\n\n        with torch.no_grad():\n\n            seq, sizeSeq, phone, sizePhone = prepare_data(data)\n            c_feature, _, _ = cpc_model(seq.to(device),phone.to(device))\n            sizeSeq = sizeSeq / downsampling_factor\n            predictions = torch.nn.functional.softmax(\n                phone_classifier(c_feature), dim=2).cpu()\n            phone = phone.cpu()\n            sizeSeq = sizeSeq.cpu()\n            sizePhone = sizePhone.cpu()\n\n            bs = c_feature.size(0)\n            data_per = [(predictions[b].argmax(1), phone[b]) for b in range(bs)]\n\n            with Pool(bs) as p:\n                poolData = p.starmap(get_PER_sequence, data_per)\n            avgPER += sum([x for x in poolData])\n            nItems += len(poolData)\n\n    bar.finish()\n\n    avgPER /= nItems\n\n    print(f\"Average PER {avgPER}\")\n    return avgPER\n", "_____no_output_____" ], [ "get_per(data_loader_val,cpc_model,phone_classifier)", "N/A% (0 of 27) |                        | Elapsed Time: 0:00:00 ETA:  --:--:--" ] ], [ [ "## Exercise 3 : Character error rate (CER)\n\nThe Character Error Rate (CER) is an evaluation metric similar to the PER but with characters instead of phonemes. Using the following data, run the functions you defined previously to estimate the CER of your model after fine-tuning.", "_____no_output_____" ] ], [ [ "# Load a dataset labelled with the letters of each sequence.\n%cd /content/CPC_audio\nfrom cpc.eval.common_voices_eval import SingleSequenceDataset, parseSeqLabels, findAllSeqs\npath_train_data_cer = '/content/per_data/pack_master/1h'\npath_val_data_cer = '/content/per_data/pack_master/10min'\npath_letter_data_cer = '/content/per_data/pack_master/chars.txt'\nBATCH_SIZE=8\n\nletters_labels, N_LETTERS = parseSeqLabels(path_letter_data_cer)\ndata_train_cer, _ = findAllSeqs(path_train_data_cer, extension='.flac')\ndataset_train_non_aligned = SingleSequenceDataset(path_train_data_cer, data_train_cer, letters_labels)\n\n\ndata_val_cer, _ = findAllSeqs(path_val_data_cer, extension='.flac')\ndataset_val_non_aligned = SingleSequenceDataset(path_val_data_cer, data_val_cer, letters_labels)\n\n\n# The data loader will generate a tuple of tensors data, labels for each batch\n# data : size N x T1 x 1 : the audio sequence\n# label : size N x T2 : the sequence of letters corresponding to the audio data\n# IMPORTANT NOTE: just like the PER, the CER is computed with non-aligned character data.\ndata_loader_train_letters = torch.utils.data.DataLoader(dataset_train_non_aligned, batch_size=BATCH_SIZE,\n                                                        shuffle=True)\ndata_loader_val_letters = torch.utils.data.DataLoader(dataset_val_non_aligned, batch_size=BATCH_SIZE,\n                                                      shuffle=True)", "67it [00:00, 10784.76it/s]" ], [ "from cpc.feature_loader import loadModel\n\ncheckpoint_path = 'checkpoint_data/checkpoint_30.pt'\ncpc_model, HIDDEN_CONTEXT_MODEL, HIDDEN_ENCODER_MODEL = loadModel([checkpoint_path])\ncpc_model = cpc_model.cuda()\ncharacter_classifier = PhoneClassifier(HIDDEN_CONTEXT_MODEL, N_LETTERS).to(device)", "Loading checkpoint checkpoint_data/checkpoint_30.pt\nLoading the state dict at checkpoint_data/checkpoint_30.pt\n" ], [ "parameters = list(character_classifier.parameters()) + list(cpc_model.parameters())\nLEARNING_RATE = 2e-4\noptimizer = torch.optim.Adam(parameters, lr=LEARNING_RATE)\n\noptimizer_frozen = torch.optim.Adam(list(character_classifier.parameters()), lr=LEARNING_RATE)", "_____no_output_____" ], [ "loss_ctc = torch.nn.CTCLoss()", "_____no_output_____" ], [ "run_ctc(cpc_model,character_classifier,loss_ctc,data_loader_train_letters,data_loader_val_letters,optimizer_frozen,n_epoch=10)", "Running epoch 1 / 10\n-------------------\nTraining dataset :\nAverage loss : 17.15224294729166.\n-------------------\nValidation dataset\nAverage loss : 16.621312869103598\n-------------------\n\nRunning epoch 2 / 10\n-------------------\nTraining dataset :\nAverage loss : 15.531890602378578.\n-------------------\nValidation dataset\nAverage loss : 14.840831449246519\n-------------------\n\nRunning epoch 3 / 10\n-------------------\nTraining dataset :\nAverage loss : 13.80899079863008.\n-------------------\nValidation dataset\nAverage loss : 12.998893340052021\n-------------------\n\nRunning epoch 4 / 10\n-------------------\nTraining dataset :\nAverage loss : 12.093861906678526.\n-------------------\nValidation dataset\nAverage loss : 11.24841435599666\n-------------------\n\nRunning epoch 5 / 10\n-------------------\nTraining dataset :\nAverage loss : 10.522326436076131.\n-------------------\nValidation dataset\nAverage loss : 9.72009142654202\n-------------------\n\nRunning epoch 6 / 
10\n-------------------\nTraining dataset :\nAverage loss : 9.169493901979672.\n-------------------\nValidation dataset\nAverage loss : 8.452587624861732\n-------------------\n\nRunning epoch 7 / 10\n-------------------\nTraining dataset :\nAverage loss : 8.05292275568822.\n-------------------\nValidation dataset\nAverage loss : 7.4334790514543725\n-------------------\n\nRunning epoch 8 / 10\n-------------------\nTraining dataset :\nAverage loss : 7.152782953702486.\n-------------------\nValidation dataset\nAverage loss : 6.621052656128509\n-------------------\n\nRunning epoch 9 / 10\n-------------------\nTraining dataset :\nAverage loss : 6.4348565915247775.\n-------------------\nValidation dataset\nAverage loss : 5.983951862389443\n-------------------\n\nRunning epoch 10 / 10\n-------------------\nTraining dataset :\nAverage loss : 5.864399843282633.\n-------------------\nValidation dataset\nAverage loss : 5.487570416870841\n-------------------\n\n" ], [ "get_per(data_loader_val_letters,cpc_model,character_classifier)", "N/A% (0 of 27) | | Elapsed Time: 0:00:00 ETA: --:--:--" ] ] ]
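A side note on the InfoNCE scoring step in the CPC criterion above: the view/expand/bmm sequence used there can be collapsed into a single `torch.einsum` contraction, which is usually both shorter and easier to read. The sketch below is a minimal, self-contained illustration on random tensors (shapes named after the notebook's B, N_sampled, S_small, H); it is not the notebook's exact code, just a demonstration that the two formulations agree.

```python
import torch

# Shapes mirroring the notebook: batch B, candidates N, time S, feature dim H.
B, N, S, H = 2, 5, 7, 16
candidates = torch.randn(B, N, S, H)   # positive + negative samples for one offset k
phi = torch.randn(B, S, H)             # prediction phi_k, already truncated to S_small

# One contraction replaces the view/expand/bmm dance:
# scores[b, n, s] = <candidates[b, n, s, :], phi[b, s, :]>
scores = torch.einsum("bnsh,bsh->bns", candidates, phi)

# Sanity check against the explicit bmm formulation used in the tutorial.
ref = torch.bmm(
    candidates.reshape(B * N * S, 1, H),
    phi.unsqueeze(1).expand(B, N, S, H).reshape(B * N * S, H, 1),
).reshape(B, N, S)
assert torch.allclose(scores, ref, atol=1e-5)
```

Beyond readability, the einsum form avoids materializing the expanded copy of `phi`, which matters when `N_sampled` is large.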
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cb9911c3e7e0c5cfa37a4ffcd58eff07540f94e8
33,417
ipynb
Jupyter Notebook
notebooks/official/pipelines/google_cloud_pipeline_components_automl_text.ipynb
lclc19/vertex-ai-samples
1844df54a6fc3d7afff1110a6758afaf13181b19
[ "Apache-2.0" ]
null
null
null
notebooks/official/pipelines/google_cloud_pipeline_components_automl_text.ipynb
lclc19/vertex-ai-samples
1844df54a6fc3d7afff1110a6758afaf13181b19
[ "Apache-2.0" ]
null
null
null
notebooks/official/pipelines/google_cloud_pipeline_components_automl_text.ipynb
lclc19/vertex-ai-samples
1844df54a6fc3d7afff1110a6758afaf13181b19
[ "Apache-2.0" ]
null
null
null
34.557394
313
0.528384
[ [ [ "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Vertex Pipelines: AutoML text classification pipelines using google-cloud-pipeline-components\n\n<table align=\"left\">\n <td>\n <a href=\"https://colab.research.google.com/github/GoogleCloudPlatform/ai-platform-unified/notebooks/official/google_cloud_pipeline_components_automl_text.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/colab-logo-32px.png\" alt=\"Colab logo\"> Run in Colab\n </a>\n </td>\n <td>\n <a href=\"https://github.com/GoogleCloudPlatform/ai-platform-unified/notebooks/official/google_cloud_pipeline_components_automl_text.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/github-logo-32px.png\" alt=\"GitHub logo\">\n View on GitHub\n </a>\n </td>\n <td>\n <a href=\"https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/ai-platform-unified/notebooks/official/google_cloud_pipeline_components_automl_text.ipynb\">\n Open in Google Cloud Notebooks\n </a>\n </td>\n</table>\n<br/><br/><br/>", "_____no_output_____" ], [ "## Overview\n\nThis notebook shows how to use the components defined in [`google_cloud_pipeline_components`](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud) to build an AutoML text classification workflow on [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines).", "_____no_output_____" ], [ "### Dataset\n\nThe dataset used for this tutorial is the [Happy Moments dataset](https://www.kaggle.com/ritresearch/happydb) from [Kaggle Datasets](https://www.kaggle.com/ritresearch/happydb). The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket.", "_____no_output_____" ], [ "### Objective\n\nIn this tutorial, you create an AutoML text classification using a pipeline with components from `google_cloud_pipeline_components`.\n\nThe steps performed include:\n\n- Create a `Dataset` resource.\n- Train an AutoML `Model` resource.\n- Creates an `Endpoint` resource.\n- Deploys the `Model` resource to the `Endpoint` resource.\n\nThe components are [documented here](https://google-cloud-pipeline-components.readthedocs.io/en/latest/google_cloud_pipeline_components.aiplatform.html#module-google_cloud_pipeline_components.aiplatform).", "_____no_output_____" ], [ "### Costs\n\nThis tutorial uses billable components of Google Cloud:\n\n* Vertex AI\n* Cloud Storage\n\nLearn about [Vertex AI\npricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage\npricing](https://cloud.google.com/storage/pricing), and use the [Pricing\nCalculator](https://cloud.google.com/products/calculator/)\nto generate a cost estimate based on your projected usage.", "_____no_output_____" ], [ "### Set up your local development environment\n\nIf you are using Colab or Google Cloud Notebook, your environment already meets all the requirements to run this notebook. 
You can skip this step.\n\nOtherwise, make sure your environment meets this notebook's requirements. You need the following:\n\n- The Google Cloud SDK\n- Git\n- Python 3\n- virtualenv\n- Jupyter notebook running in a virtual environment with Python 3\n\nThe Google Cloud guide to [Setting up a Python development environment](https://cloud.google.com/python/setup) and the [Jupyter installation guide](https://jupyter.org/install) provide detailed instructions for meeting these requirements. The following steps provide a condensed set of instructions:\n\n1. [Install and initialize the SDK](https://cloud.google.com/sdk/docs/).\n\n2. [Install Python 3](https://cloud.google.com/python/setup#installing_python).\n\n3. [Install virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv) and create a virtual environment that uses Python 3.\n\n4. Activate that environment and run `pip3 install Jupyter` in a terminal shell to install Jupyter.\n\n5. Run `jupyter notebook` on the command line in a terminal shell to launch Jupyter.\n\n6. Open this notebook in the Jupyter Notebook Dashboard.\n", "_____no_output_____" ], [ "## Installation\n\nInstall the latest version of Vertex SDK for Python.", "_____no_output_____" ] ], [ [ "import os\n\n# Google Cloud Notebook\nif os.path.exists(\"/opt/deeplearning/metadata/env_version\"):\n    USER_FLAG = \"--user\"\nelse:\n    USER_FLAG = \"\"\n\n! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG", "_____no_output_____" ] ], [ [ "Install the latest GA version of the *google-cloud-storage* library as well.", "_____no_output_____" ] ], [ [ "! pip3 install -U google-cloud-storage $USER_FLAG", "_____no_output_____" ] ], [ [ "Install the latest GA version of the *google-cloud-pipeline-components* library as well.", "_____no_output_____" ] ], [ [ "! pip3 install $USER_FLAG kfp google-cloud-pipeline-components --upgrade", "_____no_output_____" ] ], [ [ "### Restart the kernel\n\nOnce you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.", "_____no_output_____" ] ], [ [ "import os\n\nif not os.getenv(\"IS_TESTING\"):\n    # Automatically restart kernel after installs\n    import IPython\n\n    app = IPython.Application.instance()\n    app.kernel.do_shutdown(True)", "_____no_output_____" ] ], [ [ "Check the versions of the packages you installed. The KFP SDK version should be >=1.6.", "_____no_output_____" ] ], [ [ "! python3 -c \"import kfp; print('KFP SDK version: {}'.format(kfp.__version__))\"\n! python3 -c \"import google_cloud_pipeline_components; print('google_cloud_pipeline_components version: {}'.format(google_cloud_pipeline_components.__version__))\"", "_____no_output_____" ] ], [ [ "## Before you begin\n\n### GPU runtime\n\nThis tutorial does not require a GPU runtime.\n\n### Set up your Google Cloud project\n\n**The following steps are required, regardless of your notebook environment.**\n\n1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.\n\n2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)\n\n3. [Enable the Vertex AI APIs, Compute Engine APIs, and Cloud Storage.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component,storage-component.googleapis.com)\n\n4. 
[The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook.\n\n5. Enter your project ID in the cell below. Then run the cell to make sure the\nCloud SDK uses the right project for all the commands in this notebook.\n\n**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$`.", "_____no_output_____" ] ], [ [ "PROJECT_ID = \"[your-project-id]\" # @param {type:\"string\"}", "_____no_output_____" ], [ "if PROJECT_ID == \"\" or PROJECT_ID is None or PROJECT_ID == \"[your-project-id]\":\n # Get your GCP project id from gcloud\n shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null\n PROJECT_ID = shell_output[0]\n print(\"Project ID:\", PROJECT_ID)", "_____no_output_____" ], [ "! gcloud config set project $PROJECT_ID", "_____no_output_____" ] ], [ [ "#### Region\n\nYou can also change the `REGION` variable, which is used for operations\nthroughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.\n\n- Americas: `us-central1`\n- Europe: `europe-west4`\n- Asia Pacific: `asia-east1`\n\nYou may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.\n\nLearn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations)", "_____no_output_____" ] ], [ [ "REGION = \"us-central1\" # @param {type: \"string\"}", "_____no_output_____" ] ], [ [ "#### Timestamp\n\nIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.", "_____no_output_____" ] ], [ [ "from datetime import datetime\n\nTIMESTAMP = datetime.now().strftime(\"%Y%m%d%H%M%S\")", "_____no_output_____" ] ], [ [ "### Authenticate your Google Cloud account\n\n**If you are using Google Cloud Notebook**, your environment is already authenticated. Skip this step.\n\n**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.\n\n**Otherwise**, follow these steps:\n\nIn the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.\n\n**Click Create service account**.\n\nIn the **Service account name** field, enter a name, and click **Create**.\n\nIn the **Grant this service account access to project** section, click the Role drop-down list. Type \"Vertex\" into the filter box, and select **Vertex Administrator**. Type \"Storage Object Admin\" into the filter box, and select **Storage Object Admin**.\n\nClick Create. A JSON file that contains your key downloads to your local environment.\n\nEnter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.", "_____no_output_____" ] ], [ [ "# If you are running this notebook in Colab, run this cell and follow the\n# instructions to authenticate your GCP account. 
This provides access to your\n# Cloud Storage bucket and lets you submit training jobs and prediction\n# requests.\n\nimport os\nimport sys\n\n# If on Google Cloud Notebook, then don't execute this code\nif not os.path.exists(\"/opt/deeplearning/metadata/env_version\"):\n if \"google.colab\" in sys.modules:\n from google.colab import auth as google_auth\n\n google_auth.authenticate_user()\n\n # If you are running this notebook locally, replace the string below with the\n # path to your service account key and run this cell to authenticate your GCP\n # account.\n elif not os.getenv(\"IS_TESTING\"):\n %env GOOGLE_APPLICATION_CREDENTIALS ''", "_____no_output_____" ] ], [ [ "### Create a Cloud Storage bucket\n\n**The following steps are required, regardless of your notebook environment.**\n\nWhen you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.\n\nSet the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.", "_____no_output_____" ] ], [ [ "BUCKET_NAME = \"gs://[your-bucket-name]\" # @param {type:\"string\"}", "_____no_output_____" ], [ "if BUCKET_NAME == \"\" or BUCKET_NAME is None or BUCKET_NAME == \"gs://[your-bucket-name]\":\n BUCKET_NAME = \"gs://\" + PROJECT_ID + \"aip-\" + TIMESTAMP", "_____no_output_____" ] ], [ [ "**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.", "_____no_output_____" ] ], [ [ "! gsutil mb -l $REGION $BUCKET_NAME", "_____no_output_____" ] ], [ [ "Finally, validate access to your Cloud Storage bucket by examining its contents:", "_____no_output_____" ] ], [ [ "! gsutil ls -al $BUCKET_NAME", "_____no_output_____" ] ], [ [ "#### Service Account\n\n**If you don't know your service account**, try to get your service account using `gcloud` command by executing the second cell below.", "_____no_output_____" ] ], [ [ "SERVICE_ACCOUNT = \"[your-service-account]\" # @param {type:\"string\"}", "_____no_output_____" ], [ "if (\n SERVICE_ACCOUNT == \"\"\n or SERVICE_ACCOUNT is None\n or SERVICE_ACCOUNT == \"[your-service-account]\"\n):\n # Get your GCP project id from gcloud\n shell_output = !gcloud auth list 2>/dev/null\n SERVICE_ACCOUNT = shell_output[2].strip()\n print(\"Service Account:\", SERVICE_ACCOUNT)", "_____no_output_____" ] ], [ [ "#### Set service account access for Vertex Pipelines\n\nRun the following commands to grant your service account access to read and write pipeline artifacts in the bucket that you created in the previous step -- you only need to run these once per service account.", "_____no_output_____" ] ], [ [ "! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectCreator $BUCKET_NAME\n\n! 
gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectViewer $BUCKET_NAME", "_____no_output_____" ] ], [ [ "### Set up variables\n\nNext, set up some variables used throughout the tutorial.\n### Import libraries and define constants", "_____no_output_____" ] ], [ [ "import google.cloud.aiplatform as aip", "_____no_output_____" ] ], [ [ "#### Vertex AI constants\n\nSetup up the following constants for Vertex AI:\n\n- `API_ENDPOINT`: The Vertex AI API service endpoint for `Dataset`, `Model`, `Job`, `Pipeline` and `Endpoint` services.", "_____no_output_____" ] ], [ [ "# API service endpoint\nAPI_ENDPOINT = \"{}-aiplatform.googleapis.com\".format(REGION)", "_____no_output_____" ] ], [ [ "#### Vertex Pipelines constants\n\nSetup up the following constants for Vertex Pipelines:", "_____no_output_____" ] ], [ [ "PIPELINE_ROOT = \"{}/pipeline_root/happydb\".format(BUCKET_NAME)", "_____no_output_____" ] ], [ [ "Additional imports.", "_____no_output_____" ] ], [ [ "import kfp\nfrom google_cloud_pipeline_components import aiplatform as gcc_aip", "_____no_output_____" ] ], [ [ "## Initialize Vertex SDK for Python\n\nInitialize the Vertex SDK for Python for your project and corresponding bucket.", "_____no_output_____" ] ], [ [ "aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)", "_____no_output_____" ] ], [ [ "## Define AutoML text classification model pipeline that uses components from `google_cloud_pipeline_components`\n\nNext, you define the pipeline.\n\nCreate and deploy an AutoML text classification `Model` resource using a `Dataset` resource.", "_____no_output_____" ] ], [ [ "IMPORT_FILE = \"gs://cloud-ml-data/NL-classification/happiness.csv\"\n\n\[email protected](name=\"automl-text-classification\" + TIMESTAMP)\ndef pipeline(project: str = PROJECT_ID, import_file: str = IMPORT_FILE):\n\n dataset_create_task = gcc_aip.TextDatasetCreateOp(\n display_name=\"train-automl-happydb\",\n gcs_source=import_file,\n import_schema_uri=aip.schema.dataset.ioformat.text.multi_label_classification,\n project=project,\n )\n\n training_run_task = gcc_aip.AutoMLTextTrainingJobRunOp(\n dataset=dataset_create_task.outputs[\"dataset\"],\n display_name=\"train-automl-happydb\",\n prediction_type=\"classification\",\n multi_label=True,\n training_fraction_split=0.6,\n validation_fraction_split=0.2,\n test_fraction_split=0.2,\n model_display_name=\"train-automl-happydb\",\n project=project,\n )\n\n model_deploy_op = gcc_aip.ModelDeployOp( # noqa: F841\n model=training_run_task.outputs[\"model\"], project=project\n )", "_____no_output_____" ] ], [ [ "## Compile the pipeline\n\nNext, compile the pipeline.", "_____no_output_____" ] ], [ [ "from kfp.v2 import compiler # noqa: F811\n\ncompiler.Compiler().compile(\n pipeline_func=pipeline,\n package_path=\"text classification_pipeline.json\".replace(\" \", \"_\"),\n)", "_____no_output_____" ] ], [ [ "## Run the pipeline\n\nNext, run the pipeline.", "_____no_output_____" ] ], [ [ "DISPLAY_NAME = \"happydb_\" + TIMESTAMP\n\njob = aip.PipelineJob(\n display_name=DISPLAY_NAME,\n template_path=\"text classification_pipeline.json\".replace(\" \", \"_\"),\n pipeline_root=PIPELINE_ROOT,\n)\n\njob.run()", "_____no_output_____" ] ], [ [ "Click on the generated link to see your run in the Cloud Console.\n\n<!-- It should look something like this as it is running:\n\n<a href=\"https://storage.googleapis.com/amy-jo/images/mp/automl_tabular_classif.png\" target=\"_blank\"><img src=\"https://storage.googleapis.com/amy-jo/images/mp/automl_tabular_classif.png\" 
width=\"40%\"/></a> -->\n\nIn the UI, many of the pipeline DAG nodes will expand or collapse when you click on them. Here is a partially-expanded view of the DAG (click image to see larger version).\n\n<a href=\"https://storage.googleapis.com/amy-jo/images/mp/automl_text_classif.png\" target=\"_blank\"><img src=\"https://storage.googleapis.com/amy-jo/images/mp/automl_text_classif.png\" width=\"40%\"/></a>", "_____no_output_____" ], [ "# Cleaning up\n\nTo clean up all Google Cloud resources used in this project, you can [delete the Google Cloud\nproject](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.\n\nOtherwise, you can delete the individual resources you created in this tutorial -- *Note:* this is auto-generated and not all resources may be applicable for this tutorial:\n\n- Dataset\n- Pipeline\n- Model\n- Endpoint\n- Batch Job\n- Custom Job\n- Hyperparameter Tuning Job\n- Cloud Storage Bucket", "_____no_output_____" ] ], [ [ "delete_dataset = True\ndelete_pipeline = True\ndelete_model = True\ndelete_endpoint = True\ndelete_batchjob = True\ndelete_customjob = True\ndelete_hptjob = True\ndelete_bucket = True\n\ntry:\n if delete_model and \"DISPLAY_NAME\" in globals():\n models = aip.Model.list(\n filter=f\"display_name={DISPLAY_NAME}\", order_by=\"create_time\"\n )\n model = models[0]\n aip.Model.delete(model)\n print(\"Deleted model:\", model)\nexcept Exception as e:\n print(e)\n\ntry:\n if delete_endpoint and \"DISPLAY_NAME\" in globals():\n endpoints = aip.Endpoint.list(\n filter=f\"display_name={DISPLAY_NAME}_endpoint\", order_by=\"create_time\"\n )\n endpoint = endpoints[0]\n endpoint.undeploy_all()\n aip.Endpoint.delete(endpoint.resource_name)\n print(\"Deleted endpoint:\", endpoint)\nexcept Exception as e:\n print(e)\n\nif delete_dataset and \"DISPLAY_NAME\" in globals():\n if \"text\" == \"tabular\":\n try:\n datasets = aip.TabularDataset.list(\n filter=f\"display_name={DISPLAY_NAME}\", order_by=\"create_time\"\n )\n dataset = datasets[0]\n aip.TabularDataset.delete(dataset.resource_name)\n print(\"Deleted dataset:\", dataset)\n except Exception as e:\n print(e)\n\n if \"text\" == \"image\":\n try:\n datasets = aip.ImageDataset.list(\n filter=f\"display_name={DISPLAY_NAME}\", order_by=\"create_time\"\n )\n dataset = datasets[0]\n aip.ImageDataset.delete(dataset.resource_name)\n print(\"Deleted dataset:\", dataset)\n except Exception as e:\n print(e)\n\n if \"text\" == \"text\":\n try:\n datasets = aip.TextDataset.list(\n filter=f\"display_name={DISPLAY_NAME}\", order_by=\"create_time\"\n )\n dataset = datasets[0]\n aip.TextDataset.delete(dataset.resource_name)\n print(\"Deleted dataset:\", dataset)\n except Exception as e:\n print(e)\n\n if \"text\" == \"video\":\n try:\n datasets = aip.VideoDataset.list(\n filter=f\"display_name={DISPLAY_NAME}\", order_by=\"create_time\"\n )\n dataset = datasets[0]\n aip.VideoDataset.delete(dataset.resource_name)\n print(\"Deleted dataset:\", dataset)\n except Exception as e:\n print(e)\n\ntry:\n if delete_pipeline and \"DISPLAY_NAME\" in globals():\n pipelines = aip.PipelineJob.list(\n filter=f\"display_name={DISPLAY_NAME}\", order_by=\"create_time\"\n )\n pipeline = pipelines[0]\n aip.PipelineJob.delete(pipeline.resource_name)\n print(\"Deleted pipeline:\", pipeline)\nexcept Exception as e:\n print(e)\n\nif delete_bucket and \"BUCKET_NAME\" in globals():\n ! gsutil rm -r $BUCKET_NAME", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
cb99268be31000f08e0b2388bb95de571f382c73
41,134
ipynb
Jupyter Notebook
LSTM_model_for__RNN.ipynb
souvikg123/Python-NLTK-Sentiment-Analysis.
bbfd01f59e0e3e5c23ce8125fe70de85bc637c79
[ "MIT" ]
null
null
null
LSTM_model_for__RNN.ipynb
souvikg123/Python-NLTK-Sentiment-Analysis.
bbfd01f59e0e3e5c23ce8125fe70de85bc637c79
[ "MIT" ]
null
null
null
LSTM_model_for__RNN.ipynb
souvikg123/Python-NLTK-Sentiment-Analysis.
bbfd01f59e0e3e5c23ce8125fe70de85bc637c79
[ "MIT" ]
null
null
null
108.820106
28,998
0.826786
[ [ [ "<a href=\"https://colab.research.google.com/github/souvikg123/Python-NLTK-Sentiment-Analysis./blob/master/LSTM_model_for__RNN.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "#LSTM\nfrom pandas import DataFrame\nfrom pandas import Series\nfrom pandas import concat\nfrom pandas import read_csv\nfrom pandas import datetime\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.preprocessing import MinMaxScaler\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom math import sqrt\nfrom matplotlib import pyplot\nimport numpy", "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:6: FutureWarning: The pandas.datetime class is deprecated and will be removed from pandas in a future version. Import from datetime module instead.\n \n" ], [ "def parser(x):\n\treturn datetime.strptime('190'+x, '%Y-%m')", "_____no_output_____" ], [ "# frame a sequence as a supervised learning problem\ndef timeseries_to_supervised(data, lag=1):\n\tdf = DataFrame(data)\n\tcolumns = [df.shift(i) for i in range(1, lag+1)]\n\tcolumns.append(df)\n\tdf = concat(columns, axis=1)\n\tdf.fillna(0, inplace=True)\n\treturn df", "_____no_output_____" ], [ "# create a differenced series\ndef difference(dataset, interval=1):\n\tdiff = list()\n\tfor i in range(interval, len(dataset)):\n\t\tvalue = dataset[i] - dataset[i - interval]\n\t\tdiff.append(value)\n\treturn Series(diff)", "_____no_output_____" ], [ "# invert differenced value\ndef inverse_difference(history, yhat, interval=1):\n\treturn yhat + history[-interval]", "_____no_output_____" ], [ "# scale train and test data to [-1, 1]\ndef scale(train, test):\n\t# fit scaler\n\tscaler = MinMaxScaler(feature_range=(-1, 1))\n\tscaler = scaler.fit(train)\n\t# transform train\n\ttrain = train.reshape(train.shape[0], train.shape[1])\n\ttrain_scaled = scaler.transform(train)\n\t# transform test\n\ttest = test.reshape(test.shape[0], test.shape[1])\n\ttest_scaled = scaler.transform(test)\n\treturn scaler, train_scaled, test_scaled", "_____no_output_____" ], [ "def invert_scale(scaler, X, value):\n\tnew_row = [x for x in X] + [value]\n\tarray = numpy.array(new_row)\n\tarray = array.reshape(1, len(array))\n\tinverted = scaler.inverse_transform(array)\n\treturn inverted[0, -1]\n", "_____no_output_____" ], [ "# fit an LSTM network to training data\ndef fit_lstm(train, batch_size, nb_epoch, neurons):\n\tX, y = train[:, 0:-1], train[:, -1]\n\tX = X.reshape(X.shape[0], 1, X.shape[1])\n\tmodel = Sequential()\n\tmodel.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))\n\tmodel.add(Dense(1))\n\tmodel.compile(loss='mean_squared_error', optimizer='adam')\n\tfor i in range(nb_epoch):\n\t\tmodel.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)\n\t\tmodel.reset_states()\n\treturn model", "_____no_output_____" ], [ "# make a one-step forecast\ndef forecast_lstm(model, batch_size, X):\n\tX = X.reshape(1, 1, len(X))\n\tyhat = model.predict(X, batch_size=batch_size)\n\treturn yhat[0,0]", "_____no_output_____" ], [ "# make a one-step forecast\ndef forecast_lstm(model, batch_size, X):\n\tX = X.reshape(1, 1, len(X))\n\tyhat = model.predict(X, batch_size=batch_size)\n\treturn yhat[0,0]\n \n# load dataset\nseries = read_csv('shampoo.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)\n \n# transform data to be stationary\nraw_values = 
series.values\ndiff_values = difference(raw_values, 1)\n \n# transform data to be supervised learning\nsupervised = timeseries_to_supervised(diff_values, 1)\nsupervised_values = supervised.values\n \n# split data into train and test-sets\ntrain, test = supervised_values[0:-12], supervised_values[-12:]\n \n# transform the scale of the data\nscaler, train_scaled, test_scaled = scale(train, test)", "_____no_output_____" ], [ "# fit the model\nlstm_model = fit_lstm(train_scaled, 1, 3000, 4)\n# forecast the entire training dataset to build up state for forecasting\ntrain_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)\nlstm_model.predict(train_reshaped, batch_size=1)", "_____no_output_____" ], [ "predictions = list()\nfor i in range(len(test_scaled)):\n\t# make one-step forecast\n\tX, y = test_scaled[i, 0:-1], test_scaled[i, -1]\n\tyhat = forecast_lstm(lstm_model, 1, X)\n\t# invert scaling\n\tyhat = invert_scale(scaler, X, yhat)\n\t# invert differencing\n\tyhat = inverse_difference(raw_values, yhat, len(test_scaled)+1-i)\n\t# store forecast\n\tpredictions.append(yhat)\n\texpected = raw_values[len(train) + i + 1]\n\tprint('Month=%d, Predicted=%f, Expected=%f' % (i+1, yhat, expected))", "Month=1, Predicted=349.528868, Expected=339.700000\nMonth=2, Predicted=496.462415, Expected=440.400000\nMonth=3, Predicted=391.776268, Expected=315.900000\nMonth=4, Predicted=384.610767, Expected=439.300000\nMonth=5, Predicted=469.539070, Expected=401.300000\nMonth=6, Predicted=595.388576, Expected=437.400000\nMonth=7, Predicted=443.209064, Expected=575.500000\nMonth=8, Predicted=724.364852, Expected=407.600000\nMonth=9, Predicted=475.786641, Expected=682.000000\nMonth=10, Predicted=687.298865, Expected=475.300000\nMonth=11, Predicted=494.490521, Expected=581.300000\nMonth=12, Predicted=713.132503, Expected=646.900000\n" ], [ "# report performance\nrmse = sqrt(mean_squared_error(raw_values[-12:], predictions))\nprint('Test RMSE: %.3f' % rmse)\n# line plot of observed vs predicted\npyplot.plot(raw_values[-12:])\npyplot.plot(predictions)\npyplot.show()", "Test RMSE: 146.866\n" ] ] ]
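As a quick aside to the cells above, a minimal sketch (with made-up toy values, separate from the notebook's shampoo data) of what the `difference` -> `timeseries_to_supervised` -> `inverse_difference` round trip computes:

```python
# Toy walkthrough of the preprocessing round trip used above (illustrative only).
from pandas import DataFrame, Series, concat

raw = Series([10.0, 12.0, 15.0, 14.0, 18.0])  # made-up "sales" values

# difference(raw, 1): first-order differences make the series stationary
diff = raw.diff(1).dropna()                   # -> [2.0, 3.0, -1.0, 4.0]

# timeseries_to_supervised(diff, 1): pair each value with its lag-1 value;
# column 0 becomes the input X (previous step), column 1 the target y
df = DataFrame(diff.values)
supervised = concat([df.shift(1), df], axis=1)
supervised.fillna(0, inplace=True)
print(supervised)

# inverse_difference(history, yhat, 1): a prediction made on the differenced
# scale is mapped back by adding the last observed raw value
yhat_diff = 2.5                               # pretend LSTM output
print(yhat_diff + raw.iloc[-1])               # forecast on the original scale
```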
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
cb993e1bdc9b07f99dbba37da95f6a681a6d51cd
13,133
ipynb
Jupyter Notebook
ray-serve/03-Ray-Serve-Example.ipynb
dmatrix/academy
dbcac28e08caf83a9d7937f3bcb2747d6f6e67ef
[ "Apache-2.0" ]
null
null
null
ray-serve/03-Ray-Serve-Example.ipynb
dmatrix/academy
dbcac28e08caf83a9d7937f3bcb2747d6f6e67ef
[ "Apache-2.0" ]
null
null
null
ray-serve/03-Ray-Serve-Example.ipynb
dmatrix/academy
dbcac28e08caf83a9d7937f3bcb2747d6f6e67ef
[ "Apache-2.0" ]
null
null
null
31.269048
275
0.575497
[ [ [ "# Ray Serve - Model Serving\n\n© 2019-2022, Anyscale. All Rights Reserved\n\n![Anyscale Academy](../images/AnyscaleAcademyLogo.png)", "_____no_output_____" ], [ "Now we'll explore a short example for Ray Serve. This example is from the Ray Serve [scikit-learn example.](https://docs.ray.io/en/latest/serve/tutorials/sklearn.html)\n\nSee also the Serve documentation's [mini-tutorials](https://docs.ray.io/en/latest/serve/tutorials/index.html) for using Serve with various frameworks.\n\n<img src=\"../images/ray_serve_deployment_workflow.png\" width=\"90%\" height=\"50%\">", "_____no_output_____" ] ], [ [ "import ray\nfrom ray import serve\nimport os\nimport requests # for making web requests\nimport tempfile", "_____no_output_____" ], [ "serve.start()", "2022-03-16 16:34:31,024\tINFO services.py:1412 -- View the Ray dashboard at \u001b[1m\u001b[32mhttp://127.0.0.1:8270\u001b[39m\u001b[22m\n\u001b[2m\u001b[36m(ServeController pid=63313)\u001b[0m 2022-03-16 16:34:33,999\tINFO checkpoint_path.py:16 -- Using RayInternalKVStore for controller checkpoint and recovery.\n\u001b[2m\u001b[36m(ServeController pid=63313)\u001b[0m 2022-03-16 16:34:34,105\tINFO http_state.py:98 -- Starting HTTP proxy with name 'SERVE_CONTROLLER_ACTOR:RamarF:SERVE_PROXY_ACTOR-node:127.0.0.1-0' on node 'node:127.0.0.1-0' listening on '127.0.0.1:8000'\n2022-03-16 16:34:34,477\tINFO api.py:521 -- Started Serve instance in namespace 'serve'.\n" ] ], [ [ "## Get a Model to Serve \n\nWe'll begin by training a classifier with the Iris data we used before, this time using [scikit-learn](https://scikit-learn.org/stable/). The details aren't too important for our purposes, except for the fact we'll save the trained model to disk for subsequent serving.", "_____no_output_____" ] ], [ [ "import pickle\nimport json\nimport numpy as np", "_____no_output_____" ], [ "import sklearn\nfrom sklearn.datasets import load_iris\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.metrics import mean_squared_error", "\u001b[2m\u001b[36m(HTTPProxyActor pid=63308)\u001b[0m INFO: Started server process [63308]\n" ], [ "# Load data\niris_dataset = load_iris()\ndata, target, target_names = iris_dataset[\"data\"], iris_dataset[\n \"target\"], iris_dataset[\"target_names\"]", "_____no_output_____" ], [ "# Instantiate model\nmodel = GradientBoostingClassifier()", "_____no_output_____" ], [ "# Training and validation split\ndata, target = sklearn.utils.shuffle(data, target)\ntrain_x, train_y = data[:100], target[:100]\nval_x, val_y = data[100:], target[100:]", "_____no_output_____" ], [ "# Train and evaluate models\nmodel.fit(train_x, train_y)\nprint(\"MSE:\", mean_squared_error(model.predict(val_x), val_y))", "MSE: 0.04\n" ], [ "# Save the model and label to file\nMODEL_PATH = os.path.join(tempfile.gettempdir(),\n \"iris_model_logistic_regression.pkl\")\nLABEL_PATH = os.path.join(tempfile.gettempdir(), \"iris_labels.json\")", "_____no_output_____" ], [ "# Save the model and label to file. (This could also be S3 or other \"global\" place)\n\nwith open(MODEL_PATH, \"wb\") as f:\n pickle.dump(model, f)\nwith open(LABEL_PATH, \"w\") as f:\n json.dump(target_names.tolist(), f)", "_____no_output_____" ] ], [ [ "## Create a Model and Serve It\n\nNext, we define a servable model by instantiating a class and defining the `__call__` method that Ray Serve will use. 
", "_____no_output_____" ] ], [ [ "@serve.deployment(route_prefix=\"/regressor\")\nclass BoostingModel:\n def __init__(self):\n with open(MODEL_PATH, \"rb\") as f:\n self.model = pickle.load(f)\n with open(LABEL_PATH) as f:\n self.label_list = json.load(f)\n\n # async allows us to have this call concurrently \n async def __call__(self, starlette_request):\n payload = await starlette_request.json()\n print(\"Worker: received starlette request with data\", payload)\n\n input_vector = [\n payload[\"sepal length\"],\n payload[\"sepal width\"],\n payload[\"petal length\"],\n payload[\"petal width\"],\n ]\n prediction = self.model.predict([input_vector])[0]\n human_name = self.label_list[prediction]\n return {\"result\": human_name}", "_____no_output_____" ] ], [ [ "## Deploy the model", "_____no_output_____" ] ], [ [ "BoostingModel.deploy()", "2022-03-16 16:34:46,505\tINFO api.py:262 -- Updating deployment 'BoostingModel'. component=serve deployment=BoostingModel\n\u001b[2m\u001b[36m(ServeController pid=63313)\u001b[0m 2022-03-16 16:34:46,532\tINFO deployment_state.py:920 -- Adding 1 replicas to deployment 'BoostingModel'. component=serve deployment=BoostingModel\n2022-03-16 16:34:47,284\tINFO api.py:274 -- Deployment 'BoostingModel' is ready at `http://127.0.0.1:8000/regressor`. component=serve deployment=BoostingModel\n" ] ], [ [ "## Score the model\nInternally, Serve stores the model as a Ray actor and routes traffic to it as the endpoint is queried, in this case over HTTP. \n\nNow let’s query the endpoint to see results.", "_____no_output_____" ] ], [ [ "sample_request_input = {\n \"sepal length\": 1.2,\n \"sepal width\": 1.0,\n \"petal length\": 1.1,\n \"petal width\": 0.9,\n}", "_____no_output_____" ] ], [ [ "We can now send HTTP requests to our route `route_prefix=/regressor` at the default port 8000", "_____no_output_____" ] ], [ [ "response = requests.get(\n \"http://localhost:8000/regressor\", json=sample_request_input)\nprint(response.text)", "{\n \"result\": \"versicolor\"\n}\n\u001b[2m\u001b[36m(BoostingModel pid=63311)\u001b[0m Worker: received starlette request with data {'sepal length': 1.2, 'sepal width': 1.0, 'petal length': 1.1, 'petal width': 0.9}\n" ], [ "for i in range(10):\n response = requests.get(\"http://localhost:8000/regressor\", json=sample_request_input).json()\n print(response)", "{'result': 'versicolor'}\n{'result': 'versicolor'}\n{'result': 'versicolor'}\n{'result': 'versicolor'}\n{'result': 'versicolor'}\n{'result': 'versicolor'}\n{'result': 'versicolor'}\n{'result': 'versicolor'}\n{'result': 'versicolor'}\n{'result': 'versicolor'}\n\u001b[2m\u001b[36m(BoostingModel pid=63311)\u001b[0m Worker: received starlette request with data {'sepal length': 1.2, 'sepal width': 1.0, 'petal length': 1.1, 'petal width': 0.9}\n\u001b[2m\u001b[36m(BoostingModel pid=63311)\u001b[0m Worker: received starlette request with data {'sepal length': 1.2, 'sepal width': 1.0, 'petal length': 1.1, 'petal width': 0.9}\n\u001b[2m\u001b[36m(BoostingModel pid=63311)\u001b[0m Worker: received starlette request with data {'sepal length': 1.2, 'sepal width': 1.0, 'petal length': 1.1, 'petal width': 0.9}\n\u001b[2m\u001b[36m(BoostingModel pid=63311)\u001b[0m Worker: received starlette request with data {'sepal length': 1.2, 'sepal width': 1.0, 'petal length': 1.1, 'petal width': 0.9}\n\u001b[2m\u001b[36m(BoostingModel pid=63311)\u001b[0m Worker: received starlette request with data {'sepal length': 1.2, 'sepal width': 1.0, 'petal length': 1.1, 'petal width': 
0.9}\n\u001b[2m\u001b[36m(BoostingModel pid=63311)\u001b[0m Worker: received starlette request with data {'sepal length': 1.2, 'sepal width': 1.0, 'petal length': 1.1, 'petal width': 0.9}\n\u001b[2m\u001b[36m(BoostingModel pid=63311)\u001b[0m Worker: received starlette request with data {'sepal length': 1.2, 'sepal width': 1.0, 'petal length': 1.1, 'petal width': 0.9}\n\u001b[2m\u001b[36m(BoostingModel pid=63311)\u001b[0m Worker: received starlette request with data {'sepal length': 1.2, 'sepal width': 1.0, 'petal length': 1.1, 'petal width': 0.9}\n\u001b[2m\u001b[36m(BoostingModel pid=63311)\u001b[0m Worker: received starlette request with data {'sepal length': 1.2, 'sepal width': 1.0, 'petal length': 1.1, 'petal width': 0.9}\n\u001b[2m\u001b[36m(BoostingModel pid=63311)\u001b[0m Worker: received starlette request with data {'sepal length': 1.2, 'sepal width': 1.0, 'petal length': 1.1, 'petal width': 0.9}\n" ] ], [ [ "## Cleanup", "_____no_output_____" ] ], [ [ "deployments = serve.list_deployments()\nprint(f'deployments: {deployments}')", "deployments: {'BoostingModel': Deployment(name=BoostingModel,version=None,route_prefix=/regressor)}\n" ], [ "serve.shutdown() ", "\u001b[2m\u001b[36m(ServeController pid=63313)\u001b[0m 2022-03-16 16:35:06,116\tINFO deployment_state.py:940 -- Removing 1 replicas from deployment 'BoostingModel'. component=serve deployment=BoostingModel\n" ] ], [ [ "## Exercise - Try Adding more examples\n\nHere are some things you can try:\n\n1. Send more input requests.\n2. Add a small model of your own", "_____no_output_____" ] ] ]
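For the exercise's second suggestion, a minimal sketch of adding another small model — hypothetical and not from the original notebook: the `/petal-rule` route, the class name, and the 2.5 threshold are all made up, and the snippet assumes the Serve instance is still running (i.e. it is executed before the `serve.shutdown()` call in the Cleanup section):

```python
# Hypothetical extra deployment: a hand-written rule standing in for a model.
import requests
from ray import serve

@serve.deployment(route_prefix="/petal-rule")  # made-up route for illustration
class PetalRuleModel:
    async def __call__(self, starlette_request):
        payload = await starlette_request.json()
        # Toy rule: short petals look like setosa, everything else is "other".
        label = "setosa" if payload["petal length"] < 2.5 else "other"
        return {"result": label}

PetalRuleModel.deploy()

response = requests.get(
    "http://localhost:8000/petal-rule", json={"petal length": 1.1}
)
print(response.text)  # expected: {"result": "setosa"}
```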
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
cb9941fc5e6d26c6ef9d86c8690f36d63819227b
6,785
ipynb
Jupyter Notebook
gan_flavours_keras.ipynb
asifajunaidahmad/WGAN
a28b8984074f38506147205fae87611dc3ba2afc
[ "MIT" ]
null
null
null
gan_flavours_keras.ipynb
asifajunaidahmad/WGAN
a28b8984074f38506147205fae87611dc3ba2afc
[ "MIT" ]
null
null
null
gan_flavours_keras.ipynb
asifajunaidahmad/WGAN
a28b8984074f38506147205fae87611dc3ba2afc
[ "MIT" ]
null
null
null
31.267281
186
0.500811
[ [ [ "# GAN Flavours\n\nThis jupyter notebook contains a training script for the https://github.com/beresandras/gan-flavours-keras repository, and is intended to be used in a Google Colab environment.", "_____no_output_____" ] ], [ [ "# uncomment on first run\n# !pip install tensorflow_addons\n# !git clone https://github.com/beresandras/gan-flavours-keras", "_____no_output_____" ], [ "import sys\nimport tensorflow as tf\n\nfrom tensorflow import keras\n\nsys.path.insert(0,'/content/WGAN')\n\nfrom dataset import prepare_dataset\nfrom architecture import get_generator, get_discriminator\nfrom augmentation import AdaptiveAugmenter\nfrom losses import (\n MiniMaxGAN,\n NonSaturatingGAN,\n LeastSquaresGAN,\n HingeGAN,\n WassersteinGAN,\n RelativisticGAN,\n RelativisticAverageGAN,\n)\nfrom utils import generate_images_with, plot_history", "_____no_output_____" ], [ "# hyperparameters\n\n# data\n# some datasets might be unavailable for download at times\ndataset_name = \"playable\" # \"oxford_flowers102\", \"celeb_a\", \"cifar10\"\nimage_size = 64 # 64, 64, 32\nnum_epochs = 400 # 500, 25, 100\nkid_image_size = 75 # resolution of KID measurement, default 299\nplot_interval = 10 # 10, 1, 2\n\n# optimization\nbatch_size = 128\none_sided_label_smoothing = 0.0 # can be 0.1\nema = 0.99\ngenerator_lr = 2e-4\ndiscriminator_lr = 2e-4\nbeta_1 = 0.5\nbeta_2 = 0.999\n\n# architecture\nnoise_size = 64\ndepth = 4 # number of up- and downsampling layers, change with resolution\nwidth = 128\ninitializer = \"glorot_uniform\"\nresidual = False\ntransposed = True # transposed convs vs upsampling + convs in generator\nleaky_relu_slope = 0.2\ndropout_rate = 0.4\nspectral_norm = False\n\n# adaptive discriminator augmentation\ntarget_accuracy = None # 0.85, set to None to disable\nintegration_steps = 1000\nmax_probability = 0.8 # maximal augmentation probability\n\nid = 0", "_____no_output_____" ], [ "# load dataset\ntrain_dataset = prepare_dataset(dataset_name, \"train\", image_size, batch_size)\nval_dataset = prepare_dataset(dataset_name, \"validation\", image_size, batch_size)", "_____no_output_____" ], [ "# create model\nmodel = NonSaturatingGAN(\n id=id,\n generator=get_generator(\n noise_size, depth, width, initializer, residual, transposed\n ),\n discriminator=get_discriminator(\n image_size,\n depth,\n width,\n initializer,\n residual,\n leaky_relu_slope,\n dropout_rate,\n spectral_norm,\n ),\n augmenter=AdaptiveAugmenter(\n target_accuracy=target_accuracy,\n integration_steps=integration_steps,\n max_probability=max_probability,\n input_shape=(image_size, image_size, 3),\n ),\n one_sided_label_smoothing=one_sided_label_smoothing,\n ema=ema,\n kid_image_size=kid_image_size,\n plot_interval=plot_interval,\n is_jupyter=True,\n)\n\nmodel.compile(\n generator_optimizer=keras.optimizers.Adam(\n learning_rate=generator_lr, beta_1=beta_1, beta_2=beta_2\n ),\n discriminator_optimizer=keras.optimizers.Adam(\n learning_rate=discriminator_lr, beta_1=beta_1, beta_2=beta_2\n ),\n)", "_____no_output_____" ], [ "# checkpointing\ncheckpoint_path = \"checkpoints/model_{}\".format(id)\ncheckpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_path,\n save_weights_only=True,\n monitor=\"val_kid\",\n mode=\"min\",\n save_best_only=True,\n)\n\n# run training\nhistory = model.fit(\n train_dataset,\n epochs=num_epochs,\n validation_data=val_dataset,\n callbacks=[\n keras.callbacks.LambdaCallback(on_epoch_end=model.plot_images),\n checkpoint_callback,\n ],\n)\n\n# load best 
model\nmodel.load_weights(checkpoint_path)\ngenerate_images_with(model, history, id, is_jupyter=True)\n\n# plot history\nplot_history(history, id, is_jupyter=True)", "_____no_output_____" ] ] ]
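Since the `losses` module imported at the top of this notebook exposes several objectives (`LeastSquaresGAN`, `HingeGAN`, `WassersteinGAN`, ...), a natural follow-up experiment is swapping the flavour. A hedged sketch: it assumes the other wrapper classes accept the same constructor arguments as `NonSaturatingGAN`, which should be verified against `losses.py` in the repository.

```python
# Assumes an identical constructor signature across flavours; verify in losses.py.
model = HingeGAN(
    id=id + 1,  # a separate id so checkpoints and plots don't collide
    generator=get_generator(
        noise_size, depth, width, initializer, residual, transposed
    ),
    discriminator=get_discriminator(
        image_size,
        depth,
        width,
        initializer,
        residual,
        leaky_relu_slope,
        dropout_rate,
        spectral_norm,
    ),
    augmenter=AdaptiveAugmenter(
        target_accuracy=target_accuracy,
        integration_steps=integration_steps,
        max_probability=max_probability,
        input_shape=(image_size, image_size, 3),
    ),
    one_sided_label_smoothing=one_sided_label_smoothing,
    ema=ema,
    kid_image_size=kid_image_size,
    plot_interval=plot_interval,
    is_jupyter=True,
)
# Training then proceeds exactly as in the fit/checkpoint cell above.
```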
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
cb9968d46fd77c9f33519f003189ae88b7c150f5
95,656
ipynb
Jupyter Notebook
examples/tutorials/colabs/Habitat_Interactive_Tasks.ipynb
dachii-azm/habitat-api
ac937fde9e14a47968eaf221857eb4e65f48383e
[ "MIT" ]
1
2020-10-10T05:28:45.000Z
2020-10-10T05:28:45.000Z
examples/tutorials/colabs/Habitat_Interactive_Tasks.ipynb
dachii-azm/habitat-api
ac937fde9e14a47968eaf221857eb4e65f48383e
[ "MIT" ]
null
null
null
examples/tutorials/colabs/Habitat_Interactive_Tasks.ipynb
dachii-azm/habitat-api
ac937fde9e14a47968eaf221857eb4e65f48383e
[ "MIT" ]
1
2021-06-04T06:38:51.000Z
2021-06-04T06:38:51.000Z
42.589492
421
0.539778
[ [ [ "# Furniture Rearrangement - How to setup a new interaction task in Habitat-Lab\n\nThis tutorial demonstrates how to setup a new task in Habitat that utilizes interaction capabilities in Habitat Simulator.\n\n![teaser](https://drive.google.com/uc?id=1pupGvb4dGefd0T_23GpeDkkcIocDHSL_)\n\n## Task Definition:\nThe working example in this demo will be the task of **Furniture Rearrangement** - The agent will be randomly spawned in an environment in which the furniture are initially displaced from their desired position. The agent is tasked with navigating the environment, picking furniture and putting them in the desired position. To keep the tutorial simple and easy to follow, we will rearrange just a single object.\n\nTo setup this task, we will build on top of existing API in Habitat-Simulator and Habitat-Lab. Here is a summary of all the steps involved in setting up this task:\n\n1. **Setup the Simulator**: Using existing functionalities of the Habitat-Sim, we can add or remove objects from the scene. We will use these methods to spawn the agent and the objects at some pre-defined initial configuration.\n2. **Create a New Dataset**: We will define a new dataset class to save / load a list of episodes for the agent to train and evaluate on.\n3. **Grab / Release Action**: We will add the \"grab/release\" action to the agent's action space to allow the agent to pickup / drop an object under a crosshair.\n4. **Extend the Simulator Class**: We will extend the Simulator Class to add support for new actions implemented in previous step and add other additional utility functions\n5. **Create a New Task**: Create a new task definition, implement new *sensors* and *metrics*.\n6. **Train an RL agent**: We will define rewards for this task and utilize it to train an RL agent using the PPO algorithm.\n\nLet's get started!", "_____no_output_____" ] ], [ [ "# @title Installation { display-mode: \"form\" }\n# @markdown (double click to show code).\n\n!curl -L https://raw.githubusercontent.com/facebookresearch/habitat-sim/master/examples/colab_utils/colab_install.sh | NIGHTLY=true bash -s\n%cd /content\n\n!gdown --id 1Pc-J6pZzXEd8RSeLM94t3iwO8q_RQ853\n!unzip -o /content/coda.zip -d /content/habitat-sim/data/scene_datasets\n\n# reload the cffi version\nimport sys\n\nif \"google.colab\" in sys.modules:\n import importlib\n\n import cffi\n\n importlib.reload(cffi)", "_____no_output_____" ], [ "# @title Path Setup and Imports { display-mode: \"form\" }\n# @markdown (double click to show code).\n\n%cd /content/habitat-lab\n\n## [setup]\nimport gzip\nimport json\nimport os\nimport sys\nfrom typing import Any, Dict, List, Optional, Type\n\nimport attr\nimport cv2\nimport git\nimport magnum as mn\nimport numpy as np\n\n%matplotlib inline\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\n\nimport habitat\nimport habitat_sim\nfrom habitat.config import Config\nfrom habitat.core.registry import registry\nfrom habitat_sim.utils import viz_utils as vut\n\nif \"google.colab\" in sys.modules:\n os.environ[\"IMAGEIO_FFMPEG_EXE\"] = \"/usr/bin/ffmpeg\"\n\nrepo = git.Repo(\".\", search_parent_directories=True)\ndir_path = repo.working_tree_dir\n%cd $dir_path\ndata_path = os.path.join(dir_path, \"data\")\noutput_directory = \"data/tutorials/output/\" # @param {type:\"string\"}\noutput_path = os.path.join(dir_path, output_directory)\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--no-display\", dest=\"display\", action=\"store_false\")\n 
parser.add_argument(\n \"--no-make-video\", dest=\"make_video\", action=\"store_false\"\n )\n parser.set_defaults(show_video=True, make_video=True)\n args, _ = parser.parse_known_args()\n show_video = args.display\n display = args.display\n make_video = args.make_video\nelse:\n show_video = False\n make_video = False\n display = False\n\nif make_video and not os.path.exists(output_path):\n os.makedirs(output_path)", "_____no_output_____" ], [ "# @title Util functions to visualize observations\n# @markdown - `make_video_cv2`: Renders a video from a list of observations\n# @markdown - `simulate`: Runs simulation for a given amount of time at 60Hz\n# @markdown - `simulate_and_make_vid` Runs simulation and creates video\n\n\ndef make_video_cv2(\n observations, cross_hair=None, prefix=\"\", open_vid=True, fps=60\n):\n sensor_keys = list(observations[0])\n videodims = observations[0][sensor_keys[0]].shape\n videodims = (videodims[1], videodims[0]) # flip to w,h order\n print(videodims)\n video_file = output_path + prefix + \".mp4\"\n print(\"Encoding the video: %s \" % video_file)\n writer = vut.get_fast_video_writer(video_file, fps=fps)\n for ob in observations:\n # If in RGB/RGBA format, remove the alpha channel\n rgb_im_1st_person = cv2.cvtColor(ob[\"rgb\"], cv2.COLOR_RGBA2RGB)\n if cross_hair is not None:\n rgb_im_1st_person[\n cross_hair[0] - 2 : cross_hair[0] + 2,\n cross_hair[1] - 2 : cross_hair[1] + 2,\n ] = [255, 0, 0]\n\n if rgb_im_1st_person.shape[:2] != videodims:\n rgb_im_1st_person = cv2.resize(\n rgb_im_1st_person, videodims, interpolation=cv2.INTER_AREA\n )\n # write the 1st person observation to video\n writer.append_data(rgb_im_1st_person)\n writer.close()\n\n if open_vid:\n print(\"Displaying video\")\n vut.display_video(video_file)\n\n\ndef simulate(sim, dt=1.0, get_frames=True):\n # simulate dt seconds at 60Hz to the nearest fixed timestep\n print(\"Simulating \" + str(dt) + \" world seconds.\")\n observations = []\n start_time = sim.get_world_time()\n while sim.get_world_time() < start_time + dt:\n sim.step_physics(1.0 / 60.0)\n if get_frames:\n observations.append(sim.get_sensor_observations())\n return observations\n\n\n# convenience wrapper for simulate and make_video_cv2\ndef simulate_and_make_vid(sim, crosshair, prefix, dt=1.0, open_vid=True):\n observations = simulate(sim, dt)\n make_video_cv2(observations, crosshair, prefix=prefix, open_vid=open_vid)\n\n\ndef display_sample(\n rgb_obs,\n semantic_obs=np.array([]),\n depth_obs=np.array([]),\n key_points=None, # noqa: B006\n):\n from habitat_sim.utils.common import d3_40_colors_rgb\n\n rgb_img = Image.fromarray(rgb_obs, mode=\"RGB\")\n\n arr = [rgb_img]\n titles = [\"rgb\"]\n if semantic_obs.size != 0:\n semantic_img = Image.new(\n \"P\", (semantic_obs.shape[1], semantic_obs.shape[0])\n )\n semantic_img.putpalette(d3_40_colors_rgb.flatten())\n semantic_img.putdata((semantic_obs.flatten() % 40).astype(np.uint8))\n semantic_img = semantic_img.convert(\"RGBA\")\n arr.append(semantic_img)\n titles.append(\"semantic\")\n\n if depth_obs.size != 0:\n depth_img = Image.fromarray(\n (depth_obs / 10 * 255).astype(np.uint8), mode=\"L\"\n )\n arr.append(depth_img)\n titles.append(\"depth\")\n\n plt.figure(figsize=(12, 8))\n for i, data in enumerate(arr):\n ax = plt.subplot(1, 3, i + 1)\n ax.axis(\"off\")\n ax.set_title(titles[i])\n # plot points on images\n if key_points is not None:\n for point in key_points:\n plt.plot(\n point[0], point[1], marker=\"o\", markersize=10, alpha=0.8\n )\n plt.imshow(data)\n\n 
plt.show(block=False)", "_____no_output_____" ] ], [ [ "## 1. Setup the Simulator\n\n---\n\n", "_____no_output_____" ] ], [ [ "# @title Setup simulator configuration\n# @markdown We'll start with setting up simulator with the following configurations\n# @markdown - The simulator will render both RGB, Depth observations of 256x256 resolution.\n# @markdown - The actions available will be `move_forward`, `turn_left`, `turn_right`.\n\n\ndef make_cfg(settings):\n sim_cfg = habitat_sim.SimulatorConfiguration()\n sim_cfg.gpu_device_id = 0\n sim_cfg.default_agent_id = settings[\"default_agent_id\"]\n sim_cfg.scene.id = settings[\"scene\"]\n sim_cfg.enable_physics = settings[\"enable_physics\"]\n sim_cfg.physics_config_file = settings[\"physics_config_file\"]\n\n # Note: all sensors must have the same resolution\n sensors = {\n \"rgb\": {\n \"sensor_type\": habitat_sim.SensorType.COLOR,\n \"resolution\": [settings[\"height\"], settings[\"width\"]],\n \"position\": [0.0, settings[\"sensor_height\"], 0.0],\n },\n \"depth\": {\n \"sensor_type\": habitat_sim.SensorType.DEPTH,\n \"resolution\": [settings[\"height\"], settings[\"width\"]],\n \"position\": [0.0, settings[\"sensor_height\"], 0.0],\n },\n }\n\n sensor_specs = []\n for sensor_uuid, sensor_params in sensors.items():\n if settings[sensor_uuid]:\n sensor_spec = habitat_sim.SensorSpec()\n sensor_spec.uuid = sensor_uuid\n sensor_spec.sensor_type = sensor_params[\"sensor_type\"]\n sensor_spec.resolution = sensor_params[\"resolution\"]\n sensor_spec.position = sensor_params[\"position\"]\n\n sensor_specs.append(sensor_spec)\n\n # Here you can specify the amount of displacement in a forward action and the turn angle\n agent_cfg = habitat_sim.agent.AgentConfiguration()\n agent_cfg.sensor_specifications = sensor_specs\n agent_cfg.action_space = {\n \"move_forward\": habitat_sim.agent.ActionSpec(\n \"move_forward\", habitat_sim.agent.ActuationSpec(amount=0.1)\n ),\n \"turn_left\": habitat_sim.agent.ActionSpec(\n \"turn_left\", habitat_sim.agent.ActuationSpec(amount=10.0)\n ),\n \"turn_right\": habitat_sim.agent.ActionSpec(\n \"turn_right\", habitat_sim.agent.ActuationSpec(amount=10.0)\n ),\n }\n\n return habitat_sim.Configuration(sim_cfg, [agent_cfg])\n\n\nsettings = {\n \"max_frames\": 10,\n \"width\": 256,\n \"height\": 256,\n \"scene\": \"data/scene_datasets/coda/coda.glb\",\n \"default_agent_id\": 0,\n \"sensor_height\": 1.5, # Height of sensors in meters\n \"rgb\": True, # RGB sensor\n \"depth\": True, # Depth sensor\n \"seed\": 1,\n \"enable_physics\": True,\n \"physics_config_file\": \"data/default.phys_scene_config.json\",\n \"silent\": False,\n \"compute_shortest_path\": False,\n \"compute_action_shortest_path\": False,\n \"save_png\": True,\n}\n\ncfg = make_cfg(settings)", "_____no_output_____" ], [ "# @title Spawn the agent at a pre-defined location\n\n\ndef init_agent(sim):\n agent_pos = np.array([-0.15776923, 0.18244143, 0.2988735])\n\n # Place the agent\n sim.agents[0].scene_node.translation = agent_pos\n agent_orientation_y = -40\n sim.agents[0].scene_node.rotation = mn.Quaternion.rotation(\n mn.Deg(agent_orientation_y), mn.Vector3(0, 1.0, 0)\n )\n\n\ncfg.sim_cfg.default_agent_id = 0\nwith habitat_sim.Simulator(cfg) as sim:\n init_agent(sim)\n if make_video:\n # Visualize the agent's initial position\n simulate_and_make_vid(\n sim, None, \"sim-init\", dt=1.0, open_vid=show_video\n )", "_____no_output_____" ], [ "# @title Set the object's initial and final position\n# @markdown Defines two utility functions:\n# @markdown - 
`remove_all_objects`: This will remove all objects from the scene\n# @markdown - `set_object_in_front_of_agent`: This will add an object in the scene in front of the agent at the specified distance.\n\n# @markdown Here we add a chair *3.0m* away from the agent and the task is to place the object at the desired final position, which is *7.0m* in front of the agent.\n\n\ndef remove_all_objects(sim):\n    for id in sim.get_existing_object_ids():\n        sim.remove_object(id)\n\n\ndef set_object_in_front_of_agent(sim, obj_id, z_offset=-1.5):\n    r\"\"\"\n    Adds an object in front of the agent at some distance.\n    \"\"\"\n    agent_transform = sim.agents[0].scene_node.transformation_matrix()\n    obj_translation = agent_transform.transform_point(\n        np.array([0, 0, z_offset])\n    )\n    sim.set_translation(obj_translation, obj_id)\n\n    obj_node = sim.get_object_scene_node(obj_id)\n    xform_bb = habitat_sim.geo.get_transformed_bb(\n        obj_node.cumulative_bb, obj_node.transformation\n    )\n\n    # also account for collision margin of the scene\n    scene_collision_margin = 0.04\n    y_translation = mn.Vector3(\n        0, xform_bb.size_y() / 2.0 + scene_collision_margin, 0\n    )\n    sim.set_translation(y_translation + sim.get_translation(obj_id), obj_id)\n\n\ndef init_objects(sim):\n    # Manager of Object Attributes Templates\n    obj_attr_mgr = sim.get_object_template_manager()\n\n    # Add a chair into the scene.\n    obj_path = \"test_assets/objects/chair\"\n    chair_template_id = obj_attr_mgr.load_object_configs(\n        str(os.path.join(data_path, obj_path))\n    )[0]\n    chair_attr = obj_attr_mgr.get_template_by_ID(chair_template_id)\n    obj_attr_mgr.register_template(chair_attr)\n\n    # Object's initial position 3m away from the agent.\n    object_id = sim.add_object_by_handle(chair_attr.handle)\n    set_object_in_front_of_agent(sim, object_id, -3.0)\n    sim.set_object_motion_type(\n        habitat_sim.physics.MotionType.STATIC, object_id\n    )\n\n    # Object's final position 7m away from the agent\n    goal_id = sim.add_object_by_handle(chair_attr.handle)\n    set_object_in_front_of_agent(sim, goal_id, -7.0)\n    sim.set_object_motion_type(habitat_sim.physics.MotionType.STATIC, goal_id)\n\n    return object_id, goal_id\n\n\nwith habitat_sim.Simulator(cfg) as sim:\n    init_agent(sim)\n    init_objects(sim)\n\n    # Visualize the scene after the chair is added into the scene.\n    if make_video:\n        simulate_and_make_vid(\n            sim, None, \"object-init\", dt=1.0, open_vid=show_video\n        )", "_____no_output_____" ] ], [ [ "## Rearrangement Dataset\n![dataset](https://drive.google.com/uc?id=1y0qS0MifmJsZ0F4jsRZGI9BrXzslFLn7)\n\nIn the previous section, we created a single episode of the rearrangement task. Let's define a format to store all the necessary information about a single episode. It should store the *scene* the episode belongs to, *initial spawn position and orientation* of the agent, *object type*, object's *initial position and orientation* as well as *final position and orientation*.\n\nThe format will be as follows:\n```\n{\n    'episode_id': 0,\n    'scene_id': 'data/scene_datasets/coda/coda.glb',\n    'goals': {\n        'position': [4.34, 0.67, -5.06],\n        'rotation': [0.0, 0.0, 0.0, 1.0]\n    },\n    'objects': {\n        'object_id': 0,\n        'object_template': 'data/test_assets/objects/chair',\n        'position': [1.77, 0.67, -1.99],\n        'rotation': [0.0, 0.0, 0.0, 1.0]\n    },\n    'start_position': [-0.15, 0.18, 0.29],\n    'start_rotation': [-0.0, -0.34, -0.0, 0.93]\n}\n```\nOnce an episode is defined, a dataset will just be a collection of such episodes. 
For simplicity, in this notebook, the dataset will only contain one episode defined above.\n", "_____no_output_____" ] ], [ [ "# @title Create a new dataset\n# @markdown Utility functions to define and save the dataset for the rearrangement task\n\n\ndef get_rotation(sim, object_id):\n quat = sim.get_rotation(object_id)\n return np.array(quat.vector).tolist() + [quat.scalar]\n\n\ndef init_episode_dict(episode_id, scene_id, agent_pos, agent_rot):\n episode_dict = {\n \"episode_id\": episode_id,\n \"scene_id\": \"data/scene_datasets/coda/coda.glb\",\n \"start_position\": agent_pos,\n \"start_rotation\": agent_rot,\n \"info\": {},\n }\n return episode_dict\n\n\ndef add_object_details(sim, episode_dict, id, object_template, object_id):\n object_template = {\n \"object_id\": id,\n \"object_template\": object_template,\n \"position\": np.array(sim.get_translation(object_id)).tolist(),\n \"rotation\": get_rotation(sim, object_id),\n }\n episode_dict[\"objects\"] = object_template\n return episode_dict\n\n\ndef add_goal_details(sim, episode_dict, object_id):\n goal_template = {\n \"position\": np.array(sim.get_translation(object_id)).tolist(),\n \"rotation\": get_rotation(sim, object_id),\n }\n episode_dict[\"goals\"] = goal_template\n return episode_dict\n\n\n# set the number of objects to 1 always for now.\ndef build_episode(sim, episode_num, object_id, goal_id):\n episodes = {\"episodes\": []}\n for episode in range(episode_num):\n agent_state = sim.get_agent(0).get_state()\n agent_pos = np.array(agent_state.position).tolist()\n agent_quat = agent_state.rotation\n agent_rot = np.array(agent_quat.vec).tolist() + [agent_quat.real]\n episode_dict = init_episode_dict(\n episode, settings[\"scene\"], agent_pos, agent_rot\n )\n\n object_attr = sim.get_object_initialization_template(object_id)\n object_path = os.path.relpath(\n os.path.splitext(object_attr.render_asset_handle)[0]\n )\n\n episode_dict = add_object_details(\n sim, episode_dict, 0, object_path, object_id\n )\n episode_dict = add_goal_details(sim, episode_dict, goal_id)\n episodes[\"episodes\"].append(episode_dict)\n\n return episodes\n\n\nwith habitat_sim.Simulator(cfg) as sim:\n init_agent(sim)\n object_id, goal_id = init_objects(sim)\n\n episodes = build_episode(sim, 1, object_id, goal_id)\n\n dataset_content_path = \"data/datasets/rearrangement/coda/v1/train/\"\n if not os.path.exists(dataset_content_path):\n os.makedirs(dataset_content_path)\n\n with gzip.open(\n os.path.join(dataset_content_path, \"train.json.gz\"), \"wt\"\n ) as f:\n json.dump(episodes, f)\n\n print(\n \"Dataset written to {}\".format(\n os.path.join(dataset_content_path, \"train.json.gz\")\n )\n )", "_____no_output_____" ], [ "# @title Dataset class to read the saved dataset in Habitat-Lab.\n# @markdown To read the saved episodes in Habitat-Lab, we will extend the `Dataset` class and the `Episode` base class. 
It will help provide all the relevant details about the episode through a consistent API to all downstream tasks.\n\n# @markdown - We will first create a `RearrangementEpisode` by extending the `NavigationEpisode` to include additional information about object's initial configuration and desired final configuration.\n# @markdown - We will then define a `RearrangementDatasetV0` class that builds on top of `PointNavDatasetV1` class to read the JSON file stored earlier and initialize a list of `RearrangementEpisode`.\n\nfrom habitat.core.utils import DatasetFloatJSONEncoder, not_none_validator\nfrom habitat.datasets.pointnav.pointnav_dataset import (\n CONTENT_SCENES_PATH_FIELD,\n DEFAULT_SCENE_PATH_PREFIX,\n PointNavDatasetV1,\n)\nfrom habitat.tasks.nav.nav import NavigationEpisode\n\n\[email protected](auto_attribs=True, kw_only=True)\nclass RearrangementSpec:\n r\"\"\"Specifications that capture a particular position of final position\n or initial position of the object.\n \"\"\"\n\n position: List[float] = attr.ib(default=None, validator=not_none_validator)\n rotation: List[float] = attr.ib(default=None, validator=not_none_validator)\n info: Optional[Dict[str, str]] = attr.ib(default=None)\n\n\[email protected](auto_attribs=True, kw_only=True)\nclass RearrangementObjectSpec(RearrangementSpec):\n r\"\"\"Object specifications that capture position of each object in the scene,\n the associated object template.\n \"\"\"\n object_id: str = attr.ib(default=None, validator=not_none_validator)\n object_template: Optional[str] = attr.ib(\n default=\"data/test_assets/objects/chair\"\n )\n\n\[email protected](auto_attribs=True, kw_only=True)\nclass RearrangementEpisode(NavigationEpisode):\n r\"\"\"Specification of episode that includes initial position and rotation\n of agent, all goal specifications, all object specifications\n\n Args:\n episode_id: id of episode in the dataset\n scene_id: id of scene inside the simulator.\n start_position: numpy ndarray containing 3 entries for (x, y, z).\n start_rotation: numpy ndarray with 4 entries for (x, y, z, w)\n elements of unit quaternion (versor) representing agent 3D\n orientation.\n goal: object's goal position and rotation\n object: object's start specification defined with object type,\n position, and rotation.\n \"\"\"\n objects: RearrangementObjectSpec = attr.ib(\n default=None, validator=not_none_validator\n )\n goals: RearrangementSpec = attr.ib(\n default=None, validator=not_none_validator\n )\n\n\[email protected]_dataset(name=\"RearrangementDataset-v0\")\nclass RearrangementDatasetV0(PointNavDatasetV1):\n r\"\"\"Class inherited from PointNavDataset that loads Rearrangement dataset.\"\"\"\n episodes: List[RearrangementEpisode]\n content_scenes_path: str = \"{data_path}/content/{scene}.json.gz\"\n\n def to_json(self) -> str:\n result = DatasetFloatJSONEncoder().encode(self)\n return result\n\n def __init__(self, config: Optional[Config] = None) -> None:\n super().__init__(config)\n\n def from_json(\n self, json_str: str, scenes_dir: Optional[str] = None\n ) -> None:\n deserialized = json.loads(json_str)\n if CONTENT_SCENES_PATH_FIELD in deserialized:\n self.content_scenes_path = deserialized[CONTENT_SCENES_PATH_FIELD]\n\n for i, episode in enumerate(deserialized[\"episodes\"]):\n rearrangement_episode = RearrangementEpisode(**episode)\n rearrangement_episode.episode_id = str(i)\n\n if scenes_dir is not None:\n if rearrangement_episode.scene_id.startswith(\n DEFAULT_SCENE_PATH_PREFIX\n ):\n rearrangement_episode.scene_id = (\n 
rearrangement_episode.scene_id[\n                            len(DEFAULT_SCENE_PATH_PREFIX) :\n                        ]\n                    )\n\n                rearrangement_episode.scene_id = os.path.join(\n                    scenes_dir, rearrangement_episode.scene_id\n                )\n\n            rearrangement_episode.objects = RearrangementObjectSpec(\n                **rearrangement_episode.objects\n            )\n            rearrangement_episode.goals = RearrangementSpec(\n                **rearrangement_episode.goals\n            )\n\n            self.episodes.append(rearrangement_episode)", "_____no_output_____" ], [ "# @title Load the saved dataset using the Dataset class\nconfig = habitat.get_config(\"configs/datasets/pointnav/habitat_test.yaml\")\nconfig.defrost()\nconfig.DATASET.DATA_PATH = (\n    \"data/datasets/rearrangement/coda/v1/{split}/{split}.json.gz\"\n)\nconfig.DATASET.TYPE = \"RearrangementDataset-v0\"\nconfig.freeze()\n\ndataset = RearrangementDatasetV0(config.DATASET)\n\n# check if the dataset got correctly deserialized\nassert len(dataset.episodes) == 1\n\nassert dataset.episodes[0].objects.position == [\n    1.770593523979187,\n    0.6726829409599304,\n    -1.9992598295211792,\n]\nassert dataset.episodes[0].objects.rotation == [0.0, 0.0, 0.0, 1.0]\nassert (\n    dataset.episodes[0].objects.object_template\n    == \"data/test_assets/objects/chair\"\n)\n\nassert dataset.episodes[0].goals.position == [\n    4.3417439460754395,\n    0.6726829409599304,\n    -5.0634379386901855,\n]\nassert dataset.episodes[0].goals.rotation == [0.0, 0.0, 0.0, 1.0]", "_____no_output_____" ] ], [ [ "## Implement Grab/Release Action", "_____no_output_____" ] ], [ [ "# @title RayCast utility to implement Grab/Release Under Cross-Hair Action\n# @markdown Cast a ray in the direction of crosshair from the camera and check if it collides with another object within a certain distance threshold\n\n\ndef raycast(sim, sensor_name, crosshair_pos=(128, 128), max_distance=2.0):\n    r\"\"\"Cast a ray in the direction of crosshair and check if it collides\n    with another object within a certain distance threshold\n    :param sim: Simulator object\n    :param sensor_name: name of the visual sensor to be used for raycasting\n    :param crosshair_pos: 2D coordinate in the viewport towards which the\n        ray will be cast\n    :param max_distance: distance threshold beyond which objects won't\n        be considered\n    \"\"\"\n    visual_sensor = sim._sensors[sensor_name]\n    scene_graph = sim.get_active_scene_graph()\n    scene_graph.set_default_render_camera_parameters(\n        visual_sensor._sensor_object\n    )\n    render_camera = scene_graph.get_default_render_camera()\n    center_ray = render_camera.unproject(mn.Vector2i(crosshair_pos))\n\n    raycast_results = sim.cast_ray(center_ray, max_distance=max_distance)\n\n    closest_object = -1\n    closest_dist = 1000.0\n    if raycast_results.has_hits():\n        for hit in raycast_results.hits:\n            if hit.ray_distance < closest_dist:\n                closest_dist = hit.ray_distance\n                closest_object = hit.object_id\n\n    return closest_object", "_____no_output_____" ], [ "# Test the raycast utility.\n\nwith habitat_sim.Simulator(cfg) as sim:\n    init_agent(sim)\n    obj_attr_mgr = sim.get_object_template_manager()\n    obj_path = \"test_assets/objects/chair\"\n    chair_template_id = obj_attr_mgr.load_object_configs(\n        str(os.path.join(data_path, obj_path))\n    )[0]\n    chair_attr = obj_attr_mgr.get_template_by_ID(chair_template_id)\n    obj_attr_mgr.register_template(chair_attr)\n    object_id = sim.add_object_by_handle(chair_attr.handle)\n    print(f\"Chair's object id is {object_id}\")\n\n    set_object_in_front_of_agent(sim, object_id, -1.5)\n    sim.set_object_motion_type(\n        habitat_sim.physics.MotionType.STATIC, object_id\n    )\n    if make_video:\n        # Visualize the agent's initial 
position\n        simulate_and_make_vid(\n            sim, [190, 128], \"sim-before-grab\", dt=1.0, open_vid=show_video\n        )\n\n    # Distance threshold=2 is greater than agent-to-chair distance.\n    # Should return chair's object id\n    closest_object = raycast(\n        sim, \"rgb\", crosshair_pos=[128, 190], max_distance=2.0\n    )\n    print(f\"Closest Object ID: {closest_object} using 2.0 threshold\")\n    assert (\n        closest_object == object_id\n    ), f\"Could not pick chair with ID: {object_id}\"\n\n    # Distance threshold=1 is smaller than agent-to-chair distance.\n    # Should return -1\n    closest_object = raycast(\n        sim, \"rgb\", crosshair_pos=[128, 190], max_distance=1.0\n    )\n    print(f\"Closest Object ID: {closest_object} using 1.0 threshold\")\n    assert closest_object == -1, \"Agent should not be able to pick any object\"", "_____no_output_____" ], [ "# @title Define a Grab/Release action and create a new action space.\n# @markdown Each new action is defined by an `ActionSpec` and an `ActuationSpec`. `ActionSpec` is a mapping between the action name and its corresponding `ActuationSpec`. `ActuationSpec` contains all the necessary specifications required to define the action.\n\nfrom habitat.config.default import _C, CN\nfrom habitat.core.embodied_task import SimulatorTaskAction\nfrom habitat.sims.habitat_simulator.actions import (\n    HabitatSimActions,\n    HabitatSimV1ActionSpaceConfiguration,\n)\nfrom habitat_sim.agent.controls.controls import ActuationSpec\nfrom habitat_sim.physics import MotionType\n\n\n# @markdown For instance, `GrabReleaseActuationSpec` contains the following:\n# @markdown - `visual_sensor_name` defines which viewport (rgb, depth, etc) to use to cast the ray.\n# @markdown - `crosshair_pos` stores the position in the viewport through which the ray passes. Any object which intersects with this ray can be grabbed by the agent.\n# @markdown - `amount` defines a distance threshold. Objects which are farther than the threshold cannot be picked up by the agent.\[email protected](auto_attribs=True, slots=True)\nclass GrabReleaseActuationSpec(ActuationSpec):\n    visual_sensor_name: str = \"rgb\"\n    crosshair_pos: List[int] = [128, 128]\n    amount: float = 2.0\n\n\n# @markdown Then, we extend the `HabitatSimV1ActionSpaceConfiguration` to add the above action into the agent's action space. 
`ActionSpaceConfiguration` is a mapping between action name and the corresponding `ActionSpec`\[email protected]_action_space_configuration(name=\"RearrangementActions-v0\")\nclass RearrangementSimV0ActionSpaceConfiguration(\n    HabitatSimV1ActionSpaceConfiguration\n):\n    def __init__(self, config):\n        super().__init__(config)\n        if not HabitatSimActions.has_action(\"GRAB_RELEASE\"):\n            HabitatSimActions.extend_action_space(\"GRAB_RELEASE\")\n\n    def get(self):\n        config = super().get()\n        new_config = {\n            HabitatSimActions.GRAB_RELEASE: habitat_sim.ActionSpec(\n                \"grab_or_release_object_under_crosshair\",\n                GrabReleaseActuationSpec(\n                    visual_sensor_name=self.config.VISUAL_SENSOR,\n                    crosshair_pos=self.config.CROSSHAIR_POS,\n                    amount=self.config.GRAB_DISTANCE,\n                ),\n            )\n        }\n\n        config.update(new_config)\n\n        return config\n\n\n# @markdown Finally, we extend `SimulatorTaskAction` which tells the simulator which action to call when a named action ('GRAB_RELEASE' in this case) is predicted by the agent's policy.\[email protected]_task_action\nclass GrabOrReleaseAction(SimulatorTaskAction):\n    def step(self, *args: Any, **kwargs: Any):\n        r\"\"\"This method is called from ``Env`` on each ``step``.\"\"\"\n        return self._sim.step(HabitatSimActions.GRAB_RELEASE)\n\n\n_C.TASK.ACTIONS.GRAB_RELEASE = CN()\n_C.TASK.ACTIONS.GRAB_RELEASE.TYPE = \"GrabOrReleaseAction\"\n_C.SIMULATOR.CROSSHAIR_POS = [128, 160]\n_C.SIMULATOR.GRAB_DISTANCE = 2.0\n_C.SIMULATOR.VISUAL_SENSOR = \"rgb\"", "_____no_output_____" ] ], [ [ "## Setup Simulator Class for Rearrangement Task\n\n![sim](https://drive.google.com/uc?id=1ce6Ti-gpumMEyfomqAKWqOspXm6tN4_8)", "_____no_output_____" ] ], [ [ "# @title RearrangementSim Class\n# @markdown Here we will extend the `HabitatSim` class for the rearrangement task. We will make the following changes:\n# @markdown - define a new `_initialize_objects` function which will load the object in its initial configuration as defined by the episode.\n# @markdown - define a `gripped_object_id` property that stores whether the agent is holding any object or not.\n# @markdown - modify the `step` function of the simulator to use the `grab/release` action we define earlier.\n\n# @markdown #### Writing the `step` function:\n# @markdown Since we added a new action for this task, we have to modify the `step` function to define what happens when the `grab/release` action is called. If a simple navigation action (`move_forward`, `turn_left`, `turn_right`) is called, we pass it forward to the `act` function of the agent, which already defines the behavior of these actions.\n\n# @markdown For the `grab/release` action, if the agent is not already holding an object, we first call the `raycast` function using the values from the `ActuationSpec` to see if any object is grippable. If it returns a valid object id, we put the object in an \"invisible\" inventory and remove it from the scene.\n\n# @markdown If the agent was already holding an object, the `grab/release` action will try to release the object at the same relative position as it was grabbed. 
If the object can be placed without any collision, then the `release` action is successful.\n\nfrom habitat.sims.habitat_simulator.habitat_simulator import HabitatSim\nfrom habitat_sim.nav import NavMeshSettings\nfrom habitat_sim.utils.common import quat_from_coeffs, quat_to_magnum\n\n\[email protected]_simulator(name=\"RearrangementSim-v0\")\nclass RearrangementSim(HabitatSim):\n    r\"\"\"Simulator wrapper over habitat-sim with\n    object rearrangement functionalities.\n    \"\"\"\n\n    def __init__(self, config: Config) -> None:\n        self.did_reset = False\n        super().__init__(config=config)\n        self.grip_offset = np.eye(4)\n\n        agent_id = self.habitat_config.DEFAULT_AGENT_ID\n        agent_config = self._get_agent_config(agent_id)\n\n        self.navmesh_settings = NavMeshSettings()\n        self.navmesh_settings.set_defaults()\n        self.navmesh_settings.agent_radius = agent_config.RADIUS\n        self.navmesh_settings.agent_height = agent_config.HEIGHT\n\n    def reconfigure(self, config: Config) -> None:\n        super().reconfigure(config)\n        self._initialize_objects()\n\n    def reset(self):\n        sim_obs = super().reset()\n        if self._update_agents_state():\n            sim_obs = self.get_sensor_observations()\n\n        self._prev_sim_obs = sim_obs\n        self.did_reset = True\n        self.grip_offset = np.eye(4)\n        return self._sensor_suite.get_observations(sim_obs)\n\n    def _initialize_objects(self):\n        objects = self.habitat_config.objects[0]\n        obj_attr_mgr = self.get_object_template_manager()\n\n        # first remove all existing objects\n        existing_object_ids = self.get_existing_object_ids()\n\n        if len(existing_object_ids) > 0:\n            for obj_id in existing_object_ids:\n                self.remove_object(obj_id)\n\n        self.sim_object_to_objid_mapping = {}\n        self.objid_to_sim_object_mapping = {}\n\n        if objects is not None:\n            object_template = objects[\"object_template\"]\n            object_pos = objects[\"position\"]\n            object_rot = objects[\"rotation\"]\n\n            object_template_id = obj_attr_mgr.load_object_configs(\n                object_template\n            )[0]\n            object_attr = obj_attr_mgr.get_template_by_ID(object_template_id)\n            obj_attr_mgr.register_template(object_attr)\n\n            object_id = self.add_object_by_handle(object_attr.handle)\n            self.sim_object_to_objid_mapping[object_id] = objects[\"object_id\"]\n            self.objid_to_sim_object_mapping[objects[\"object_id\"]] = object_id\n\n            self.set_translation(object_pos, object_id)\n            if isinstance(object_rot, list):\n                object_rot = quat_from_coeffs(object_rot)\n\n            object_rot = quat_to_magnum(object_rot)\n            self.set_rotation(object_rot, object_id)\n\n            self.set_object_motion_type(MotionType.STATIC, object_id)\n\n        # Recompute the navmesh after placing all the objects.\n        self.recompute_navmesh(self.pathfinder, self.navmesh_settings, True)\n\n    def _sync_gripped_object(self, gripped_object_id):\n        r\"\"\"\n        Sync the gripped object with the object associated with the agent.\n        \"\"\"\n        if gripped_object_id != -1:\n            agent_body_transformation = (\n                self._default_agent.scene_node.transformation\n            )\n            self.set_transformation(\n                agent_body_transformation, gripped_object_id\n            )\n            translation = agent_body_transformation.transform_point(\n                np.array([0, 2.0, 0])\n            )\n            self.set_translation(translation, gripped_object_id)\n\n    @property\n    def gripped_object_id(self):\n        return self._prev_sim_obs.get(\"gripped_object_id\", -1)\n\n    def step(self, action: int):\n        dt = 1 / 60.0\n        self._num_total_frames += 1\n        collided = False\n        gripped_object_id = self.gripped_object_id\n\n        agent_config = self._default_agent.agent_config\n        action_spec = agent_config.action_space[action]\n\n        if action_spec.name == 
\"grab_or_release_object_under_crosshair\":\n            # If already holding an object\n            if gripped_object_id != -1:\n                agent_body_transformation = (\n                    self._default_agent.scene_node.transformation\n                )\n                T = np.dot(agent_body_transformation, self.grip_offset)\n\n                self.set_transformation(T, gripped_object_id)\n\n                position = self.get_translation(gripped_object_id)\n\n                if self.pathfinder.is_navigable(position):\n                    self.set_object_motion_type(\n                        MotionType.STATIC, gripped_object_id\n                    )\n                    gripped_object_id = -1\n                    self.recompute_navmesh(\n                        self.pathfinder, self.navmesh_settings, True\n                    )\n            # if not holding an object, then try to grab\n            else:\n                gripped_object_id = raycast(\n                    self,\n                    action_spec.actuation.visual_sensor_name,\n                    crosshair_pos=action_spec.actuation.crosshair_pos,\n                    max_distance=action_spec.actuation.amount,\n                )\n\n                # found a grabbable object.\n                if gripped_object_id != -1:\n                    agent_body_transformation = (\n                        self._default_agent.scene_node.transformation\n                    )\n\n                    self.grip_offset = np.dot(\n                        np.array(agent_body_transformation.inverted()),\n                        np.array(self.get_transformation(gripped_object_id)),\n                    )\n                    self.set_object_motion_type(\n                        MotionType.KINEMATIC, gripped_object_id\n                    )\n                    self.recompute_navmesh(\n                        self.pathfinder, self.navmesh_settings, True\n                    )\n\n        else:\n            collided = self._default_agent.act(action)\n            self._last_state = self._default_agent.get_state()\n\n        # step physics by dt\n        super().step_world(dt)\n\n        # Sync the gripped object after the agent moves.\n        self._sync_gripped_object(gripped_object_id)\n\n        # obtain observations\n        self._prev_sim_obs = self.get_sensor_observations()\n        self._prev_sim_obs[\"collided\"] = collided\n        self._prev_sim_obs[\"gripped_object_id\"] = gripped_object_id\n\n        observations = self._sensor_suite.get_observations(self._prev_sim_obs)\n        return observations", "_____no_output_____" ] ] ], [ [ "## Create the Rearrangement Task\n![task](https://drive.google.com/uc?id=1N75Mmi6aigh33uL765ljsAqLzFmcs7Zn)", "_____no_output_____" ] ], [ [ "# @title Implement new sensors and measurements\n# @markdown After defining the dataset, action space and simulator functions for the rearrangement task, we are one step closer to training agents to solve this task.\n\n# @markdown Here we define inputs to the policy and other measurements required to design reward functions.\n\n# @markdown **Sensors**: These define various parts of the simulator state that's visible to the agent. For simplicity, we'll assume that the agent knows the object's current position and the object's final goal position relative to the agent's current position.\n# @markdown - Object's current position will be given by the `ObjectPosition` sensor.\n# @markdown - Object's goal position will be available through the `ObjectGoal` sensor.\n# @markdown - Finally, we will also use the `GrippedObject` sensor to tell the agent if it's holding any object or not.\n\n# @markdown **Measures**: These define various metrics about the task which can be used to measure task progress and define rewards. Note that measurements are *privileged* information not accessible to the agent as part of the observation space. 
We will need the following measurements:\n# @markdown - `AgentToObjectDistance` which measures the euclidean distance between the agent and the object.\n# @markdown - `ObjectToGoalDistance` which measures the euclidean distance between the object and the goal.\n\nfrom gym import spaces\n\nimport habitat_sim\nfrom habitat.config.default import CN, Config\nfrom habitat.core.dataset import Episode\nfrom habitat.core.embodied_task import Measure\nfrom habitat.core.simulator import Observations, Sensor, SensorTypes, Simulator\nfrom habitat.tasks.nav.nav import PointGoalSensor\n\n\[email protected]_sensor\nclass GrippedObjectSensor(Sensor):\n    cls_uuid = \"gripped_object_id\"\n\n    def __init__(\n        self, *args: Any, sim: RearrangementSim, config: Config, **kwargs: Any\n    ):\n        self._sim = sim\n        super().__init__(config=config)\n\n    def _get_uuid(self, *args: Any, **kwargs: Any) -> str:\n        return self.cls_uuid\n\n    def _get_observation_space(self, *args: Any, **kwargs: Any):\n\n        # Discrete expects a count of values, not the list of object ids\n        return spaces.Discrete(len(self._sim.get_existing_object_ids()))\n\n    def _get_sensor_type(self, *args: Any, **kwargs: Any):\n        return SensorTypes.MEASUREMENT\n\n    def get_observation(\n        self,\n        observations: Dict[str, Observations],\n        episode: Episode,\n        *args: Any,\n        **kwargs: Any,\n    ):\n        obj_id = self._sim.sim_object_to_objid_mapping.get(\n            self._sim.gripped_object_id, -1\n        )\n        return obj_id\n\n\[email protected]_sensor\nclass ObjectPosition(PointGoalSensor):\n    cls_uuid: str = \"object_position\"\n\n    def _get_observation_space(self, *args: Any, **kwargs: Any):\n        sensor_shape = (self._dimensionality,)\n\n        return spaces.Box(\n            low=np.finfo(np.float32).min,\n            high=np.finfo(np.float32).max,\n            shape=sensor_shape,\n            dtype=np.float32,\n        )\n\n    def get_observation(\n        self, *args: Any, observations, episode, **kwargs: Any\n    ):\n        agent_state = self._sim.get_agent_state()\n        agent_position = agent_state.position\n        rotation_world_agent = agent_state.rotation\n\n        object_id = self._sim.get_existing_object_ids()[0]\n        object_position = self._sim.get_translation(object_id)\n        pointgoal = self._compute_pointgoal(\n            agent_position, rotation_world_agent, object_position\n        )\n        return pointgoal\n\n\[email protected]_sensor\nclass ObjectGoal(PointGoalSensor):\n    cls_uuid: str = \"object_goal\"\n\n    def _get_observation_space(self, *args: Any, **kwargs: Any):\n        sensor_shape = (self._dimensionality,)\n\n        return spaces.Box(\n            low=np.finfo(np.float32).min,\n            high=np.finfo(np.float32).max,\n            shape=sensor_shape,\n            dtype=np.float32,\n        )\n\n    def get_observation(\n        self, *args: Any, observations, episode, **kwargs: Any\n    ):\n        agent_state = self._sim.get_agent_state()\n        agent_position = agent_state.position\n        rotation_world_agent = agent_state.rotation\n\n        goal_position = np.array(episode.goals.position, dtype=np.float32)\n\n        point_goal = self._compute_pointgoal(\n            agent_position, rotation_world_agent, goal_position\n        )\n        return point_goal\n\n\[email protected]_measure\nclass ObjectToGoalDistance(Measure):\n    \"\"\"The measure calculates the distance of the object to the goal.\"\"\"\n\n    cls_uuid: str = \"object_to_goal_distance\"\n\n    def __init__(\n        self, sim: Simulator, config: Config, *args: Any, **kwargs: Any\n    ):\n        self._sim = sim\n        self._config = config\n\n        super().__init__(**kwargs)\n\n    @staticmethod\n    def _get_uuid(*args: Any, **kwargs: Any):\n        return ObjectToGoalDistance.cls_uuid\n\n    def reset_metric(self, episode, *args: Any, **kwargs: Any):\n        self.update_metric(*args, episode=episode, **kwargs)\n\n    def _geo_dist(self, src_pos, goal_pos: np.array) -> float:\n        return 
self._sim.geodesic_distance(src_pos, [goal_pos])\n\n def _euclidean_distance(self, position_a, position_b):\n return np.linalg.norm(\n np.array(position_b) - np.array(position_a), ord=2\n )\n\n def update_metric(self, episode, *args: Any, **kwargs: Any):\n sim_obj_id = self._sim.get_existing_object_ids()[0]\n\n previous_position = np.array(\n self._sim.get_translation(sim_obj_id)\n ).tolist()\n goal_position = episode.goals.position\n self._metric = self._euclidean_distance(\n previous_position, goal_position\n )\n\n\[email protected]_measure\nclass AgentToObjectDistance(Measure):\n \"\"\"The measure calculates the distance of objects from the agent\"\"\"\n\n cls_uuid: str = \"agent_to_object_distance\"\n\n def __init__(\n self, sim: Simulator, config: Config, *args: Any, **kwargs: Any\n ):\n self._sim = sim\n self._config = config\n\n super().__init__(**kwargs)\n\n @staticmethod\n def _get_uuid(*args: Any, **kwargs: Any):\n return AgentToObjectDistance.cls_uuid\n\n def reset_metric(self, episode, *args: Any, **kwargs: Any):\n self.update_metric(*args, episode=episode, **kwargs)\n\n def _euclidean_distance(self, position_a, position_b):\n return np.linalg.norm(\n np.array(position_b) - np.array(position_a), ord=2\n )\n\n def update_metric(self, episode, *args: Any, **kwargs: Any):\n sim_obj_id = self._sim.get_existing_object_ids()[0]\n previous_position = np.array(\n self._sim.get_translation(sim_obj_id)\n ).tolist()\n\n agent_state = self._sim.get_agent_state()\n agent_position = agent_state.position\n\n self._metric = self._euclidean_distance(\n previous_position, agent_position\n )\n\n\n# -----------------------------------------------------------------------------\n# # REARRANGEMENT TASK GRIPPED OBJECT SENSOR\n# -----------------------------------------------------------------------------\n_C.TASK.GRIPPED_OBJECT_SENSOR = CN()\n_C.TASK.GRIPPED_OBJECT_SENSOR.TYPE = \"GrippedObjectSensor\"\n# -----------------------------------------------------------------------------\n# # REARRANGEMENT TASK ALL OBJECT POSITIONS SENSOR\n# -----------------------------------------------------------------------------\n_C.TASK.OBJECT_POSITION = CN()\n_C.TASK.OBJECT_POSITION.TYPE = \"ObjectPosition\"\n_C.TASK.OBJECT_POSITION.GOAL_FORMAT = \"POLAR\"\n_C.TASK.OBJECT_POSITION.DIMENSIONALITY = 2\n# -----------------------------------------------------------------------------\n# # REARRANGEMENT TASK ALL OBJECT GOALS SENSOR\n# -----------------------------------------------------------------------------\n_C.TASK.OBJECT_GOAL = CN()\n_C.TASK.OBJECT_GOAL.TYPE = \"ObjectGoal\"\n_C.TASK.OBJECT_GOAL.GOAL_FORMAT = \"POLAR\"\n_C.TASK.OBJECT_GOAL.DIMENSIONALITY = 2\n# -----------------------------------------------------------------------------\n# # OBJECT_DISTANCE_TO_GOAL MEASUREMENT\n# -----------------------------------------------------------------------------\n_C.TASK.OBJECT_TO_GOAL_DISTANCE = CN()\n_C.TASK.OBJECT_TO_GOAL_DISTANCE.TYPE = \"ObjectToGoalDistance\"\n# -----------------------------------------------------------------------------\n# # OBJECT_DISTANCE_FROM_AGENT MEASUREMENT\n# -----------------------------------------------------------------------------\n_C.TASK.AGENT_TO_OBJECT_DISTANCE = CN()\n_C.TASK.AGENT_TO_OBJECT_DISTANCE.TYPE = \"AgentToObjectDistance\"\n\nfrom habitat.config.default import CN, Config", "_____no_output_____" ], [ "# @title Define `RearrangementTask` by extending `NavigationTask`\nfrom habitat.tasks.nav.nav import NavigationTask, merge_sim_episode_config\n\n\ndef 
merge_sim_episode_with_object_config(\n    sim_config: Config, episode: Type[Episode]\n) -> Any:\n    sim_config = merge_sim_episode_config(sim_config, episode)\n    sim_config.defrost()\n    sim_config.objects = [episode.objects.__dict__]\n    sim_config.freeze()\n\n    return sim_config\n\n\[email protected]_task(name=\"RearrangementTask-v0\")\nclass RearrangementTask(NavigationTask):\n    r\"\"\"Embodied Rearrangement Task\n    Goal: An agent must place objects at their corresponding goal position.\n    \"\"\"\n\n    def __init__(self, **kwargs) -> None:\n        super().__init__(**kwargs)\n\n    def overwrite_sim_config(self, sim_config, episode):\n        return merge_sim_episode_with_object_config(sim_config, episode)", "_____no_output_____" ] ], [ [ "## Implement a hard-coded and an RL agent\n\n", "_____no_output_____" ] ], [ [ "# @title Load the \`RearrangementTask\` in Habitat-Lab and run a hard-coded agent\nimport habitat\n\nconfig = habitat.get_config(\"configs/tasks/pointnav.yaml\")\nconfig.defrost()\nconfig.ENVIRONMENT.MAX_EPISODE_STEPS = 50\nconfig.SIMULATOR.TYPE = \"RearrangementSim-v0\"\nconfig.SIMULATOR.ACTION_SPACE_CONFIG = \"RearrangementActions-v0\"\nconfig.SIMULATOR.GRAB_DISTANCE = 2.0\nconfig.SIMULATOR.HABITAT_SIM_V0.ENABLE_PHYSICS = True\nconfig.TASK.TYPE = \"RearrangementTask-v0\"\nconfig.TASK.SUCCESS_DISTANCE = 1.0\nconfig.TASK.SENSORS = [\n    \"GRIPPED_OBJECT_SENSOR\",\n    \"OBJECT_POSITION\",\n    \"OBJECT_GOAL\",\n]\nconfig.TASK.GOAL_SENSOR_UUID = \"object_goal\"\nconfig.TASK.MEASUREMENTS = [\n    \"OBJECT_TO_GOAL_DISTANCE\",\n    \"AGENT_TO_OBJECT_DISTANCE\",\n]\nconfig.TASK.POSSIBLE_ACTIONS = [\"STOP\", \"MOVE_FORWARD\", \"GRAB_RELEASE\"]\nconfig.DATASET.TYPE = \"RearrangementDataset-v0\"\nconfig.DATASET.SPLIT = \"train\"\nconfig.DATASET.DATA_PATH = (\n    \"data/datasets/rearrangement/coda/v1/{split}/{split}.json.gz\"\n)\nconfig.freeze()\n\n\ndef print_info(obs, metrics):\n    print(\n        \"Gripped Object: {}, Distance To Object: {}, Distance To Goal: {}\".format(\n            obs[\"gripped_object_id\"],\n            metrics[\"agent_to_object_distance\"],\n            metrics[\"object_to_goal_distance\"],\n        )\n    )\n\n\ntry:  # Got to make initialization idiot proof\n    sim.close()\nexcept NameError:\n    pass\n\nwith habitat.Env(config) as env:\n    obs = env.reset()\n    obs_list = []\n    # Get closer to the object\n    while True:\n        obs = env.step(1)\n        obs_list.append(obs)\n        metrics = env.get_metrics()\n        print_info(obs, metrics)\n        if metrics[\"agent_to_object_distance\"] < 2.0:\n            break\n\n    # Grab the object\n    obs = env.step(2)\n    obs_list.append(obs)\n    metrics = env.get_metrics()\n    print_info(obs, metrics)\n    assert obs[\"gripped_object_id\"] != -1\n\n    # Get closer to the goal\n    while True:\n        obs = env.step(1)\n        obs_list.append(obs)\n        metrics = env.get_metrics()\n        print_info(obs, metrics)\n        if metrics[\"object_to_goal_distance\"] < 2.0:\n            break\n\n    # Release the object\n    obs = env.step(2)\n    obs_list.append(obs)\n    metrics = env.get_metrics()\n    print_info(obs, metrics)\n    assert obs[\"gripped_object_id\"] == -1\n\n    if make_video:\n        make_video_cv2(\n            obs_list,\n            [190, 128],\n            \"hard-coded-agent\",\n            fps=5.0,\n            open_vid=show_video,\n        )", "_____no_output_____" ], [ "# @title Create a task-specific RL Environment with a new reward definition.\n# @markdown We create a \`RearrangementRLEnv\` class and modify the \`get_reward()\` function.\n# @markdown The reward structure is as follows:\n# @markdown - The agent gets a positive reward if the agent gets closer to the object, otherwise a negative reward.\n# @markdown - The agent gets a positive reward if it moves the object closer to the goal, 
otherwise a negative reward.\n# @markdown - The agent gets a positive reward when the agent \"picks\" up an object for the first time. For all other \"grab/release\" actions, it gets a negative reward.\n# @markdown - The agent gets a slack penalty of -0.01 for every action it takes in the environment.\n# @markdown - Finally, the agent gets a large success reward when the episode is completed successfully.\n\nfrom typing import Optional, Type\n\nimport numpy as np\n\nimport habitat\nfrom habitat import Config, Dataset\nfrom habitat_baselines.common.baseline_registry import baseline_registry\nfrom habitat_baselines.common.environments import NavRLEnv\n\n\n@baseline_registry.register_env(name=\"RearrangementRLEnv\")\nclass RearrangementRLEnv(NavRLEnv):\n    def __init__(self, config: Config, dataset: Optional[Dataset] = None):\n        self._prev_measure = {\n            \"agent_to_object_distance\": 0.0,\n            \"object_to_goal_distance\": 0.0,\n            \"gripped_object_id\": -1,\n            \"gripped_object_count\": 0,\n        }\n\n        super().__init__(config, dataset)\n\n        self._success_distance = self._core_env_config.TASK.SUCCESS_DISTANCE\n\n    def reset(self):\n        self._previous_action = None\n        observations = super().reset()\n\n        self._prev_measure.update(self.habitat_env.get_metrics())\n        self._prev_measure[\"gripped_object_id\"] = -1\n        self._prev_measure[\"gripped_object_count\"] = 0\n\n        return observations\n\n    def step(self, *args, **kwargs):\n        self._previous_action = kwargs[\"action\"]\n        return super().step(*args, **kwargs)\n\n    def get_reward_range(self):\n        return (\n            self._rl_config.SLACK_REWARD - 1.0,\n            self._rl_config.SUCCESS_REWARD + 1.0,\n        )\n\n    def get_reward(self, observations):\n        reward = self._rl_config.SLACK_REWARD\n        gripped_success_reward = 0.0\n        episode_success_reward = 0.0\n        agent_to_object_dist_reward = 0.0\n        object_to_goal_dist_reward = 0.0\n\n        action_name = self._env.task.get_action_name(\n            self._previous_action[\"action\"]\n        )\n\n        # If object grabbed, add a success reward\n        # The reward gets awarded only once for an object.\n        if (\n            action_name == \"GRAB_RELEASE\"\n            and observations[\"gripped_object_id\"] >= 0\n        ):\n            obj_id = observations[\"gripped_object_id\"]\n            self._prev_measure[\"gripped_object_count\"] += 1\n\n            gripped_success_reward = (\n                self._rl_config.GRIPPED_SUCCESS_REWARD\n                if self._prev_measure[\"gripped_object_count\"] == 1\n                else 0.0\n            )\n        # add a penalty every time the grab/release action is called and doesn't do anything\n        elif action_name == \"GRAB_RELEASE\":\n            gripped_success_reward += -0.1\n\n        self._prev_measure[\"gripped_object_id\"] = observations[\n            \"gripped_object_id\"\n        ]\n\n        # If the action is not a grab/release action, and the agent\n        # has not picked up an object, then give reward based on agent to\n        # object distance.\n        if (\n            action_name != \"GRAB_RELEASE\"\n            and self._prev_measure[\"gripped_object_id\"] == -1\n        ):\n            agent_to_object_dist_reward = self.get_agent_to_object_dist_reward(\n                observations\n            )\n\n        # If the action is not a grab/release action, and the agent\n        # has picked up an object, then give reward based on object to\n        # goal distance.\n        if (\n            action_name != \"GRAB_RELEASE\"\n            and self._prev_measure[\"gripped_object_id\"] != -1\n        ):\n            object_to_goal_dist_reward = self.get_object_to_goal_dist_reward()\n\n        if (\n            self._episode_success(observations)\n            and self._prev_measure[\"gripped_object_id\"] == -1\n            and action_name == \"STOP\"\n        ):\n            episode_success_reward = self._rl_config.SUCCESS_REWARD\n\n        reward += (\n            agent_to_object_dist_reward\n            + object_to_goal_dist_reward\n            + 
gripped_success_reward\n            + episode_success_reward\n        )\n\n        return reward\n\n    def get_agent_to_object_dist_reward(self, observations):\n        \"\"\"\n        Encourage the agent to move towards the closest object which is not already in place.\n        \"\"\"\n        curr_metric = self._env.get_metrics()[\"agent_to_object_distance\"]\n        prev_metric = self._prev_measure[\"agent_to_object_distance\"]\n        dist_reward = prev_metric - curr_metric\n\n        self._prev_measure[\"agent_to_object_distance\"] = curr_metric\n\n        return dist_reward\n\n    def get_object_to_goal_dist_reward(self):\n        curr_metric = self._env.get_metrics()[\"object_to_goal_distance\"]\n        prev_metric = self._prev_measure[\"object_to_goal_distance\"]\n        dist_reward = prev_metric - curr_metric\n\n        self._prev_measure[\"object_to_goal_distance\"] = curr_metric\n\n        return dist_reward\n\n    def _episode_success(self, observations):\n        r\"\"\"Returns True if object is within distance threshold of the goal.\"\"\"\n        dist = self._env.get_metrics()[\"object_to_goal_distance\"]\n        if (\n            abs(dist) > self._success_distance\n            or observations[\"gripped_object_id\"] != -1\n        ):\n            return False\n        return True\n\n    def _gripped_success(self, observations):\n        if (\n            observations[\"gripped_object_id\"] >= 0\n            and observations[\"gripped_object_id\"]\n            != self._prev_measure[\"gripped_object_id\"]\n        ):\n            return True\n\n        return False\n\n    def get_done(self, observations):\n        done = False\n        action_name = self._env.task.get_action_name(\n            self._previous_action[\"action\"]\n        )\n        if self._env.episode_over or (\n            self._episode_success(observations)\n            and self._prev_measure[\"gripped_object_id\"] == -1\n            and action_name == \"STOP\"\n        ):\n            done = True\n        return done\n\n    def get_info(self, observations):\n        info = self.habitat_env.get_metrics()\n        info[\"episode_success\"] = self._episode_success(observations)\n        return info", "_____no_output_____" ], [ "import os\nimport random\nimport time\nfrom collections import defaultdict, deque\nfrom typing import Any, Dict, List, Optional\n\nimport numpy as np\nfrom torch.optim.lr_scheduler import LambdaLR\n\nfrom habitat import Config, logger\nfrom habitat.utils.visualizations.utils import observations_to_image\nfrom habitat_baselines.common.baseline_registry import baseline_registry\nfrom habitat_baselines.common.environments import get_env_class\nfrom habitat_baselines.common.rollout_storage import RolloutStorage\nfrom habitat_baselines.common.tensorboard_utils import TensorboardWriter\nfrom habitat_baselines.rl.models.rnn_state_encoder import RNNStateEncoder\nfrom habitat_baselines.rl.ppo import PPO\nfrom habitat_baselines.rl.ppo.policy import Net, Policy\nfrom habitat_baselines.rl.ppo.ppo_trainer import PPOTrainer\nfrom habitat_baselines.utils.common import (\n    batch_obs,\n    generate_video,\n    linear_decay,\n)\nfrom habitat_baselines.utils.env_utils import make_env_fn\n\n\ndef construct_envs(\n    config,\n    env_class,\n    workers_ignore_signals=False,\n):\n    r\"\"\"Create VectorEnv object with specified config and env class type.\n    To allow better performance, datasets are split into smaller ones for\n    each individual env, grouped by scenes.\n\n    :param config: configs that contain num_processes as well as information\n        necessary to create individual environments.\n    :param env_class: class type of the envs to be created.\n    :param workers_ignore_signals: Passed to :ref:\`habitat.VectorEnv\`'s constructor\n\n    :return: VectorEnv object created according to specification.\n    \"\"\"\n\n    num_processes = config.NUM_PROCESSES\n    configs = []\n    env_classes = [env_class for _ in 
range(num_processes)]\n    dataset = habitat.datasets.make_dataset(config.TASK_CONFIG.DATASET.TYPE)\n    scenes = config.TASK_CONFIG.DATASET.CONTENT_SCENES\n    if \"*\" in config.TASK_CONFIG.DATASET.CONTENT_SCENES:\n        scenes = dataset.get_scenes_to_load(config.TASK_CONFIG.DATASET)\n\n    if num_processes > 1:\n        if len(scenes) == 0:\n            raise RuntimeError(\n                \"No scenes to load, multiple process logic relies on being able to split scenes uniquely between processes\"\n            )\n\n        if len(scenes) < num_processes:\n            scenes = scenes * num_processes\n\n        random.shuffle(scenes)\n\n    scene_splits = [[] for _ in range(num_processes)]\n    for idx, scene in enumerate(scenes):\n        scene_splits[idx % len(scene_splits)].append(scene)\n\n    assert sum(map(len, scene_splits)) == len(scenes)\n\n    for i in range(num_processes):\n        proc_config = config.clone()\n        proc_config.defrost()\n\n        task_config = proc_config.TASK_CONFIG\n        task_config.SEED = task_config.SEED + i\n        if len(scenes) > 0:\n            task_config.DATASET.CONTENT_SCENES = scene_splits[i]\n\n        task_config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = (\n            config.SIMULATOR_GPU_ID\n        )\n\n        task_config.SIMULATOR.AGENT_0.SENSORS = config.SENSORS\n\n        proc_config.freeze()\n        configs.append(proc_config)\n\n    envs = habitat.ThreadedVectorEnv(\n        make_env_fn=make_env_fn,\n        env_fn_args=tuple(zip(configs, env_classes)),\n        workers_ignore_signals=workers_ignore_signals,\n    )\n    return envs\n\n\nclass RearrangementBaselinePolicy(Policy):\n    def __init__(self, observation_space, action_space, hidden_size=512):\n        super().__init__(\n            RearrangementBaselineNet(\n                observation_space=observation_space, hidden_size=hidden_size\n            ),\n            action_space.n,\n        )\n\n    @classmethod\n    def from_config(cls, config, envs):\n        pass\n\n\nclass RearrangementBaselineNet(Net):\n    r\"\"\"Network which concatenates the object goal and object position\n    encodings and passes the result through an RNN state encoder.\n    \"\"\"\n\n    def __init__(self, observation_space, hidden_size):\n        super().__init__()\n\n        self._n_input_goal = observation_space.spaces[\n            ObjectGoal.cls_uuid\n        ].shape[0]\n\n        self._hidden_size = hidden_size\n\n        self.state_encoder = RNNStateEncoder(\n            2 * self._n_input_goal,\n            self._hidden_size,\n        )\n\n        self.train()\n\n    @property\n    def output_size(self):\n        return self._hidden_size\n\n    @property\n    def is_blind(self):\n        return False\n\n    @property\n    def num_recurrent_layers(self):\n        return self.state_encoder.num_recurrent_layers\n\n    def forward(self, observations, rnn_hidden_states, prev_actions, masks):\n        object_goal_encoding = observations[ObjectGoal.cls_uuid]\n        object_pos_encoding = observations[ObjectPosition.cls_uuid]\n\n        x = [object_goal_encoding, object_pos_encoding]\n\n        x = torch.cat(x, dim=1)\n        x, rnn_hidden_states = self.state_encoder(x, rnn_hidden_states, masks)\n\n        return x, rnn_hidden_states\n\n\n@baseline_registry.register_trainer(name=\"ppo-rearrangement\")\nclass RearrangementTrainer(PPOTrainer):\n    supported_tasks = [\"RearrangementTask-v0\"]\n\n    def _setup_actor_critic_agent(self, ppo_cfg: Config) -> None:\n        r\"\"\"Sets up actor critic and agent for PPO.\n\n        Args:\n            ppo_cfg: config node with relevant params\n\n        Returns:\n            None\n        \"\"\"\n        logger.add_filehandler(self.config.LOG_FILE)\n\n        self.actor_critic = RearrangementBaselinePolicy(\n            observation_space=self.envs.observation_spaces[0],\n            action_space=self.envs.action_spaces[0],\n            hidden_size=ppo_cfg.hidden_size,\n        )\n        self.actor_critic.to(self.device)\n\n        self.agent = PPO(\n            actor_critic=self.actor_critic,\n            clip_param=ppo_cfg.clip_param,\n            ppo_epoch=ppo_cfg.ppo_epoch,\n            
num_mini_batch=ppo_cfg.num_mini_batch,\n            value_loss_coef=ppo_cfg.value_loss_coef,\n            entropy_coef=ppo_cfg.entropy_coef,\n            lr=ppo_cfg.lr,\n            eps=ppo_cfg.eps,\n            max_grad_norm=ppo_cfg.max_grad_norm,\n            use_normalized_advantage=ppo_cfg.use_normalized_advantage,\n        )\n\n    def train(self) -> None:\n        r\"\"\"Main method for training PPO.\n\n        Returns:\n            None\n        \"\"\"\n\n        self.envs = construct_envs(\n            self.config, get_env_class(self.config.ENV_NAME)\n        )\n\n        ppo_cfg = self.config.RL.PPO\n        self.device = (\n            torch.device(\"cuda\", self.config.TORCH_GPU_ID)\n            if torch.cuda.is_available()\n            else torch.device(\"cpu\")\n        )\n        if not os.path.isdir(self.config.CHECKPOINT_FOLDER):\n            os.makedirs(self.config.CHECKPOINT_FOLDER)\n        self._setup_actor_critic_agent(ppo_cfg)\n        logger.info(\n            \"agent number of parameters: {}\".format(\n                sum(param.numel() for param in self.agent.parameters())\n            )\n        )\n\n        rollouts = RolloutStorage(\n            ppo_cfg.num_steps,\n            self.envs.num_envs,\n            self.envs.observation_spaces[0],\n            self.envs.action_spaces[0],\n            ppo_cfg.hidden_size,\n        )\n        rollouts.to(self.device)\n\n        observations = self.envs.reset()\n        batch = batch_obs(observations, device=self.device)\n\n        for sensor in rollouts.observations:\n            rollouts.observations[sensor][0].copy_(batch[sensor])\n\n        # batch and observations may contain shared PyTorch CUDA\n        # tensors.  We must explicitly clear them here otherwise\n        # they will be kept in memory for the entire duration of training!\n        batch = None\n        observations = None\n\n        current_episode_reward = torch.zeros(self.envs.num_envs, 1)\n        running_episode_stats = dict(\n            count=torch.zeros(self.envs.num_envs, 1),\n            reward=torch.zeros(self.envs.num_envs, 1),\n        )\n        window_episode_stats = defaultdict(\n            lambda: deque(maxlen=ppo_cfg.reward_window_size)\n        )\n\n        t_start = time.time()\n        env_time = 0\n        pth_time = 0\n        count_steps = 0\n        count_checkpoints = 0\n\n        lr_scheduler = LambdaLR(\n            optimizer=self.agent.optimizer,\n            lr_lambda=lambda x: linear_decay(x, self.config.NUM_UPDATES),\n        )\n\n        with TensorboardWriter(\n            self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs\n        ) as writer:\n            for update in range(self.config.NUM_UPDATES):\n                if ppo_cfg.use_linear_lr_decay:\n                    lr_scheduler.step()\n\n                if ppo_cfg.use_linear_clip_decay:\n                    self.agent.clip_param = ppo_cfg.clip_param * linear_decay(\n                        update, self.config.NUM_UPDATES\n                    )\n\n                for _step in range(ppo_cfg.num_steps):\n                    (\n                        delta_pth_time,\n                        delta_env_time,\n                        delta_steps,\n                    ) = self._collect_rollout_step(\n                        rollouts, current_episode_reward, running_episode_stats\n                    )\n                    pth_time += delta_pth_time\n                    env_time += delta_env_time\n                    count_steps += delta_steps\n\n                (\n                    delta_pth_time,\n                    value_loss,\n                    action_loss,\n                    dist_entropy,\n                ) = self._update_agent(ppo_cfg, rollouts)\n                pth_time += delta_pth_time\n\n                for k, v in running_episode_stats.items():\n                    window_episode_stats[k].append(v.clone())\n\n                deltas = {\n                    k: (\n                        (v[-1] - v[0]).sum().item()\n                        if len(v) > 1\n                        else v[0].sum().item()\n                    )\n                    for k, v in window_episode_stats.items()\n                }\n                deltas[\"count\"] = max(deltas[\"count\"], 1.0)\n\n                writer.add_scalar(\n                    \"reward\", deltas[\"reward\"] / deltas[\"count\"], count_steps\n                )\n\n                # Check to see if there are any metrics\n                # that haven't been logged yet\n\n                for k, v in deltas.items():\n                    if k not in {\"reward\", \"count\"}:\n                        writer.add_scalar(\n                            \"metric/\" + k, v / deltas[\"count\"], count_steps\n                        )\n\n                losses = [value_loss, action_loss]\n                for l, k in zip(losses, [\"value\", \"policy\"]):\n                    writer.add_scalar(\"losses/\" + k, l, count_steps)\n\n                # log stats\n                if update > 0 and update % 
self.config.LOG_INTERVAL == 0:\n logger.info(\n \"update: {}\\tfps: {:.3f}\\t\".format(\n update, count_steps / (time.time() - t_start)\n )\n )\n\n logger.info(\n \"update: {}\\tenv-time: {:.3f}s\\tpth-time: {:.3f}s\\t\"\n \"frames: {}\".format(\n update, env_time, pth_time, count_steps\n )\n )\n\n logger.info(\n \"Average window size: {} {}\".format(\n len(window_episode_stats[\"count\"]),\n \" \".join(\n \"{}: {:.3f}\".format(k, v / deltas[\"count\"])\n for k, v in deltas.items()\n if k != \"count\"\n ),\n )\n )\n\n # checkpoint model\n if update % self.config.CHECKPOINT_INTERVAL == 0:\n self.save_checkpoint(\n f\"ckpt.{count_checkpoints}.pth\", dict(step=count_steps)\n )\n count_checkpoints += 1\n\n self.envs.close()\n\n def eval(self) -> None:\n r\"\"\"Evaluates the current model\n Returns:\n None\n \"\"\"\n\n config = self.config.clone()\n\n if len(self.config.VIDEO_OPTION) > 0:\n config.defrost()\n config.NUM_PROCESSES = 1\n config.freeze()\n\n logger.info(f\"env config: {config}\")\n with construct_envs(config, get_env_class(config.ENV_NAME)) as envs:\n observations = envs.reset()\n batch = batch_obs(observations, device=self.device)\n\n current_episode_reward = torch.zeros(\n envs.num_envs, 1, device=self.device\n )\n ppo_cfg = self.config.RL.PPO\n test_recurrent_hidden_states = torch.zeros(\n self.actor_critic.net.num_recurrent_layers,\n config.NUM_PROCESSES,\n ppo_cfg.hidden_size,\n device=self.device,\n )\n prev_actions = torch.zeros(\n config.NUM_PROCESSES, 1, device=self.device, dtype=torch.long\n )\n not_done_masks = torch.zeros(\n config.NUM_PROCESSES, 1, device=self.device\n )\n\n rgb_frames = [\n [] for _ in range(self.config.NUM_PROCESSES)\n ] # type: List[List[np.ndarray]]\n\n if len(config.VIDEO_OPTION) > 0:\n os.makedirs(config.VIDEO_DIR, exist_ok=True)\n\n self.actor_critic.eval()\n\n for _i in range(config.TASK_CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS):\n current_episodes = envs.current_episodes()\n\n with torch.no_grad():\n (\n _,\n actions,\n _,\n test_recurrent_hidden_states,\n ) = self.actor_critic.act(\n batch,\n test_recurrent_hidden_states,\n prev_actions,\n not_done_masks,\n deterministic=False,\n )\n\n prev_actions.copy_(actions)\n\n outputs = envs.step([a[0].item() for a in actions])\n\n observations, rewards, dones, infos = [\n list(x) for x in zip(*outputs)\n ]\n batch = batch_obs(observations, device=self.device)\n\n not_done_masks = torch.tensor(\n [[0.0] if done else [1.0] for done in dones],\n dtype=torch.float,\n device=self.device,\n )\n\n rewards = torch.tensor(\n rewards, dtype=torch.float, device=self.device\n ).unsqueeze(1)\n\n current_episode_reward += rewards\n\n # episode ended\n if not_done_masks[0].item() == 0:\n generate_video(\n video_option=self.config.VIDEO_OPTION,\n video_dir=self.config.VIDEO_DIR,\n images=rgb_frames[0],\n episode_id=current_episodes[0].episode_id,\n checkpoint_idx=0,\n metrics=self._extract_scalars_from_info(infos[0]),\n tb_writer=None,\n )\n\n print(\"Evaluation Finished.\")\n print(\"Success: {}\".format(infos[0][\"episode_success\"]))\n print(\n \"Reward: {}\".format(current_episode_reward[0].item())\n )\n print(\n \"Distance To Goal: {}\".format(\n infos[0][\"object_to_goal_distance\"]\n )\n )\n\n return\n\n # episode continues\n elif len(self.config.VIDEO_OPTION) > 0:\n frame = observations_to_image(observations[0], infos[0])\n rgb_frames[0].append(frame)", "_____no_output_____" ], [ "%load_ext tensorboard\n%tensorboard --logdir data/tb", "_____no_output_____" ], [ "# @title Train an RL agent on a single 
episode\n!if [ -d \"data/tb\" ]; then rm -r data/tb; fi\n\nimport random\n\nimport numpy as np\nimport torch\n\nimport habitat\nfrom habitat import Config\nfrom habitat_baselines.config.default import get_config as get_baseline_config\n\nbaseline_config = get_baseline_config(\n \"habitat_baselines/config/pointnav/ppo_pointnav.yaml\"\n)\nbaseline_config.defrost()\n\nbaseline_config.TASK_CONFIG = config\nbaseline_config.TRAINER_NAME = \"ddppo\"\nbaseline_config.ENV_NAME = \"RearrangementRLEnv\"\nbaseline_config.SIMULATOR_GPU_ID = 0\nbaseline_config.TORCH_GPU_ID = 0\nbaseline_config.VIDEO_OPTION = [\"disk\"]\nbaseline_config.TENSORBOARD_DIR = \"data/tb\"\nbaseline_config.VIDEO_DIR = \"data/videos\"\nbaseline_config.NUM_PROCESSES = 2\nbaseline_config.SENSORS = [\"RGB_SENSOR\", \"DEPTH_SENSOR\"]\nbaseline_config.CHECKPOINT_FOLDER = \"data/checkpoints\"\n\nif vut.is_notebook():\n baseline_config.NUM_UPDATES = 400 # @param {type:\"number\"}\nelse:\n baseline_config.NUM_UPDATES = 1\n\nbaseline_config.LOG_INTERVAL = 10\nbaseline_config.CHECKPOINT_INTERVAL = 50\nbaseline_config.LOG_FILE = \"data/checkpoints/train.log\"\nbaseline_config.EVAL.SPLIT = \"train\"\nbaseline_config.RL.SUCCESS_REWARD = 2.5 # @param {type:\"number\"}\nbaseline_config.RL.SUCCESS_MEASURE = \"object_to_goal_distance\"\nbaseline_config.RL.REWARD_MEASURE = \"object_to_goal_distance\"\nbaseline_config.RL.GRIPPED_SUCCESS_REWARD = 2.5 # @param {type:\"number\"}\n\nbaseline_config.freeze()\nrandom.seed(baseline_config.TASK_CONFIG.SEED)\nnp.random.seed(baseline_config.TASK_CONFIG.SEED)\ntorch.manual_seed(baseline_config.TASK_CONFIG.SEED)\n\nif __name__ == \"__main__\":\n trainer = RearrangementTrainer(baseline_config)\n trainer.train()\n trainer.eval()\n\n if make_video:\n video_file = os.listdir(\"data/videos\")[0]\n vut.display_video(os.path.join(\"data/videos\", video_file))", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
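The heart of the `RearrangementRLEnv` reward in the notebook above is distance-delta shaping: each step is scored by how much a tracked distance (agent-to-object, then object-to-goal) shrank since the previous step, on top of a constant slack penalty. A minimal, framework-free sketch of that idea in plain Python (the function name and values here are illustrative stand-ins, not part of the Habitat API):

    def shaped_step_reward(prev_dist, curr_dist, slack=-0.01):
        # Positive when the tracked distance shrinks, negative when it grows,
        # plus a small constant per-step penalty that discourages dawdling.
        return slack + (prev_dist - curr_dist)

    assert shaped_step_reward(2.0, 1.5) > 0  # moved closer: rewarded
    assert shaped_step_reward(1.5, 2.0) < 0  # moved away: penalized

Summed over an episode, the delta terms telescope to (initial distance - final distance), so this shaping steers exploration without changing which end states score best.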
cb997a61ae9fdc9512e5f5f7ed6e9ff929559860
47,646
ipynb
Jupyter Notebook
Projects/Training Model/TFIDF/SIC Project D2/Pre - Processing.ipynb
rafay99-epic/Samsunng-Innovation-Campus-Notes
ab90913bc8552ae87ab24ffa3c30a09a9987be28
[ "MIT" ]
null
null
null
Projects/Training Model/TFIDF/SIC Project D2/Pre - Processing.ipynb
rafay99-epic/Samsunng-Innovation-Campus-Notes
ab90913bc8552ae87ab24ffa3c30a09a9987be28
[ "MIT" ]
null
null
null
Projects/Training Model/TFIDF/SIC Project D2/Pre - Processing.ipynb
rafay99-epic/Samsunng-Innovation-Campus-Notes
ab90913bc8552ae87ab24ffa3c30a09a9987be28
[ "MIT" ]
null
null
null
131.256198
15,020
0.759896
[ [ [ "import pandas as pd\nimport numpy as np\nimport nltk\nfrom nltk.stem.porter import *\nimport re\nfrom wordsegment import load, segment, clean\nload() #loading segment\nimport matplotlib.pyplot as plt\nfrom keras.preprocessing.sequence import pad_sequences\n%matplotlib inline", "_____no_output_____" ], [ "df = pd.read_csv('D2.csv', header='infer')", "_____no_output_____" ], [ "del df['id']", "_____no_output_____" ], [ "df.shape", "_____no_output_____" ], [ "print('number of rows and columns i.e. dimension of the dataset: ',df.shape)\nprint('\\ncolumn names of the dataset: ',df.columns)", "number of rows and columns i.e. dimension of the dataset:  (31962, 2)\n\ncolumn names of the dataset:  Index(['label', 'tweet'], dtype='object')\n" ], [ "ClassLabel = df['label']\ntweets = df.tweet", "_____no_output_____" ], [ "fig, ax = plt.subplots(figsize = (6, 6))\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.spines['left'].set_visible(False)\nplt.hist(ClassLabel, bins=5, color='#33FF83', density=False, edgecolor='black')\nplt.title('HateSpeech Classification Initial Histogram')\nplt.xlabel('Classes \\n0 - HateSpeech, 1 - Offensive Language')\nplt.ylabel('Number of Tweets')\nplt.show()", "_____no_output_____" ], [ "print(tweets)", "0         @user when a father is dysfunctional and is s...\n1        @user @user thanks for #lyft credit i can't us...\n2                                      bihday your majesty\n3        #model   i love u take with u all the time in ...\n4                   factsguide: society now    #motivation\n                               ...                        \n31957    ate @user isz that youuu?😍😍😍...\n31958      to see nina turner on the airwaves trying to...\n31959    listening to sad songs on a monday morning otw...\n31960    @user #sikh #temple vandalised in in #calgary,...\n31961                       thank you @user for you follow  \nName: tweet, Length: 31962, dtype: object\n" ], [ "tweets[20000]", "_____no_output_____" ], [ "corpus = []\nchar_regExp = '[,\\?:\\|]'\nre.compile(emoji_rejix_1)\nfor i in range(0,len(tweets)):\n    Modified_Tweets = re.sub(emoji_rejix_1,'',tweets[i]).split()\n    Modified_Tweets = \"\".join(Modified_Tweets)\n    Modified_Tweets = Modified_Tweets.lower().split() # 3.1.2 - converting to lower characters and removing spaces from left and right\n    Modified_Tweets = \" \".join(Modified_Tweets) \n    Modified_Tweets = segment(Modified_Tweets)\n    Modified_Tweets = \" \".join(Modified_Tweets) \n    corpus.append(Modified_Tweets)\n", "_____no_output_____" ], [ "corpus[10]", "_____no_output_____" ], [ "a = tweets[10]", "_____no_output_____" ], [ "a", "_____no_output_____" ], [ "emoji_rejix_1 = '^[a-zA-Z0-9]'\nemoji_rejix_1 = re.compile(emoji_rejix_1)", "_____no_output_____" ], [ "re.sub(emoji_rejix_1,'',a)", "_____no_output_____" ], [ "df1['ClassLabel'] = ClassLabel ", "_____no_output_____" ], [ "df1.columns = ['tweets','ClassLabel']", "_____no_output_____" ], [ "df1.to_csv(r'C:\\Users\\bse183010\\SIC Project D2\\Updated_Dataset_Friday.csv', index = False, header=True)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
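Two quirks in the preprocessing cells of the notebook above are worth flagging: `emoji_rejix_1` is used in an earlier cell than the one that defines it (this only runs because Jupyter cells share one namespace and the defining cell was executed first), and the pattern `'^[a-zA-Z0-9]'` strips a single leading alphanumeric character rather than the symbols and emoji its name suggests. A self-contained sketch of what the pipeline appears to intend, assuming the goal is to drop every non-alphanumeric character and let `wordsegment` restore word boundaries (the names below are illustrative):

    import re
    from wordsegment import load, segment

    load()  # load wordsegment's corpus data once
    non_alnum = re.compile(r'[^a-z0-9]')  # assumption: keep only letters and digits

    def clean_tweet(tweet):
        glued = non_alnum.sub('', tweet.lower())  # drop @user, '#', emoji; words get glued together
        return ' '.join(segment(glued))           # wordsegment re-splits the glued string

    # e.g. clean_tweet('#model i love u') should come back as something like 'model i love u'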
cb997d3b300bd9b4a8bf9442d679e9104ff66337
242,801
ipynb
Jupyter Notebook
p4_language-translation/dlnd_language_translation_201707.ipynb
georgeliu1998/dlnd
e0000d4c70bf7905d4f63a31794613df644b23e8
[ "MIT" ]
1
2018-07-06T05:57:40.000Z
2018-07-06T05:57:40.000Z
p4_language-translation/dlnd_language_translation_201707.ipynb
georgeliu1998/dlnd
e0000d4c70bf7905d4f63a31794613df644b23e8
[ "MIT" ]
null
null
null
p4_language-translation/dlnd_language_translation_201707.ipynb
georgeliu1998/dlnd
e0000d4c70bf7905d4f63a31794613df644b23e8
[ "MIT" ]
null
null
null
78.601813
1,341
0.638692
[ [ [ "# Language Translation\nIn this project, you’re going to take a peek into the realm of neural network machine translation. You’ll be training a sequence to sequence model on a dataset of English and French sentences that can translate new sentences from English to French.\n## Get the Data\nSince translating the whole language of English to French will take lots of time to train, we have provided you with a small portion of the English corpus.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport helper\nimport problem_unittests as tests\n\nsource_path = 'data/small_vocab_en'\ntarget_path = 'data/small_vocab_fr'\nsource_text = helper.load_data(source_path)\ntarget_text = helper.load_data(target_path)", "_____no_output_____" ] ], [ [ "## Explore the Data\nPlay around with view_sentence_range to view different parts of the data.", "_____no_output_____" ] ], [ [ "view_sentence_range = (0, 10)\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport numpy as np\n\nprint('Dataset Stats')\nprint('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))\n\nsentences = source_text.split('\\n')\nword_counts = [len(sentence.split()) for sentence in sentences]\nprint('Number of sentences: {}'.format(len(sentences)))\nprint('Average number of words in a sentence: {}'.format(np.average(word_counts)))\n\nprint()\nprint('English sentences {} to {}:'.format(*view_sentence_range))\nprint('\\n'.join(source_text.split('\\n')[view_sentence_range[0]:view_sentence_range[1]]))\nprint()\nprint('French sentences {} to {}:'.format(*view_sentence_range))\nprint('\\n'.join(target_text.split('\\n')[view_sentence_range[0]:view_sentence_range[1]]))", "Dataset Stats\nRoughly the number of unique words: 227\nNumber of sentences: 137861\nAverage number of words in a sentence: 13.225277634719028\n\nEnglish sentences 0 to 10:\nnew jersey is sometimes quiet during autumn , and it is snowy in april .\nthe united states is usually chilly during july , and it is usually freezing in november .\ncalifornia is usually quiet during march , and it is usually hot in june .\nthe united states is sometimes mild during june , and it is cold in september .\nyour least liked fruit is the grape , but my least liked is the apple .\nhis favorite fruit is the orange , but my favorite is the grape .\nparis is relaxing during december , but it is usually chilly in july .\nnew jersey is busy during spring , and it is never hot in march .\nour least liked fruit is the lemon , but my least liked is the grape .\nthe united states is sometimes busy during january , and it is sometimes warm in november .\n\nFrench sentences 0 to 10:\nnew jersey est parfois calme pendant l' automne , et il est neigeux en avril .\nles états-unis est généralement froid en juillet , et il gèle habituellement en novembre .\ncalifornia est généralement calme en mars , et il est généralement chaud en juin .\nles états-unis est parfois légère en juin , et il fait froid en septembre .\nvotre moins aimé fruit est le raisin , mais mon moins aimé est la pomme .\nson fruit préféré est l'orange , mais mon préféré est le raisin .\nparis est relaxant en décembre , mais il est généralement froid en juillet .\nnew jersey est occupé au printemps , et il est jamais chaude en mars .\nnotre fruit est moins aimé le citron , mais mon moins aimé est le raisin .\nles états-unis est parfois occupé en janvier , et il est parfois chaud en novembre .\n" ] ], [ [ "## Implement Preprocessing Function\n### 
Text to Word Ids\nAs you did with other RNNs, you must turn the text into a number so the computer can understand it. In the function `text_to_ids()`, you'll turn `source_text` and `target_text` from words to ids. However, you need to add the `<EOS>` word id at the end of `target_text`. This will help the neural network predict when the sentence should end.\n\nYou can get the `<EOS>` word id by doing:\n```python\ntarget_vocab_to_int['<EOS>']\n```\nYou can get other word ids using `source_vocab_to_int` and `target_vocab_to_int`.", "_____no_output_____" ] ], [ [ "def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):\n \"\"\"\n Convert source and target text to proper word ids\n :param source_text: String that contains all the source text.\n :param target_text: String that contains all the target text.\n :param source_vocab_to_int: Dictionary to go from the source words to an id\n :param target_vocab_to_int: Dictionary to go from the target words to an id\n :return: A tuple of lists (source_id_text, target_id_text)\n \"\"\"\n # Split text into sentences\n source_text_lst = source_text.split('\\n')\n target_text_lst = target_text.split('\\n')\n # Append <EOS> at the end of each sententence\n target_text_lst = [sentence + ' <EOS>' for sentence in target_text_lst]\n # Make lists using vocab to int mapping\n source_id_text = [[source_vocab_to_int[word] for word in sentence.split()] for sentence in source_text_lst]\n target_id_text = [[target_vocab_to_int[word] for word in sentence.split()] for sentence in target_text_lst]\n \n return source_id_text, target_id_text\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_text_to_ids(text_to_ids)", "Tests Passed\n" ] ], [ [ "### Preprocess all the data and save it\nRunning the code cell below will preprocess all the data and save it to file.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nhelper.preprocess_and_save_data(source_path, target_path, text_to_ids)", "_____no_output_____" ] ], [ [ "# Check Point\nThis is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport numpy as np\nimport helper\n\n(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()", "_____no_output_____" ] ], [ [ "### Check the Version of TensorFlow and Access to GPU\nThis will check to make sure you have the correct version of TensorFlow and access to a GPU", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nfrom distutils.version import LooseVersion\nimport warnings\nimport tensorflow as tf\nfrom tensorflow.python.layers.core import Dense\n\n# Check TensorFlow Version\nassert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer'\nprint('TensorFlow Version: {}'.format(tf.__version__))\n\n# Check for a GPU\nif not tf.test.gpu_device_name():\n warnings.warn('No GPU found. 
Please use a GPU to train your neural network.')\nelse:\n    print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))", "TensorFlow Version: 1.1.0\n" ] ], [ [ "## Build the Neural Network\nYou'll build the components necessary to build a Sequence-to-Sequence model by implementing the following functions below:\n- `model_inputs`\n- `process_decoder_input`\n- `encoding_layer`\n- `decoding_layer_train`\n- `decoding_layer_infer`\n- `decoding_layer`\n- `seq2seq_model`\n\n### Input\nImplement the `model_inputs()` function to create TF Placeholders for the Neural Network. It should create the following placeholders:\n\n- Input text placeholder named \"input\" using the TF Placeholder name parameter with rank 2.\n- Targets placeholder with rank 2.\n- Learning rate placeholder with rank 0.\n- Keep probability placeholder named \"keep_prob\" using the TF Placeholder name parameter with rank 0.\n- Target sequence length placeholder named \"target_sequence_length\" with rank 1\n- Max target sequence length tensor named \"max_target_len\" getting its value from applying tf.reduce_max on the target_sequence_length placeholder. Rank 0.\n- Source sequence length placeholder named \"source_sequence_length\" with rank 1\n\nReturn the placeholders in the following tuple (input, targets, learning rate, keep probability, target sequence length, max target sequence length, source sequence length)", "_____no_output_____" ] ], [ [ "def model_inputs():\n    \"\"\"\n    Create TF Placeholders for input, targets, learning rate, and lengths of source and target sequences.\n    :return: Tuple (input, targets, learning rate, keep probability, target sequence length,\n    max target sequence length, source sequence length)\n    \"\"\"\n    inputs = tf.placeholder(tf.int32, shape=(None, None), name='input')\n    targets = tf.placeholder(tf.int32, shape=(None, None))\n    learn_rate = tf.placeholder(tf.float32, shape=None)\n    keep_prob = tf.placeholder(tf.float32, shape=None, name='keep_prob')\n    target_sequence_length = tf.placeholder(tf.int32, (None,), name='target_sequence_length')\n    max_target_sequence_length = tf.reduce_max(target_sequence_length, name='max_target_len')\n    source_sequence_length = tf.placeholder(tf.int32, (None,), name='source_sequence_length')\n    \n    return inputs, targets, learn_rate, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_model_inputs(model_inputs)", "Tests Passed\n" ] ], [ [ "### Process Decoder Input\nImplement `process_decoder_input` by removing the last word id from each batch in `target_data` and concatenating the GO ID to the beginning of each batch.", "_____no_output_____" ] ], [ [ "def process_decoder_input(target_data, target_vocab_to_int, batch_size):\n    \"\"\"\n    Preprocess target data for encoding\n    :param target_data: Target Placeholder\n    :param target_vocab_to_int: Dictionary to go from the target words to an id\n    :param batch_size: Batch Size\n    :return: Preprocessed target data\n    \"\"\"\n    ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])\n    dec_input = tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), ending], 1)\n\n    return dec_input\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_process_encoding_input(process_decoder_input)", "Tests Passed\n" ] ], [ [ "### Encoding\nImplement `encoding_layer()` to create an Encoder RNN layer:\n * Embed the encoder input using 
[`tf.contrib.layers.embed_sequence`](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/embed_sequence)\n * Construct a [stacked](https://github.com/tensorflow/tensorflow/blob/6947f65a374ebf29e74bb71e36fd82760056d82c/tensorflow/docs_src/tutorials/recurrent.md#stacking-multiple-lstms) [`tf.contrib.rnn.LSTMCell`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/LSTMCell) wrapped in a [`tf.contrib.rnn.DropoutWrapper`](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/DropoutWrapper)\n * Pass cell and embedded input to [`tf.nn.dynamic_rnn()`](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn)", "_____no_output_____" ] ], [ [ "from imp import reload\nreload(tests)\n\ndef encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob, \n source_sequence_length, source_vocab_size, \n encoding_embedding_size):\n \"\"\"\n Create encoding layer\n :param rnn_inputs: Inputs for the RNN\n :param rnn_size: RNN Size\n :param num_layers: Number of layers\n :param keep_prob: Dropout keep probability\n :param source_sequence_length: a list of the lengths of each sequence in the batch\n :param source_vocab_size: vocabulary size of source data\n :param encoding_embedding_size: embedding size of source data\n :return: tuple (RNN output, RNN state)\n \"\"\"\n # Encoder embedding\n enc_embed_input = tf.contrib.layers.embed_sequence(rnn_inputs, source_vocab_size, encoding_embedding_size)\n\n # RNN cell\n def make_cell(rnn_size):\n enc_cell = tf.contrib.rnn.LSTMCell(rnn_size,\n initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))\n return enc_cell\n\n enc_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)])\n \n drop = tf.contrib.rnn.DropoutWrapper(enc_cell, output_keep_prob=keep_prob)\n \n enc_output, enc_state = tf.nn.dynamic_rnn(drop, enc_embed_input, sequence_length=source_sequence_length, dtype=tf.float32)\n \n return enc_output, enc_state\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_encoding_layer(encoding_layer)", "Tests Passed\n" ] ], [ [ "### Decoding - Training\nCreate a training decoding layer:\n* Create a [`tf.contrib.seq2seq.TrainingHelper`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/TrainingHelper) \n* Create a [`tf.contrib.seq2seq.BasicDecoder`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/BasicDecoder)\n* Obtain the decoder outputs from [`tf.contrib.seq2seq.dynamic_decode`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/dynamic_decode)", "_____no_output_____" ] ], [ [ "def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, \n target_sequence_length, max_summary_length, \n output_layer, keep_prob):\n \"\"\"\n Create a decoding layer for training\n :param encoder_state: Encoder State\n :param dec_cell: Decoder RNN Cell\n :param dec_embed_input: Decoder embedded input\n :param target_sequence_length: The lengths of each sequence in the target batch\n :param max_summary_length: The length of the longest sequence in the batch\n :param output_layer: Function to apply the output layer\n :param keep_prob: Dropout keep probability\n :return: BasicDecoderOutput containing training logits and sample_id\n \"\"\"\n \n # Helper for the training process. 
Used by BasicDecoder to read inputs.\n    training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=dec_embed_input,\n                                                        sequence_length=target_sequence_length,\n                                                        time_major=False)\n    \n    \n    # Basic decoder\n    training_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,\n                                                       training_helper,\n                                                       encoder_state,\n                                                       output_layer) \n    \n    # Perform dynamic decoding using the decoder\n    training_decoder_output, _ = tf.contrib.seq2seq.dynamic_decode(training_decoder,\n                                                                   impute_finished=True,\n                                                                   maximum_iterations=max_summary_length) \n    \n    return training_decoder_output\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_decoding_layer_train(decoding_layer_train)", "Tests Passed\n" ] ], [ [ "### Decoding - Inference\nCreate inference decoder:\n* Create a [`tf.contrib.seq2seq.GreedyEmbeddingHelper`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/GreedyEmbeddingHelper)\n* Create a [`tf.contrib.seq2seq.BasicDecoder`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/BasicDecoder)\n* Obtain the decoder outputs from [`tf.contrib.seq2seq.dynamic_decode`](https://www.tensorflow.org/api_docs/python/tf/contrib/seq2seq/dynamic_decode)", "_____no_output_____" ] ], [ [ "def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,\n                         end_of_sequence_id, max_target_sequence_length,\n                         vocab_size, output_layer, batch_size, keep_prob):\n    \"\"\"\n    Create a decoding layer for inference\n    :param encoder_state: Encoder state\n    :param dec_cell: Decoder RNN Cell\n    :param dec_embeddings: Decoder embeddings\n    :param start_of_sequence_id: GO ID\n    :param end_of_sequence_id: EOS Id\n    :param max_target_sequence_length: Maximum length of target sequences\n    :param vocab_size: Size of decoder/target vocabulary\n    :param output_layer: Function to apply the output layer\n    :param batch_size: Batch size\n    :param keep_prob: Dropout keep probability\n    :return: BasicDecoderOutput containing inference logits and sample_id\n    \"\"\"\n    # Reuses the same parameters trained by the training process\n    \n    start_tokens = tf.tile(tf.constant([start_of_sequence_id], dtype=tf.int32), [batch_size], name='start_tokens')\n\n    # Helper for the inference process.\n    inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(dec_embeddings,\n                                                                start_tokens,\n                                                                end_of_sequence_id)\n\n    # Basic decoder\n    inference_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,\n                                                        inference_helper,\n                                                        encoder_state,\n                                                        output_layer)\n    \n    # Perform dynamic decoding using the decoder\n    inference_decoder_output, _ = tf.contrib.seq2seq.dynamic_decode(inference_decoder,\n                                                                    impute_finished=True,\n                                                                    maximum_iterations=max_target_sequence_length)\n    \n    return inference_decoder_output\n\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_decoding_layer_infer(decoding_layer_infer)", "Tests Passed\n" ] ], [ [ "### Build the Decoding Layer\nImplement `decoding_layer()` to create a Decoder RNN layer.\n\n* Embed the target sequences\n* Construct the decoder LSTM cell (just like you constructed the encoder cell above)\n* Create an output layer to map the outputs of the decoder to the elements of our vocabulary\n* Use your `decoding_layer_train(encoder_state, dec_cell, dec_embed_input, target_sequence_length, max_target_sequence_length, output_layer, keep_prob)` function to get the training logits.\n* Use your `decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, 
max_target_sequence_length, vocab_size, output_layer, batch_size, keep_prob)` function to get the inference logits.\n\nNote: You'll need to use [tf.variable_scope](https://www.tensorflow.org/api_docs/python/tf/variable_scope) to share variables between training and inference.", "_____no_output_____" ] ], [ [ "def decoding_layer(dec_input, encoder_state,\n target_sequence_length, max_target_sequence_length,\n rnn_size,\n num_layers, target_vocab_to_int, target_vocab_size,\n batch_size, keep_prob, decoding_embedding_size):\n \"\"\"\n Create decoding layer\n :param dec_input: Decoder input\n :param encoder_state: Encoder state\n :param target_sequence_length: The lengths of each sequence in the target batch\n :param max_target_sequence_length: Maximum length of target sequences\n :param rnn_size: RNN Size\n :param num_layers: Number of layers\n :param target_vocab_to_int: Dictionary to go from the target words to an id\n :param target_vocab_size: Size of target vocabulary\n :param batch_size: The size of the batch\n :param keep_prob: Dropout keep probability\n :param decoding_embedding_size: Decoding embedding size\n :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)\n \"\"\"\n # Embed the target sequences\n dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size]))\n dec_embed_inputs = tf.nn.embedding_lookup(dec_embeddings, dec_input)\n\n # Construct the decoder LSTM cell (just like you constructed the encoder cell above)\n def make_cell(rnn_size):\n dec_cell = tf.contrib.rnn.LSTMCell(rnn_size, initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))\n return dec_cell\n dec_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)])\n\n # Create an output layer to map the outputs of the decoder to the elements of our vocabulary\n output_layer = Dense(target_vocab_size, kernel_initializer = tf.truncated_normal_initializer(mean = 0.0, stddev=0.1))\n\n\n\n with tf.variable_scope(\"decode\"):\n\n # Helper for the training process. Used by BasicDecoder to read inputs.\n training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=dec_embed_inputs,\n sequence_length=target_sequence_length,\n time_major=False)\n \n \n # Basic decoder\n training_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,\n training_helper,\n encoder_state,\n output_layer) \n \n # Perform dynamic decoding using the decoder\n training_decoder_output, _ = tf.contrib.seq2seq.dynamic_decode(training_decoder,\n impute_finished=True,\n maximum_iterations=max_target_sequence_length)\n # 5. 
Inference Decoder\n    # Reuses the same parameters trained by the training process\n    with tf.variable_scope(\"decode\", reuse=True):\n        start_tokens = tf.tile(tf.constant([target_vocab_to_int['<GO>']], dtype=tf.int32), [batch_size], name='start_tokens')\n\n        # Helper for the inference process.\n        inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(dec_embeddings,\n                                                                    start_tokens,\n                                                                    target_vocab_to_int['<EOS>'])\n\n        # Basic decoder\n        inference_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,\n                                                            inference_helper,\n                                                            encoder_state,\n                                                            output_layer)\n        \n        # Perform dynamic decoding using the decoder\n        inference_decoder_output, _ = tf.contrib.seq2seq.dynamic_decode(inference_decoder,\n                                                                        impute_finished=True,\n                                                                        maximum_iterations=max_target_sequence_length)\n    \n\n    \n    return training_decoder_output, inference_decoder_output\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_decoding_layer(decoding_layer)", "Tests Passed\n" ] ], [ [ "### Build the Neural Network\nApply the functions you implemented above to:\n\n- Encode the input using your `encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob, source_sequence_length, source_vocab_size, encoding_embedding_size)`.\n- Process target data using your `process_decoder_input(target_data, target_vocab_to_int, batch_size)` function.\n- Decode the encoded input using your `decoding_layer(dec_input, enc_state, target_sequence_length, max_target_sentence_length, rnn_size, num_layers, target_vocab_to_int, target_vocab_size, batch_size, keep_prob, dec_embedding_size)` function.", "_____no_output_____" ] ], [ [ "def seq2seq_model(input_data, target_data, keep_prob, batch_size,\n                  source_sequence_length, target_sequence_length,\n                  max_target_sentence_length,\n                  source_vocab_size, target_vocab_size,\n                  enc_embedding_size, dec_embedding_size,\n                  rnn_size, num_layers, target_vocab_to_int):\n    \"\"\"\n    Build the Sequence-to-Sequence part of the neural network\n    :param input_data: Input placeholder\n    :param target_data: Target placeholder\n    :param keep_prob: Dropout keep probability placeholder\n    :param batch_size: Batch Size\n    :param source_sequence_length: Sequence Lengths of source sequences in the batch\n    :param target_sequence_length: Sequence Lengths of target sequences in the batch\n    :param source_vocab_size: Source vocabulary size\n    :param target_vocab_size: Target vocabulary size\n    :param enc_embedding_size: Encoder embedding size\n    :param dec_embedding_size: Decoder embedding size\n    :param rnn_size: RNN Size\n    :param num_layers: Number of layers\n    :param target_vocab_to_int: Dictionary to go from the target words to an id\n    :return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)\n    \"\"\"\n    # Pass the input data through the encoder. 
We'll ignore the encoder output, but use the state\n _, enc_state = encoding_layer(input_data, \n rnn_size, \n num_layers,\n keep_prob,\n source_sequence_length,\n source_vocab_size, \n enc_embedding_size)\n \n \n # Prepare the target sequences we'll feed to the decoder in training mode\n dec_input = process_decoder_input(target_data, target_vocab_to_int, batch_size)\n \n # Pass encoder state and decoder inputs to the decoders\n training_decoder_output, inference_decoder_output = decoding_layer(dec_input,\n enc_state,\n target_sequence_length,\n max_target_sentence_length,\n rnn_size,\n num_layers,\n target_vocab_to_int, \n target_vocab_size,\n batch_size,\n keep_prob,\n dec_embedding_size) \n \n return training_decoder_output, inference_decoder_output\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_seq2seq_model(seq2seq_model)", "Tests Passed\n" ] ], [ [ "## Neural Network Training\n### Hyperparameters\nTune the following parameters:\n\n- Set `epochs` to the number of epochs.\n- Set `batch_size` to the batch size.\n- Set `rnn_size` to the size of the RNNs.\n- Set `num_layers` to the number of layers.\n- Set `encoding_embedding_size` to the size of the embedding for the encoder.\n- Set `decoding_embedding_size` to the size of the embedding for the decoder.\n- Set `learning_rate` to the learning rate.\n- Set `keep_probability` to the Dropout keep probability\n- Set `display_step` to state how many steps between each debug output statement", "_____no_output_____" ] ], [ [ "# Number of Epochs\nepochs = 30\n# Batch Size\nbatch_size = 128\n# RNN Size\nrnn_size = 50\n# Number of Layers\nnum_layers = 2\n# Embedding Size\nencoding_embedding_size = 13\ndecoding_embedding_size = 13\n# Learning Rate\nlearning_rate = 0.001\n# Dropout Keep Probability\nkeep_probability = 0.8\ndisplay_step = 10", "_____no_output_____" ] ], [ [ "### Build the Graph\nBuild the graph using the neural network you implemented.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nsave_path = 'checkpoints/dev'\n(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()\nmax_target_sentence_length = max([len(sentence) for sentence in source_int_text])\n\ntrain_graph = tf.Graph()\nwith train_graph.as_default():\n input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs()\n\n #sequence_length = tf.placeholder_with_default(max_target_sentence_length, None, name='sequence_length')\n input_shape = tf.shape(input_data)\n\n train_logits, inference_logits = seq2seq_model(tf.reverse(input_data, [-1]),\n targets,\n keep_prob,\n batch_size,\n source_sequence_length,\n target_sequence_length,\n max_target_sequence_length,\n len(source_vocab_to_int),\n len(target_vocab_to_int),\n encoding_embedding_size,\n decoding_embedding_size,\n rnn_size,\n num_layers,\n target_vocab_to_int)\n\n\n training_logits = tf.identity(train_logits.rnn_output, name='logits')\n inference_logits = tf.identity(inference_logits.sample_id, name='predictions')\n\n masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')\n\n with tf.name_scope(\"optimization\"):\n # Loss function\n cost = tf.contrib.seq2seq.sequence_loss(\n training_logits,\n targets,\n masks)\n\n # Optimizer\n optimizer = tf.train.AdamOptimizer(lr)\n\n # Gradient Clipping\n gradients = optimizer.compute_gradients(cost)\n capped_gradients = 
[(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]\n        train_op = optimizer.apply_gradients(capped_gradients)\n", "_____no_output_____" ] ], [ [ "Batch and pad the source and target sequences", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\ndef pad_sentence_batch(sentence_batch, pad_int):\n    \"\"\"Pad sentences with <PAD> so that each sentence of a batch has the same length\"\"\"\n    max_sentence = max([len(sentence) for sentence in sentence_batch])\n    return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch]\n\n\ndef get_batches(sources, targets, batch_size, source_pad_int, target_pad_int):\n    \"\"\"Batch targets, sources, and the lengths of their sentences together\"\"\"\n    for batch_i in range(0, len(sources)//batch_size):\n        start_i = batch_i * batch_size\n\n        # Slice the right amount for the batch\n        sources_batch = sources[start_i:start_i + batch_size]\n        targets_batch = targets[start_i:start_i + batch_size]\n\n        # Pad\n        pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))\n        pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))\n\n        # Need the lengths for the _lengths parameters\n        pad_targets_lengths = []\n        for target in pad_targets_batch:\n            pad_targets_lengths.append(len(target))\n\n        pad_source_lengths = []\n        for source in pad_sources_batch:\n            pad_source_lengths.append(len(source))\n\n        yield pad_sources_batch, pad_targets_batch, pad_source_lengths, pad_targets_lengths\n", "_____no_output_____" ] ], [ [ "### Train\nTrain the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forums to see if anyone is having the same problem.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\ndef get_accuracy(target, logits):\n    \"\"\"\n    Calculate accuracy\n    \"\"\"\n    max_seq = max(target.shape[1], logits.shape[1])\n    if max_seq - target.shape[1]:\n        target = np.pad(\n            target,\n            [(0,0),(0,max_seq - target.shape[1])],\n            'constant')\n    if max_seq - logits.shape[1]:\n        logits = np.pad(\n            logits,\n            [(0,0),(0,max_seq - logits.shape[1])],\n            'constant')\n\n    return np.mean(np.equal(target, logits))\n\n# Split data to training and validation sets\ntrain_source = source_int_text[batch_size:]\ntrain_target = target_int_text[batch_size:]\nvalid_source = source_int_text[:batch_size]\nvalid_target = target_int_text[:batch_size]\n(valid_sources_batch, valid_targets_batch, valid_sources_lengths, valid_targets_lengths ) = next(get_batches(valid_source,\n                                                                                                             valid_target,\n                                                                                                             batch_size,\n                                                                                                             source_vocab_to_int['<PAD>'],\n                                                                                                             target_vocab_to_int['<PAD>'])) \nwith tf.Session(graph=train_graph) as sess:\n    sess.run(tf.global_variables_initializer())\n\n    for epoch_i in range(epochs):\n        for batch_i, (source_batch, target_batch, sources_lengths, targets_lengths) in enumerate(\n                get_batches(train_source, train_target, batch_size,\n                            source_vocab_to_int['<PAD>'],\n                            target_vocab_to_int['<PAD>'])):\n\n            _, loss = sess.run(\n                [train_op, cost],\n                {input_data: source_batch,\n                 targets: target_batch,\n                 lr: learning_rate,\n                 target_sequence_length: targets_lengths,\n                 source_sequence_length: sources_lengths,\n                 keep_prob: keep_probability})\n\n\n            if batch_i % display_step == 0 and batch_i > 0:\n\n\n                batch_train_logits = sess.run(\n                    inference_logits,\n                    {input_data: source_batch,\n                     source_sequence_length: sources_lengths,\n                     target_sequence_length: targets_lengths,\n                     keep_prob: 1.0})\n\n\n                batch_valid_logits = sess.run(\n                    
inference_logits,\n {input_data: valid_sources_batch,\n source_sequence_length: valid_sources_lengths,\n target_sequence_length: valid_targets_lengths,\n keep_prob: 1.0})\n\n train_acc = get_accuracy(target_batch, batch_train_logits)\n\n valid_acc = get_accuracy(valid_targets_batch, batch_valid_logits)\n\n print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.4f}, Validation Accuracy: {:>6.4f}, Loss: {:>6.4f}'\n .format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))\n\n # Save Model\n saver = tf.train.Saver()\n saver.save(sess, save_path)\n print('Model Trained and Saved')", "Epoch 0 Batch 10/1077 - Train Accuracy: 0.1982, Validation Accuracy: 0.3050, Loss: 5.6915\nEpoch 0 Batch 20/1077 - Train Accuracy: 0.2316, Validation Accuracy: 0.3050, Loss: 4.8823\nEpoch 0 Batch 30/1077 - Train Accuracy: 0.2277, Validation Accuracy: 0.3050, Loss: 4.1802\nEpoch 0 Batch 40/1077 - Train Accuracy: 0.2434, Validation Accuracy: 0.3185, Loss: 3.8007\nEpoch 0 Batch 50/1077 - Train Accuracy: 0.2562, Validation Accuracy: 0.3349, Loss: 3.6376\nEpoch 0 Batch 60/1077 - Train Accuracy: 0.2958, Validation Accuracy: 0.3356, Loss: 3.3752\nEpoch 0 Batch 70/1077 - Train Accuracy: 0.2294, Validation Accuracy: 0.3363, Loss: 3.5404\nEpoch 0 Batch 80/1077 - Train Accuracy: 0.2711, Validation Accuracy: 0.3363, Loss: 3.3012\nEpoch 0 Batch 90/1077 - Train Accuracy: 0.2668, Validation Accuracy: 0.3363, Loss: 3.3037\nEpoch 0 Batch 100/1077 - Train Accuracy: 0.2617, Validation Accuracy: 0.3363, Loss: 3.2706\nEpoch 0 Batch 110/1077 - Train Accuracy: 0.2836, Validation Accuracy: 0.3363, Loss: 3.1537\nEpoch 0 Batch 120/1077 - Train Accuracy: 0.2715, Validation Accuracy: 0.3366, Loss: 3.1797\nEpoch 0 Batch 130/1077 - Train Accuracy: 0.3002, Validation Accuracy: 0.3366, Loss: 3.0328\nEpoch 0 Batch 140/1077 - Train Accuracy: 0.2706, Validation Accuracy: 0.3668, Loss: 3.2532\nEpoch 0 Batch 150/1077 - Train Accuracy: 0.3464, Validation Accuracy: 0.3629, Loss: 2.9439\nEpoch 0 Batch 160/1077 - Train Accuracy: 0.3117, Validation Accuracy: 0.3672, Loss: 3.0082\nEpoch 0 Batch 170/1077 - Train Accuracy: 0.2848, Validation Accuracy: 0.3668, Loss: 3.0962\nEpoch 0 Batch 180/1077 - Train Accuracy: 0.3164, Validation Accuracy: 0.3714, Loss: 2.9769\nEpoch 0 Batch 190/1077 - Train Accuracy: 0.3195, Validation Accuracy: 0.3807, Loss: 2.9718\nEpoch 0 Batch 200/1077 - Train Accuracy: 0.3367, Validation Accuracy: 0.3860, Loss: 2.9073\nEpoch 0 Batch 210/1077 - Train Accuracy: 0.3687, Validation Accuracy: 0.3999, Loss: 2.8066\nEpoch 0 Batch 220/1077 - Train Accuracy: 0.3339, Validation Accuracy: 0.4087, Loss: 2.9455\nEpoch 0 Batch 230/1077 - Train Accuracy: 0.3996, Validation Accuracy: 0.4187, Loss: 2.6837\nEpoch 0 Batch 240/1077 - Train Accuracy: 0.3816, Validation Accuracy: 0.4169, Loss: 2.7339\nEpoch 0 Batch 250/1077 - Train Accuracy: 0.4194, Validation Accuracy: 0.4311, Loss: 2.5818\nEpoch 0 Batch 260/1077 - Train Accuracy: 0.4022, Validation Accuracy: 0.4329, Loss: 2.6296\nEpoch 0 Batch 270/1077 - Train Accuracy: 0.3578, Validation Accuracy: 0.4407, Loss: 2.8374\nEpoch 0 Batch 280/1077 - Train Accuracy: 0.3934, Validation Accuracy: 0.4403, Loss: 2.6754\nEpoch 0 Batch 290/1077 - Train Accuracy: 0.3918, Validation Accuracy: 0.4407, Loss: 2.6673\nEpoch 0 Batch 300/1077 - Train Accuracy: 0.3647, Validation Accuracy: 0.4538, Loss: 2.7644\nEpoch 0 Batch 310/1077 - Train Accuracy: 0.3898, Validation Accuracy: 0.4585, Loss: 2.6497\nEpoch 0 Batch 320/1077 - Train Accuracy: 0.4102, Validation Accuracy: 0.4585, Loss: 
2.5968\nEpoch 0 Batch 330/1077 - Train Accuracy: 0.4250, Validation Accuracy: 0.4666, Loss: 2.5461\nEpoch 0 Batch 340/1077 - Train Accuracy: 0.3919, Validation Accuracy: 0.4702, Loss: 2.6551\nEpoch 0 Batch 350/1077 - Train Accuracy: 0.4090, Validation Accuracy: 0.4663, Loss: 2.6011\nEpoch 0 Batch 360/1077 - Train Accuracy: 0.4258, Validation Accuracy: 0.4712, Loss: 2.5022\nEpoch 0 Batch 370/1077 - Train Accuracy: 0.4349, Validation Accuracy: 0.4783, Loss: 2.4385\nEpoch 0 Batch 380/1077 - Train Accuracy: 0.4379, Validation Accuracy: 0.4787, Loss: 2.3980\nEpoch 0 Batch 390/1077 - Train Accuracy: 0.4016, Validation Accuracy: 0.4801, Loss: 2.5506\nEpoch 0 Batch 400/1077 - Train Accuracy: 0.4355, Validation Accuracy: 0.4844, Loss: 2.4391\nEpoch 0 Batch 410/1077 - Train Accuracy: 0.4132, Validation Accuracy: 0.4897, Loss: 2.4743\nEpoch 0 Batch 420/1077 - Train Accuracy: 0.4359, Validation Accuracy: 0.4918, Loss: 2.3791\nEpoch 0 Batch 430/1077 - Train Accuracy: 0.4352, Validation Accuracy: 0.4961, Loss: 2.3211\nEpoch 0 Batch 440/1077 - Train Accuracy: 0.4512, Validation Accuracy: 0.4972, Loss: 2.3080\nEpoch 0 Batch 450/1077 - Train Accuracy: 0.4484, Validation Accuracy: 0.5004, Loss: 2.2993\nEpoch 0 Batch 460/1077 - Train Accuracy: 0.4273, Validation Accuracy: 0.4961, Loss: 2.3286\nEpoch 0 Batch 470/1077 - Train Accuracy: 0.4437, Validation Accuracy: 0.5050, Loss: 2.3458\nEpoch 0 Batch 480/1077 - Train Accuracy: 0.4548, Validation Accuracy: 0.5096, Loss: 2.3155\nEpoch 0 Batch 490/1077 - Train Accuracy: 0.4332, Validation Accuracy: 0.5075, Loss: 2.2801\nEpoch 0 Batch 500/1077 - Train Accuracy: 0.4711, Validation Accuracy: 0.5007, Loss: 2.2058\nEpoch 0 Batch 510/1077 - Train Accuracy: 0.4746, Validation Accuracy: 0.5053, Loss: 2.1570\nEpoch 0 Batch 520/1077 - Train Accuracy: 0.4888, Validation Accuracy: 0.5092, Loss: 2.1051\nEpoch 0 Batch 530/1077 - Train Accuracy: 0.4539, Validation Accuracy: 0.5138, Loss: 2.2290\nEpoch 0 Batch 540/1077 - Train Accuracy: 0.4559, Validation Accuracy: 0.5078, Loss: 2.0705\nEpoch 0 Batch 550/1077 - Train Accuracy: 0.4395, Validation Accuracy: 0.5092, Loss: 2.2084\nEpoch 0 Batch 560/1077 - Train Accuracy: 0.4734, Validation Accuracy: 0.5046, Loss: 2.0718\nEpoch 0 Batch 570/1077 - Train Accuracy: 0.4457, Validation Accuracy: 0.5085, Loss: 2.1676\nEpoch 0 Batch 580/1077 - Train Accuracy: 0.5041, Validation Accuracy: 0.5099, Loss: 2.0072\nEpoch 0 Batch 590/1077 - Train Accuracy: 0.4437, Validation Accuracy: 0.5124, Loss: 2.1529\nEpoch 0 Batch 600/1077 - Train Accuracy: 0.5060, Validation Accuracy: 0.5146, Loss: 1.9558\nEpoch 0 Batch 610/1077 - Train Accuracy: 0.4416, Validation Accuracy: 0.5107, Loss: 2.0952\nEpoch 0 Batch 620/1077 - Train Accuracy: 0.4477, Validation Accuracy: 0.5043, Loss: 2.0067\nEpoch 0 Batch 630/1077 - Train Accuracy: 0.4785, Validation Accuracy: 0.5036, Loss: 1.9731\nEpoch 0 Batch 640/1077 - Train Accuracy: 0.4606, Validation Accuracy: 0.5117, Loss: 1.9234\nEpoch 0 Batch 650/1077 - Train Accuracy: 0.4570, Validation Accuracy: 0.5067, Loss: 1.9877\nEpoch 0 Batch 660/1077 - Train Accuracy: 0.4543, Validation Accuracy: 0.5117, Loss: 1.9672\nEpoch 0 Batch 670/1077 - Train Accuracy: 0.5273, Validation Accuracy: 0.5153, Loss: 1.7059\nEpoch 0 Batch 680/1077 - Train Accuracy: 0.4472, Validation Accuracy: 0.4993, Loss: 1.8541\nEpoch 0 Batch 690/1077 - Train Accuracy: 0.4531, Validation Accuracy: 0.4979, Loss: 1.8666\nEpoch 0 Batch 700/1077 - Train Accuracy: 0.4477, Validation Accuracy: 0.5007, Loss: 1.8418\nEpoch 0 Batch 710/1077 - Train Accuracy: 0.4285, 
Validation Accuracy: 0.5071, Loss: 1.8764\nEpoch 0 Batch 720/1077 - Train Accuracy: 0.4379, Validation Accuracy: 0.4993, Loss: 1.9502\nEpoch 0 Batch 730/1077 - Train Accuracy: 0.4461, Validation Accuracy: 0.5089, Loss: 1.8165\nEpoch 0 Batch 740/1077 - Train Accuracy: 0.4648, Validation Accuracy: 0.5028, Loss: 1.7836\nEpoch 0 Batch 750/1077 - Train Accuracy: 0.4344, Validation Accuracy: 0.4688, Loss: 1.7935\nEpoch 0 Batch 760/1077 - Train Accuracy: 0.4410, Validation Accuracy: 0.4801, Loss: 1.7999\nEpoch 0 Batch 770/1077 - Train Accuracy: 0.4699, Validation Accuracy: 0.4862, Loss: 1.7045\nEpoch 0 Batch 780/1077 - Train Accuracy: 0.4504, Validation Accuracy: 0.4933, Loss: 1.7772\nEpoch 0 Batch 790/1077 - Train Accuracy: 0.4207, Validation Accuracy: 0.4904, Loss: 1.8271\nEpoch 0 Batch 800/1077 - Train Accuracy: 0.4352, Validation Accuracy: 0.4908, Loss: 1.7772\nEpoch 0 Batch 810/1077 - Train Accuracy: 0.4829, Validation Accuracy: 0.5071, Loss: 1.6145\nEpoch 0 Batch 820/1077 - Train Accuracy: 0.4496, Validation Accuracy: 0.5114, Loss: 1.7259\nEpoch 0 Batch 830/1077 - Train Accuracy: 0.4633, Validation Accuracy: 0.5064, Loss: 1.6647\nEpoch 0 Batch 840/1077 - Train Accuracy: 0.4562, Validation Accuracy: 0.5032, Loss: 1.6704\nEpoch 0 Batch 850/1077 - Train Accuracy: 0.4769, Validation Accuracy: 0.5075, Loss: 1.6796\nEpoch 0 Batch 860/1077 - Train Accuracy: 0.4833, Validation Accuracy: 0.4790, Loss: 1.5955\nEpoch 0 Batch 870/1077 - Train Accuracy: 0.4243, Validation Accuracy: 0.4851, Loss: 1.7547\nEpoch 0 Batch 880/1077 - Train Accuracy: 0.4719, Validation Accuracy: 0.4879, Loss: 1.5698\n" ] ], [ [ "### Save Parameters\nSave the `batch_size` and `save_path` parameters for inference.", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\n# Save parameters for checkpoint\nhelper.save_params(save_path)", "_____no_output_____" ] ], [ [ "# Checkpoint", "_____no_output_____" ] ], [ [ "\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\nimport helper\nimport problem_unittests as tests\n\n_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()\nload_path = helper.load_params()", "_____no_output_____" ] ], [ [ "## Sentence to Sequence\nTo feed a sentence into the model for translation, you first need to preprocess it. 
Implement the function `sentence_to_seq()` to preprocess new sentences.\n\n- Convert the sentence to lowercase\n- Convert words into ids using `vocab_to_int`\n - Convert words not in the vocabulary to the `<UNK>` word id.", "_____no_output_____" ] ], [ [ "def sentence_to_seq(sentence, vocab_to_int):\n    \"\"\"\n    Convert a sentence to a sequence of ids\n    :param sentence: String\n    :param vocab_to_int: Dictionary to go from the words to an id\n    :return: List of word ids\n    \"\"\"\n    # Lowercase the sentence, split it into words, and look up each word's id;\n    # words missing from the vocabulary fall back to the <UNK> id.\n    return [vocab_to_int.get(word, vocab_to_int['<UNK>']) for word in sentence.lower().split()]\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE\n\"\"\"\ntests.test_sentence_to_seq(sentence_to_seq)", "_____no_output_____" ] ], [ [ "## Translate\nThis will translate `translate_sentence` from English to French.", "_____no_output_____" ] ], [ [ "translate_sentence = 'he saw a old yellow truck .'\n\n\n\"\"\"\nDON'T MODIFY ANYTHING IN THIS CELL\n\"\"\"\ntranslate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)\n\nloaded_graph = tf.Graph()\nwith tf.Session(graph=loaded_graph) as sess:\n    # Load saved model\n    loader = tf.train.import_meta_graph(load_path + '.meta')\n    loader.restore(sess, load_path)\n\n    input_data = loaded_graph.get_tensor_by_name('input:0')\n    logits = loaded_graph.get_tensor_by_name('predictions:0')\n    target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')\n    source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')\n    keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')\n\n    translate_logits = sess.run(logits, {input_data: [translate_sentence]*batch_size,\n                                         target_sequence_length: [len(translate_sentence)*2]*batch_size,\n                                         source_sequence_length: [len(translate_sentence)]*batch_size,\n                                         keep_prob: 1.0})[0]\n\nprint('Input')\nprint('  Word Ids:      {}'.format([i for i in translate_sentence]))\nprint('  English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))\n\nprint('\\nPrediction')\nprint('  Word Ids:      {}'.format([i for i in translate_logits]))\nprint('  French Words: {}'.format(\" \".join([target_int_to_vocab[i] for i in translate_logits])))\n", "_____no_output_____" ] ], [ [ "## Imperfect Translation\nYou might notice that some sentences translate better than others. Since the dataset you're using has a vocabulary of only 227 English words out of the thousands used in everyday English, you're only going to see good results when using these words. For this project, you don't need a perfect translation. However, if you want to create a better translation model, you'll need better data.\n\nYou can train on the [WMT10 French-English corpus](http://www.statmt.org/wmt10/training-giga-fren.tar). This dataset has a larger vocabulary and is richer in the topics discussed. However, it will take days to train, so make sure you have a GPU and that the neural network is performing well on the dataset we provided. Just make sure you play with the WMT10 corpus after you've submitted this project.\n## Submitting This Project\nWhen submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as \"dlnd_language_translation.ipynb\" and save it as a HTML file under \"File\" -> \"Download as\". Include the \"helper.py\" and \"problem_unittests.py\" files in your submission.", "_____no_output_____" ] ] ]
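A quick sanity check for `sentence_to_seq()`, assuming a hypothetical toy vocabulary (the real mapping comes from `helper.load_preprocess()` above): lowercasing is applied and out-of-vocabulary words fall back to the `<UNK>` id.

```python
# Hypothetical toy vocabulary, for illustration only.
toy_vocab_to_int = {'<PAD>': 0, '<EOS>': 1, '<UNK>': 2, '<GO>': 3,
                    'he': 4, 'saw': 5, 'a': 6, 'truck': 7}

# 'ZEPPELIN' is lowercased, then found to be out of vocabulary,
# so it maps to the <UNK> id (2).
print(sentence_to_seq('He saw a ZEPPELIN truck', toy_vocab_to_int))
# [4, 5, 6, 2, 7]
```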
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb997e4f384f19157b74c879caf357f0782e73db
146,858
ipynb
Jupyter Notebook
Classification/Support Vector Machine/LinearSVC_Scaled.ipynb
surya2365/ds-seed
74ef58479333fed95522f7b691f1209f7d70fc95
[ "Apache-2.0" ]
null
null
null
Classification/Support Vector Machine/LinearSVC_Scaled.ipynb
surya2365/ds-seed
74ef58479333fed95522f7b691f1209f7d70fc95
[ "Apache-2.0" ]
null
null
null
Classification/Support Vector Machine/LinearSVC_Scaled.ipynb
surya2365/ds-seed
74ef58479333fed95522f7b691f1209f7d70fc95
[ "Apache-2.0" ]
null
null
null
216.924668
40,651
0.711599
[ [ [ "# Linear Support Vector Classification with StandardScaler\r\n\r\n", "_____no_output_____" ], [ "This Code template is for the Classification task using a simple Linear Support Vector Classifier(LinearSVC) based on the Support Vector Machine algorithm and feature rescaling technique StandardScaler in a pipeline.", "_____no_output_____" ], [ "### Required Packages", "_____no_output_____" ] ], [ [ "!pip install imblearn", "_____no_output_____" ], [ "import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as se\r\nimport warnings\r\nfrom sklearn.model_selection import train_test_split\r\nfrom imblearn.over_sampling import RandomOverSampler\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn.preprocessing import LabelEncoder,StandardScaler\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom sklearn.metrics import classification_report,plot_confusion_matrix\r\nwarnings.filterwarnings('ignore')", "_____no_output_____" ] ], [ [ "### Initialization\n\nFilepath of CSV file", "_____no_output_____" ] ], [ [ "#filepath\r\nfile_path= \"\"", "_____no_output_____" ] ], [ [ "List of features which are required for model training .", "_____no_output_____" ] ], [ [ "#x_values\r\nfeatures=[]", "_____no_output_____" ] ], [ [ "Target variable for prediction.", "_____no_output_____" ] ], [ [ "#y_value\r\ntarget=\"\"", "_____no_output_____" ] ], [ [ "### Data Fetching\n\nPandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.\n\nWe will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry.", "_____no_output_____" ] ], [ [ "df=pd.read_csv(file_path)\r\ndf.head()", "_____no_output_____" ] ], [ [ "### Feature Selections\n\nIt is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.\n\nWe will assign all the required input features to X and target/outcome to Y.", "_____no_output_____" ] ], [ [ "X=df[features]\r\nY=df[target]", "_____no_output_____" ] ], [ [ "### Data Preprocessing\n\nSince the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. 
and encode string class data in the dataset to integer classes.\n", "_____no_output_____" ] ], [ [ "def NullClearner(df):\r\n  if(isinstance(df, pd.Series) and (df.dtype in [\"float64\",\"int64\"])):\r\n    df.fillna(df.mean(),inplace=True)\r\n    return df\r\n  elif(isinstance(df, pd.Series)):\r\n    df.fillna(df.mode()[0],inplace=True)\r\n    return df\r\n  else:return df\r\ndef EncodeX(df):\r\n  return pd.get_dummies(df)\r\ndef EncodeY(df):\r\n  if len(df.unique())<=2:\r\n    return df\r\n  else:\r\n    un_EncodedT=np.sort(pd.unique(df), axis=-1, kind='mergesort')\r\n    df=LabelEncoder().fit_transform(df)\r\n    EncodedT=[xi for xi in range(len(un_EncodedT))]\r\n    print(\"Encoded Target: {} to {}\".format(un_EncodedT,EncodedT))\r\n    return df", "_____no_output_____" ] ], [ [ "Calling the preprocessing functions on the feature and target set.\n", "_____no_output_____" ] ], [ [ "x=X.columns.to_list()\r\nfor i in x:\r\n  X[i]=NullClearner(X[i])\r\nX=EncodeX(X)\r\nY=EncodeY(NullClearner(Y))\r\nX.head()", "_____no_output_____" ] ], [ [ "#### Correlation Map\n\nIn order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.", "_____no_output_____" ] ], [ [ "f,ax = plt.subplots(figsize=(18, 18))\r\nmatrix = np.triu(X.corr())\r\nse.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)\r\nplt.show()", "_____no_output_____" ] ], [ [ "#### Distribution Of Target Variable", "_____no_output_____" ] ], [ [ "plt.figure(figsize = (10,6))\r\nse.countplot(Y)", "_____no_output_____" ] ], [ [ "### Data Splitting\n\nThe train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.", "_____no_output_____" ] ], [ [ "x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)", "_____no_output_____" ] ], [ [ "#### Handling Target Imbalance\n\nThe challenge of working with imbalanced datasets is that most machine learning techniques will ignore, and in turn have poor performance on, the minority class, although typically it is performance on the minority class that is most important.\n\nOne approach to addressing imbalanced datasets is to oversample the minority class. The simplest approach involves duplicating examples in the minority class. We will perform oversampling using the imblearn library. ", "_____no_output_____" ] ], [ [ "x_train,y_train = RandomOverSampler(random_state=123).fit_resample(x_train, y_train)", "_____no_output_____" ] ], [ [ "### Model\nSupport vector machines (SVMs) are a set of supervised learning methods used for classification, regression and outlier detection.\n\nA Support Vector Machine is a discriminative classifier formally defined by a separating hyperplane. In other words, for a given set of labelled data points, the SVM outputs an appropriate hyperplane that classifies new cases based on which side of the hyperplane they fall. In 2-dimensional space, this hyperplane is a line separating the plane into two segments, with each class or group on either side.\n\nLinearSVC is similar to SVC with kernel='linear'. 
It has more flexibility in the choice of tuning parameters and is better suited to large samples.\n\n* #### Model Tuning Parameters\n > * penalty -> Specifies the norm used in the penalization. The ‘l2’ penalty is the standard used in SVC. The ‘l1’ leads to coef_ vectors that are sparse.\n \n > * loss -> Specifies the loss function. ‘hinge’ is the standard SVM loss (used e.g. by the SVC class) while ‘squared_hinge’ is the square of the hinge loss. The combination of penalty='l1' and loss='hinge' is not supported. \n \n > * C -> Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive.\n \n > * tol -> Tolerance for the stopping criteria.\n \n > * dual -> Selects the algorithm to either solve the dual or primal optimization problem. Prefer dual=False when n_samples > n_features.", "_____no_output_____" ] ], [ [ "model=make_pipeline(StandardScaler(),LinearSVC(random_state=123))\r\nmodel.fit(x_train,y_train)", "_____no_output_____" ] ], [ [ "#### Model Accuracy\n\n\nThe score() method returns the mean accuracy on the given test data and labels.\n\nIn multi-label classification, this is the subset accuracy, which is a harsh metric since it requires that each label set be correctly predicted for each sample.", "_____no_output_____" ] ], [ [ "print(\"Accuracy score {:.2f} %\\n\".format(model.score(x_test,y_test)*100))", "Accuracy score 82.50 %\n\n" ] ], [ [ "#### Confusion Matrix\n\nA confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known.", "_____no_output_____" ] ], [ [ "plot_confusion_matrix(model,x_test,y_test,cmap=plt.cm.Blues)", "_____no_output_____" ] ], [ [ "\n#### Classification Report\n\nA classification report is used to measure the quality of predictions from a classification algorithm: how many predictions are correct and how many are not.\n\n* where:\n - Precision:- Accuracy of positive predictions.\n - Recall:- Fraction of positives that were correctly identified.\n - f1-score:- Harmonic mean of precision and recall.\n - support:- Support is the number of actual occurrences of the class in the specified dataset.", "_____no_output_____" ] ], [ [ "print(classification_report(y_test,model.predict(x_test)))", "              precision    recall  f1-score   support\n\n           0       0.91      0.80      0.85        50\n           1       0.72      0.87      0.79        30\n\n    accuracy                           0.82        80\n   macro avg       0.82      0.83      0.82        80\nweighted avg       0.84      0.82      0.83        80\n\n" ] ], [ [ "#### Creator: Thilakraj Devadiga, GitHub: [Profile](https://github.com/Thilakraj1998)", "_____no_output_____" ] ] ]
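As a self-contained sketch of the same scale-then-classify pattern, the snippet below runs an identical `StandardScaler` + `LinearSVC` pipeline on synthetic data; the dataset, sizes, and variable names are illustrative assumptions, not part of the template itself.

```python
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC

# Synthetic, already-numeric data, so no null handling or encoding is needed.
X_demo, y_demo = make_classification(n_samples=500, n_features=10, random_state=123)
X_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, test_size=0.2, random_state=123)

# Same pipeline shape as in the template: rescale, then fit the linear SVM.
demo_model = make_pipeline(StandardScaler(), LinearSVC(random_state=123))
demo_model.fit(X_tr, y_tr)
print("Demo accuracy: {:.2f} %".format(demo_model.score(X_te, y_te) * 100))
```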
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
cb99888fb2391dc8c075884c9287da05d4e73d8e
218,515
ipynb
Jupyter Notebook
06_simple_linear_regression/simple_linear_reg_algorithm.ipynb
yohanesnuwara/machine-learning
54d55d18cc6e94aaf0de95cf697f6dfb225c6f71
[ "MIT" ]
11
2020-05-01T04:05:42.000Z
2022-03-03T10:53:06.000Z
06_simple_linear_regression/simple_linear_reg_algorithm.ipynb
yohanesnuwara/machine-learning
54d55d18cc6e94aaf0de95cf697f6dfb225c6f71
[ "MIT" ]
null
null
null
06_simple_linear_regression/simple_linear_reg_algorithm.ipynb
yohanesnuwara/machine-learning
54d55d18cc6e94aaf0de95cf697f6dfb225c6f71
[ "MIT" ]
6
2020-05-01T04:05:44.000Z
2021-12-12T05:54:00.000Z
190.344077
83,846
0.87858
[ [ [ "<a href=\"https://colab.research.google.com/github/yohanesnuwara/machine-learning/blob/master/06_simple_linear_regression/simple_linear_reg_algorithm.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# **Simple Linear Regression**", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "## Method 1 (\"Traditional\")\n\nCalculate bias (or intercept $B_0$) and slope ($B_1$) using:\n\n$$B_1 = \\frac{\\sum_{i=1}^{n}(x_i-mean(x))(y_i-mean(y))}{\\sum_{i=1}^{n}(x_i-mean(x))^2}$$\n\n$$B_0 = mean(y) - B_1 \\cdot mean(x)$$\n\nto construct simple linear regression model: $$y = B_0 + B_1 \\cdot x$$", "_____no_output_____" ] ], [ [ "x = [1, 2, 4, 3, 5]\ny = [1, 3, 3, 2, 5]\n\n# visualize our data \nplt.plot(x, y, 'o')", "_____no_output_____" ] ], [ [ "Calculate mean of data", "_____no_output_____" ] ], [ [ "mean_x = np.mean(x)\nmean_y = np.mean(y)\nprint(mean_x, mean_y)", "3.0 2.8\n" ] ], [ [ "Calculate error", "_____no_output_____" ] ], [ [ "err_x = x - mean_x\nerr_y = y - mean_y\nprint(err_x)\nprint(err_y)", "[-2. -1. 1. 0. 2.]\n[-1.8 0.2 0.2 -0.8 2.2]\n" ] ], [ [ "Multiply error of x and error of y", "_____no_output_____" ] ], [ [ "err_mult = err_x * err_y\nprint(err_mult)", "[ 3.6 -0.2 0.2 -0. 4.4]\n" ] ], [ [ "Calculate numerator by summing up the errors", "_____no_output_____" ] ], [ [ "numerator = np.sum(err_mult)\nnumerator", "_____no_output_____" ] ], [ [ "Calculate denominator by squaring the x error and summing them up", "_____no_output_____" ] ], [ [ "err_x_squared = err_x**2\ndenominator = np.sum(err_x_squared)\nprint(denominator)", "10.0\n" ] ], [ [ "Calculate the **slope (B1)** !", "_____no_output_____" ] ], [ [ "B1 = numerator / denominator\nprint(B1)", "0.8\n" ] ], [ [ "And we can calculate the **intercept (c)** !", "_____no_output_____" ] ], [ [ "B0 = mean_y - B1 * mean_x\nprint(B0)", "0.39999999999999947\n" ] ], [ [ "We now have the coefficents for our simple linear regression equation.\n$$y = B_0 + B_1 x = 0.4 + 0.8 x$$\n", "_____no_output_____" ], [ "### Test the model to our training data", "_____no_output_____" ] ], [ [ "x_test = np.array([1, 2, 3, 4, 5])\ny_predicted = B0 + B1 * x_test\n\np1 = plt.plot(x, y, 'o')\np2 = plt.plot(x_test, y_predicted, 'o-', color='r')\nplt.legend((p1[0], p2[0]), (['y data', 'predicted y']))", "_____no_output_____" ] ], [ [ "### Estimating Error (Root Mean Squared Error)\n\n$$RMSE = \\sqrt{\\frac{\\sum_{i=1}^{n} (p_i - y_i)^2}{n}}$$", "_____no_output_____" ] ], [ [ "numerator = np.sum((y_predicted - y)**2)\ndenominator = len(y)\nrmse = np.sqrt(numerator / denominator)\nrmse", "_____no_output_____" ] ], [ [ "### Wrap all up", "_____no_output_____" ] ], [ [ "def simple_linear_regression_traditional(x, y, x_test):\n import numpy as np\n x = np.array(x); y = np.array(y); x_test = np.array(x_test)\n mean_x = np.mean(x)\n mean_y = np.mean(y)\n err_x = x - mean_x\n err_y = y - mean_y\n err_mult = err_x * err_y\n numerator = np.sum(err_mult)\n err_x_squared = err_x**2\n denominator = np.sum(err_x_squared)\n B1 = numerator / denominator\n B0 = mean_y - B1 * mean_x\n y_predicted = B0 + B1 * x_test\n return(B0, B1, y_predicted)\n\ndef linreg_error(y, y_predicted):\n import numpy as np\n y = np.array(y); y_predicted = np.array(y_predicted)\n numerator = np.sum((y_predicted - y)**2)\n denominator = len(y)\n rmse = np.sqrt(numerator / denominator)\n return(rmse)", "_____no_output_____" ] ], 
[ [ "## Method 2 (\"Advanced\")\n\nCalculate bias (or intercept $B_0$) and slope ($B_1$) using:\n\n$$B_1 = corr(x, y) \\cdot \\frac{stdev(y)}{stdev(x)}$$\n\nThen, similar to **Method 1**. \n$$B_0 = mean(y) - B_1 \\cdot mean(x)$$\n\nto construct simple linear regression model: $$y = B_0 + B_1 \\cdot x$$", "_____no_output_____" ], [ "Calculate the **pearson's correlation coefficient $corr(x,y)$**. First, calculate mean and standard deviation.", "_____no_output_____" ] ], [ [ "import statistics as stat\nmean_x = np.mean(x)\nmean_y = np.mean(y)\nstdev_x = stat.stdev(x)\nstdev_y = stat.stdev(y)\nprint(stdev_x, stdev_y)", "1.5811388300841898 1.4832396974191326\n" ] ], [ [ "Calculate **covariance**. Covariance is the relationship that can be summarized between two variables. The sign of the covariance can be interpreted as whether the two variables change in the same direction (positive) or change in different directions (negative). A covariance value of zero indicates that both variables are completely independent.", "_____no_output_____" ] ], [ [ "cov_x_y = (np.sum((x - mean_x) * (y - mean_y))) * (1 / (len(x) - 1))\ncov_x_y", "_____no_output_____" ] ], [ [ "Calculate **Pearson's Correlation Coefficient**. It summarizes the strength of the linear relationship between two data samples. It is the normalization of the covariance between the two variables. The coefficient returns a value between -1 and 1 that represents the limits of correlation from a full negative correlation to a full positive correlation. A value of 0 means no correlation. The value must be interpreted, where often a value below -0.5 or above 0.5 indicates a notable correlation, and values below those values suggests a less notable correlation.", "_____no_output_____" ] ], [ [ "corr_x_y = cov_x_y / (stdev_x * stdev_y)\ncorr_x_y", "_____no_output_____" ] ], [ [ "Calculate slope $B_1$", "_____no_output_____" ] ], [ [ "B1 = corr_x_y * (stdev_y / stdev_x)\nB1", "_____no_output_____" ] ], [ [ "Next, is similar to **Method 1**. 
", "_____no_output_____" ] ], [ [ "B0 = mean_y - B1 * mean_x\n\nx_test = np.array([1, 2, 3, 4, 5])\ny_predicted = B0 + B1 * x_test\n\np1 = plt.plot(x, y, 'o')\np2 = plt.plot(x_test, y_predicted, 'o-', color='r')\nplt.legend((p1[0], p2[0]), (['y data', 'predicted y']))", "_____no_output_____" ] ], [ [ "Calculate RMSE", "_____no_output_____" ] ], [ [ "rmse = linreg_error(y, y_predicted)\nrmse", "_____no_output_____" ] ], [ [ "### Wrap all up", "_____no_output_____" ] ], [ [ "def simple_linear_regression_advanced(x, y, x_test):\n import numpy as np\n import statistics as stat\n x = np.array(x); y = np.array(y); x_test = np.array(x_test)\n mean_x = np.mean(x)\n mean_y = np.mean(y)\n stdev_x = stat.stdev(x)\n stdev_y = stat.stdev(y)\n cov_x_y = (np.sum((x - mean_x) * (y - mean_y))) * (1 / (len(x) - 1))\n corr_x_y = cov_x_y / (stdev_x * stdev_y)\n B1 = corr_x_y * (stdev_y / stdev_x)\n B0 = mean_y - B1 * mean_x\n y_predicted = B0 + B1 * x_test\n return(B0, B1, y_predicted)", "_____no_output_____" ] ], [ [ "## Implement to Real Dataset\n\nSimple linear regression to WTI and Brent Daily Oil Price (1980-2020)", "_____no_output_____" ] ], [ [ "!git clone https://www.github.com/yohanesnuwara/machine-learning", "Cloning into 'machine-learning'...\nwarning: redirecting to https://github.com/yohanesnuwara/machine-learning.git/\nremote: Enumerating objects: 156, done.\u001b[K\nremote: Counting objects: 100% (156/156), done.\u001b[K\nremote: Compressing objects: 100% (151/151), done.\u001b[K\nremote: Total 156 (delta 65), reused 0 (delta 0), pack-reused 0\u001b[K\nReceiving objects: 100% (156/156), 330.11 KiB | 1.73 MiB/s, done.\nResolving deltas: 100% (65/65), done.\n" ], [ "import pandas as pd\n\nbrent = pd.read_csv('/content/machine-learning/datasets/brent-daily_csv.csv')\nwti = pd.read_csv('/content/machine-learning/datasets/wti-daily_csv.csv')\n\n# Converting to Panda datetime\nbrent['Date'] = pd.to_datetime(brent['Date'], format='%Y-%m-%d') # depends on the data, format check web: https://strftime.org/\nwti['Date'] = pd.to_datetime(wti['Date'], format='%Y-%m-%d') # depends on the data, format check web: https://strftime.org/\n\nbrent.head(10)", "_____no_output_____" ] ], [ [ "Visualize data", "_____no_output_____" ] ], [ [ "from pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\n\nplt.figure(figsize=(15, 6))\nplt.plot(brent.Date, brent.Price, '.', color='blue')\nplt.plot(wti.Date, wti.Price, '.', color='red')\nplt.title('Daily Oil Price')\nplt.xlabel('Year'); plt.ylabel('Price ($/bbl)')", "_____no_output_____" ], [ "# convert datetime to ordinal\nimport datetime as dt\nbrent_date = np.array(brent['Date'].map(dt.datetime.toordinal))\nbrent_price = brent.Price\nbrent_test = brent_date\n\nB0_brent, B1_brent, brent_price_predicted = simple_linear_regression_advanced(brent_date, brent_price, brent_test)\n\nwti_date = np.array(wti['Date'].map(dt.datetime.toordinal))\nwti_price = wti.Price\nwti_test = wti_date\n\nB0_wti, B1_wti, wti_price_predicted = simple_linear_regression_advanced(wti_date, wti_price, wti_test)\n\nplt.figure(figsize=(15, 6))\np1 = plt.plot(brent.Date, brent.Price, '.', color='blue')\np2 = plt.plot(wti.Date, wti.Price, '.', color='red')\np3 = plt.plot(brent_test, brent_price_predicted, color='blue')\np4 = plt.plot(wti_test, wti_price_predicted, color='red')\nplt.legend((p1[0], p2[0], p3[0], p4[0]), (['Brent data', 'WTI data', 'Brent predicted', 'WTI predicted']))\nplt.title('Daily Oil Price')\nplt.xlabel('Year'); plt.ylabel('Price ($/bbl)')", 
"_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]